From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.5 commit in: /
Date: Mon, 24 Feb 2020 11:10:59 +0000 (UTC)
Message-ID: <1582542644.5bd1273749aedbf70bead7dc7523d3759c12600e.mpagano@gentoo>

commit:     5bd1273749aedbf70bead7dc7523d3759c12600e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 24 11:10:44 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 24 11:10:44 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5bd12737

Linux patch 5.5.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1005_linux-5.5.6.patch | 16853 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 16857 insertions(+)

diff --git a/0000_README b/0000_README
index 7eb2076..ff99e11 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-5.5.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.5.5
 
+Patch:  1005_linux-5.5.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.5.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-5.5.6.patch b/1005_linux-5.5.6.patch
new file mode 100644
index 0000000..b234b2b
--- /dev/null
+++ b/1005_linux-5.5.6.patch
@@ -0,0 +1,16853 @@
+diff --git a/Documentation/admin-guide/device-mapper/dm-raid.rst b/Documentation/admin-guide/device-mapper/dm-raid.rst
+index f6344675e395..695a2ea1d1ae 100644
+--- a/Documentation/admin-guide/device-mapper/dm-raid.rst
++++ b/Documentation/admin-guide/device-mapper/dm-raid.rst
+@@ -419,3 +419,5 @@ Version History
+ 	rebuild errors.
+  1.15.0 Fix size extensions not being synchronized in case of new MD bitmap
+         pages allocated;  also fix those not occuring after previous reductions
++ 1.15.1 Fix argument count and arguments for rebuild/write_mostly/journal_(dev|mode)
++        on the status line.
+diff --git a/Documentation/fb/fbcon.rst b/Documentation/fb/fbcon.rst
+index ebca41785abe..65ba40255137 100644
+--- a/Documentation/fb/fbcon.rst
++++ b/Documentation/fb/fbcon.rst
+@@ -127,7 +127,7 @@ C. Boot options
+ 	is typically located on the same video card.  Thus, the consoles that
+ 	are controlled by the VGA console will be garbled.
+ 
+-4. fbcon=rotate:<n>
++5. fbcon=rotate:<n>
+ 
+ 	This option changes the orientation angle of the console display. The
+ 	value 'n' accepts the following:
+@@ -152,21 +152,21 @@ C. Boot options
+ 	Actually, the underlying fb driver is totally ignorant of console
+ 	rotation.
+ 
+-5. fbcon=margin:<color>
++6. fbcon=margin:<color>
+ 
+ 	This option specifies the color of the margins. The margins are the
+ 	leftover area at the right and the bottom of the screen that are not
+ 	used by text. By default, this area will be black. The 'color' value
+ 	is an integer number that depends on the framebuffer driver being used.
+ 
+-6. fbcon=nodefer
++7. fbcon=nodefer
+ 
+ 	If the kernel is compiled with deferred fbcon takeover support, normally
+ 	the framebuffer contents, left in place by the firmware/bootloader, will
+ 	be preserved until there actually is some text is output to the console.
+ 	This option causes fbcon to bind immediately to the fbdev device.
+ 
+-7. fbcon=logo-pos:<location>
++8. fbcon=logo-pos:<location>
+ 
+ 	The only possible 'location' is 'center' (without quotes), and when
+ 	given, the bootup logo is moved from the default top-left corner
+diff --git a/Makefile b/Makefile
+index 1f7dc3a2e1dd..7fb236f30926 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 5
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+@@ -1691,7 +1691,7 @@ PHONY += descend $(build-dirs)
+ descend: $(build-dirs)
+ $(build-dirs): prepare
+ 	$(Q)$(MAKE) $(build)=$@ \
+-	single-build=$(if $(filter-out $@/, $(single-no-ko)),1) \
++	single-build=$(if $(filter-out $@/, $(filter $@/%, $(single-no-ko))),1) \
+ 	need-builtin=1 need-modorder=1
+ 
+ clean-dirs := $(addprefix _clean_, $(clean-dirs))
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 208aad121630..5e907a954532 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -399,6 +399,9 @@ config HAVE_RCU_TABLE_FREE
+ config HAVE_MMU_GATHER_PAGE_SIZE
+ 	bool
+ 
++config MMU_GATHER_NO_RANGE
++	bool
++
+ config HAVE_MMU_GATHER_NO_GATHER
+ 	bool
+ 
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 96dab76da3b3..7ef1916fcbf4 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -74,7 +74,7 @@ config ARM
+ 	select HAVE_CONTEXT_TRACKING
+ 	select HAVE_COPY_THREAD_TLS
+ 	select HAVE_C_RECORDMCOUNT
+-	select HAVE_DEBUG_KMEMLEAK
++	select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
+ 	select HAVE_DMA_CONTIGUOUS if MMU
+ 	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+ 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+@@ -1905,7 +1905,7 @@ config XIP_DEFLATED_DATA
+ config KEXEC
+ 	bool "Kexec system call (EXPERIMENTAL)"
+ 	depends on (!SMP || PM_SLEEP_SMP)
+-	depends on !CPU_V7M
++	depends on MMU
+ 	select KEXEC_CORE
+ 	help
+ 	  kexec is a system call that implements the ability to shutdown your
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index ead21e5f2b80..469a2b3b60c0 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -140,6 +140,17 @@
+ #endif
+ 		.endm
+ 
++		.macro	enable_cp15_barriers, reg
++		mrc	p15, 0, \reg, c1, c0, 0	@ read SCTLR
++		tst	\reg, #(1 << 5)		@ CP15BEN bit set?
++		bne	.L_\@
++		orr	\reg, \reg, #(1 << 5)	@ CP15 barrier instructions
++		mcr	p15, 0, \reg, c1, c0, 0	@ write SCTLR
++ ARM(		.inst   0xf57ff06f		@ v7+ isb	)
++ THUMB(		isb						)
++.L_\@:
++		.endm
++
+ 		.section ".start", "ax"
+ /*
+  * sort out different calling conventions
+@@ -820,6 +831,7 @@ __armv4_mmu_cache_on:
+ 		mov	pc, r12
+ 
+ __armv7_mmu_cache_on:
++		enable_cp15_barriers	r11
+ 		mov	r12, lr
+ #ifdef CONFIG_MMU
+ 		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
+@@ -1209,6 +1221,7 @@ __armv6_mmu_cache_flush:
+ 		mov	pc, lr
+ 
+ __armv7_mmu_cache_flush:
++		enable_cp15_barriers	r10
+ 		tst	r4, #1
+ 		bne	iflush
+ 		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
+diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
+index a2a4f33a3e3e..cbafadbe86f4 100644
+--- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
+@@ -629,7 +629,7 @@
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+ 	bus-width = <4>;
+ 	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+-	wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
++	disable-wp;
+ 	vmmc-supply = <&reg_3p3v_sd>;
+ 	vqmmc-supply = <&reg_3p3v>;
+ 	no-1-8-v;
+@@ -642,7 +642,7 @@
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+ 	bus-width = <4>;
+ 	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+-	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
++	disable-wp;
+ 	vmmc-supply = <&reg_3p3v_sd>;
+ 	vqmmc-supply = <&reg_3p3v>;
+ 	no-1-8-v;
+@@ -776,6 +776,7 @@
+ &usbh1 {
+ 	vbus-supply = <&reg_5p0v_main>;
+ 	disable-over-current;
++	maximum-speed = "full-speed";
+ 	status = "okay";
+ };
+ 
+@@ -1056,7 +1057,6 @@
+ 			MX6QDL_PAD_SD2_DAT1__SD2_DATA1		0x17059
+ 			MX6QDL_PAD_SD2_DAT2__SD2_DATA2		0x17059
+ 			MX6QDL_PAD_SD2_DAT3__SD2_DATA3		0x17059
+-			MX6QDL_PAD_NANDF_D3__GPIO2_IO03		0x40010040
+ 			MX6QDL_PAD_NANDF_D2__GPIO2_IO02		0x40010040
+ 		>;
+ 	};
+@@ -1069,7 +1069,6 @@
+ 			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x17059
+ 			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x17059
+ 			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x17059
+-			MX6QDL_PAD_NANDF_D1__GPIO2_IO01		0x40010040
+ 			MX6QDL_PAD_NANDF_D0__GPIO2_IO00		0x40010040
+ 
+ 		>;
+diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
+index ebf5b7cfe215..63341635bddf 100644
+--- a/arch/arm/boot/dts/r8a7779.dtsi
++++ b/arch/arm/boot/dts/r8a7779.dtsi
+@@ -68,6 +68,14 @@
+ 		      <0xf0000100 0x100>;
+ 	};
+ 
++	timer@f0000200 {
++		compatible = "arm,cortex-a9-global-timer";
++		reg = <0xf0000200 0x100>;
++		interrupts = <GIC_PPI 11
++			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
++		clocks = <&cpg_clocks R8A7779_CLK_ZS>;
++	};
++
+ 	timer@f0000600 {
+ 		compatible = "arm,cortex-a9-twd-timer";
+ 		reg = <0xf0000600 0x20>;
+diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
+index c8b62bbd6a4a..ad1afd403052 100644
+--- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts
++++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
+@@ -466,9 +466,12 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>;
+ 	vmmcq-supply = <&vccio_wl>;
++	#address-cells = <1>;
++	#size-cells = <0>;
+ 	status = "okay";
+ 
+ 	brcmf: wifi@1 {
++		reg = <1>;
+ 		compatible = "brcm,bcm4329-fmac";
+ 		interrupt-parent = <&gpio3>;
+ 		interrupts = <RK_PD2 GPIO_ACTIVE_HIGH>;
+diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
+index f3ce477b7bae..9397db0c43de 100644
+--- a/arch/arm/boot/dts/stm32f469-disco.dts
++++ b/arch/arm/boot/dts/stm32f469-disco.dts
+@@ -76,6 +76,13 @@
+ 		regulator-max-microvolt = <3300000>;
+ 	};
+ 
++	vdd_dsi: vdd-dsi {
++		compatible = "regulator-fixed";
++		regulator-name = "vdd_dsi";
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++	};
++
+ 	soc {
+ 		dma-ranges = <0xc0000000 0x0 0x10000000>;
+ 	};
+@@ -155,6 +162,7 @@
+ 		compatible = "orisetech,otm8009a";
+ 		reg = <0>; /* dsi virtual channel (0..3) */
+ 		reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
++		power-supply = <&vdd_dsi>;
+ 		status = "okay";
+ 
+ 		port {
+diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
+index fe773c72a69b..b4f1673df9ee 100644
+--- a/arch/arm/boot/dts/sun8i-h3.dtsi
++++ b/arch/arm/boot/dts/sun8i-h3.dtsi
+@@ -80,7 +80,7 @@
+ 			#cooling-cells = <2>;
+ 		};
+ 
+-		cpu@1 {
++		cpu1: cpu@1 {
+ 			compatible = "arm,cortex-a7";
+ 			device_type = "cpu";
+ 			reg = <1>;
+@@ -90,7 +90,7 @@
+ 			#cooling-cells = <2>;
+ 		};
+ 
+-		cpu@2 {
++		cpu2: cpu@2 {
+ 			compatible = "arm,cortex-a7";
+ 			device_type = "cpu";
+ 			reg = <2>;
+@@ -100,7 +100,7 @@
+ 			#cooling-cells = <2>;
+ 		};
+ 
+-		cpu@3 {
++		cpu3: cpu@3 {
+ 			compatible = "arm,cortex-a7";
+ 			device_type = "cpu";
+ 			reg = <3>;
+@@ -111,6 +111,15 @@
+ 		};
+ 	};
+ 
++	pmu {
++		compatible = "arm,cortex-a7-pmu";
++		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
++	};
++
+ 	timer {
+ 		compatible = "arm,armv7-timer";
+ 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
+index fde84f123fbb..ead8348ec999 100644
+--- a/arch/arm/configs/exynos_defconfig
++++ b/arch/arm/configs/exynos_defconfig
+@@ -38,6 +38,7 @@ CONFIG_CRYPTO_SHA256_ARM=m
+ CONFIG_CRYPTO_SHA512_ARM=m
+ CONFIG_CRYPTO_AES_ARM_BS=m
+ CONFIG_CRYPTO_CHACHA20_NEON=m
++CONFIG_KALLSYMS_ALL=y
+ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ CONFIG_PARTITION_ADVANCED=y
+@@ -92,6 +93,7 @@ CONFIG_BLK_DEV_LOOP=y
+ CONFIG_BLK_DEV_CRYPTOLOOP=y
+ CONFIG_BLK_DEV_RAM=y
+ CONFIG_BLK_DEV_RAM_SIZE=8192
++CONFIG_SCSI=y
+ CONFIG_BLK_DEV_SD=y
+ CONFIG_CHR_DEV_SG=y
+ CONFIG_ATA=y
+@@ -291,6 +293,7 @@ CONFIG_CROS_EC_SPI=y
+ CONFIG_COMMON_CLK_MAX77686=y
+ CONFIG_COMMON_CLK_S2MPS11=y
+ CONFIG_EXYNOS_IOMMU=y
++CONFIG_PM_DEVFREQ=y
+ CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+ CONFIG_DEVFREQ_GOV_POWERSAVE=y
+ CONFIG_DEVFREQ_GOV_USERSPACE=y
+@@ -356,4 +359,7 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
+ # CONFIG_DETECT_HUNG_TASK is not set
+ CONFIG_PROVE_LOCKING=y
+ CONFIG_DEBUG_ATOMIC_SLEEP=y
++CONFIG_DEBUG_RT_MUTEXES=y
++CONFIG_DEBUG_SPINLOCK=y
++CONFIG_DEBUG_MUTEXES=y
+ CONFIG_DEBUG_USER=y
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+index e92c4de5bf3b..7c775a918a4e 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+@@ -54,21 +54,21 @@
+ 			enable-method = "psci";
+ 		};
+ 
+-		cpu@1 {
++		cpu1: cpu@1 {
+ 			compatible = "arm,cortex-a53";
+ 			device_type = "cpu";
+ 			reg = <1>;
+ 			enable-method = "psci";
+ 		};
+ 
+-		cpu@2 {
++		cpu2: cpu@2 {
+ 			compatible = "arm,cortex-a53";
+ 			device_type = "cpu";
+ 			reg = <2>;
+ 			enable-method = "psci";
+ 		};
+ 
+-		cpu@3 {
++		cpu3: cpu@3 {
+ 			compatible = "arm,cortex-a53";
+ 			device_type = "cpu";
+ 			reg = <3>;
+@@ -76,6 +76,16 @@
+ 		};
+ 	};
+ 
++	pmu {
++		compatible = "arm,cortex-a53-pmu",
++			     "arm,armv8-pmuv3";
++		interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
++	};
++
+ 	psci {
+ 		compatible = "arm,psci-0.2";
+ 		method = "smc";
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+index 29824081b43b..24ffe2dcbddb 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+@@ -70,6 +70,16 @@
+ 		clock-output-names = "ext_osc32k";
+ 	};
+ 
++	pmu {
++		compatible = "arm,cortex-a53-pmu",
++			     "arm,armv8-pmuv3";
++		interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
++	};
++
+ 	psci {
+ 		compatible = "arm,psci-0.2";
+ 		method = "smc";
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 4ca2e7b44559..1eed3c41521a 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -1602,6 +1602,8 @@
+ 				interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
+ 				phys = <&hsusb_phy2>;
+ 				phy-names = "usb2-phy";
++				snps,dis_u2_susphy_quirk;
++				snps,dis_enblslpm_quirk;
+ 			};
+ 		};
+ 
+@@ -1632,6 +1634,8 @@
+ 				interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
+ 				phys = <&hsusb_phy1>, <&ssusb_phy_0>;
+ 				phy-names = "usb2-phy", "usb3-phy";
++				snps,dis_u2_susphy_quirk;
++				snps,dis_enblslpm_quirk;
+ 			};
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index d100f46791a6..912ba745c0fc 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -529,6 +529,8 @@
+ 	vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ 	vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ 	vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
++
++	qcom,snoc-host-cap-8bit-quirk;
+ };
+ 
+ /* PINCTRL - additions to nodes defined in sdm845.dtsi */
+diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
+index 8812b70f3911..5acd5ce714d4 100644
+--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
+@@ -826,7 +826,7 @@
+ 		interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+ 			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
++		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ 		fifo-depth = <0x100>;
+ 		max-frequency = <150000000>;
+ 		pinctrl-names = "default";
+@@ -841,7 +841,7 @@
+ 		interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
+ 			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
++		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ 		fifo-depth = <0x100>;
+ 		max-frequency = <150000000>;
+ 		pinctrl-names = "default";
+@@ -856,7 +856,7 @@
+ 		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
+ 			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
++		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ 		fifo-depth = <0x100>;
+ 		max-frequency = <150000000>;
+ 		pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+index 8bdc66c62975..fa0d55f1a587 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+@@ -591,7 +591,7 @@
+ 		bus-width = <4>;
+ 		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+ 			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
++		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ 		fifo-depth = <0x100>;
+ 		max-frequency = <150000000>;
+ 		pinctrl-names = "default";
+@@ -606,7 +606,7 @@
+ 		bus-width = <8>;
+ 		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
+ 			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
++		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ 		fifo-depth = <0x100>;
+ 		max-frequency = <150000000>;
+ 		status = "disabled";
+@@ -619,7 +619,7 @@
+ 		bus-width = <4>;
+ 		clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
+ 			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
++		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ 		fifo-depth = <0x100>;
+ 		max-frequency = <150000000>;
+ 		pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+index c706db0ee9ec..76f5db696009 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+@@ -669,9 +669,12 @@
+ 	vqmmc-supply = &vcc1v8_s3;	/* IO line */
+ 	vmmc-supply = &vcc_sdio;	/* card's power */
+ 
++	#address-cells = <1>;
++	#size-cells = <0>;
+ 	status = "okay";
+ 
+ 	brcmf: wifi@1 {
++		reg = <1>;
+ 		compatible = "brcm,bcm4329-fmac";
+ 		interrupt-parent = <&gpio0>;
+ 		interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
+index 4944d78a0a1c..e87a04477440 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
+@@ -654,9 +654,12 @@
+ 	sd-uhs-sdr104;
+ 	vqmmc-supply = <&vcc1v8_s3>;
+ 	vmmc-supply = <&vccio_sd>;
++	#address-cells = <1>;
++	#size-cells = <0>;
+ 	status = "okay";
+ 
+ 	brcmf: wifi@1 {
++		reg = <1>;
+ 		compatible = "brcm,bcm4329-fmac";
+ 		interrupt-parent = <&gpio0>;
+ 		interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
+index 2a127985ab17..d3ed8e5e770f 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
+@@ -94,33 +94,6 @@
+ 	};
+ };
+ 
+-&gpu_thermal {
+-	trips {
+-		gpu_warm: gpu_warm {
+-			temperature = <55000>;
+-			hysteresis = <2000>;
+-			type = "active";
+-		};
+-
+-		gpu_hot: gpu_hot {
+-			temperature = <65000>;
+-			hysteresis = <2000>;
+-			type = "active";
+-		};
+-	};
+-	cooling-maps {
+-		map1 {
+-			trip = <&gpu_warm>;
+-			cooling-device = <&fan THERMAL_NO_LIMIT 1>;
+-		};
+-
+-		map2 {
+-			trip = <&gpu_hot>;
+-			cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
+-		};
+-	};
+-};
+-
+ &pinctrl {
+ 	ir {
+ 		ir_rx: ir-rx {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
+index 0541dfce924d..9c659f3115c8 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
+@@ -648,9 +648,12 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
+ 	sd-uhs-sdr104;
++	#address-cells = <1>;
++	#size-cells = <0>;
+ 	status = "okay";
+ 
+ 	brcmf: wifi@1 {
++		reg = <1>;
+ 		compatible = "brcm,bcm4329-fmac";
+ 		interrupt-parent = <&gpio0>;
+ 		interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+index 1e4c2b78d66d..68d478af7a3e 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+@@ -43,6 +43,7 @@
+ 	smmu0: smmu@36600000 {
+ 		compatible = "arm,smmu-v3";
+ 		reg = <0x0 0x36600000 0x0 0x100000>;
++		power-domains = <&k3_pds 229 TI_SCI_PD_EXCLUSIVE>;
+ 		interrupt-parent = <&gic500>;
+ 		interrupts = <GIC_SPI 772 IRQ_TYPE_EDGE_RISING>,
+ 			     <GIC_SPI 768 IRQ_TYPE_EDGE_RISING>;
+diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
+index b9f8d787eea9..324e7d5ab37e 100644
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -35,13 +35,16 @@ void apply_alternatives_module(void *start, size_t length);
+ static inline void apply_alternatives_module(void *start, size_t length) { }
+ #endif
+ 
+-#define ALTINSTR_ENTRY(feature,cb)					      \
++#define ALTINSTR_ENTRY(feature)					              \
+ 	" .word 661b - .\n"				/* label           */ \
+-	" .if " __stringify(cb) " == 0\n"				      \
+ 	" .word 663f - .\n"				/* new instruction */ \
+-	" .else\n"							      \
++	" .hword " __stringify(feature) "\n"		/* feature bit     */ \
++	" .byte 662b-661b\n"				/* source len      */ \
++	" .byte 664f-663f\n"				/* replacement len */
++
++#define ALTINSTR_ENTRY_CB(feature, cb)					      \
++	" .word 661b - .\n"				/* label           */ \
+ 	" .word " __stringify(cb) "- .\n"		/* callback */	      \
+-	" .endif\n"							      \
+ 	" .hword " __stringify(feature) "\n"		/* feature bit     */ \
+ 	" .byte 662b-661b\n"				/* source len      */ \
+ 	" .byte 664f-663f\n"				/* replacement len */
+@@ -62,15 +65,14 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
+  *
+  * Alternatives with callbacks do not generate replacement instructions.
+  */
+-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb)	\
++#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)	\
+ 	".if "__stringify(cfg_enabled)" == 1\n"				\
+ 	"661:\n\t"							\
+ 	oldinstr "\n"							\
+ 	"662:\n"							\
+ 	".pushsection .altinstructions,\"a\"\n"				\
+-	ALTINSTR_ENTRY(feature,cb)					\
++	ALTINSTR_ENTRY(feature)						\
+ 	".popsection\n"							\
+-	" .if " __stringify(cb) " == 0\n"				\
+ 	".pushsection .altinstr_replacement, \"a\"\n"			\
+ 	"663:\n\t"							\
+ 	newinstr "\n"							\
+@@ -78,17 +80,25 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
+ 	".popsection\n\t"						\
+ 	".org	. - (664b-663b) + (662b-661b)\n\t"			\
+ 	".org	. - (662b-661b) + (664b-663b)\n"			\
+-	".else\n\t"							\
++	".endif\n"
++
++#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb)	\
++	".if "__stringify(cfg_enabled)" == 1\n"				\
++	"661:\n\t"							\
++	oldinstr "\n"							\
++	"662:\n"							\
++	".pushsection .altinstructions,\"a\"\n"				\
++	ALTINSTR_ENTRY_CB(feature, cb)					\
++	".popsection\n"							\
+ 	"663:\n\t"							\
+ 	"664:\n\t"							\
+-	".endif\n"							\
+ 	".endif\n"
+ 
+ #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
+-	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
++	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
+ 
+ #define ALTERNATIVE_CB(oldinstr, cb) \
+-	__ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
++	__ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
+ #else
+ 
+ #include <asm/assembler.h>
+diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
+index 574808b9df4c..da3280f639cd 100644
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -14,6 +14,7 @@
+ static inline void __lse_atomic_##op(int i, atomic_t *v)			\
+ {									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ "	" #asm_op "	%w[i], %[v]\n"					\
+ 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+ 	: "r" (v));							\
+@@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd)
+ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
+ {									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ "	" #asm_op #mb "	%w[i], %w[i], %[v]"				\
+ 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+ 	: "r" (v)							\
+@@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
+ 	u32 tmp;							\
+ 									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ 	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
+ 	"	add	%w[i], %w[i], %w[tmp]"				\
+ 	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
+@@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN(        , al, "memory")
+ static inline void __lse_atomic_and(int i, atomic_t *v)
+ {
+ 	asm volatile(
++	__LSE_PREAMBLE
+ 	"	mvn	%w[i], %w[i]\n"
+ 	"	stclr	%w[i], %[v]"
+ 	: [i] "+&r" (i), [v] "+Q" (v->counter)
+@@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v)
+ static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
+ {									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ 	"	mvn	%w[i], %w[i]\n"					\
+ 	"	ldclr" #mb "	%w[i], %w[i], %[v]"			\
+ 	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+@@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND(        , al, "memory")
+ static inline void __lse_atomic_sub(int i, atomic_t *v)
+ {
+ 	asm volatile(
++	__LSE_PREAMBLE
+ 	"	neg	%w[i], %w[i]\n"
+ 	"	stadd	%w[i], %[v]"
+ 	: [i] "+&r" (i), [v] "+Q" (v->counter)
+@@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
+ 	u32 tmp;							\
+ 									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ 	"	neg	%w[i], %w[i]\n"					\
+ 	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
+ 	"	add	%w[i], %w[i], %w[tmp]"				\
+@@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN(        , al, "memory")
+ static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
+ {									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ 	"	neg	%w[i], %w[i]\n"					\
+ 	"	ldadd" #mb "	%w[i], %w[i], %[v]"			\
+ 	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+@@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB(        , al, "memory")
+ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)		\
+ {									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ "	" #asm_op "	%[i], %[v]\n"					\
+ 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+ 	: "r" (v));							\
+@@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd)
+ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
+ {									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ "	" #asm_op #mb "	%[i], %[i], %[v]"				\
+ 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+ 	: "r" (v)							\
+@@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
+ 	unsigned long tmp;						\
+ 									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ 	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
+ 	"	add	%[i], %[i], %x[tmp]"				\
+ 	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
+@@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN(        , al, "memory")
+ static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
+ {
+ 	asm volatile(
++	__LSE_PREAMBLE
+ 	"	mvn	%[i], %[i]\n"
+ 	"	stclr	%[i], %[v]"
+ 	: [i] "+&r" (i), [v] "+Q" (v->counter)
+@@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
+ static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
+ {									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ 	"	mvn	%[i], %[i]\n"					\
+ 	"	ldclr" #mb "	%[i], %[i], %[v]"			\
+ 	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+@@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
+ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
+ {
+ 	asm volatile(
++	__LSE_PREAMBLE
+ 	"	neg	%[i], %[i]\n"
+ 	"	stadd	%[i], %[v]"
+ 	: [i] "+&r" (i), [v] "+Q" (v->counter)
+@@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)	\
+ 	unsigned long tmp;						\
+ 									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ 	"	neg	%[i], %[i]\n"					\
+ 	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
+ 	"	add	%[i], %[i], %x[tmp]"				\
+@@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
+ static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
+ {									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ 	"	neg	%[i], %[i]\n"					\
+ 	"	ldadd" #mb "	%[i], %[i], %[v]"			\
+ 	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+@@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
+ 	unsigned long tmp;
+ 
+ 	asm volatile(
++	__LSE_PREAMBLE
+ 	"1:	ldr	%x[tmp], %[v]\n"
+ 	"	subs	%[ret], %x[tmp], #1\n"
+ 	"	b.lt	2f\n"
+@@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
+ 	unsigned long tmp;						\
+ 									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ 	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
+ 	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
+ 	"	mov	%" #w "[ret], %" #w "[tmp]"			\
+@@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1,				\
+ 	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
+ 									\
+ 	asm volatile(							\
++	__LSE_PREAMBLE							\
+ 	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
+ 	"	eor	%[old1], %[old1], %[oldval1]\n"			\
+ 	"	eor	%[old2], %[old2], %[oldval2]\n"			\
+diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
+index 80b388278149..73834996c4b6 100644
+--- a/arch/arm64/include/asm/lse.h
++++ b/arch/arm64/include/asm/lse.h
+@@ -6,6 +6,8 @@
+ 
+ #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+ 
++#define __LSE_PREAMBLE	".arch armv8-a+lse\n"
++
+ #include <linux/compiler_types.h>
+ #include <linux/export.h>
+ #include <linux/jump_label.h>
+@@ -14,8 +16,6 @@
+ #include <asm/atomic_lse.h>
+ #include <asm/cpucaps.h>
+ 
+-__asm__(".arch_extension	lse");
+-
+ extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+ extern struct static_key_false arm64_const_caps_ready;
+ 
+@@ -34,7 +34,7 @@ static inline bool system_uses_lse_atomics(void)
+ 
+ /* In-line patching at runtime */
+ #define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
+-	ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
++	ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
+ 
+ #else	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+ 
+diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
+index 0bde47e4fa69..dcba53803fa5 100644
+--- a/arch/microblaze/kernel/cpu/cache.c
++++ b/arch/microblaze/kernel/cpu/cache.c
+@@ -92,7 +92,8 @@ static inline void __disable_dcache_nomsr(void)
+ #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
+ do {									\
+ 	int align = ~(cache_line_length - 1);				\
+-	end = min(start + cache_size, end);				\
++	if (start <  UINT_MAX - cache_size)				\
++		end = min(start + cache_size, end);			\
+ 	start &= align;							\
+ } while (0)
+ 
+diff --git a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
+index aa5caaa31104..aad9a8a8669b 100644
+--- a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
++++ b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
+@@ -177,6 +177,9 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinmux_i2s_gpio>;		/* GPIO0..3 */
+ 
++	fifo-size = <8>;
++	tx-threshold = <8>;
++
+ 	rts-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+ 	cts-gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
+ };
+diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
+index 134f12f89b92..2268396ff4bb 100644
+--- a/arch/powerpc/Makefile.postlink
++++ b/arch/powerpc/Makefile.postlink
+@@ -17,11 +17,11 @@ quiet_cmd_head_check = CHKHEAD $@
+ quiet_cmd_relocs_check = CHKREL  $@
+ ifdef CONFIG_PPC_BOOK3S_64
+       cmd_relocs_check =						\
+-	$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@" ; \
++	$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@" ; \
+ 	$(BASH) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$@"
+ else
+       cmd_relocs_check =						\
+-	$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@"
++	$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@"
+ endif
+ 
+ # `@true` prevents complaint when there is nothing to be done
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 3dd1a422fc29..a1eaffe868de 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -525,12 +525,6 @@ static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
+ 
+ 		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
+ 		edev->pdev = NULL;
+-
+-		/*
+-		 * We have to set the VF PE number to invalid one, which is
+-		 * required to plug the VF successfully.
+-		 */
+-		pdn->pe_number = IODA_INVALID_PE;
+ #endif
+ 		if (rmv_data)
+ 			list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
+diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
+index 9524009ca1ae..d876eda92609 100644
+--- a/arch/powerpc/kernel/pci_dn.c
++++ b/arch/powerpc/kernel/pci_dn.c
+@@ -244,9 +244,22 @@ void remove_dev_pci_data(struct pci_dev *pdev)
+ 				continue;
+ 
+ #ifdef CONFIG_EEH
+-			/* Release EEH device for the VF */
++			/*
++			 * Release EEH state for this VF. The PCI core
++			 * has already torn down the pci_dev for this VF, but
++			 * we're responsible to removing the eeh_dev since it
++			 * has the same lifetime as the pci_dn that spawned it.
++			 */
+ 			edev = pdn_to_eeh_dev(pdn);
+ 			if (edev) {
++				/*
++				 * We allocate pci_dn's for the totalvfs count,
++				 * but only only the vfs that were activated
++				 * have a configured PE.
++				 */
++				if (edev->pe)
++					eeh_rmv_from_parent_pe(edev);
++
+ 				pdn->edev = NULL;
+ 				kfree(edev);
+ 			}
+diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
+index 2de264fc3156..5914fbfa5e0a 100644
+--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
++++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
+@@ -543,7 +543,7 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
+ 
+ 	ret = migrate_vma_setup(&mig);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	spage = migrate_pfn_to_page(*mig.src);
+ 	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
+diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
+index 2e496eb86e94..1139bc56e004 100644
+--- a/arch/powerpc/kvm/emulate_loadstore.c
++++ b/arch/powerpc/kvm/emulate_loadstore.c
+@@ -73,7 +73,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
+ {
+ 	struct kvm_run *run = vcpu->run;
+ 	u32 inst;
+-	int ra, rs, rt;
+ 	enum emulation_result emulated = EMULATE_FAIL;
+ 	int advance = 1;
+ 	struct instruction_op op;
+@@ -85,10 +84,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
+ 	if (emulated != EMULATE_DONE)
+ 		return emulated;
+ 
+-	ra = get_ra(inst);
+-	rs = get_rs(inst);
+-	rt = get_rt(inst);
+-
+ 	vcpu->arch.mmio_vsx_copy_nums = 0;
+ 	vcpu->arch.mmio_vsx_offset = 0;
+ 	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 1baeb045f7f4..e083a9f67f70 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -354,6 +354,9 @@ static void sanity_check_fault(bool is_write, bool is_user,
+ 	 * Userspace trying to access kernel address, we get PROTFAULT for that.
+ 	 */
+ 	if (is_user && address >= TASK_SIZE) {
++		if ((long)address == -1)
++			return;
++
+ 		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
+ 				   current->comm, current->pid, address,
+ 				   from_kuid(&init_user_ns, current_uid()));
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index da1068a9c263..67e4628dd527 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -1558,6 +1558,10 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
+ 
+ 	/* Reserve PE for each VF */
+ 	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
++		int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
++		int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
++		struct pci_dn *vf_pdn;
++
+ 		if (pdn->m64_single_mode)
+ 			pe_num = pdn->pe_num_map[vf_index];
+ 		else
+@@ -1570,13 +1574,11 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
+ 		pe->pbus = NULL;
+ 		pe->parent_dev = pdev;
+ 		pe->mve_number = -1;
+-		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
+-			   pci_iov_virtfn_devfn(pdev, vf_index);
++		pe->rid = (vf_bus << 8) | vf_devfn;
+ 
+ 		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
+ 			hose->global_number, pdev->bus->number,
+-			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
+-			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
++			PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);
+ 
+ 		if (pnv_ioda_configure_pe(phb, pe)) {
+ 			/* XXX What do we do here ? */
+@@ -1590,6 +1592,15 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
+ 		list_add_tail(&pe->list, &phb->ioda.pe_list);
+ 		mutex_unlock(&phb->ioda.pe_list_mutex);
+ 
++		/* associate this pe to it's pdn */
++		list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
++			if (vf_pdn->busno == vf_bus &&
++			    vf_pdn->devfn == vf_devfn) {
++				vf_pdn->pe_number = pe_num;
++				break;
++			}
++		}
++
+ 		pnv_pci_ioda2_setup_dma_pe(phb, pe);
+ #ifdef CONFIG_IOMMU_API
+ 		iommu_register_group(&pe->table_group,
+@@ -2889,9 +2900,6 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
+ 	struct pci_dn *pdn;
+ 	int mul, total_vfs;
+ 
+-	if (!pdev->is_physfn || pci_dev_is_added(pdev))
+-		return;
+-
+ 	pdn = pci_get_pdn(pdev);
+ 	pdn->vfs_expanded = 0;
+ 	pdn->m64_single_mode = false;
+@@ -2966,6 +2974,30 @@ truncate_iov:
+ 		res->end = res->start - 1;
+ 	}
+ }
++
++static void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
++{
++	if (WARN_ON(pci_dev_is_added(pdev)))
++		return;
++
++	if (pdev->is_virtfn) {
++		struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);
++
++		/*
++		 * VF PEs are single-device PEs so their pdev pointer needs to
++		 * be set. The pdev doesn't exist when the PE is allocated (in
++		 * (pcibios_sriov_enable()) so we fix it up here.
++		 */
++		pe->pdev = pdev;
++		WARN_ON(!(pe->flags & PNV_IODA_PE_VF));
++	} else if (pdev->is_physfn) {
++		/*
++		 * For PFs adjust their allocated IOV resources to match what
++		 * the PHB can support using it's M64 BAR table.
++		 */
++		pnv_pci_ioda_fixup_iov_resources(pdev);
++	}
++}
+ #endif /* CONFIG_PCI_IOV */
+ 
+ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
+@@ -3862,7 +3894,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
+ 	ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
+ 
+ #ifdef CONFIG_PCI_IOV
+-	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
++	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov;
+ 	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
+ 	ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable;
+ 	ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable;
+diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
+index c0bea75ac27b..8307e1f4086c 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -814,24 +814,6 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
+ {
+ 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ 	struct pnv_phb *phb = hose->private_data;
+-#ifdef CONFIG_PCI_IOV
+-	struct pnv_ioda_pe *pe;
+-	struct pci_dn *pdn;
+-
+-	/* Fix the VF pdn PE number */
+-	if (pdev->is_virtfn) {
+-		pdn = pci_get_pdn(pdev);
+-		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
+-		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+-			if (pe->rid == ((pdev->bus->number << 8) |
+-			    (pdev->devfn & 0xff))) {
+-				pdn->pe_number = pe->pe_number;
+-				pe->pdev = pdev;
+-				break;
+-			}
+-		}
+-	}
+-#endif /* CONFIG_PCI_IOV */
+ 
+ 	if (phb && phb->dma_dev_setup)
+ 		phb->dma_dev_setup(phb, pdev);
+diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
+index e33e8bc4b69b..38c306551f76 100644
+--- a/arch/powerpc/platforms/pseries/lparcfg.c
++++ b/arch/powerpc/platforms/pseries/lparcfg.c
+@@ -435,10 +435,10 @@ static void maxmem_data(struct seq_file *m)
+ {
+ 	unsigned long maxmem = 0;
+ 
+-	maxmem += drmem_info->n_lmbs * drmem_info->lmb_size;
++	maxmem += (unsigned long)drmem_info->n_lmbs * drmem_info->lmb_size;
+ 	maxmem += hugetlb_total_pages() * PAGE_SIZE;
+ 
+-	seq_printf(m, "MaxMem=%ld\n", maxmem);
++	seq_printf(m, "MaxMem=%lu\n", maxmem);
+ }
+ 
+ static int pseries_lparcfg_data(struct seq_file *m, void *v)
+diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
+index 7b9fe0a567cf..014e00e74d2b 100755
+--- a/arch/powerpc/tools/relocs_check.sh
++++ b/arch/powerpc/tools/relocs_check.sh
+@@ -10,14 +10,21 @@
+ # based on relocs_check.pl
+ # Copyright © 2009 IBM Corporation
+ 
+-if [ $# -lt 2 ]; then
+-	echo "$0 [path to objdump] [path to vmlinux]" 1>&2
++if [ $# -lt 3 ]; then
++	echo "$0 [path to objdump] [path to nm] [path to vmlinux]" 1>&2
+ 	exit 1
+ fi
+ 
+-# Have Kbuild supply the path to objdump so we handle cross compilation.
++# Have Kbuild supply the path to objdump and nm so we handle cross compilation.
+ objdump="$1"
+-vmlinux="$2"
++nm="$2"
++vmlinux="$3"
++
++# Remove from the bad relocations those that match an undefined weak symbol
++# which will result in an absolute relocation to 0.
++# Weak unresolved symbols are of that form in nm output:
++# "                  w _binary__btf_vmlinux_bin_end"
++undef_weak_symbols=$($nm "$vmlinux" | awk '$1 ~ /w/ { print $2 }')
+ 
+ bad_relocs=$(
+ $objdump -R "$vmlinux" |
+@@ -26,8 +33,6 @@ $objdump -R "$vmlinux" |
+ 	# These relocations are okay
+ 	# On PPC64:
+ 	#	R_PPC64_RELATIVE, R_PPC64_NONE
+-	#	R_PPC64_ADDR64 mach_<name>
+-	#	R_PPC64_ADDR64 __crc_<name>
+ 	# On PPC:
+ 	#	R_PPC_RELATIVE, R_PPC_ADDR16_HI,
+ 	#	R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
+@@ -39,8 +44,7 @@ R_PPC_ADDR16_HI
+ R_PPC_ADDR16_HA
+ R_PPC_RELATIVE
+ R_PPC_NONE' |
+-	grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' |
+-	grep -E -v '\<R_PPC64_ADDR64[[:space:]]+__crc_'
++	([ "$undef_weak_symbols" ] && grep -F -w -v "$undef_weak_symbols" || cat)
+ )
+ 
+ if [ -z "$bad_relocs" ]; then
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index ba8556bb0fb1..e0e3a465bbfd 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -69,7 +69,7 @@ cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
+ #
+ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
+ 
+-ifeq ($(call cc-option-yn,-mpacked-stack),y)
++ifeq ($(call cc-option-yn,-mpacked-stack -mbackchain -msoft-float),y)
+ cflags-$(CONFIG_PACK_STACK)  += -mpacked-stack -D__PACK_STACK
+ aflags-$(CONFIG_PACK_STACK)  += -D__PACK_STACK
+ endif
+diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
+index 4b86a8d3c121..dae10961d072 100644
+--- a/arch/s390/boot/head.S
++++ b/arch/s390/boot/head.S
+@@ -329,7 +329,7 @@ ENTRY(startup_kdump)
+ 	.quad	.Lduct			# cr5: primary-aste origin
+ 	.quad	0			# cr6:	I/O interrupts
+ 	.quad	0			# cr7:	secondary space segment table
+-	.quad	0			# cr8:	access registers translation
++	.quad	0x0000000000008000	# cr8:	access registers translation
+ 	.quad	0			# cr9:	tracing off
+ 	.quad	0			# cr10: tracing off
+ 	.quad	0			# cr11: tracing off
+diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
+index a2b11ac00f60..7725f8006fdf 100644
+--- a/arch/s390/include/asm/bug.h
++++ b/arch/s390/include/asm/bug.h
+@@ -10,15 +10,14 @@
+ 
+ #define __EMIT_BUG(x) do {					\
+ 	asm_inline volatile(					\
+-		"0:	j	0b+2\n"				\
+-		"1:\n"						\
++		"0:	mc	0,0\n"				\
+ 		".section .rodata.str,\"aMS\",@progbits,1\n"	\
+-		"2:	.asciz	\""__FILE__"\"\n"		\
++		"1:	.asciz	\""__FILE__"\"\n"		\
+ 		".previous\n"					\
+ 		".section __bug_table,\"awM\",@progbits,%2\n"	\
+-		"3:	.long	1b-3b,2b-3b\n"			\
++		"2:	.long	0b-2b,1b-2b\n"			\
+ 		"	.short	%0,%1\n"			\
+-		"	.org	3b+%2\n"			\
++		"	.org	2b+%2\n"			\
+ 		".previous\n"					\
+ 		: : "i" (__LINE__),				\
+ 		    "i" (x),					\
+@@ -29,12 +28,11 @@
+ 
+ #define __EMIT_BUG(x) do {					\
+ 	asm_inline volatile(					\
+-		"0:	j	0b+2\n"				\
+-		"1:\n"						\
++		"0:	mc	0,0\n"				\
+ 		".section __bug_table,\"awM\",@progbits,%1\n"	\
+-		"2:	.long	1b-2b\n"			\
++		"1:	.long	0b-1b\n"			\
+ 		"	.short	%0\n"				\
+-		"	.org	2b+%1\n"			\
++		"	.org	1b+%1\n"			\
+ 		".previous\n"					\
+ 		: : "i" (x),					\
+ 		    "i" (sizeof(struct bug_entry)));		\
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index 3a06c264ea53..b05187ce5dbd 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -180,7 +180,7 @@ void zpci_remove_reserved_devices(void);
+ /* CLP */
+ int clp_scan_pci_devices(void);
+ int clp_rescan_pci_devices(void);
+-int clp_rescan_pci_devices_simple(void);
++int clp_rescan_pci_devices_simple(u32 *fid);
+ int clp_add_pci_device(u32, u32, int);
+ int clp_enable_fh(struct zpci_dev *, u8);
+ int clp_disable_fh(struct zpci_dev *);
+diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
+index b2956d49b6ad..1d3927e01a5f 100644
+--- a/arch/s390/kernel/entry.h
++++ b/arch/s390/kernel/entry.h
+@@ -45,6 +45,7 @@ void specification_exception(struct pt_regs *regs);
+ void transaction_exception(struct pt_regs *regs);
+ void translation_exception(struct pt_regs *regs);
+ void vector_exception(struct pt_regs *regs);
++void monitor_event_exception(struct pt_regs *regs);
+ 
+ void do_per_trap(struct pt_regs *regs);
+ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
+diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
+index c3597d2e2ae0..f942341429b1 100644
+--- a/arch/s390/kernel/mcount.S
++++ b/arch/s390/kernel/mcount.S
+@@ -26,6 +26,12 @@ ENDPROC(ftrace_stub)
+ #define STACK_PTREGS	  (STACK_FRAME_OVERHEAD)
+ #define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
+ #define STACK_PTREGS_PSW  (STACK_PTREGS + __PT_PSW)
++#ifdef __PACK_STACK
++/* allocate just enough for r14, r15 and backchain */
++#define TRACED_FUNC_FRAME_SIZE	24
++#else
++#define TRACED_FUNC_FRAME_SIZE	STACK_FRAME_OVERHEAD
++#endif
+ 
+ ENTRY(_mcount)
+ 	BR_EX	%r14
+@@ -40,9 +46,16 @@ ENTRY(ftrace_caller)
+ #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
+ 	aghi	%r0,MCOUNT_RETURN_FIXUP
+ #endif
+-	aghi	%r15,-STACK_FRAME_SIZE
++	# allocate stack frame for ftrace_caller to contain traced function
++	aghi	%r15,-TRACED_FUNC_FRAME_SIZE
+ 	stg	%r1,__SF_BACKCHAIN(%r15)
++	stg	%r0,(__SF_GPRS+8*8)(%r15)
++	stg	%r15,(__SF_GPRS+9*8)(%r15)
++	# allocate pt_regs and stack frame for ftrace_trace_function
++	aghi	%r15,-STACK_FRAME_SIZE
+ 	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)
++	aghi	%r1,-TRACED_FUNC_FRAME_SIZE
++	stg	%r1,__SF_BACKCHAIN(%r15)
+ 	stg	%r0,(STACK_PTREGS_PSW+8)(%r15)
+ 	stmg	%r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
+ #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
+index 59dee9d3bebf..eee3a482195a 100644
+--- a/arch/s390/kernel/pgm_check.S
++++ b/arch/s390/kernel/pgm_check.S
+@@ -81,7 +81,7 @@ PGM_CHECK_DEFAULT			/* 3c */
+ PGM_CHECK_DEFAULT			/* 3d */
+ PGM_CHECK_DEFAULT			/* 3e */
+ PGM_CHECK_DEFAULT			/* 3f */
+-PGM_CHECK_DEFAULT			/* 40 */
++PGM_CHECK(monitor_event_exception)	/* 40 */
+ PGM_CHECK_DEFAULT			/* 41 */
+ PGM_CHECK_DEFAULT			/* 42 */
+ PGM_CHECK_DEFAULT			/* 43 */
+diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
+index 164c0282b41a..dc75588d7894 100644
+--- a/arch/s390/kernel/traps.c
++++ b/arch/s390/kernel/traps.c
+@@ -53,11 +53,6 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
+                 if (fixup)
+ 			regs->psw.addr = extable_fixup(fixup);
+ 		else {
+-			enum bug_trap_type btt;
+-
+-			btt = report_bug(regs->psw.addr, regs);
+-			if (btt == BUG_TRAP_TYPE_WARN)
+-				return;
+ 			die(regs, str);
+ 		}
+         }
+@@ -245,6 +240,27 @@ void space_switch_exception(struct pt_regs *regs)
+ 	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
+ }
+ 
++void monitor_event_exception(struct pt_regs *regs)
++{
++	const struct exception_table_entry *fixup;
++
++	if (user_mode(regs))
++		return;
++
++	switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) {
++	case BUG_TRAP_TYPE_NONE:
++		fixup = s390_search_extables(regs->psw.addr);
++		if (fixup)
++			regs->psw.addr = extable_fixup(fixup);
++		break;
++	case BUG_TRAP_TYPE_WARN:
++		break;
++	case BUG_TRAP_TYPE_BUG:
++		die(regs, "monitor event");
++		break;
++	}
++}
++
+ void kernel_stack_overflow(struct pt_regs *regs)
+ {
+ 	bust_spinlocks(1);
+@@ -255,8 +271,23 @@ void kernel_stack_overflow(struct pt_regs *regs)
+ }
+ NOKPROBE_SYMBOL(kernel_stack_overflow);
+ 
++static void test_monitor_call(void)
++{
++	int val = 1;
++
++	asm volatile(
++		"	mc	0,0\n"
++		"0:	xgr	%0,%0\n"
++		"1:\n"
++		EX_TABLE(0b,1b)
++		: "+d" (val));
++	if (!val)
++		panic("Monitor call doesn't work!\n");
++}
++
+ void __init trap_init(void)
+ {
+ 	sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
+ 	local_mcck_enable();
++	test_monitor_call();
+ }
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 165dea4c7f19..c06c89d370a7 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -2190,7 +2190,7 @@ static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
+ 		return -EINVAL;
+ 
+ 	if (!test_kvm_facility(kvm, 72))
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
+ 
+ 	mutex_lock(&fi->ais_lock);
+ 	ais.simm = fi->simm;
+@@ -2499,7 +2499,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
+ 	int ret = 0;
+ 
+ 	if (!test_kvm_facility(kvm, 72))
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
+ 
+ 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
+ 		return -EFAULT;
+@@ -2579,7 +2579,7 @@ static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
+ 	struct kvm_s390_ais_all ais;
+ 
+ 	if (!test_kvm_facility(kvm, 72))
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
+ 
+ 	if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
+ 		return -EFAULT;
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 8e872951c07b..bc61ea18e88d 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -939,5 +939,5 @@ subsys_initcall_sync(pci_base_init);
+ void zpci_rescan(void)
+ {
+ 	if (zpci_is_enabled())
+-		clp_rescan_pci_devices_simple();
++		clp_rescan_pci_devices_simple(NULL);
+ }
+diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
+index 4c613e569fe0..0d3d8f170ea4 100644
+--- a/arch/s390/pci/pci_clp.c
++++ b/arch/s390/pci/pci_clp.c
+@@ -240,12 +240,14 @@ error:
+ }
+ 
+ /*
+- * Enable/Disable a given PCI function defined by its function handle.
++ * Enable/Disable a given PCI function and update its function handle if
++ * necessary
+  */
+-static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
++static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
+ {
+ 	struct clp_req_rsp_set_pci *rrb;
+ 	int rc, retries = 100;
++	u32 fid = zdev->fid;
+ 
+ 	rrb = clp_alloc_block(GFP_KERNEL);
+ 	if (!rrb)
+@@ -256,7 +258,7 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
+ 		rrb->request.hdr.len = sizeof(rrb->request);
+ 		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
+ 		rrb->response.hdr.len = sizeof(rrb->response);
+-		rrb->request.fh = *fh;
++		rrb->request.fh = zdev->fh;
+ 		rrb->request.oc = command;
+ 		rrb->request.ndas = nr_dma_as;
+ 
+@@ -269,12 +271,17 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
+ 		}
+ 	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
+ 
+-	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
+-		*fh = rrb->response.fh;
+-	else {
++	if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+ 		zpci_err("Set PCI FN:\n");
+ 		zpci_err_clp(rrb->response.hdr.rsp, rc);
+-		rc = -EIO;
++	}
++
++	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
++		zdev->fh = rrb->response.fh;
++	} else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
++			rrb->response.fh == 0) {
++		/* Function is already in desired state - update handle */
++		rc = clp_rescan_pci_devices_simple(&fid);
+ 	}
+ 	clp_free_block(rrb);
+ 	return rc;
+@@ -282,18 +289,17 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
+ 
+ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
+ {
+-	u32 fh = zdev->fh;
+ 	int rc;
+ 
+-	rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
+-	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
++	rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
++	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
+ 	if (rc)
+ 		goto out;
+ 
+-	zdev->fh = fh;
+ 	if (zpci_use_mio(zdev)) {
+-		rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_MIO);
+-		zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
++		rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
++		zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
++				zdev->fid, zdev->fh, rc);
+ 		if (rc)
+ 			clp_disable_fh(zdev);
+ 	}
+@@ -309,11 +315,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
+ 	if (!zdev_enabled(zdev))
+ 		return 0;
+ 
+-	rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
++	rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
+ 	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
+-	if (!rc)
+-		zdev->fh = fh;
+-
+ 	return rc;
+ }
+ 
+@@ -370,10 +373,14 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
+ static void __clp_update(struct clp_fh_list_entry *entry, void *data)
+ {
+ 	struct zpci_dev *zdev;
++	u32 *fid = data;
+ 
+ 	if (!entry->vendor_id)
+ 		return;
+ 
++	if (fid && *fid != entry->fid)
++		return;
++
+ 	zdev = get_zdev_by_fid(entry->fid);
+ 	if (!zdev)
+ 		return;
+@@ -413,7 +420,10 @@ int clp_rescan_pci_devices(void)
+ 	return rc;
+ }
+ 
+-int clp_rescan_pci_devices_simple(void)
++/* Rescan PCI functions and refresh function handles. If fid is non-NULL only
++ * refresh the handle of the function matching @fid
++ */
++int clp_rescan_pci_devices_simple(u32 *fid)
+ {
+ 	struct clp_req_rsp_list_pci *rrb;
+ 	int rc;
+@@ -422,7 +432,7 @@ int clp_rescan_pci_devices_simple(void)
+ 	if (!rrb)
+ 		return -ENOMEM;
+ 
+-	rc = clp_list_pci(rrb, NULL, __clp_update);
++	rc = clp_list_pci(rrb, fid, __clp_update);
+ 
+ 	clp_free_block(rrb);
+ 	return rc;
+diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
+index a433ba01a317..215f17437a4f 100644
+--- a/arch/s390/pci/pci_sysfs.c
++++ b/arch/s390/pci/pci_sysfs.c
+@@ -13,6 +13,8 @@
+ #include <linux/stat.h>
+ #include <linux/pci.h>
+ 
++#include "../../../drivers/pci/pci.h"
++
+ #include <asm/sclp.h>
+ 
+ #define zpci_attr(name, fmt, member)					\
+@@ -49,31 +51,50 @@ static DEVICE_ATTR_RO(mio_enabled);
+ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
+ 			     const char *buf, size_t count)
+ {
++	struct kernfs_node *kn;
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+ 	struct zpci_dev *zdev = to_zpci(pdev);
+-	int ret;
+-
+-	if (!device_remove_file_self(dev, attr))
+-		return count;
+-
++	int ret = 0;
++
++	/* Can't use device_remove_self() here as that would lead us to lock
++	 * the pci_rescan_remove_lock while holding the device' kernfs lock.
++	 * This would create a possible deadlock with disable_slot() which is
++	 * not directly protected by the device' kernfs lock but takes it
++	 * during the device removal which happens under
++	 * pci_rescan_remove_lock.
++	 *
++	 * This is analogous to sdev_store_delete() in
++	 * drivers/scsi/scsi_sysfs.c
++	 */
++	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
++	WARN_ON_ONCE(!kn);
++	/* device_remove_file() serializes concurrent calls ignoring all but
++	 * the first
++	 */
++	device_remove_file(dev, attr);
++
++	/* A concurrent call to recover_store() may slip between
++	 * sysfs_break_active_protection() and the sysfs file removal.
++	 * Once it unblocks from pci_lock_rescan_remove() the original pdev
++	 * will already be removed.
++	 */
+ 	pci_lock_rescan_remove();
+-	pci_stop_and_remove_bus_device(pdev);
+-	ret = zpci_disable_device(zdev);
+-	if (ret)
+-		goto error;
+-
+-	ret = zpci_enable_device(zdev);
+-	if (ret)
+-		goto error;
+-
+-	pci_rescan_bus(zdev->bus);
++	if (pci_dev_is_added(pdev)) {
++		pci_stop_and_remove_bus_device(pdev);
++		ret = zpci_disable_device(zdev);
++		if (ret)
++			goto out;
++
++		ret = zpci_enable_device(zdev);
++		if (ret)
++			goto out;
++		pci_rescan_bus(zdev->bus);
++	}
++out:
+ 	pci_unlock_rescan_remove();
+-
+-	return count;
+-
+-error:
+-	pci_unlock_rescan_remove();
+-	return ret;
++	if (kn)
++		sysfs_unbreak_active_protection(kn);
++	return ret ? ret : count;
+ }
+ static DEVICE_ATTR_WO(recover);
+ 
+diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
+index d516e5d48818..b887cc402b71 100644
+--- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h
++++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
+@@ -78,8 +78,15 @@ enum {
+ 	GPIO_FN_WDTOVF,
+ 
+ 	/* CAN */
+-	GPIO_FN_CTX1, GPIO_FN_CRX1, GPIO_FN_CTX0, GPIO_FN_CTX0_CTX1,
+-	GPIO_FN_CRX0, GPIO_FN_CRX0_CRX1, GPIO_FN_CRX0_CRX1_CRX2,
++	GPIO_FN_CTX2, GPIO_FN_CRX2,
++	GPIO_FN_CTX1, GPIO_FN_CRX1,
++	GPIO_FN_CTX0, GPIO_FN_CRX0,
++	GPIO_FN_CTX0_CTX1, GPIO_FN_CRX0_CRX1,
++	GPIO_FN_CTX0_CTX1_CTX2, GPIO_FN_CRX0_CRX1_CRX2,
++	GPIO_FN_CTX2_PJ21, GPIO_FN_CRX2_PJ20,
++	GPIO_FN_CTX1_PJ23, GPIO_FN_CRX1_PJ22,
++	GPIO_FN_CTX0_CTX1_PJ23, GPIO_FN_CRX0_CRX1_PJ22,
++	GPIO_FN_CTX0_CTX1_CTX2_PJ21, GPIO_FN_CRX0_CRX1_CRX2_PJ20,
+ 
+ 	/* DMAC */
+ 	GPIO_FN_TEND0, GPIO_FN_DACK0, GPIO_FN_DREQ0,
+diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
+index 7ec79918b566..f99e99e58075 100644
+--- a/arch/sparc/kernel/vmlinux.lds.S
++++ b/arch/sparc/kernel/vmlinux.lds.S
+@@ -171,12 +171,14 @@ SECTIONS
+ 	}
+ 	PERCPU_SECTION(SMP_CACHE_BYTES)
+ 
+-#ifdef CONFIG_JUMP_LABEL
+ 	. = ALIGN(PAGE_SIZE);
+ 	.exit.text : {
+ 		EXIT_TEXT
+ 	}
+-#endif
++
++	.exit.data : {
++		EXIT_DATA
++	}
+ 
+ 	. = ALIGN(PAGE_SIZE);
+ 	__init_end = .;
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 95410d6ee2ff..748b6d28a91d 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -88,7 +88,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
+ 
+ SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
+ 
+-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
++sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+ 
+ quiet_cmd_zoffset = ZOFFSET $@
+       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
+diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
+index 240626e7f55a..43842fade8fa 100644
+--- a/arch/x86/entry/vdso/vdso32-setup.c
++++ b/arch/x86/entry/vdso/vdso32-setup.c
+@@ -11,6 +11,7 @@
+ #include <linux/smp.h>
+ #include <linux/kernel.h>
+ #include <linux/mm_types.h>
++#include <linux/elf.h>
+ 
+ #include <asm/processor.h>
+ #include <asm/vdso.h>
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index dede714b46e8..3253797fa8a1 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -302,6 +302,25 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
+ 	return offset;
+ }
+ 
++/*
++ * AMD64 events are detected based on their event codes.
++ */
++static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
++{
++	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
++}
++
++static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
++{
++	if (!(x86_pmu.flags & PMU_FL_PAIR))
++		return false;
++
++	switch (amd_get_event_code(hwc)) {
++	case 0x003:	return true;	/* Retired SSE/AVX FLOPs */
++	default:	return false;
++	}
++}
++
+ static int amd_core_hw_config(struct perf_event *event)
+ {
+ 	if (event->attr.exclude_host && event->attr.exclude_guest)
+@@ -320,14 +339,6 @@ static int amd_core_hw_config(struct perf_event *event)
+ 	return 0;
+ }
+ 
+-/*
+- * AMD64 events are detected based on their event codes.
+- */
+-static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
+-{
+-	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
+-}
+-
+ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
+ {
+ 	return (hwc->config & 0xe0) == 0xe0;
+@@ -856,6 +867,20 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
+ 	}
+ }
+ 
++static struct event_constraint pair_constraint;
++
++static struct event_constraint *
++amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
++			       struct perf_event *event)
++{
++	struct hw_perf_event *hwc = &event->hw;
++
++	if (amd_is_pair_event_code(hwc))
++		return &pair_constraint;
++
++	return &unconstrained;
++}
++
+ static ssize_t amd_event_sysfs_show(char *page, u64 config)
+ {
+ 	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
+@@ -899,33 +924,15 @@ static __initconst const struct x86_pmu amd_pmu = {
+ 
+ static int __init amd_core_pmu_init(void)
+ {
++	u64 even_ctr_mask = 0ULL;
++	int i;
++
+ 	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+ 		return 0;
+ 
+-	/* Avoid calulating the value each time in the NMI handler */
++	/* Avoid calculating the value each time in the NMI handler */
+ 	perf_nmi_window = msecs_to_jiffies(100);
+ 
+-	switch (boot_cpu_data.x86) {
+-	case 0x15:
+-		pr_cont("Fam15h ");
+-		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
+-		break;
+-	case 0x17:
+-		pr_cont("Fam17h ");
+-		/*
+-		 * In family 17h, there are no event constraints in the PMC hardware.
+-		 * We fallback to using default amd_get_event_constraints.
+-		 */
+-		break;
+-	case 0x18:
+-		pr_cont("Fam18h ");
+-		/* Using default amd_get_event_constraints. */
+-		break;
+-	default:
+-		pr_err("core perfctr but no constraints; unknown hardware!\n");
+-		return -ENODEV;
+-	}
+-
+ 	/*
+ 	 * If core performance counter extensions exists, we must use
+ 	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
+@@ -940,6 +947,30 @@ static int __init amd_core_pmu_init(void)
+ 	 */
+ 	x86_pmu.amd_nb_constraints = 0;
+ 
++	if (boot_cpu_data.x86 == 0x15) {
++		pr_cont("Fam15h ");
++		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
++	}
++	if (boot_cpu_data.x86 >= 0x17) {
++		pr_cont("Fam17h+ ");
++		/*
++		 * Family 17h and compatibles have constraints for Large
++		 * Increment per Cycle events: they may only be assigned an
++		 * even numbered counter that has a consecutive adjacent odd
++		 * numbered counter following it.
++		 */
++		for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
++			even_ctr_mask |= 1 << i;
++
++		pair_constraint = (struct event_constraint)
++				    __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
++				    x86_pmu.num_counters / 2, 0,
++				    PERF_X86_EVENT_PAIR);
++
++		x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
++		x86_pmu.flags |= PMU_FL_PAIR;
++	}
++
+ 	pr_cont("core perfctr, ");
+ 	return 0;
+ }
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 930611db8f9a..e2fd363de649 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -77,6 +77,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
+ #define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
+ #define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */
+ #define PERF_X86_EVENT_PEBS_VIA_PT	0x0800 /* use PT buffer for PEBS */
++#define PERF_X86_EVENT_PAIR		0x1000 /* Large Increment per Cycle */
+ 
+ struct amd_nb {
+ 	int nb_id;  /* NorthBridge id */
+@@ -743,6 +744,7 @@ do {									\
+ #define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
+ #define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
+ #define PMU_FL_TFA		0x20 /* deal with TSX force abort */
++#define PMU_FL_PAIR		0x40 /* merge counters for large incr. events */
+ 
+ #define EVENT_VAR(_id)  event_attr_##_id
+ #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
+diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
+index 75ded1d13d98..9d5d949e662e 100644
+--- a/arch/x86/include/asm/nmi.h
++++ b/arch/x86/include/asm/nmi.h
+@@ -41,7 +41,6 @@ struct nmiaction {
+ 	struct list_head	list;
+ 	nmi_handler_t		handler;
+ 	u64			max_duration;
+-	struct irq_work		irq_work;
+ 	unsigned long		flags;
+ 	const char		*name;
+ };
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index d5b51a740524..ad53b2abc859 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -1493,65 +1493,34 @@ static void check_efi_reboot(void)
+ }
+ 
+ /* Setup user proc fs files */
+-static int proc_hubbed_show(struct seq_file *file, void *data)
++static int __maybe_unused proc_hubbed_show(struct seq_file *file, void *data)
+ {
+ 	seq_printf(file, "0x%x\n", uv_hubbed_system);
+ 	return 0;
+ }
+ 
+-static int proc_hubless_show(struct seq_file *file, void *data)
++static int __maybe_unused proc_hubless_show(struct seq_file *file, void *data)
+ {
+ 	seq_printf(file, "0x%x\n", uv_hubless_system);
+ 	return 0;
+ }
+ 
+-static int proc_oemid_show(struct seq_file *file, void *data)
++static int __maybe_unused proc_oemid_show(struct seq_file *file, void *data)
+ {
+ 	seq_printf(file, "%s/%s\n", oem_id, oem_table_id);
+ 	return 0;
+ }
+ 
+-static int proc_hubbed_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, proc_hubbed_show, (void *)NULL);
+-}
+-
+-static int proc_hubless_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, proc_hubless_show, (void *)NULL);
+-}
+-
+-static int proc_oemid_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, proc_oemid_show, (void *)NULL);
+-}
+-
+-/* (struct is "non-const" as open function is set at runtime) */
+-static struct file_operations proc_version_fops = {
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
+-};
+-
+-static const struct file_operations proc_oemid_fops = {
+-	.open		= proc_oemid_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
+-};
+-
+ static __init void uv_setup_proc_files(int hubless)
+ {
+ 	struct proc_dir_entry *pde;
+-	char *name = hubless ? "hubless" : "hubbed";
+ 
+ 	pde = proc_mkdir(UV_PROC_NODE, NULL);
+-	proc_create("oemid", 0, pde, &proc_oemid_fops);
+-	proc_create(name, 0, pde, &proc_version_fops);
++	proc_create_single("oemid", 0, pde, proc_oemid_show);
+ 	if (hubless)
+-		proc_version_fops.open = proc_hubless_open;
++		proc_create_single("hubless", 0, pde, proc_hubless_show);
+ 	else
+-		proc_version_fops.open = proc_hubbed_open;
++		proc_create_single("hubbed", 0, pde, proc_hubbed_show);
+ }
+ 
+ /* Initialize UV hubless systems */
+diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
+index 6c3e1c92f183..58b4ee3cda77 100644
+--- a/arch/x86/kernel/cpu/mce/therm_throt.c
++++ b/arch/x86/kernel/cpu/mce/therm_throt.c
+@@ -235,7 +235,7 @@ static void get_therm_status(int level, bool *proc_hot, u8 *temp)
+ 	*temp = (msr_val >> 16) & 0x7F;
+ }
+ 
+-static void throttle_active_work(struct work_struct *work)
++static void __maybe_unused throttle_active_work(struct work_struct *work)
+ {
+ 	struct _thermal_state *state = container_of(to_delayed_work(work),
+ 						struct _thermal_state, therm_work);
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 0071b794ed19..400a05e1c1c5 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -352,6 +352,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 			fpregs_unlock();
+ 			return 0;
+ 		}
++		fpregs_deactivate(fpu);
+ 		fpregs_unlock();
+ 	}
+ 
+@@ -403,6 +404,8 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 	}
+ 	if (!ret)
+ 		fpregs_mark_activate();
++	else
++		fpregs_deactivate(fpu);
+ 	fpregs_unlock();
+ 
+ err_out:
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index e676a9916c49..54c21d6abd5a 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -104,18 +104,22 @@ static int __init nmi_warning_debugfs(void)
+ }
+ fs_initcall(nmi_warning_debugfs);
+ 
+-static void nmi_max_handler(struct irq_work *w)
++static void nmi_check_duration(struct nmiaction *action, u64 duration)
+ {
+-	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
++	u64 whole_msecs = READ_ONCE(action->max_duration);
+ 	int remainder_ns, decimal_msecs;
+-	u64 whole_msecs = READ_ONCE(a->max_duration);
++
++	if (duration < nmi_longest_ns || duration < action->max_duration)
++		return;
++
++	action->max_duration = duration;
+ 
+ 	remainder_ns = do_div(whole_msecs, (1000 * 1000));
+ 	decimal_msecs = remainder_ns / 1000;
+ 
+ 	printk_ratelimited(KERN_INFO
+ 		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
+-		a->handler, whole_msecs, decimal_msecs);
++		action->handler, whole_msecs, decimal_msecs);
+ }
+ 
+ static int nmi_handle(unsigned int type, struct pt_regs *regs)
+@@ -142,11 +146,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs)
+ 		delta = sched_clock() - delta;
+ 		trace_nmi_handler(a->handler, (int)delta, thishandled);
+ 
+-		if (delta < nmi_longest_ns || delta < a->max_duration)
+-			continue;
+-
+-		a->max_duration = delta;
+-		irq_work_queue(&a->irq_work);
++		nmi_check_duration(a, delta);
+ 	}
+ 
+ 	rcu_read_unlock();
+@@ -164,8 +164,6 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
+ 	if (!action->handler)
+ 		return -EINVAL;
+ 
+-	init_irq_work(&action->irq_work, nmi_max_handler);
+-
+ 	raw_spin_lock_irqsave(&desc->lock, flags);
+ 
+ 	/*
+diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
+index 01f0e2263b86..298fc1edd9c9 100644
+--- a/arch/x86/kernel/sysfb_simplefb.c
++++ b/arch/x86/kernel/sysfb_simplefb.c
+@@ -90,11 +90,11 @@ __init int create_simplefb(const struct screen_info *si,
+ 	if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
+ 		size <<= 16;
+ 	length = mode->height * mode->stride;
+-	length = PAGE_ALIGN(length);
+ 	if (length > size) {
+ 		printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
+ 		return -EINVAL;
+ 	}
++	length = PAGE_ALIGN(length);
+ 
+ 	/* setup IORESOURCE_MEM as framebuffer memory */
+ 	memset(&res, 0, sizeof(res));
+diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
+index 8908c58bd6cd..53adc1762ec0 100644
+--- a/arch/x86/lib/x86-opcode-map.txt
++++ b/arch/x86/lib/x86-opcode-map.txt
+@@ -929,7 +929,7 @@ EndTable
+ 
+ GrpTable: Grp3_2
+ 0: TEST Ev,Iz
+-1:
++1: TEST Ev,Iz
+ 2: NOT Ev
+ 3: NEG Ev
+ 4: MUL rAX,Ev
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index 1b99ad05b117..f42780ba0893 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -2215,7 +2215,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+ 		.pgd = pgd,
+ 		.numpages = numpages,
+ 		.mask_set = __pgprot(0),
+-		.mask_clr = __pgprot(0),
++		.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
+ 		.flags = 0,
+ 	};
+ 
+@@ -2224,12 +2224,6 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+ 	if (!(__supported_pte_mask & _PAGE_NX))
+ 		goto out;
+ 
+-	if (!(page_flags & _PAGE_NX))
+-		cpa.mask_clr = __pgprot(_PAGE_NX);
+-
+-	if (!(page_flags & _PAGE_RW))
+-		cpa.mask_clr = __pgprot(_PAGE_RW);
+-
+ 	if (!(page_flags & _PAGE_ENC))
+ 		cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
+ 
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 38d44f36d5ed..ad4dd3a97753 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -541,7 +541,6 @@ void __init efi_init(void)
+ 	efi_char16_t *c16;
+ 	char vendor[100] = "unknown";
+ 	int i = 0;
+-	void *tmp;
+ 
+ #ifdef CONFIG_X86_32
+ 	if (boot_params.efi_info.efi_systab_hi ||
+@@ -566,14 +565,16 @@ void __init efi_init(void)
+ 	/*
+ 	 * Show what we know for posterity
+ 	 */
+-	c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
++	c16 = early_memremap_ro(efi.systab->fw_vendor,
++				sizeof(vendor) * sizeof(efi_char16_t));
+ 	if (c16) {
+-		for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
+-			vendor[i] = *c16++;
++		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
++			vendor[i] = c16[i];
+ 		vendor[i] = '\0';
+-	} else
++		early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
++	} else {
+ 		pr_err("Could not map the firmware vendor!\n");
+-	early_memunmap(tmp, 2);
++	}
+ 
+ 	pr_info("EFI v%u.%.02u by %s\n",
+ 		efi.systab->hdr.revision >> 16,
+@@ -999,16 +1000,14 @@ static void __init __efi_enter_virtual_mode(void)
+ 
+ 	if (efi_alloc_page_tables()) {
+ 		pr_err("Failed to allocate EFI page tables\n");
+-		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+-		return;
++		goto err;
+ 	}
+ 
+ 	efi_merge_regions();
+ 	new_memmap = efi_map_regions(&count, &pg_shift);
+ 	if (!new_memmap) {
+ 		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
+-		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+-		return;
++		goto err;
+ 	}
+ 
+ 	pa = __pa(new_memmap);
+@@ -1022,8 +1021,7 @@ static void __init __efi_enter_virtual_mode(void)
+ 
+ 	if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
+ 		pr_err("Failed to remap late EFI memory map\n");
+-		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+-		return;
++		goto err;
+ 	}
+ 
+ 	if (efi_enabled(EFI_DBG)) {
+@@ -1031,12 +1029,11 @@ static void __init __efi_enter_virtual_mode(void)
+ 		efi_print_memmap();
+ 	}
+ 
+-	BUG_ON(!efi.systab);
++	if (WARN_ON(!efi.systab))
++		goto err;
+ 
+-	if (efi_setup_page_tables(pa, 1 << pg_shift)) {
+-		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+-		return;
+-	}
++	if (efi_setup_page_tables(pa, 1 << pg_shift))
++		goto err;
+ 
+ 	efi_sync_low_kernel_mappings();
+ 
+@@ -1056,9 +1053,9 @@ static void __init __efi_enter_virtual_mode(void)
+ 	}
+ 
+ 	if (status != EFI_SUCCESS) {
+-		pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
+-			 status);
+-		panic("EFI call to SetVirtualAddressMap() failed!");
++		pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
++		       status);
++		goto err;
+ 	}
+ 
+ 	efi_free_boot_services();
+@@ -1087,6 +1084,10 @@ static void __init __efi_enter_virtual_mode(void)
+ 
+ 	/* clean DUMMY object */
+ 	efi_delete_dummy_variable();
++	return;
++
++err:
++	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ }
+ 
+ void __init efi_enter_virtual_mode(void)
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 08ce8177c3af..52a1e5192fa8 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -392,11 +392,12 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ 		return 0;
+ 
+ 	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
+-	if (!page)
+-		panic("Unable to allocate EFI runtime stack < 4GB\n");
++	if (!page) {
++		pr_err("Unable to allocate EFI runtime stack < 4GB\n");
++		return 1;
++	}
+ 
+-	efi_scratch.phys_stack = virt_to_phys(page_address(page));
+-	efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */
++	efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */
+ 
+ 	npages = (_etext - _text) >> PAGE_SHIFT;
+ 	text = __pa(_text);
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index ad4af4aaf2ce..5c239c540c47 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -3444,6 +3444,10 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
+ static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
+ 						 struct bfq_queue *bfqq)
+ {
++	/* No point in idling for bfqq if it won't get requests any longer */
++	if (unlikely(!bfqq_process_refs(bfqq)))
++		return false;
++
+ 	return (bfqq->wr_coeff > 1 &&
+ 		(bfqd->wr_busy_queues <
+ 		 bfq_tot_busy_queues(bfqd) ||
+@@ -4077,6 +4081,10 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
+ 		bfqq_sequential_and_IO_bound,
+ 		idling_boosts_thr;
+ 
++	/* No point in idling for bfqq if it won't get requests any longer */
++	if (unlikely(!bfqq_process_refs(bfqq)))
++		return false;
++
+ 	bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
+ 		bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
+ 
+@@ -4170,6 +4178,10 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+ 	struct bfq_data *bfqd = bfqq->bfqd;
+ 	bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
+ 
++	/* No point in idling for bfqq if it won't get requests any longer */
++	if (unlikely(!bfqq_process_refs(bfqq)))
++		return false;
++
+ 	if (unlikely(bfqd->strict_guarantees))
+ 		return true;
+ 
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 5575d48473bd..cdb51d4272d0 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -511,10 +511,10 @@ config CRYPTO_ESSIV
+ 	  encryption.
+ 
+ 	  This driver implements a crypto API template that can be
+-	  instantiated either as a skcipher or as a aead (depending on the
++	  instantiated either as an skcipher or as an AEAD (depending on the
+ 	  type of the first template argument), and which defers encryption
+ 	  and decryption requests to the encapsulated cipher after applying
+-	  ESSIV to the input IV. Note that in the aead case, it is assumed
++	  ESSIV to the input IV. Note that in the AEAD case, it is assumed
+ 	  that the keys are presented in the same format used by the authenc
+ 	  template, and that the IV appears at the end of the authenticated
+ 	  associated data (AAD) region (which is how dm-crypt uses it.)
+diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
+index faa38a22263a..ae713d746c8b 100644
+--- a/drivers/acpi/acpica/dsfield.c
++++ b/drivers/acpi/acpica/dsfield.c
+@@ -243,7 +243,7 @@ cleanup:
+  * FUNCTION:    acpi_ds_get_field_names
+  *
+  * PARAMETERS:  info            - create_field info structure
+- *  `           walk_state      - Current method state
++ *              walk_state      - Current method state
+  *              arg             - First parser arg for the field name list
+  *
+  * RETURN:      Status
+diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
+index c88fd31208a5..4bcf15bf03de 100644
+--- a/drivers/acpi/acpica/dswload.c
++++ b/drivers/acpi/acpica/dswload.c
+@@ -410,6 +410,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
+ 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
+ 			  walk_state));
+ 
++	/*
++	 * Disassembler: handle create field operators here.
++	 *
++	 * create_buffer_field is a deferred op that is typically processed in load
++	 * pass 2. However, disassembly of control method contents walks the parse
++	 * tree with ACPI_PARSE_LOAD_PASS1, and AML_CREATE operators are processed
++	 * in a later walk. This is a problem when there is a control method that
++	 * has the same name as the AML_CREATE object. In this case, any use of the
++	 * name segment will be detected as a method call rather than a reference
++	 * to a buffer field.
++	 *
++	 * This earlier creation during disassembly solves this issue by inserting
++	 * the named object in the ACPI namespace so that references to this name
++	 * would be a name string rather than a method call.
++	 */
++	if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
++	    (walk_state->op_info->flags & AML_CREATE)) {
++		status = acpi_ds_create_buffer_field(op, walk_state);
++		return_ACPI_STATUS(status);
++	}
++
+ 	/* We are only interested in opcodes that have an associated name */
+ 
+ 	if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index b758b45737f5..f6925f16c4a2 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -122,6 +122,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
+ 		},
+ 		.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ 	},
++	{
++		/*
++		 * Razer Blade Stealth 13 late 2019: notification of the LID device
++		 * only happens on close, not on open, and _LID always returns closed.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"),
++		},
++		.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
+index f1a500205313..8fbd36eb8941 100644
+--- a/drivers/atm/fore200e.c
++++ b/drivers/atm/fore200e.c
+@@ -1414,12 +1414,14 @@ fore200e_open(struct atm_vcc *vcc)
+ static void
+ fore200e_close(struct atm_vcc* vcc)
+ {
+-    struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
+     struct fore200e_vcc*    fore200e_vcc;
++    struct fore200e*        fore200e;
+     struct fore200e_vc_map* vc_map;
+     unsigned long           flags;
+ 
+     ASSERT(vcc);
++    fore200e = FORE200E_DEV(vcc->dev);
++
+     ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
+     ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
+ 
+@@ -1464,10 +1466,10 @@ fore200e_close(struct atm_vcc* vcc)
+ static int
+ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ {
+-    struct fore200e*        fore200e     = FORE200E_DEV(vcc->dev);
+-    struct fore200e_vcc*    fore200e_vcc = FORE200E_VCC(vcc);
++    struct fore200e*        fore200e;
++    struct fore200e_vcc*    fore200e_vcc;
+     struct fore200e_vc_map* vc_map;
+-    struct host_txq*        txq          = &fore200e->host_txq;
++    struct host_txq*        txq;
+     struct host_txq_entry*  entry;
+     struct tpd*             tpd;
+     struct tpd_haddr        tpd_haddr;
+@@ -1480,9 +1482,18 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
+     unsigned char*          data;
+     unsigned long           flags;
+ 
+-    ASSERT(vcc);
+-    ASSERT(fore200e);
+-    ASSERT(fore200e_vcc);
++    if (!vcc)
++        return -EINVAL;
++
++    fore200e = FORE200E_DEV(vcc->dev);
++    fore200e_vcc = FORE200E_VCC(vcc);
++
++    if (!fore200e)
++        return -EINVAL;
++
++    txq = &fore200e->host_txq;
++    if (!fore200e_vcc)
++        return -EINVAL;
+ 
+     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
+ 	DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index d811e60610d3..b25bcab2a26b 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -516,7 +516,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
+ 	atomic_inc(&probe_count);
+ 	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
+ 		 drv->bus->name, __func__, drv->name, dev_name(dev));
+-	WARN_ON(!list_empty(&dev->devres_head));
++	if (!list_empty(&dev->devres_head)) {
++		dev_crit(dev, "Resources present before probing\n");
++		return -EBUSY;
++	}
+ 
+ re_probe:
+ 	dev->driver = drv;
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index cf6b6b722e5c..7fa654f1288b 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -27,6 +27,7 @@
+ #include <linux/limits.h>
+ #include <linux/property.h>
+ #include <linux/kmemleak.h>
++#include <linux/types.h>
+ 
+ #include "base.h"
+ #include "power/power.h"
+@@ -48,7 +49,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
+ struct resource *platform_get_resource(struct platform_device *dev,
+ 				       unsigned int type, unsigned int num)
+ {
+-	int i;
++	u32 i;
+ 
+ 	for (i = 0; i < dev->num_resources; i++) {
+ 		struct resource *r = &dev->resource[i];
+@@ -255,7 +256,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
+ 					      unsigned int type,
+ 					      const char *name)
+ {
+-	int i;
++	u32 i;
+ 
+ 	for (i = 0; i < dev->num_resources; i++) {
+ 		struct resource *r = &dev->resource[i];
+@@ -501,7 +502,8 @@ EXPORT_SYMBOL_GPL(platform_device_add_properties);
+  */
+ int platform_device_add(struct platform_device *pdev)
+ {
+-	int i, ret;
++	u32 i;
++	int ret;
+ 
+ 	if (!pdev)
+ 		return -EINVAL;
+@@ -569,7 +571,7 @@ int platform_device_add(struct platform_device *pdev)
+ 		pdev->id = PLATFORM_DEVID_AUTO;
+ 	}
+ 
+-	while (--i >= 0) {
++	while (i--) {
+ 		struct resource *r = &pdev->resource[i];
+ 		if (r->parent)
+ 			release_resource(r);
+@@ -590,7 +592,7 @@ EXPORT_SYMBOL_GPL(platform_device_add);
+  */
+ void platform_device_del(struct platform_device *pdev)
+ {
+-	int i;
++	u32 i;
+ 
+ 	if (!IS_ERR_OR_NULL(pdev)) {
+ 		device_del(&pdev->dev);
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index a8730cc4db10..220c5e18aba0 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -473,6 +473,25 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
+ 	return kobj;
+ }
+ 
++static inline void brd_check_and_reset_par(void)
++{
++	if (unlikely(!max_part))
++		max_part = 1;
++
++	/*
++	 * make sure 'max_part' divides (1U << MINORBITS) exactly; otherwise,
++	 * it is possible to get the same dev_t when adding partitions.
++	 */
++	if ((1U << MINORBITS) % max_part != 0)
++		max_part = 1UL << fls(max_part);
++
++	if (max_part > DISK_MAX_PARTS) {
++		pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
++			DISK_MAX_PARTS, DISK_MAX_PARTS);
++		max_part = DISK_MAX_PARTS;
++	}
++}
++
+ static int __init brd_init(void)
+ {
+ 	struct brd_device *brd, *next;
+@@ -496,8 +515,7 @@ static int __init brd_init(void)
+ 	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
+ 		return -EIO;
+ 
+-	if (unlikely(!max_part))
+-		max_part = 1;
++	brd_check_and_reset_par();
+ 
+ 	for (i = 0; i < rd_nr; i++) {
+ 		brd = brd_alloc(i);
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index b4607dd96185..78181908f0df 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1265,6 +1265,16 @@ static int nbd_start_device(struct nbd_device *nbd)
+ 		args = kzalloc(sizeof(*args), GFP_KERNEL);
+ 		if (!args) {
+ 			sock_shutdown(nbd);
++			/*
++			 * If num_connections is m (m > 2) and the first n
++			 * kzallocs (1 < n < m) succeed but allocation n + 1
++			 * fails, we still have n recv threads running. Flush
++			 * the workqueue here to prevent those threads from
++			 * dropping the last config_refs and trying to destroy
++			 * the workqueue from inside the workqueue.
++			 */
++			if (i)
++				flush_workqueue(nbd->recv_workq);
+ 			return -ENOMEM;
+ 		}
+ 		sk_set_memalloc(config->socks[i]->sock->sk);
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 2b184563cd32..38dcb39051a7 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -2662,7 +2662,7 @@ static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
+ 			       u64 off, u64 len)
+ {
+ 	struct ceph_file_extent ex = { off, len };
+-	union rbd_img_fill_iter dummy;
++	union rbd_img_fill_iter dummy = {};
+ 	struct rbd_img_fill_ctx fctx = {
+ 		.pos_type = OBJ_REQUEST_NODATA,
+ 		.pos = &dummy,
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 4285e75e52c3..1bf4a908a0bd 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -626,7 +626,7 @@ static ssize_t writeback_store(struct device *dev,
+ 	struct bio bio;
+ 	struct bio_vec bio_vec;
+ 	struct page *page;
+-	ssize_t ret;
++	ssize_t ret = len;
+ 	int mode;
+ 	unsigned long blk_idx = 0;
+ 
+@@ -762,7 +762,6 @@ next:
+ 
+ 	if (blk_idx)
+ 		free_block_bdev(zram, blk_idx);
+-	ret = len;
+ 	__free_page(page);
+ release_init_lock:
+ 	up_read(&zram->init_lock);
+diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
+index a07cc19becdb..c78d10ea641f 100644
+--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
+@@ -715,9 +715,9 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
+ {
+ 	struct fsl_mc_device *mc_bus_dev, *endpoint;
+-	struct fsl_mc_obj_desc endpoint_desc = { 0 };
+-	struct dprc_endpoint endpoint1 = { 0 };
+-	struct dprc_endpoint endpoint2 = { 0 };
++	struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
++	struct dprc_endpoint endpoint1 = {{ 0 }};
++	struct dprc_endpoint endpoint2 = {{ 0 }};
+ 	int state, err;
+ 
+ 	mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index ccb44fe790a7..3d79b074f958 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -479,7 +479,7 @@ static void sysc_clkdm_deny_idle(struct sysc *ddata)
+ {
+ 	struct ti_sysc_platform_data *pdata;
+ 
+-	if (ddata->legacy_mode)
++	if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
+ 		return;
+ 
+ 	pdata = dev_get_platdata(ddata->dev);
+@@ -491,7 +491,7 @@ static void sysc_clkdm_allow_idle(struct sysc *ddata)
+ {
+ 	struct ti_sysc_platform_data *pdata;
+ 
+-	if (ddata->legacy_mode)
++	if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
+ 		return;
+ 
+ 	pdata = dev_get_platdata(ddata->dev);
+@@ -1251,6 +1251,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ 	/* Quirks that need to be set based on detected module */
+ 	SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff,
+ 		   SYSC_MODULE_QUIRK_AESS),
++	SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff,
++		   SYSC_QUIRK_CLKDM_NOAUTO),
++	SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
++		   SYSC_QUIRK_CLKDM_NOAUTO),
++	SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
++		   SYSC_QUIRK_CLKDM_NOAUTO),
+ 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
+ 		   SYSC_MODULE_QUIRK_HDQ1W),
+ 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index 9ac6671bb514..f69609b47fef 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -855,7 +855,7 @@ int hpet_alloc(struct hpet_data *hdp)
+ 		return 0;
+ 	}
+ 
+-	hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs - 1),
++	hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs),
+ 			GFP_KERNEL);
+ 
+ 	if (!hpetp)
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index cda12933a17d..ea1973d35843 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1687,8 +1687,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
+ 	print_once = true;
+ #endif
+ 	if (__ratelimit(&unseeded_warning))
+-		pr_notice("random: %s called from %pS with crng_init=%d\n",
+-			  func_name, caller, crng_init);
++		printk_deferred(KERN_NOTICE "random: %s called from %pS "
++				"with crng_init=%d\n", func_name, caller,
++				crng_init);
+ }
+ 
+ /*
+diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
+index 86238d5ecb4d..77398aefeb6d 100644
+--- a/drivers/clk/at91/sam9x60.c
++++ b/drivers/clk/at91/sam9x60.c
+@@ -47,6 +47,7 @@ static const struct clk_programmable_layout sam9x60_programmable_layout = {
+ 	.pres_shift = 8,
+ 	.css_mask = 0x1f,
+ 	.have_slck_mck = 0,
++	.is_pres_direct = 1,
+ };
+ 
+ static const struct clk_pcr_layout sam9x60_pcr_layout = {
+diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
+index 4cd175afce9b..e6d6599d310a 100644
+--- a/drivers/clk/clk-bm1880.c
++++ b/drivers/clk/clk-bm1880.c
+@@ -474,11 +474,10 @@ static struct bm1880_composite_clock bm1880_composite_clks[] = {
+ static unsigned long bm1880_pll_rate_calc(u32 regval, unsigned long parent_rate)
+ {
+ 	u64 numerator;
+-	u32 fbdiv, fref, refdiv;
++	u32 fbdiv, refdiv;
+ 	u32 postdiv1, postdiv2, denominator;
+ 
+ 	fbdiv = (regval >> 16) & 0xfff;
+-	fref = parent_rate;
+ 	refdiv = regval & 0x1f;
+ 	postdiv1 = (regval >> 8) & 0x7;
+ 	postdiv2 = (regval >> 12) & 0x7;
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 772258de2d1f..66f056ac4c15 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -3338,6 +3338,21 @@ static int __clk_core_init(struct clk_core *core)
+ 		goto out;
+ 	}
+ 
++	/*
++	 * optional platform-specific magic
++	 *
++	 * The .init callback is not used by any of the basic clock types, but
++	 * exists for weird hardware that must perform initialization magic.
++	 * Please consider other ways of solving initialization problems before
++	 * using this callback, as its use is discouraged.
++	 *
++	 * If it exists, this callback should be called before any other
++	 * callback of the clock.
++	 */
++	if (core->ops->init)
++		core->ops->init(core->hw);
++
++
+ 	core->parent = __clk_init_parent(core);
+ 
+ 	/*
+@@ -3362,17 +3377,6 @@ static int __clk_core_init(struct clk_core *core)
+ 		core->orphan = true;
+ 	}
+ 
+-	/*
+-	 * optional platform-specific magic
+-	 *
+-	 * The .init callback is not used by any of the basic clock types, but
+-	 * exists for weird hardware that must perform initialization magic.
+-	 * Please consider other ways of solving initialization problems before
+-	 * using this callback, as its use is discouraged.
+-	 */
+-	if (core->ops->init)
+-		core->ops->init(core->hw);
+-
+ 	/*
+ 	 * Set clk's accuracy.  The preferred method is to use
+ 	 * .recalc_accuracy. For simple clocks and lazy developers the default
+@@ -3732,6 +3736,28 @@ fail_out:
+ 	return ERR_PTR(ret);
+ }
+ 
++/**
++ * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
++ * @dev: Device to get device node of
++ *
++ * Return: device node pointer of @dev, or the device node pointer of
++ * @dev->parent if @dev doesn't have a device node, or NULL if neither
++ * @dev nor @dev->parent has a device node.
++ */
++static struct device_node *dev_or_parent_of_node(struct device *dev)
++{
++	struct device_node *np;
++
++	if (!dev)
++		return NULL;
++
++	np = dev_of_node(dev);
++	if (!np)
++		np = dev_of_node(dev->parent);
++
++	return np;
++}
++
+ /**
+  * clk_register - allocate a new clock, register it and return an opaque cookie
+  * @dev: device that is registering this clock
+@@ -3747,7 +3773,7 @@ fail_out:
+  */
+ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
+ {
+-	return __clk_register(dev, dev_of_node(dev), hw);
++	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
+ }
+ EXPORT_SYMBOL_GPL(clk_register);
+ 
+@@ -3763,7 +3789,8 @@ EXPORT_SYMBOL_GPL(clk_register);
+  */
+ int clk_hw_register(struct device *dev, struct clk_hw *hw)
+ {
+-	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw));
++	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
++			       hw));
+ }
+ EXPORT_SYMBOL_GPL(clk_hw_register);
+ 
+diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
+index bc5bb6ac8636..30ddbc1ced2e 100644
+--- a/drivers/clk/imx/clk.h
++++ b/drivers/clk/imx/clk.h
+@@ -54,48 +54,48 @@ extern struct imx_pll14xx_clk imx_1416x_pll;
+ extern struct imx_pll14xx_clk imx_1443x_pll;
+ 
+ #define imx_clk_cpu(name, parent_name, div, mux, pll, step) \
+-	imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)->clk
++	to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))
+ 
+ #define clk_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
+ 				cgr_val, clk_gate_flags, lock, share_count) \
+-	clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
+-				cgr_val, clk_gate_flags, lock, share_count)->clk
++	to_clk(clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
++				cgr_val, clk_gate_flags, lock, share_count))
+ 
+ #define imx_clk_pllv3(type, name, parent_name, base, div_mask) \
+-	imx_clk_hw_pllv3(type, name, parent_name, base, div_mask)->clk
++	to_clk(imx_clk_hw_pllv3(type, name, parent_name, base, div_mask))
+ 
+ #define imx_clk_pfd(name, parent_name, reg, idx) \
+-	imx_clk_hw_pfd(name, parent_name, reg, idx)->clk
++	to_clk(imx_clk_hw_pfd(name, parent_name, reg, idx))
+ 
+ #define imx_clk_gate_exclusive(name, parent, reg, shift, exclusive_mask) \
+-	imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask)->clk
++	to_clk(imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask))
+ 
+ #define imx_clk_fixed_factor(name, parent, mult, div) \
+-	imx_clk_hw_fixed_factor(name, parent, mult, div)->clk
++	to_clk(imx_clk_hw_fixed_factor(name, parent, mult, div))
+ 
+ #define imx_clk_divider2(name, parent, reg, shift, width) \
+-	imx_clk_hw_divider2(name, parent, reg, shift, width)->clk
++	to_clk(imx_clk_hw_divider2(name, parent, reg, shift, width))
+ 
+ #define imx_clk_gate_dis(name, parent, reg, shift) \
+-	imx_clk_hw_gate_dis(name, parent, reg, shift)->clk
++	to_clk(imx_clk_hw_gate_dis(name, parent, reg, shift))
+ 
+ #define imx_clk_gate2(name, parent, reg, shift) \
+-	imx_clk_hw_gate2(name, parent, reg, shift)->clk
++	to_clk(imx_clk_hw_gate2(name, parent, reg, shift))
+ 
+ #define imx_clk_gate2_flags(name, parent, reg, shift, flags) \
+-	imx_clk_hw_gate2_flags(name, parent, reg, shift, flags)->clk
++	to_clk(imx_clk_hw_gate2_flags(name, parent, reg, shift, flags))
+ 
+ #define imx_clk_gate2_shared2(name, parent, reg, shift, share_count) \
+-	imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count)->clk
++	to_clk(imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count))
+ 
+ #define imx_clk_gate3(name, parent, reg, shift) \
+-	imx_clk_hw_gate3(name, parent, reg, shift)->clk
++	to_clk(imx_clk_hw_gate3(name, parent, reg, shift))
+ 
+ #define imx_clk_gate4(name, parent, reg, shift) \
+-	imx_clk_hw_gate4(name, parent, reg, shift)->clk
++	to_clk(imx_clk_hw_gate4(name, parent, reg, shift))
+ 
+ #define imx_clk_mux(name, reg, shift, width, parents, num_parents) \
+-	imx_clk_hw_mux(name, reg, shift, width, parents, num_parents)->clk
++	to_clk(imx_clk_hw_mux(name, reg, shift, width, parents, num_parents))
+ 
+ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ 		 void __iomem *base, const struct imx_pll14xx_clk *pll_clk);
+@@ -198,6 +198,13 @@ struct clk_hw *imx_clk_hw_fixup_mux(const char *name, void __iomem *reg,
+ 			      u8 shift, u8 width, const char * const *parents,
+ 			      int num_parents, void (*fixup)(u32 *val));
+ 
++static inline struct clk *to_clk(struct clk_hw *hw)
++{
++	if (IS_ERR_OR_NULL(hw))
++		return ERR_CAST(hw);
++	return hw->clk;
++}
++
+ static inline struct clk *imx_clk_fixed(const char *name, int rate)
+ {
+ 	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
+diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
+index ddb1e5634739..3a5853ca98c6 100644
+--- a/drivers/clk/meson/clk-pll.c
++++ b/drivers/clk/meson/clk-pll.c
+@@ -77,6 +77,15 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
+ 	unsigned int m, n, frac;
+ 
+ 	n = meson_parm_read(clk->map, &pll->n);
++
++	/*
++	 * On some HW, N is set to zero on init. This value is invalid as
++	 * it would result in a division by zero. The rate can't be
++	 * calculated in this case
++	 */
++	if (n == 0)
++		return 0;
++
+ 	m = meson_parm_read(clk->map, &pll->m);
+ 
+ 	frac = MESON_PARM_APPLICABLE(&pll->frac) ?
+diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
+index 67e6691e080c..8856ce476ccf 100644
+--- a/drivers/clk/meson/meson8b.c
++++ b/drivers/clk/meson/meson8b.c
+@@ -1764,8 +1764,11 @@ static struct clk_regmap meson8b_hdmi_sys = {
+ 
+ /*
+  * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
+- * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only
+- * has mali_0 and no glitch-free mux.
++ * muxed by a glitch-free switch on Meson8b and Meson8m2. The CCF can
++ * actually manage this glitch-free mux because it does top-to-bottom
++ * updates of each clock tree and switches to the "inactive" one when
++ * CLK_SET_RATE_GATE is set.
++ * Meson8 only has mali_0 and no glitch-free mux.
+  */
+ static const struct clk_hw *meson8b_mali_0_1_parent_hws[] = {
+ 	&meson8b_xtal.hw,
+@@ -1830,7 +1833,7 @@ static struct clk_regmap meson8b_mali_0 = {
+ 			&meson8b_mali_0_div.hw
+ 		},
+ 		.num_parents = 1,
+-		.flags = CLK_SET_RATE_PARENT,
++		.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -1885,7 +1888,7 @@ static struct clk_regmap meson8b_mali_1 = {
+ 			&meson8b_mali_1_div.hw
+ 		},
+ 		.num_parents = 1,
+-		.flags = CLK_SET_RATE_PARENT,
++		.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index 8f4b9bec2956..cecdb07ce13b 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -217,6 +217,9 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+ 
+ 	clk_flags = clk_hw_get_flags(hw);
+ 	p = clk_hw_get_parent_by_index(hw, index);
++	if (!p)
++		return -EINVAL;
++
+ 	if (clk_flags & CLK_SET_RATE_PARENT) {
+ 		rate = f->freq;
+ 		if (f->pre_div) {
+@@ -952,7 +955,7 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
+ 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ 	struct clk_hw *p;
+ 	unsigned long prate = 0;
+-	u32 val, mask, cfg, mode;
++	u32 val, mask, cfg, mode, src;
+ 	int i, num_parents;
+ 
+ 	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);
+@@ -962,12 +965,12 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
+ 	if (cfg & mask)
+ 		f->pre_div = cfg & mask;
+ 
+-	cfg &= CFG_SRC_SEL_MASK;
+-	cfg >>= CFG_SRC_SEL_SHIFT;
++	src = cfg & CFG_SRC_SEL_MASK;
++	src >>= CFG_SRC_SEL_SHIFT;
+ 
+ 	num_parents = clk_hw_get_num_parents(hw);
+ 	for (i = 0; i < num_parents; i++) {
+-		if (cfg == rcg->parent_map[i].cfg) {
++		if (src == rcg->parent_map[i].cfg) {
+ 			f->src = rcg->parent_map[i].src;
+ 			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
+ 			prate = clk_hw_get_rate(p);
+diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
+index 930fa4a4c52a..e5c3db11bf26 100644
+--- a/drivers/clk/qcom/clk-smd-rpm.c
++++ b/drivers/clk/qcom/clk-smd-rpm.c
+@@ -648,6 +648,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
+ };
+ 
+ /* msm8998 */
++DEFINE_CLK_SMD_RPM(msm8998, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+ DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+ DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+ DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+@@ -671,6 +672,8 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
+ DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
+ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
+ static struct clk_smd_rpm *msm8998_clks[] = {
++	[RPM_SMD_BIMC_CLK] = &msm8998_bimc_clk,
++	[RPM_SMD_BIMC_A_CLK] = &msm8998_bimc_a_clk,
+ 	[RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk,
+ 	[RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk,
+ 	[RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
+diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
+index cf31b5d03270..df1d7056436c 100644
+--- a/drivers/clk/qcom/gcc-msm8998.c
++++ b/drivers/clk/qcom/gcc-msm8998.c
+@@ -1996,6 +1996,19 @@ static struct clk_branch gcc_gp3_clk = {
+ 	},
+ };
+ 
++static struct clk_branch gcc_bimc_gfx_clk = {
++	.halt_reg = 0x46040,
++	.halt_check = BRANCH_HALT,
++	.clkr = {
++		.enable_reg = 0x46040,
++		.enable_mask = BIT(0),
++		.hw.init = &(struct clk_init_data){
++			.name = "gcc_bimc_gfx_clk",
++			.ops = &clk_branch2_ops,
++		},
++	},
++};
++
+ static struct clk_branch gcc_gpu_bimc_gfx_clk = {
+ 	.halt_reg = 0x71010,
+ 	.halt_check = BRANCH_HALT,
+@@ -2810,6 +2823,7 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
+ 	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ 	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ 	[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
++	[GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ 	[GCC_GPU_BIMC_GFX_CLK] = &gcc_gpu_bimc_gfx_clk.clkr,
+ 	[GCC_GPU_BIMC_GFX_SRC_CLK] = &gcc_gpu_bimc_gfx_src_clk.clkr,
+ 	[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
+index c97b647db9b6..488f8b3980c5 100644
+--- a/drivers/clk/renesas/rcar-gen3-cpg.c
++++ b/drivers/clk/renesas/rcar-gen3-cpg.c
+@@ -470,7 +470,8 @@ static struct clk * __init cpg_rpc_clk_register(const char *name,
+ 
+ 	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ 				     &rpc->div.hw,  &clk_divider_ops,
+-				     &rpc->gate.hw, &clk_gate_ops, 0);
++				     &rpc->gate.hw, &clk_gate_ops,
++				     CLK_SET_RATE_PARENT);
+ 	if (IS_ERR(clk)) {
+ 		kfree(rpc);
+ 		return clk;
+@@ -506,7 +507,8 @@ static struct clk * __init cpg_rpcd2_clk_register(const char *name,
+ 
+ 	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ 				     &rpcd2->fixed.hw, &clk_fixed_factor_ops,
+-				     &rpcd2->gate.hw, &clk_gate_ops, 0);
++				     &rpcd2->gate.hw, &clk_gate_ops,
++				     CLK_SET_RATE_PARENT);
+ 	if (IS_ERR(clk))
+ 		kfree(rpcd2);
+ 
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+index 49bd7a4c015c..5f66bf879772 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+@@ -921,11 +921,26 @@ static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = {
+ 	.num_resets	= ARRAY_SIZE(sun50i_a64_ccu_resets),
+ };
+ 
++static struct ccu_pll_nb sun50i_a64_pll_cpu_nb = {
++	.common	= &pll_cpux_clk.common,
++	/* copy from pll_cpux_clk */
++	.enable	= BIT(31),
++	.lock	= BIT(28),
++};
++
++static struct ccu_mux_nb sun50i_a64_cpu_nb = {
++	.common		= &cpux_clk.common,
++	.cm		= &cpux_clk.mux,
++	.delay_us	= 1, /* > 8 clock cycles at 24 MHz */
++	.bypass_index	= 1, /* index of 24 MHz oscillator */
++};
++
+ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
+ {
+ 	struct resource *res;
+ 	void __iomem *reg;
+ 	u32 val;
++	int ret;
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	reg = devm_ioremap_resource(&pdev->dev, res);
+@@ -939,7 +954,18 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
+ 
+ 	writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
+ 
+-	return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
++	ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
++	if (ret)
++		return ret;
++
++	/* Gate then ungate PLL CPU after any rate changes */
++	ccu_pll_notifier_register(&sun50i_a64_pll_cpu_nb);
++
++	/* Reparent CPU during PLL CPU rate changes */
++	ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
++				  &sun50i_a64_cpu_nb);
++
++	return 0;
+ }
+ 
+ static const struct of_device_id sun50i_a64_ccu_ids[] = {
+diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
+index 5f46782cebeb..b656ba2abcf7 100644
+--- a/drivers/clk/ti/clk-7xx.c
++++ b/drivers/clk/ti/clk-7xx.c
+@@ -405,7 +405,7 @@ static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = {
+ };
+ 
+ static const struct omap_clkctrl_reg_data dra7_gmac_clkctrl_regs[] __initconst = {
+-	{ DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck" },
++	{ DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "gmac_main_clk" },
+ 	{ 0 },
+ };
+ 
+diff --git a/drivers/clk/uniphier/clk-uniphier-peri.c b/drivers/clk/uniphier/clk-uniphier-peri.c
+index 9caa52944b1c..3e32db9dad81 100644
+--- a/drivers/clk/uniphier/clk-uniphier-peri.c
++++ b/drivers/clk/uniphier/clk-uniphier-peri.c
+@@ -18,8 +18,8 @@
+ #define UNIPHIER_PERI_CLK_FI2C(idx, ch)					\
+ 	UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch))
+ 
+-#define UNIPHIER_PERI_CLK_SCSSI(idx)					\
+-	UNIPHIER_CLK_GATE("scssi", (idx), "spi", 0x20, 17)
++#define UNIPHIER_PERI_CLK_SCSSI(idx, ch)				\
++	UNIPHIER_CLK_GATE("scssi" #ch, (idx), "spi", 0x20, 17 + (ch))
+ 
+ #define UNIPHIER_PERI_CLK_MCSSI(idx)					\
+ 	UNIPHIER_CLK_GATE("mcssi", (idx), "spi", 0x24, 14)
+@@ -35,7 +35,7 @@ const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
+ 	UNIPHIER_PERI_CLK_I2C(6, 2),
+ 	UNIPHIER_PERI_CLK_I2C(7, 3),
+ 	UNIPHIER_PERI_CLK_I2C(8, 4),
+-	UNIPHIER_PERI_CLK_SCSSI(11),
++	UNIPHIER_PERI_CLK_SCSSI(11, 0),
+ 	{ /* sentinel */ }
+ };
+ 
+@@ -51,7 +51,10 @@ const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = {
+ 	UNIPHIER_PERI_CLK_FI2C(8, 4),
+ 	UNIPHIER_PERI_CLK_FI2C(9, 5),
+ 	UNIPHIER_PERI_CLK_FI2C(10, 6),
+-	UNIPHIER_PERI_CLK_SCSSI(11),
+-	UNIPHIER_PERI_CLK_MCSSI(12),
++	UNIPHIER_PERI_CLK_SCSSI(11, 0),
++	UNIPHIER_PERI_CLK_SCSSI(12, 1),
++	UNIPHIER_PERI_CLK_SCSSI(13, 2),
++	UNIPHIER_PERI_CLK_SCSSI(14, 3),
++	UNIPHIER_PERI_CLK_MCSSI(15),
+ 	{ /* sentinel */ }
+ };
+diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
+index 2b196cbfadb6..b235f446ee50 100644
+--- a/drivers/clocksource/bcm2835_timer.c
++++ b/drivers/clocksource/bcm2835_timer.c
+@@ -121,7 +121,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
+ 	ret = setup_irq(irq, &timer->act);
+ 	if (ret) {
+ 		pr_err("Can't set up timer IRQ\n");
+-		goto err_iounmap;
++		goto err_timer_free;
+ 	}
+ 
+ 	clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
+@@ -130,6 +130,9 @@ static int __init bcm2835_timer_init(struct device_node *node)
+ 
+ 	return 0;
+ 
++err_timer_free:
++	kfree(timer);
++
+ err_iounmap:
+ 	iounmap(base);
+ 	return ret;
+diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
+index 62745c962049..e421946a91c5 100644
+--- a/drivers/clocksource/timer-davinci.c
++++ b/drivers/clocksource/timer-davinci.c
+@@ -302,10 +302,6 @@ int __init davinci_timer_register(struct clk *clk,
+ 		return rv;
+ 	}
+ 
+-	clockevents_config_and_register(&clockevent->dev, tick_rate,
+-					DAVINCI_TIMER_MIN_DELTA,
+-					DAVINCI_TIMER_MAX_DELTA);
+-
+ 	davinci_clocksource.dev.rating = 300;
+ 	davinci_clocksource.dev.read = davinci_clocksource_read;
+ 	davinci_clocksource.dev.mask =
+@@ -323,6 +319,10 @@ int __init davinci_timer_register(struct clk *clk,
+ 		davinci_clocksource_init_tim34(base);
+ 	}
+ 
++	clockevents_config_and_register(&clockevent->dev, tick_rate,
++					DAVINCI_TIMER_MIN_DELTA,
++					DAVINCI_TIMER_MAX_DELTA);
++
+ 	rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
+ 	if (rv) {
+ 		pr_err("Unable to register clocksource");
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index 91eb768d4221..0a73bebd04e5 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -716,7 +716,7 @@ source "drivers/crypto/stm32/Kconfig"
+ 
+ config CRYPTO_DEV_SAFEXCEL
+ 	tristate "Inside Secure's SafeXcel cryptographic engine driver"
+-	depends on OF || PCI || COMPILE_TEST
++	depends on (OF || PCI || COMPILE_TEST) && HAS_IOMEM
+ 	select CRYPTO_LIB_AES
+ 	select CRYPTO_AUTHENC
+ 	select CRYPTO_SKCIPHER
+diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
+index b90850d18965..cf9547602670 100644
+--- a/drivers/crypto/amlogic/Kconfig
++++ b/drivers/crypto/amlogic/Kconfig
+@@ -1,5 +1,6 @@
+ config CRYPTO_DEV_AMLOGIC_GXL
+ 	tristate "Support for amlogic cryptographic offloader"
++	depends on HAS_IOMEM
+ 	default y if ARCH_MESON
+ 	select CRYPTO_SKCIPHER
+ 	select CRYPTO_ENGINE
+diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
+index aca75237bbcf..dffa2aa855fd 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+ 	return 0;
+ }
+ 
++static void chtls_purge_wr_queue(struct sock *sk)
++{
++	struct sk_buff *skb;
++
++	while ((skb = dequeue_wr(sk)) != NULL)
++		kfree_skb(skb);
++}
++
+ static void chtls_release_resources(struct sock *sk)
+ {
+ 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+@@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk)
+ 	kfree_skb(csk->txdata_skb_cache);
+ 	csk->txdata_skb_cache = NULL;
+ 
++	if (csk->wr_credits != csk->wr_max_credits) {
++		chtls_purge_wr_queue(sk);
++		chtls_reset_wr_list(csk);
++	}
++
+ 	if (csk->l2t_entry) {
+ 		cxgb4_l2t_release(csk->l2t_entry);
+ 		csk->l2t_entry = NULL;
+@@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
+ 		else
+ 			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ 	}
++	kfree_skb(skb);
+ }
+ 
+ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
+@@ -2062,19 +2076,6 @@ rel_skb:
+ 	return 0;
+ }
+ 
+-static struct sk_buff *dequeue_wr(struct sock *sk)
+-{
+-	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+-	struct sk_buff *skb = csk->wr_skb_head;
+-
+-	if (likely(skb)) {
+-	/* Don't bother clearing the tail */
+-		csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
+-		WR_SKB_CB(skb)->next_wr = NULL;
+-	}
+-	return skb;
+-}
+-
+ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
+diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
+index 129d7ac649a9..3fac0c74a41f 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
+@@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
+ 	kfree_skb(skb);
+ }
+ 
++static inline void chtls_reset_wr_list(struct chtls_sock *csk)
++{
++	csk->wr_skb_head = NULL;
++	csk->wr_skb_tail = NULL;
++}
++
+ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
+ {
+ 	WR_SKB_CB(skb)->next_wr = NULL;
+@@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
+ 		WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
+ 	csk->wr_skb_tail = skb;
+ }
++
++static inline struct sk_buff *dequeue_wr(struct sock *sk)
++{
++	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
++	struct sk_buff *skb = NULL;
++
++	skb = csk->wr_skb_head;
++
++	if (likely(skb)) {
++	 /* Don't bother clearing the tail */
++		csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
++		WR_SKB_CB(skb)->next_wr = NULL;
++	}
++	return skb;
++}
+ #endif
+diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
+index 2a34035d3cfb..a217fe72602d 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
++++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
+@@ -350,6 +350,7 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+ 	kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ 	kwr->sc_imm.len = cpu_to_be32(klen);
+ 
++	lock_sock(sk);
+ 	/* key info */
+ 	kctx = (struct _key_ctx *)(kwr + 1);
+ 	ret = chtls_key_info(csk, kctx, keylen, optname);
+@@ -388,8 +389,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+ 		csk->tlshws.txkey = keyid;
+ 	}
+ 
++	release_sock(sk);
+ 	return ret;
+ out_notcb:
++	release_sock(sk);
+ 	free_tls_keyid(sk);
+ out_nokey:
+ 	kfree_skb(skb);
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+index 98f037e6ea3e..d8b015266ee4 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+@@ -1043,6 +1043,7 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
+ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
+ {
+ 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
++	int ret;
+ 
+ 	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
+ 	if (IS_ERR(ctx->rsa.soft_tfm)) {
+@@ -1050,7 +1051,11 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
+ 		return PTR_ERR(ctx->rsa.soft_tfm);
+ 	}
+ 
+-	return hpre_ctx_init(ctx);
++	ret = hpre_ctx_init(ctx);
++	if (ret)
++		crypto_free_akcipher(ctx->rsa.soft_tfm);
++
++	return ret;
+ }
+ 
+ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index 34e0424410bf..0c98c37e39f4 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -106,18 +106,18 @@ static const char * const hpre_debug_file_name[] = {
+ };
+ 
+ static const struct hpre_hw_error hpre_hw_errors[] = {
+-	{ .int_msk = BIT(0), .msg = "hpre_ecc_1bitt_err" },
+-	{ .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
+-	{ .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
+-	{ .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
+-	{ .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
+-	{ .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
+-	{ .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
+-	{ .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
+-	{ .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
+-	{ .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
+-	{ .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
+-	{ .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
++	{ .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
++	{ .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
++	{ .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
++	{ .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
++	{ .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
++	{ .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
++	{ .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
++	{ .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
++	{ .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
++	{ .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
++	{ .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
++	{ .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
+ 	{ /* sentinel */ }
+ };
+ 
+diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
+index b846d73d9a85..841f4c56ca73 100644
+--- a/drivers/crypto/hisilicon/sec2/sec.h
++++ b/drivers/crypto/hisilicon/sec2/sec.h
+@@ -40,7 +40,7 @@ struct sec_req {
+ 	int req_id;
+ 
+ 	/* Status of the SEC request */
+-	atomic_t fake_busy;
++	bool fake_busy;
+ };
+ 
+ /**
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index 0a5391fff485..2475aaf0d59b 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -141,7 +141,7 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+ 		return -ENOBUFS;
+ 
+ 	if (!ret) {
+-		if (atomic_read(&req->fake_busy))
++		if (req->fake_busy)
+ 			ret = -EBUSY;
+ 		else
+ 			ret = -EINPROGRESS;
+@@ -641,7 +641,7 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
+ 	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ 		sec_update_iv(req);
+ 
+-	if (atomic_cmpxchg(&req->fake_busy, 1, 0) != 1)
++	if (req->fake_busy)
+ 		sk_req->base.complete(&sk_req->base, -EINPROGRESS);
+ 
+ 	sk_req->base.complete(&sk_req->base, req->err_type);
+@@ -672,9 +672,9 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
+ 	}
+ 
+ 	if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
+-		atomic_set(&req->fake_busy, 1);
++		req->fake_busy = true;
+ 	else
+-		atomic_set(&req->fake_busy, 0);
++		req->fake_busy = false;
+ 
+ 	ret = ctx->req_op->get_res(ctx, req);
+ 	if (ret) {
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index ab742dfbab99..d40e2da3b05d 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -608,13 +608,13 @@ static const struct file_operations sec_dbg_fops = {
+ 	.write = sec_debug_write,
+ };
+ 
+-static int debugfs_atomic64_t_get(void *data, u64 *val)
++static int sec_debugfs_atomic64_get(void *data, u64 *val)
+ {
+-        *val = atomic64_read((atomic64_t *)data);
+-        return 0;
++	*val = atomic64_read((atomic64_t *)data);
++	return 0;
+ }
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL,
+-                        "%lld\n");
++DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
++			 NULL, "%lld\n");
+ 
+ static int sec_core_debug_init(struct sec_dev *sec)
+ {
+@@ -636,11 +636,11 @@ static int sec_core_debug_init(struct sec_dev *sec)
+ 
+ 	debugfs_create_regset32("regs", 0444, tmp_d, regset);
+ 
+-	debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt,
+-			    &fops_atomic64_t_ro);
++	debugfs_create_file("send_cnt", 0444, tmp_d,
++			    &dfx->send_cnt, &sec_atomic64_ops);
+ 
+-	debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt,
+-			    &fops_atomic64_t_ro);
++	debugfs_create_file("recv_cnt", 0444, tmp_d,
++			    &dfx->recv_cnt, &sec_atomic64_ops);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
+index 35535833b6f7..c7804635e89e 100644
+--- a/drivers/devfreq/Kconfig
++++ b/drivers/devfreq/Kconfig
+@@ -115,7 +115,8 @@ config ARM_TEGRA20_DEVFREQ
+ 
+ config ARM_RK3399_DMC_DEVFREQ
+ 	tristate "ARM RK3399 DMC DEVFREQ Driver"
+-	depends on ARCH_ROCKCHIP
++	depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
++		(COMPILE_TEST && HAVE_ARM_SMCCC)
+ 	select DEVFREQ_EVENT_ROCKCHIP_DFI
+ 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ 	select PM_DEVFREQ_EVENT
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 07602083c743..e99f082d15df 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -10,6 +10,7 @@
+ #include <linux/kernel.h>
+ #include <linux/kmod.h>
+ #include <linux/sched.h>
++#include <linux/debugfs.h>
+ #include <linux/errno.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
+@@ -33,6 +34,7 @@
+ #define HZ_PER_KHZ	1000
+ 
+ static struct class *devfreq_class;
++static struct dentry *devfreq_debugfs;
+ 
+ /*
+  * devfreq core provides delayed work based load monitoring helper
+@@ -209,10 +211,10 @@ static int set_freq_table(struct devfreq *devfreq)
+ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
+ {
+ 	int lev, prev_lev, ret = 0;
+-	unsigned long cur_time;
++	u64 cur_time;
+ 
+ 	lockdep_assert_held(&devfreq->lock);
+-	cur_time = jiffies;
++	cur_time = get_jiffies_64();
+ 
+ 	/* Immediately exit if previous_freq is not initialized yet. */
+ 	if (!devfreq->previous_freq)
+@@ -535,7 +537,7 @@ void devfreq_monitor_resume(struct devfreq *devfreq)
+ 			msecs_to_jiffies(devfreq->profile->polling_ms));
+ 
+ out_update:
+-	devfreq->last_stat_updated = jiffies;
++	devfreq->last_stat_updated = get_jiffies_64();
+ 	devfreq->stop_polling = false;
+ 
+ 	if (devfreq->profile->get_cur_freq &&
+@@ -820,7 +822,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 
+ 	devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
+ 			devfreq->profile->max_state,
+-			sizeof(unsigned long),
++			sizeof(*devfreq->time_in_state),
+ 			GFP_KERNEL);
+ 	if (!devfreq->time_in_state) {
+ 		mutex_unlock(&devfreq->lock);
+@@ -828,7 +830,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 		goto err_devfreq;
+ 	}
+ 
+-	devfreq->last_stat_updated = jiffies;
++	devfreq->last_stat_updated = get_jiffies_64();
+ 
+ 	srcu_init_notifier_head(&devfreq->transition_notifier_list);
+ 
+@@ -1589,8 +1591,8 @@ static ssize_t trans_stat_show(struct device *dev,
+ 		for (j = 0; j < max_state; j++)
+ 			len += sprintf(buf + len, "%10u",
+ 				devfreq->trans_table[(i * max_state) + j]);
+-		len += sprintf(buf + len, "%10u\n",
+-			jiffies_to_msecs(devfreq->time_in_state[i]));
++		len += sprintf(buf + len, "%10llu\n", (u64)
++			jiffies64_to_msecs(devfreq->time_in_state[i]));
+ 	}
+ 
+ 	len += sprintf(buf + len, "Total transition : %u\n",
+@@ -1614,6 +1616,81 @@ static struct attribute *devfreq_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(devfreq);
+ 
++/**
++ * devfreq_summary_show() - Show the summary of the devfreq devices
++ * @s:		seq_file instance to show the summary of devfreq devices
++ * @data:	not used
++ *
++ * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
++ * It helps that user can know the detailed information of the devfreq devices.
++ *
++ * Return 0 always because it shows the information without any data change.
++ */
++static int devfreq_summary_show(struct seq_file *s, void *data)
++{
++	struct devfreq *devfreq;
++	struct devfreq *p_devfreq = NULL;
++	unsigned long cur_freq, min_freq, max_freq;
++	unsigned int polling_ms;
++
++	seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
++			"dev_name",
++			"dev",
++			"parent_dev",
++			"governor",
++			"polling_ms",
++			"cur_freq_Hz",
++			"min_freq_Hz",
++			"max_freq_Hz");
++	seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
++			"------------------------------",
++			"----------",
++			"----------",
++			"---------------",
++			"----------",
++			"------------",
++			"------------",
++			"------------");
++
++	mutex_lock(&devfreq_list_lock);
++
++	list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
++#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
++		if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
++							DEVFREQ_NAME_LEN)) {
++			struct devfreq_passive_data *data = devfreq->data;
++
++			if (data)
++				p_devfreq = data->parent;
++		} else {
++			p_devfreq = NULL;
++		}
++#endif
++
++		mutex_lock(&devfreq->lock);
++		cur_freq = devfreq->previous_freq,
++		get_freq_range(devfreq, &min_freq, &max_freq);
++		polling_ms = devfreq->profile->polling_ms,
++		mutex_unlock(&devfreq->lock);
++
++		seq_printf(s,
++			"%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
++			dev_name(devfreq->dev.parent),
++			dev_name(&devfreq->dev),
++			p_devfreq ? dev_name(&p_devfreq->dev) : "null",
++			devfreq->governor_name,
++			polling_ms,
++			cur_freq,
++			min_freq,
++			max_freq);
++	}
++
++	mutex_unlock(&devfreq_list_lock);
++
++	return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
++
+ static int __init devfreq_init(void)
+ {
+ 	devfreq_class = class_create(THIS_MODULE, "devfreq");
+@@ -1630,6 +1707,11 @@ static int __init devfreq_init(void)
+ 	}
+ 	devfreq_class->dev_groups = devfreq_groups;
+ 
++	devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
++	debugfs_create_file("devfreq_summary", 0444,
++				devfreq_debugfs, NULL,
++				&devfreq_summary_fops);
++
+ 	return 0;
+ }
+ subsys_initcall(devfreq_init);
+diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
+index cef2cf5347ca..a53e0a6ffdfe 100644
+--- a/drivers/devfreq/event/Kconfig
++++ b/drivers/devfreq/event/Kconfig
+@@ -34,7 +34,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU
+ 
+ config DEVFREQ_EVENT_ROCKCHIP_DFI
+ 	tristate "ROCKCHIP DFI DEVFREQ event Driver"
+-	depends on ARCH_ROCKCHIP
++	depends on ARCH_ROCKCHIP || COMPILE_TEST
+ 	help
+ 	  This add the devfreq-event driver for Rockchip SoC. It provides DFI
+ 	  (DDR Monitor Module) driver to count ddr load.
+diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
+index 85c7a77bf3f0..055deea42c37 100644
+--- a/drivers/devfreq/event/exynos-ppmu.c
++++ b/drivers/devfreq/event/exynos-ppmu.c
+@@ -101,17 +101,22 @@ static struct __exynos_ppmu_events {
+ 	PPMU_EVENT(dmc1_1),
+ };
+ 
+-static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
++static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
+-		if (!strcmp(edev->desc->name, ppmu_events[i].name))
++		if (!strcmp(edev_name, ppmu_events[i].name))
+ 			return ppmu_events[i].id;
+ 
+ 	return -EINVAL;
+ }
+ 
++static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
++{
++	return __exynos_ppmu_find_ppmu_id(edev->desc->name);
++}
++
+ /*
+  * The devfreq-event ops structure for PPMU v1.1
+  */
+@@ -556,13 +561,11 @@ static int of_get_devfreq_events(struct device_node *np,
+ 			 * use default if not.
+ 			 */
+ 			if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
+-				struct devfreq_event_dev edev;
+ 				int id;
+ 				/* Not all registers take the same value for
+ 				 * read+write data count.
+ 				 */
+-				edev.desc = &desc[j];
+-				id = exynos_ppmu_find_ppmu_id(&edev);
++				id = __exynos_ppmu_find_ppmu_id(desc[j].name);
+ 
+ 				switch (id) {
+ 				case PPMU_PMNCNT0:
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index 03ac4b96117c..4b604086b1b3 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -179,7 +179,7 @@ __dma_device_satisfies_mask(struct dma_device *device,
+ 
+ static struct module *dma_chan_to_owner(struct dma_chan *chan)
+ {
+-	return chan->device->dev->driver->owner;
++	return chan->device->owner;
+ }
+ 
+ /**
+@@ -919,6 +919,8 @@ int dma_async_device_register(struct dma_device *device)
+ 		return -EIO;
+ 	}
+ 
++	device->owner = device->dev->driver->owner;
++
+ 	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
+ 		dev_err(device->dev,
+ 			"Device claims capability %s, but op is not defined\n",
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+index 89792083d62c..95cc0256b387 100644
+--- a/drivers/dma/fsl-qdma.c
++++ b/drivers/dma/fsl-qdma.c
+@@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
+ 
+ 	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ 
+-	if (!fsl_queue->comp_pool && !fsl_queue->comp_pool)
++	if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
+ 		return;
+ 
+ 	list_for_each_entry_safe(comp_temp, _comp_temp,
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index c27e206a764c..66f1b2ac5cde 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
+ 		return;
+ 	}
+ 	sdmac->desc = desc = to_sdma_desc(&vd->tx);
+-	/*
+-	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
+-	 * the desc allocated will never be freed in vchan_dma_desc_free_list
+-	 */
+-	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+-		list_del(&vd->node);
++
++	list_del(&vd->node);
+ 
+ 	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+ 	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+@@ -1071,7 +1067,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+ 
+ 	spin_lock_irqsave(&sdmac->vc.lock, flags);
+ 	vchan_get_all_descriptors(&sdmac->vc, &head);
+-	sdmac->desc = NULL;
+ 	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ 	vchan_dma_desc_free_list(&sdmac->vc, &head);
+ 	sdmac->context_loaded = false;
+@@ -1080,11 +1075,19 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+ static int sdma_disable_channel_async(struct dma_chan *chan)
+ {
+ 	struct sdma_channel *sdmac = to_sdma_chan(chan);
++	unsigned long flags;
++
++	spin_lock_irqsave(&sdmac->vc.lock, flags);
+ 
+ 	sdma_disable_channel(chan);
+ 
+-	if (sdmac->desc)
++	if (sdmac->desc) {
++		vchan_terminate_vdesc(&sdmac->desc->vd);
++		sdmac->desc = NULL;
+ 		schedule_work(&sdmac->terminate_worker);
++	}
++
++	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 756a3c951dc7..03a7f647f7b2 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2289,13 +2289,6 @@ static int edma_probe(struct platform_device *pdev)
+ 	if (!info)
+ 		return -ENODEV;
+ 
+-	pm_runtime_enable(dev);
+-	ret = pm_runtime_get_sync(dev);
+-	if (ret < 0) {
+-		dev_err(dev, "pm_runtime_get_sync() failed\n");
+-		return ret;
+-	}
+-
+ 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ 	if (ret)
+ 		return ret;
+@@ -2326,27 +2319,33 @@ static int edma_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, ecc);
+ 
++	pm_runtime_enable(dev);
++	ret = pm_runtime_get_sync(dev);
++	if (ret < 0) {
++		dev_err(dev, "pm_runtime_get_sync() failed\n");
++		pm_runtime_disable(dev);
++		return ret;
++	}
++
+ 	/* Get eDMA3 configuration from IP */
+ 	ret = edma_setup_from_hw(dev, info, ecc);
+ 	if (ret)
+-		return ret;
++		goto err_disable_pm;
+ 
+ 	/* Allocate memory based on the information we got from the IP */
+ 	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
+ 					sizeof(*ecc->slave_chans), GFP_KERNEL);
+-	if (!ecc->slave_chans)
+-		return -ENOMEM;
+ 
+ 	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
+ 				       sizeof(unsigned long), GFP_KERNEL);
+-	if (!ecc->slot_inuse)
+-		return -ENOMEM;
+ 
+ 	ecc->channels_mask = devm_kcalloc(dev,
+ 					   BITS_TO_LONGS(ecc->num_channels),
+ 					   sizeof(unsigned long), GFP_KERNEL);
+-	if (!ecc->channels_mask)
+-		return -ENOMEM;
++	if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
++		ret = -ENOMEM;
++		goto err_disable_pm;
++	}
+ 
+ 	/* Mark all channels available initially */
+ 	bitmap_fill(ecc->channels_mask, ecc->num_channels);
+@@ -2388,7 +2387,7 @@ static int edma_probe(struct platform_device *pdev)
+ 				       ecc);
+ 		if (ret) {
+ 			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
+-			return ret;
++			goto err_disable_pm;
+ 		}
+ 		ecc->ccint = irq;
+ 	}
+@@ -2404,7 +2403,7 @@ static int edma_probe(struct platform_device *pdev)
+ 				       ecc);
+ 		if (ret) {
+ 			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
+-			return ret;
++			goto err_disable_pm;
+ 		}
+ 		ecc->ccerrint = irq;
+ 	}
+@@ -2412,7 +2411,8 @@ static int edma_probe(struct platform_device *pdev)
+ 	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
+ 	if (ecc->dummy_slot < 0) {
+ 		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
+-		return ecc->dummy_slot;
++		ret = ecc->dummy_slot;
++		goto err_disable_pm;
+ 	}
+ 
+ 	queue_priority_mapping = info->queue_priority_mapping;
+@@ -2512,6 +2512,9 @@ static int edma_probe(struct platform_device *pdev)
+ 
+ err_reg1:
+ 	edma_free_slot(ecc, ecc->dummy_slot);
++err_disable_pm:
++	pm_runtime_put_sync(dev);
++	pm_runtime_disable(dev);
+ 	return ret;
+ }
+ 
+@@ -2542,6 +2545,8 @@ static int edma_remove(struct platform_device *pdev)
+ 	if (ecc->dma_memcpy)
+ 		dma_async_device_unregister(ecc->dma_memcpy);
+ 	edma_free_slot(ecc, ecc->dummy_slot);
++	pm_runtime_put_sync(dev);
++	pm_runtime_disable(dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
+index c0cc72a3b2be..3a3dcb14ed99 100644
+--- a/drivers/edac/sifive_edac.c
++++ b/drivers/edac/sifive_edac.c
+@@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev)
+ 	p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
+ 					    1, 1, NULL, 0,
+ 					    edac_device_alloc_index());
+-	if (IS_ERR(p->dci))
+-		return PTR_ERR(p->dci);
++	if (!p->dci)
++		return -ENOMEM;
+ 
+ 	p->dci->dev = &pdev->dev;
+ 	p->dci->mod_name = "Sifive ECC Manager";
+diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
+index 904fa09e6a6b..d99f5b0c8a09 100644
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -10,10 +10,12 @@
+ #define pr_fmt(fmt)	"efi: " fmt
+ 
+ #include <linux/efi.h>
++#include <linux/fwnode.h>
+ #include <linux/init.h>
+ #include <linux/memblock.h>
+ #include <linux/mm_types.h>
+ #include <linux/of.h>
++#include <linux/of_address.h>
+ #include <linux/of_fdt.h>
+ #include <linux/platform_device.h>
+ #include <linux/screen_info.h>
+@@ -276,15 +278,112 @@ void __init efi_init(void)
+ 		efi_memmap_unmap();
+ }
+ 
++static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
++{
++	u64 fb_base = screen_info.lfb_base;
++
++	if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
++		fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32;
++
++	return fb_base >= range->cpu_addr &&
++	       fb_base < (range->cpu_addr + range->size);
++}
++
++static struct device_node *find_pci_overlap_node(void)
++{
++	struct device_node *np;
++
++	for_each_node_by_type(np, "pci") {
++		struct of_pci_range_parser parser;
++		struct of_pci_range range;
++		int err;
++
++		err = of_pci_range_parser_init(&parser, np);
++		if (err) {
++			pr_warn("of_pci_range_parser_init() failed: %d\n", err);
++			continue;
++		}
++
++		for_each_of_pci_range(&parser, &range)
++			if (efifb_overlaps_pci_range(&range))
++				return np;
++	}
++	return NULL;
++}
++
++/*
++ * If the efifb framebuffer is backed by a PCI graphics controller, we have
++ * to ensure that this relation is expressed using a device link when
++ * running in DT mode, or the probe order may be reversed, resulting in a
++ * resource reservation conflict on the memory window that the efifb
++ * framebuffer steals from the PCIe host bridge.
++ */
++static int efifb_add_links(const struct fwnode_handle *fwnode,
++			   struct device *dev)
++{
++	struct device_node *sup_np;
++	struct device *sup_dev;
++
++	sup_np = find_pci_overlap_node();
++
++	/*
++	 * If there's no PCI graphics controller backing the efifb, we are
++	 * done here.
++	 */
++	if (!sup_np)
++		return 0;
++
++	sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
++	of_node_put(sup_np);
++
++	/*
++	 * Return -ENODEV if the PCI graphics controller device hasn't been
++	 * registered yet.  This ensures that efifb isn't allowed to probe
++	 * and this function is retried again when new devices are
++	 * registered.
++	 */
++	if (!sup_dev)
++		return -ENODEV;
++
++	/*
++	 * If this fails, retrying this function at a later point won't
++	 * change anything. So, don't return an error after this.
++	 */
++	if (!device_link_add(dev, sup_dev, 0))
++		dev_warn(dev, "device_link_add() failed\n");
++
++	put_device(sup_dev);
++
++	return 0;
++}
++
++static const struct fwnode_operations efifb_fwnode_ops = {
++	.add_links = efifb_add_links,
++};
++
++static struct fwnode_handle efifb_fwnode = {
++	.ops = &efifb_fwnode_ops,
++};
++
+ static int __init register_gop_device(void)
+ {
+-	void *pd;
++	struct platform_device *pd;
++	int err;
+ 
+ 	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ 		return 0;
+ 
+-	pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
+-					   &screen_info, sizeof(screen_info));
+-	return PTR_ERR_OR_ZERO(pd);
++	pd = platform_device_alloc("efi-framebuffer", 0);
++	if (!pd)
++		return -ENOMEM;
++
++	if (IS_ENABLED(CONFIG_PCI))
++		pd->dev.fwnode = &efifb_fwnode;
++
++	err = platform_device_add_data(pd, &screen_info, sizeof(screen_info));
++	if (err)
++		return err;
++
++	return platform_device_add(pd);
+ }
+ subsys_initcall(register_gop_device);
+diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
+index 08234e64993a..3224933f4c8f 100644
+--- a/drivers/gpio/gpio-grgpio.c
++++ b/drivers/gpio/gpio-grgpio.c
+@@ -253,17 +253,16 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
+ 	lirq->irq = irq;
+ 	uirq = &priv->uirqs[lirq->index];
+ 	if (uirq->refcnt == 0) {
++		spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ 		ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
+ 				  dev_name(priv->dev), priv);
+ 		if (ret) {
+ 			dev_err(priv->dev,
+ 				"Could not request underlying irq %d\n",
+ 				uirq->uirq);
+-
+-			spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+-
+ 			return ret;
+ 		}
++		spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ 	}
+ 	uirq->refcnt++;
+ 
+@@ -309,8 +308,11 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
+ 	if (index >= 0) {
+ 		uirq = &priv->uirqs[lirq->index];
+ 		uirq->refcnt--;
+-		if (uirq->refcnt == 0)
++		if (uirq->refcnt == 0) {
++			spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ 			free_irq(uirq->uirq, priv);
++			return;
++		}
+ 	}
+ 
+ 	spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index bcfbfded9ba3..175c6363cf61 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -2053,6 +2053,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
+ 				     parent_type);
+ 	chip_info(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
+ 		  irq, parent_hwirq);
++	irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
+ 	ret = irq_domain_alloc_irqs_parent(d, irq, 1, &parent_fwspec);
+ 	if (ret)
+ 		chip_err(gc,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+index 72232fccf61a..be6d0cfe41ae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -338,17 +338,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
+ 		path_size += le16_to_cpu(path->usSize);
+ 
+ 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
+-			uint8_t con_obj_id, con_obj_num, con_obj_type;
+-
+-			con_obj_id =
++			uint8_t con_obj_id =
+ 			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
+ 			    >> OBJECT_ID_SHIFT;
+-			con_obj_num =
+-			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
+-			    >> ENUM_ID_SHIFT;
+-			con_obj_type =
+-			    (le16_to_cpu(path->usConnObjectId) &
+-			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+ 
+ 			/* Skip TV/CV support */
+ 			if ((le16_to_cpu(path->usDeviceTag) ==
+@@ -373,14 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
+ 			router.ddc_valid = false;
+ 			router.cd_valid = false;
+ 			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
+-				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
+-
+-				grph_obj_id =
+-				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+-				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+-				grph_obj_num =
+-				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+-				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
++				uint8_t grph_obj_type=
+ 				grph_obj_type =
+ 				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+ 				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index c17505fba988..332b9c24a2cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3639,8 +3639,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ 	if (r)
+ 		return r;
+ 
+-	amdgpu_amdkfd_pre_reset(adev);
+-
+ 	/* Resume IP prior to SMC */
+ 	r = amdgpu_device_ip_reinit_early_sriov(adev);
+ 	if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+index 0d8767eb7a70..1c3a7d4bb65d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+@@ -269,7 +269,11 @@ flr_done:
+ 	}
+ 
+ 	/* Trigger recovery for world switch failure if no TDR */
+-	if (amdgpu_device_should_recover_gpu(adev))
++	if (amdgpu_device_should_recover_gpu(adev)
++		&& (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
++		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
++		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
++		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
+ 		amdgpu_device_gpu_recover(adev, NULL);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index 0ba66bef5746..de40bf12c4a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -701,6 +701,12 @@ static int nv_common_early_init(void *handle)
+ 		adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ 			AMD_PG_SUPPORT_VCN_DPG |
+ 			AMD_PG_SUPPORT_ATHUB;
++		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
++		 * as a consequence, the rev_id and external_rev_id are wrong.
++		 * workaround it by hardcoding rev_id to 0 (default value).
++		 */
++		if (amdgpu_sriov_vf(adev))
++			adev->rev_id = 0;
+ 		adev->external_rev_id = adev->rev_id + 0xa;
+ 		break;
+ 	default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 8e1640bc07af..04ea7cd69295 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1145,9 +1145,7 @@ static int soc15_common_early_init(void *handle)
+ 				AMD_CG_SUPPORT_SDMA_LS |
+ 				AMD_CG_SUPPORT_VCN_MGCG;
+ 
+-			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+-				AMD_PG_SUPPORT_VCN |
+-				AMD_PG_SUPPORT_VCN_DPG;
++			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+ 		} else if (adev->pdev->device == 0x15d8) {
+ 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ 				AMD_CG_SUPPORT_GFX_MGLS |
+@@ -1190,9 +1188,7 @@ static int soc15_common_early_init(void *handle)
+ 				AMD_CG_SUPPORT_SDMA_LS |
+ 				AMD_CG_SUPPORT_VCN_MGCG;
+ 
+-			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+-				AMD_PG_SUPPORT_VCN |
+-				AMD_PG_SUPPORT_VCN_DPG;
++			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+ 		}
+ 		break;
+ 	case CHIP_ARCTURUS:
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+index 839f186e1182..19e870c79896 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+@@ -52,6 +52,7 @@
+ 		uint32_t old_ = 0;	\
+ 		uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
+ 		uint32_t loop = adev->usec_timeout;		\
++		ret = 0;					\
+ 		while ((tmp_ & (mask)) != (expected_value)) {	\
+ 			if (old_ != tmp_) {			\
+ 				loop = adev->usec_timeout;	\
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+index 15c523027285..511712c2e382 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+@@ -93,7 +93,7 @@ void kfd_debugfs_init(void)
+ 			    kfd_debugfs_hqds_by_device, &kfd_debugfs_fops);
+ 	debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
+ 			    kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
+-	debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
++	debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
+ 			    NULL, &kfd_debugfs_hang_hws_fops);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 984c2f2b24b6..d128a8bbe19d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1225,16 +1225,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ 
+ 	list_add(&q->list, &qpd->queues_list);
+ 	qpd->queue_count++;
++
++	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
++		dqm->sdma_queue_count++;
++	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
++		dqm->xgmi_sdma_queue_count++;
++
+ 	if (q->properties.is_active) {
+ 		dqm->queue_count++;
+ 		retval = execute_queues_cpsch(dqm,
+ 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ 	}
+ 
+-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+-		dqm->sdma_queue_count++;
+-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
+-		dqm->xgmi_sdma_queue_count++;
+ 	/*
+ 	 * Unconditionally increment this counter, regardless of the queue's
+ 	 * type or whether the queue is active.
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index 0b401dfbe98a..34f483ac36ca 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -97,8 +97,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
+ 			(struct edid *) edid->raw_edid);
+ 
+ 	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
+-	if (sad_count < 0)
+-		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ 	if (sad_count <= 0)
+ 		return result;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
+deleted file mode 100644
+index 45a07eeffbb6..000000000000
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
++++ /dev/null
+@@ -1,43 +0,0 @@
+-/*
+- * Copyright 2017 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#ifndef _DCN_CALC_MATH_H_
+-#define _DCN_CALC_MATH_H_
+-
+-float dcn_bw_mod(const float arg1, const float arg2);
+-float dcn_bw_min2(const float arg1, const float arg2);
+-unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2);
+-float dcn_bw_max2(const float arg1, const float arg2);
+-float dcn_bw_floor2(const float arg, const float significance);
+-float dcn_bw_floor(const float arg);
+-float dcn_bw_ceil2(const float arg, const float significance);
+-float dcn_bw_ceil(const float arg);
+-float dcn_bw_max3(float v1, float v2, float v3);
+-float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5);
+-float dcn_bw_pow(float a, float exp);
+-float dcn_bw_log(float a, float b);
+-double dcn_bw_fabs(double a);
+-
+-#endif /* _DCN_CALC_MATH_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 9b2cb57bf2ba..c9a241fe46cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -1438,6 +1438,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)
+ 	struct dc_context *ctx = dc->ctx;
+ 	struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
+ 	bool res;
++	unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
+ 
+ 	/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
+ 	res = dm_pp_get_clock_levels_by_type_with_voltage(
+@@ -1449,17 +1450,28 @@ void dcn_bw_update_from_pplib(struct dc *dc)
+ 		res = verify_clock_values(&fclks);
+ 
+ 	if (res) {
+-		ASSERT(fclks.num_levels >= 3);
+-		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0;
+-		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+-				(fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0)
+-				* ddr4_dram_factor_single_Channel / 1000.0;
+-		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
+-				(fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0)
+-				* ddr4_dram_factor_single_Channel / 1000.0;
+-		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
+-				(fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0)
+-				* ddr4_dram_factor_single_Channel / 1000.0;
++		ASSERT(fclks.num_levels);
++
++		vmin0p65_idx = 0;
++		vmid0p72_idx = fclks.num_levels -
++			(fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
++		vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
++		vmax0p9_idx = fclks.num_levels - 1;
++
++		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
++			32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
++		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
++			dc->dcn_soc->number_of_channels *
++			(fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
++			* ddr4_dram_factor_single_Channel / 1000.0;
++		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
++			dc->dcn_soc->number_of_channels *
++			(fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
++			* ddr4_dram_factor_single_Channel / 1000.0;
++		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
++			dc->dcn_soc->number_of_channels *
++			(fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
++			* ddr4_dram_factor_single_Channel / 1000.0;
+ 	} else
+ 		BREAK_TO_DEBUGGER();
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+index 25d7b7c6681c..7dca2e6eb3bc 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+@@ -100,13 +100,13 @@ uint32_t dentist_get_did_from_divider(int divider)
+ }
+ 
+ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+-		struct dc_state *context)
++		struct dc_state *context, bool safe_to_lower)
+ {
+ 	int i;
+ 
+ 	clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
+ 	for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+-		int dpp_inst, dppclk_khz;
++		int dpp_inst, dppclk_khz, prev_dppclk_khz;
+ 
+ 		/* Loop index will match dpp->inst if resource exists,
+ 		 * and we want to avoid dependency on dpp object
+@@ -114,8 +114,12 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ 		dpp_inst = i;
+ 		dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+ 
+-		clk_mgr->dccg->funcs->update_dpp_dto(
+-				clk_mgr->dccg, dpp_inst, dppclk_khz);
++		prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
++
++		if (safe_to_lower || prev_dppclk_khz < dppclk_khz) {
++			clk_mgr->dccg->funcs->update_dpp_dto(
++							clk_mgr->dccg, dpp_inst, dppclk_khz);
++		}
+ 	}
+ }
+ 
+@@ -240,7 +244,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+ 		if (dpp_clock_lowered) {
+ 			// if clock is being lowered, increase DTO before lowering refclk
+-			dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
++			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ 			dcn20_update_clocks_update_dentist(clk_mgr);
+ 		} else {
+ 			// if clock is being raised, increase refclk before lowering DTO
+@@ -248,7 +252,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
+ 				dcn20_update_clocks_update_dentist(clk_mgr);
+ 			// always update dtos unless clock is lowered and not safe to lower
+ 			if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+-				dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
++				dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+index c9fd824f3c23..74ccd6c04134 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+@@ -34,7 +34,7 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
+ 			struct dc_state *context,
+ 			bool safe_to_lower);
+ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+-		struct dc_state *context);
++		struct dc_state *context, bool safe_to_lower);
+ 
+ void dcn2_init_clocks(struct clk_mgr *clk_mgr);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+index 35c55e54eac0..dbf063856846 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+@@ -164,16 +164,16 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	}
+ 
+ 	if (dpp_clock_lowered) {
+-		// if clock is being lowered, increase DTO before lowering refclk
+-		dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
++		// increase per DPP DTO before lowering global dppclk
++		dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ 		rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ 	} else {
+-		// if clock is being raised, increase refclk before lowering DTO
++		// increase global DPPCLK before lowering per DPP DTO
+ 		if (update_dppclk || update_dispclk)
+ 			rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ 		// always update dtos unless clock is lowered and not safe to lower
+ 		if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+-			dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
++			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ 	}
+ 
+ 	if (update_dispclk &&
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 32f31bf91915..8904a85186aa 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2396,12 +2396,7 @@ void dc_set_power_state(
+ 	enum dc_acpi_cm_power_state power_state)
+ {
+ 	struct kref refcount;
+-	struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
+-						GFP_KERNEL);
+-
+-	ASSERT(dml);
+-	if (!dml)
+-		return;
++	struct display_mode_lib *dml;
+ 
+ 	switch (power_state) {
+ 	case DC_ACPI_CM_POWER_STATE_D0:
+@@ -2423,6 +2418,12 @@ void dc_set_power_state(
+ 		 * clean state, and dc hw programming optimizations will not
+ 		 * cause any trouble.
+ 		 */
++		dml = kzalloc(sizeof(struct display_mode_lib),
++				GFP_KERNEL);
++
++		ASSERT(dml);
++		if (!dml)
++			return;
+ 
+ 		/* Preserve refcount */
+ 		refcount = dc->current_state->refcount;
+@@ -2436,10 +2437,10 @@ void dc_set_power_state(
+ 		dc->current_state->refcount = refcount;
+ 		dc->current_state->bw_ctx.dml = *dml;
+ 
++		kfree(dml);
++
+ 		break;
+ 	}
+-
+-	kfree(dml);
+ }
+ 
+ void dc_resume(struct dc *dc)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 4619f94f0ac7..70846ae7d854 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -968,8 +968,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
+ 			same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+ 
+ 		if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+-			sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
+-			reason != DETECT_REASON_HPDRX) {
++			sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ 			/*
+ 			 * TODO debug why Dell 2413 doesn't like
+ 			 *  two link trainings
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 0416a17b0897..320f4eeebf84 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -417,6 +417,8 @@ struct dc_debug_options {
+ 	bool cm_in_bypass;
+ #endif
+ 	int force_clock_mode;/*every mode change.*/
++
++	bool nv12_iflip_vm_wa;
+ };
+ 
+ struct dc_debug_data {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+index 3b613fb93ef8..0162d3ffe268 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+@@ -233,12 +233,13 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
+ 		struct dc_crtc_timing *timing)
+ {
+ 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+-	/* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */
+ 	int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
+ 			/ opp_cnt;
+-	int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf;
++	uint32_t memory_mask;
+ 	uint32_t data_fmt = 0;
+ 
++	ASSERT(opp_cnt == 2);
++
+ 	/* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic
+ 	 * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
+ 	 * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start
+@@ -246,9 +247,17 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
+ 	 *		MASTER_UPDATE_LOCK_DB_X, 160,
+ 	 *		MASTER_UPDATE_LOCK_DB_Y, 240);
+ 	 */
++
++	/* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192,
++	 * however, for ODM combine we can simplify by always using 4.
++	 * To make sure there's no overlap, each instance "reserves" 2 memories and
++	 * they are uniquely combined here.
++	 */
++	memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
++
+ 	if (REG(OPTC_MEMORY_CONFIG))
+ 		REG_SET(OPTC_MEMORY_CONFIG, 0,
+-			OPTC_MEM_SEL, memory_mask << (optc->inst * 4));
++			OPTC_MEM_SEL, memory_mask);
+ 
+ 	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ 		data_fmt = 1;
+@@ -257,7 +266,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
+ 
+ 	REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
+ 
+-	ASSERT(opp_cnt == 2);
+ 	REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
+ 			OPTC_NUM_OF_INPUT_SEGMENT, 1,
+ 			OPTC_SEG0_SRC_SEL, opp_id[0],
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+index 2f5a5867e674..1ddd6ae22155 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+@@ -164,6 +164,69 @@ static void hubp21_setup(
+ 
+ }
+ 
++void hubp21_set_viewport(
++	struct hubp *hubp,
++	const struct rect *viewport,
++	const struct rect *viewport_c)
++{
++	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
++	int patched_viewport_height = 0;
++	struct dc_debug_options *debug = &hubp->ctx->dc->debug;
++
++	REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0,
++		  PRI_VIEWPORT_WIDTH, viewport->width,
++		  PRI_VIEWPORT_HEIGHT, viewport->height);
++
++	REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0,
++		  PRI_VIEWPORT_X_START, viewport->x,
++		  PRI_VIEWPORT_Y_START, viewport->y);
++
++	/*for stereo*/
++	REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0,
++		  SEC_VIEWPORT_WIDTH, viewport->width,
++		  SEC_VIEWPORT_HEIGHT, viewport->height);
++
++	REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0,
++		  SEC_VIEWPORT_X_START, viewport->x,
++		  SEC_VIEWPORT_Y_START, viewport->y);
++
++	/*
++	 *	Work around for underflow issue with NV12 + rIOMMU translation
++	 *	+ immediate flip. This will cause hubp underflow, but will not
++	 *	be user visible since underflow is in blank region
++	 */
++	patched_viewport_height = viewport_c->height;
++	if (viewport_c->height != 0 && debug->nv12_iflip_vm_wa) {
++		int pte_row_height = 0;
++		int pte_rows = 0;
++
++		REG_GET(DCHUBP_REQ_SIZE_CONFIG,
++			PTE_ROW_HEIGHT_LINEAR, &pte_row_height);
++
++		pte_row_height = 1 << (pte_row_height + 3);
++		pte_rows = (viewport_c->height + pte_row_height - 1) / pte_row_height;
++		patched_viewport_height = pte_rows * pte_row_height + 3;
++	}
++
++
++	/* DC supports NV12 only at the moment */
++	REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0,
++		  PRI_VIEWPORT_WIDTH_C, viewport_c->width,
++		  PRI_VIEWPORT_HEIGHT_C, patched_viewport_height);
++
++	REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
++		  PRI_VIEWPORT_X_START_C, viewport_c->x,
++		  PRI_VIEWPORT_Y_START_C, viewport_c->y);
++
++	REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
++		  SEC_VIEWPORT_WIDTH_C, viewport_c->width,
++		  SEC_VIEWPORT_HEIGHT_C, patched_viewport_height);
++
++	REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
++		  SEC_VIEWPORT_X_START_C, viewport_c->x,
++		  SEC_VIEWPORT_Y_START_C, viewport_c->y);
++}
++
+ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
+ 		struct vm_system_aperture_param *apt)
+ {
+@@ -211,7 +274,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
+ 	.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
+ 	.set_blank = hubp1_set_blank,
+ 	.dcc_control = hubp1_dcc_control,
+-	.mem_program_viewport = min_set_viewport,
++	.mem_program_viewport = hubp21_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp1_cursor_set_position,
+ 	.hubp_clk_cntl = hubp1_clk_cntl,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index b29b2c99a564..83cda43a1b6b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -847,6 +847,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 		.scl_reset_length10 = true,
+ 		.sanity_checks = true,
+ 		.disable_48mhz_pwrdwn = false,
++		.nv12_iflip_vm_wa = true
+ };
+ 
+ static const struct dc_debug_options debug_defaults_diags = {
+@@ -1351,12 +1352,6 @@ struct display_stream_compressor *dcn21_dsc_create(
+ 
+ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+ {
+-	/*
+-	TODO: Fix this function to calcualte correct values.
+-	There are known issues with this function currently
+-	that will need to be investigated. Use hardcoded known good values for now.
+-
+-
+ 	struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
+ 	struct clk_limit_table *clk_table = &bw_params->clk_table;
+ 	int i;
+@@ -1371,11 +1366,10 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
+ 		dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ 		dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ 		dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+-		dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
++		dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+ 	}
+-	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i];
++	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
+ 	dcn2_1_soc.num_states = i;
+-	*/
+ }
+ 
+ /* Temporary Place holder until we can get them from fuse */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
+index b953b02a1512..723af0b2dda0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
+@@ -24,7 +24,7 @@
+  */
+ 
+ #include "dml_common_defs.h"
+-#include "../calcs/dcn_calc_math.h"
++#include "dcn_calc_math.h"
+ 
+ #include "dml_inline_defs.h"
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+index eca140da13d8..ded71ea82413 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+@@ -27,7 +27,7 @@
+ #define __DML_INLINE_DEFS_H__
+ 
+ #include "dml_common_defs.h"
+-#include "../calcs/dcn_calc_math.h"
++#include "dcn_calc_math.h"
+ #include "dml_logger.h"
+ 
+ static inline double dml_min(double a, double b)
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
+new file mode 100644
+index 000000000000..45a07eeffbb6
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
+@@ -0,0 +1,43 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef _DCN_CALC_MATH_H_
++#define _DCN_CALC_MATH_H_
++
++float dcn_bw_mod(const float arg1, const float arg2);
++float dcn_bw_min2(const float arg1, const float arg2);
++unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2);
++float dcn_bw_max2(const float arg1, const float arg2);
++float dcn_bw_floor2(const float arg, const float significance);
++float dcn_bw_floor(const float arg);
++float dcn_bw_ceil2(const float arg, const float significance);
++float dcn_bw_ceil(const float arg);
++float dcn_bw_max3(float v1, float v2, float v3);
++float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5);
++float dcn_bw_pow(float a, float exp);
++float dcn_bw_log(float a, float b);
++double dcn_bw_fabs(double a);
++
++#endif /* _DCN_CALC_MATH_H_ */
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index 5437b50e9f90..d9ea4ae690af 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -807,6 +807,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ 			2 * in_out_vrr->min_refresh_in_uhz)
+ 		in_out_vrr->btr.btr_enabled = false;
+ 
++	in_out_vrr->fixed.fixed_active = false;
+ 	in_out_vrr->btr.btr_active = false;
+ 	in_out_vrr->btr.inserted_duration_in_us = 0;
+ 	in_out_vrr->btr.frames_to_insert = 0;
+@@ -826,6 +827,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ 		in_out_vrr->adjust.v_total_max = stream->timing.v_total;
+ 	} else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
+ 			refresh_range >= MIN_REFRESH_RANGE_IN_US) {
++
+ 		in_out_vrr->adjust.v_total_min =
+ 			calc_v_total_from_refresh(stream,
+ 				in_out_vrr->max_refresh_in_uhz);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index 1115761982a7..fed3fc4bb57a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1026,12 +1026,15 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
+ 
+ 	clocks->num_levels = 0;
+ 	for (i = 0; i < pclk_vol_table->count; i++) {
+-		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
+-		clocks->data[i].latency_in_us = latency_required ?
+-						smu10_get_mem_latency(hwmgr,
+-						pclk_vol_table->entries[i].clk) :
+-						0;
+-		clocks->num_levels++;
++		if (pclk_vol_table->entries[i].clk) {
++			clocks->data[clocks->num_levels].clocks_in_khz =
++				pclk_vol_table->entries[i].clk * 10;
++			clocks->data[clocks->num_levels].latency_in_us = latency_required ?
++				smu10_get_mem_latency(hwmgr,
++						      pclk_vol_table->entries[i].clk) :
++				0;
++			clocks->num_levels++;
++		}
+ 	}
+ 
+ 	return 0;
+@@ -1077,9 +1080,11 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+ 
+ 	clocks->num_levels = 0;
+ 	for (i = 0; i < pclk_vol_table->count; i++) {
+-		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk  * 10;
+-		clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
+-		clocks->num_levels++;
++		if (pclk_vol_table->entries[i].clk) {
++			clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk  * 10;
++			clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
++			clocks->num_levels++;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index 895b73f23079..6d4a29e99ae2 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -114,6 +114,33 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
+ 	return NULL;
+ }
+ 
++static struct drm_display_mode *
++drm_connector_get_tiled_mode(struct drm_connector *connector)
++{
++	struct drm_display_mode *mode;
++
++	list_for_each_entry(mode, &connector->modes, head) {
++		if (mode->hdisplay == connector->tile_h_size &&
++		    mode->vdisplay == connector->tile_v_size)
++			return mode;
++	}
++	return NULL;
++}
++
++static struct drm_display_mode *
++drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
++{
++	struct drm_display_mode *mode;
++
++	list_for_each_entry(mode, &connector->modes, head) {
++		if (mode->hdisplay == connector->tile_h_size &&
++		    mode->vdisplay == connector->tile_v_size)
++			continue;
++		return mode;
++	}
++	return NULL;
++}
++
+ static struct drm_display_mode *
+ drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
+ {
+@@ -348,8 +375,15 @@ static bool drm_client_target_preferred(struct drm_connector **connectors,
+ 	struct drm_connector *connector;
+ 	u64 conn_configured = 0;
+ 	int tile_pass = 0;
++	int num_tiled_conns = 0;
+ 	int i;
+ 
++	for (i = 0; i < connector_count; i++) {
++		if (connectors[i]->has_tile &&
++		    connectors[i]->status == connector_status_connected)
++			num_tiled_conns++;
++	}
++
+ retry:
+ 	for (i = 0; i < connector_count; i++) {
+ 		connector = connectors[i];
+@@ -399,6 +433,28 @@ retry:
+ 			list_for_each_entry(modes[i], &connector->modes, head)
+ 				break;
+ 		}
++		/*
++		 * In the case of tiled mode, if not all tiles are present,
++		 * fall back to the first available non-tiled mode.
++		 * Once all tiles are present, try to find the tiled mode
++		 * for all of them; if no tiled mode is available due to
++		 * fbcon size limitations, use the first non-tiled mode only
++		 * for tile 0,0 and set no mode for all other tiles.
++		 */
++		if (connector->has_tile) {
++			if (num_tiled_conns <
++			    connector->num_h_tile * connector->num_v_tile ||
++			    (connector->tile_h_loc == 0 &&
++			     connector->tile_v_loc == 0 &&
++			     !drm_connector_get_tiled_mode(connector))) {
++				DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
++					      connector->base.id);
++				modes[i] = drm_connector_fallback_non_tiled_mode(connector);
++			} else {
++				modes[i] = drm_connector_get_tiled_mode(connector);
++			}
++		}
++
+ 		DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ 			  "none");
+ 		conn_configured |= BIT_ULL(i);
+@@ -515,6 +571,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
+ 	bool fallback = true, ret = true;
+ 	int num_connectors_enabled = 0;
+ 	int num_connectors_detected = 0;
++	int num_tiled_conns = 0;
+ 	struct drm_modeset_acquire_ctx ctx;
+ 
+ 	if (!drm_drv_uses_atomic_modeset(dev))
+@@ -532,6 +589,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
+ 	memcpy(save_enabled, enabled, count);
+ 	mask = GENMASK(count - 1, 0);
+ 	conn_configured = 0;
++	for (i = 0; i < count; i++) {
++		if (connectors[i]->has_tile &&
++		    connectors[i]->status == connector_status_connected)
++			num_tiled_conns++;
++	}
+ retry:
+ 	conn_seq = conn_configured;
+ 	for (i = 0; i < count; i++) {
+@@ -631,6 +693,16 @@ retry:
+ 				      connector->name);
+ 			modes[i] = &connector->state->crtc->mode;
+ 		}
++		/*
++		 * In the case of tiled modes, if not all tiles are present,
++		 * fall back to a non-tiled mode.
++		 */
++		if (connector->has_tile &&
++		    num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
++			DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
++				      connector->base.id);
++			modes[i] = drm_connector_fallback_non_tiled_mode(connector);
++		}
+ 		crtcs[i] = new_crtc;
+ 
+ 		DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
+diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
+index ca3c55c6b815..2ece2957da1a 100644
+--- a/drivers/gpu/drm/drm_debugfs_crc.c
++++ b/drivers/gpu/drm/drm_debugfs_crc.c
+@@ -140,8 +140,8 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
+ 	if (IS_ERR(source))
+ 		return PTR_ERR(source);
+ 
+-	if (source[len] == '\n')
+-		source[len] = '\0';
++	if (source[len - 1] == '\n')
++		source[len - 1] = '\0';
+ 
+ 	ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
+ 	if (ret)
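The debugfs CRC change above fixes an off-by-one in newline stripping: the buffer holds len copied bytes plus a NUL terminator, so the terminator sits at index len and the trailing '\n', if any, is at len - 1. A minimal sketch of the corrected pattern, using a hypothetical helper name rather than the driver code itself:

/* Trim a trailing newline from a NUL-terminated buffer of len copied
 * bytes. source[len] is always the terminator, so checking it can
 * never find the '\n'; the last real character is source[len - 1].
 */
static void trim_trailing_newline(char *source, size_t len)
{
	if (len && source[len - 1] == '\n')
		source[len - 1] = '\0';
}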
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 6cd90cb4b6b1..4a65ef8d8bff 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -517,8 +517,10 @@ drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
+ 			}
+ 
+ 			if (failed) {
+-				for (i = 0; i < r->num_transactions; i++)
++				for (i = 0; i < r->num_transactions; i++) {
++					tx = &r->transactions[i];
+ 					kfree(tx->bytes);
++				}
+ 				return -ENOMEM;
+ 			}
+ 
+diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
+index e34058c721be..16bff1be4b8a 100644
+--- a/drivers/gpu/drm/drm_mipi_dbi.c
++++ b/drivers/gpu/drm/drm_mipi_dbi.c
+@@ -367,9 +367,9 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
+ 	memset(dbidev->tx_buf, 0, len);
+ 
+ 	mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
+-			 (width >> 8) & 0xFF, (width - 1) & 0xFF);
++			 ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF);
+ 	mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
+-			 (height >> 8) & 0xFF, (height - 1) & 0xFF);
++			 ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF);
+ 	mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
+ 			     (u8 *)dbidev->tx_buf, len);
+ 
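The MIPI DBI change above derives the high byte of the column/page end address from (width - 1) and (height - 1). A small illustrative sketch (hypothetical helper, not the driver code) of splitting the inclusive end coordinate into the two bytes the DCS command expects:

/* The DCS set-address commands take the end coordinate as an inclusive
 * 16-bit big-endian value. For a 320-pixel-wide panel the end column is
 * 319 (0x013F), so the high byte must come from (width - 1), not width.
 */
static void dcs_end_coord(unsigned int width, u8 *hi, u8 *lo)
{
	unsigned int end = width - 1;

	*hi = (end >> 8) & 0xFF;
	*lo = end & 0xFF;
}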
+diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
+index 218f3bb15276..90237abee088 100644
+--- a/drivers/gpu/drm/gma500/framebuffer.c
++++ b/drivers/gpu/drm/gma500/framebuffer.c
+@@ -462,6 +462,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
+ 		container_of(helper, struct psb_fbdev, psb_fb_helper);
+ 	struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
+ 	struct drm_psb_private *dev_priv = dev->dev_private;
++	unsigned int fb_size;
+ 	int bytespp;
+ 
+ 	bytespp = sizes->surface_bpp / 8;
+@@ -471,8 +472,11 @@ static int psbfb_probe(struct drm_fb_helper *helper,
+ 	/* If the mode will not fit in 32bit then switch to 16bit to get
+ 	   a console on full resolution. The X mode setting server will
+ 	   allocate its own 32bit GEM framebuffer */
+-	if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
+-	                dev_priv->vram_stolen_size) {
++	fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
++		  sizes->surface_height;
++	fb_size = ALIGN(fb_size, PAGE_SIZE);
++
++	if (fb_size > dev_priv->vram_stolen_size) {
+                 sizes->surface_bpp = 16;
+                 sizes->surface_depth = 16;
+         }
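The gma500 change above sizes the fbdev buffer from the surface dimensions and rounds it to a full page before comparing it against stolen VRAM. A rough sketch of that footprint computation (the helper name is made up):

/* Stride is aligned to 64 bytes and the whole framebuffer is rounded up
 * to a page, since stolen memory is handed out in pages. If the result
 * exceeds the stolen VRAM, the probe falls back to 16bpp.
 */
static unsigned int fbdev_footprint(unsigned int width, unsigned int height,
				    unsigned int bytespp)
{
	unsigned int fb_size = ALIGN(width * bytespp, 64) * height;

	return ALIGN(fb_size, PAGE_SIZE);
}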
+diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
+index ec32e1c67335..43a015f33e97 100644
+--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
++++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
+@@ -372,14 +372,18 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
+ 	struct ingenic_drm *priv = drm_plane_get_priv(plane);
+ 	struct drm_plane_state *state = plane->state;
+ 	unsigned int width, height, cpp;
++	dma_addr_t addr;
+ 
+-	width = state->crtc->state->adjusted_mode.hdisplay;
+-	height = state->crtc->state->adjusted_mode.vdisplay;
+-	cpp = state->fb->format->cpp[plane->index];
++	if (state && state->fb) {
++		addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
++		width = state->crtc->state->adjusted_mode.hdisplay;
++		height = state->crtc->state->adjusted_mode.vdisplay;
++		cpp = state->fb->format->cpp[plane->index];
+ 
+-	priv->dma_hwdesc->addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
+-	priv->dma_hwdesc->cmd = width * height * cpp / 4;
+-	priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ;
++		priv->dma_hwdesc->addr = addr;
++		priv->dma_hwdesc->cmd = width * height * cpp / 4;
++		priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ;
++	}
+ }
+ 
+ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 3305a94fc930..c3f5111fd563 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -328,6 +328,7 @@ err_pm_runtime_put:
+ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
+ {
+ 	struct drm_device *drm = mtk_crtc->base.dev;
++	struct drm_crtc *crtc = &mtk_crtc->base;
+ 	int i;
+ 
+ 	DRM_DEBUG_DRIVER("%s\n", __func__);
+@@ -353,6 +354,13 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
+ 	mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+ 
+ 	pm_runtime_put(drm->dev);
++
++	if (crtc->state->event && !crtc->state->active) {
++		spin_lock_irq(&crtc->dev->event_lock);
++		drm_crtc_send_vblank_event(crtc, crtc->state->event);
++		crtc->state->event = NULL;
++		spin_unlock_irq(&crtc->dev->event_lock);
++	}
+ }
+ 
+ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
+@@ -633,6 +641,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ 	int pipe = priv->num_pipes;
+ 	int ret;
+ 	int i;
++	uint gamma_lut_size = 0;
+ 
+ 	if (!path)
+ 		return 0;
+@@ -683,6 +692,9 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ 		}
+ 
+ 		mtk_crtc->ddp_comp[i] = comp;
++
++		if (comp->funcs && comp->funcs->gamma_set)
++			gamma_lut_size = MTK_LUT_SIZE;
+ 	}
+ 
+ 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+@@ -703,8 +715,10 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ 				NULL, pipe);
+ 	if (ret < 0)
+ 		return ret;
+-	drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
+-	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
++
++	if (gamma_lut_size)
++		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
++	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, gamma_lut_size);
+ 	priv->num_pipes++;
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index b02e2042547f..7d9e63e20ded 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -753,11 +753,18 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ 		gpu->funcs->flush(gpu, gpu->rb[0]);
+ 		if (!a5xx_idle(gpu, gpu->rb[0]))
+ 			return -EINVAL;
+-	} else {
+-		/* Print a warning so if we die, we know why */
++	} else if (ret == -ENODEV) {
++		/*
++		 * This device does not use a zap shader (but print a warning
++		 * just in case someone got their DT wrong; hopefully they
++		 * have a debug UART to realize the error of their ways...
++		 * if you mess this up, you are about to crash horribly)
++		 */
+ 		dev_warn_once(gpu->dev->dev,
+ 			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ 		gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
++	} else {
++		return ret;
+ 	}
+ 
+ 	/* Last step - yield the ringbuffer */
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index dc8ec2c94301..686c34d706b0 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -537,12 +537,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
+ 		a6xx_flush(gpu, gpu->rb[0]);
+ 		if (!a6xx_idle(gpu, gpu->rb[0]))
+ 			return -EINVAL;
+-	} else {
+-		/* Print a warning so if we die, we know why */
++	} else if (ret == -ENODEV) {
++		/*
++		 * This device does not use a zap shader (but print a warning
++		 * just in case someone got their DT wrong; hopefully they
++		 * have a debug UART to realize the error of their ways...
++		 * if you mess this up, you are about to crash horribly)
++		 */
+ 		dev_warn_once(gpu->dev->dev,
+ 			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ 		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ 		ret = 0;
++	} else {
++		return ret;
+ 	}
+ 
+ out:
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
+index fa1439941596..0ad5d87b5a8e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
+@@ -635,10 +635,10 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ 	unsigned long c, i;
+ 	int ret = -ENOMEM;
+ 
+-	args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
++	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
+ 	if (!args.src)
+ 		goto out;
+-	args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL);
++	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
+ 	if (!args.dst)
+ 		goto out_free_src;
+ 
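The nouveau change above is the classic sizeof(pointer) versus sizeof(*pointer) slip in kcalloc(). A minimal illustration with a made-up struct, not the driver's own types:

/* src is a u64 *, so sizeof(args->src) is the pointer size while
 * sizeof(*args->src) is the element size kcalloc() actually needs.
 */
struct migrate_args_sketch {
	u64 *src;
	u64 *dst;
};

static int alloc_migrate_arrays(struct migrate_args_sketch *args,
				unsigned long max)
{
	args->src = kcalloc(max, sizeof(*args->src), GFP_KERNEL);
	if (!args->src)
		return -ENOMEM;

	args->dst = kcalloc(max, sizeof(*args->dst), GFP_KERNEL);
	if (!args->dst) {
		kfree(args->src);
		return -ENOMEM;
	}
	return 0;
}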
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
+index 9118df035b28..70bb6bb97af8 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -156,7 +156,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
+ 
+ 		fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ 		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+-		if (nouveau_fence_update(fence->channel, fctx))
++		if (nouveau_fence_update(chan, fctx))
+ 			ret = NVIF_NOTIFY_DROP;
+ 	}
+ 	spin_unlock_irqrestore(&fctx->lock, flags);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+index 77a0c6ad3cef..7ca0a2498532 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+@@ -63,14 +63,12 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+ {
+ 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+ 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+-	struct nouveau_mem *mem;
+ 	int ret;
+ 
+ 	if (drm->client.device.info.ram_size == 0)
+ 		return -ENOMEM;
+ 
+ 	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+-	mem = nouveau_mem(reg);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -103,11 +101,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+ {
+ 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+ 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+-	struct nouveau_mem *mem;
+ 	int ret;
+ 
+ 	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+-	mem = nouveau_mem(reg);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+index e85a08ecd9da..4cc186262d34 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
++++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+@@ -91,8 +91,8 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
+ 	}
+ 
+ 	refcount_set(&tags->refcount, 1);
++	*ptags = memory->tags = tags;
+ 	mutex_unlock(&fb->subdev.mutex);
+-	*ptags = tags;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+index bcf32d92ee5a..50e3539f33d2 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+@@ -74,6 +74,8 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
+ 
+ 	if (debug > subdev->debug)
+ 		return;
++	if (!mthd)
++		return;
+ 
+ 	for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
+ 		u32 base = chan->head * mthd->addr;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+index 500cb08dd608..b57ab5cea9a1 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+@@ -143,23 +143,24 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
+ 
+ 	nent = (fuc.size / sizeof(struct gk20a_fw_av));
+ 
+-	pack = vzalloc((sizeof(*pack) * max_classes) +
+-		       (sizeof(*init) * (nent + 1)));
++	pack = vzalloc((sizeof(*pack) * (max_classes + 1)) +
++		       (sizeof(*init) * (nent + max_classes + 1)));
+ 	if (!pack) {
+ 		ret = -ENOMEM;
+ 		goto end;
+ 	}
+ 
+-	init = (void *)(pack + max_classes);
++	init = (void *)(pack + max_classes + 1);
+ 
+-	for (i = 0; i < nent; i++) {
+-		struct gf100_gr_init *ent = &init[i];
++	for (i = 0; i < nent; i++, init++) {
+ 		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
+ 		u32 class = av->addr & 0xffff;
+ 		u32 addr = (av->addr & 0xffff0000) >> 14;
+ 
+ 		if (prevclass != class) {
+-			pack[classidx].init = ent;
++			if (prevclass) /* Add terminator to the method list. */
++				init++;
++			pack[classidx].init = init;
+ 			pack[classidx].type = class;
+ 			prevclass = class;
+ 			if (++classidx >= max_classes) {
+@@ -169,10 +170,10 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
+ 			}
+ 		}
+ 
+-		ent->addr = addr;
+-		ent->data = av->data;
+-		ent->count = 1;
+-		ent->pitch = 1;
++		init->addr = addr;
++		init->data = av->data;
++		init->count = 1;
++		init->pitch = 1;
+ 	}
+ 
+ 	*ppack = pack;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+index ca251560d3e0..bb4a4266897c 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+@@ -146,6 +146,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
+ 	struct nvkm_fault *fault = nvkm_fault(subdev);
+ 	int i;
+ 
++	nvkm_notify_fini(&fault->nrpfb);
+ 	nvkm_event_fini(&fault->event);
+ 
+ 	for (i = 0; i < fault->buffer_nr; i++) {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+index df8b919dcf09..ace6fefba428 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+@@ -108,6 +108,7 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
+ 	struct gm200_secboot *gsb;
+ 	struct nvkm_acr *acr;
+ 
++	*psb = NULL;
+ 	acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
+ 			   BIT(NVKM_SECBOOT_FALCON_PMU));
+ 	if (IS_ERR(acr))
+@@ -116,10 +117,8 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
+ 	acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);
+ 
+ 	gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
+-	if (!gsb) {
+-		psb = NULL;
++	if (!gsb)
+ 		return -ENOMEM;
+-	}
+ 	*psb = &gsb->base;
+ 
+ 	ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base);
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 5d487686d25c..72f69709f349 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2061,6 +2061,40 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
+ 	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ };
+ 
++static const struct drm_display_mode logicpd_type_28_mode = {
++	.clock = 9000,
++	.hdisplay = 480,
++	.hsync_start = 480 + 3,
++	.hsync_end = 480 + 3 + 42,
++	.htotal = 480 + 3 + 42 + 2,
++
++	.vdisplay = 272,
++	.vsync_start = 272 + 2,
++	.vsync_end = 272 + 2 + 11,
++	.vtotal = 272 + 2 + 11 + 3,
++	.vrefresh = 60,
++	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
++};
++
++static const struct panel_desc logicpd_type_28 = {
++	.modes = &logicpd_type_28_mode,
++	.num_modes = 1,
++	.bpc = 8,
++	.size = {
++		.width = 105,
++		.height = 67,
++	},
++	.delay = {
++		.prepare = 200,
++		.enable = 200,
++		.unprepare = 200,
++		.disable = 200,
++	},
++	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
++	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
++		     DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
++};
++
+ static const struct panel_desc mitsubishi_aa070mc01 = {
+ 	.modes = &mitsubishi_aa070mc01_mode,
+ 	.num_modes = 1,
+@@ -3287,6 +3321,9 @@ static const struct of_device_id platform_of_match[] = {
+ 	}, {
+ 		.compatible = "lg,lp129qe",
+ 		.data = &lg_lp129qe,
++	}, {
++		.compatible = "logicpd,type28",
++		.data = &logicpd_type_28,
+ 	}, {
+ 		.compatible = "mitsubishi,aa070mc01-ca1",
+ 		.data = &mitsubishi_aa070mc01,
+diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
+index 611cbe7aee69..bfc1631093e9 100644
+--- a/drivers/gpu/drm/qxl/qxl_kms.c
++++ b/drivers/gpu/drm/qxl/qxl_kms.c
+@@ -184,7 +184,7 @@ int qxl_device_init(struct qxl_device *qdev,
+ 
+ 	if (!qxl_check_device(qdev)) {
+ 		r = -ENODEV;
+-		goto surface_mapping_free;
++		goto rom_unmap;
+ 	}
+ 
+ 	r = qxl_bo_init(qdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index e81b01f8db90..0826efd9b5f5 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -127,6 +127,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
+ 
+ 	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+ 
++	msleep(10);
++
+ 	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+ 	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+ 		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+index 0d59f390de19..662d8075f411 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+@@ -542,6 +542,7 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
+ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
+ {
+ 	const struct device_node *np = rcdu->dev->of_node;
++	const char *vsps_prop_name = "renesas,vsps";
+ 	struct of_phandle_args args;
+ 	struct {
+ 		struct device_node *np;
+@@ -557,15 +558,21 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
+ 	 * entry contains a pointer to the VSP DT node and a bitmask of the
+ 	 * connected DU CRTCs.
+ 	 */
+-	cells = of_property_count_u32_elems(np, "vsps") / rcdu->num_crtcs - 1;
++	ret = of_property_count_u32_elems(np, vsps_prop_name);
++	if (ret < 0) {
++		/* Backward compatibility with old DTBs. */
++		vsps_prop_name = "vsps";
++		ret = of_property_count_u32_elems(np, vsps_prop_name);
++	}
++	cells = ret / rcdu->num_crtcs - 1;
+ 	if (cells > 1)
+ 		return -EINVAL;
+ 
+ 	for (i = 0; i < rcdu->num_crtcs; ++i) {
+ 		unsigned int j;
+ 
+-		ret = of_parse_phandle_with_fixed_args(np, "vsps", cells, i,
+-						       &args);
++		ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
++						       cells, i, &args);
+ 		if (ret < 0)
+ 			goto error;
+ 
+@@ -587,8 +594,8 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
+ 
+ 		/*
+ 		 * Store the VSP pointer and pipe index in the CRTC. If the
+-		 * second cell of the 'vsps' specifier isn't present, default
+-		 * to 0 to remain compatible with older DT bindings.
++		 * second cell of the 'renesas,vsps' specifier isn't present,
++		 * default to 0 to remain compatible with older DT bindings.
+ 		 */
+ 		rcdu->crtcs[i].vsp = &rcdu->vsps[j];
+ 		rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
+index 0b56ba005e25..eedae2a7b532 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
++++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
+@@ -38,6 +38,7 @@
+ #include <drm/drm_gem_shmem_helper.h>
+ #include <drm/drm_ioctl.h>
+ #include <drm/drm_probe_helper.h>
++#include <drm/virtgpu_drm.h>
+ 
+ #define DRIVER_NAME "virtio_gpu"
+ #define DRIVER_DESC "virtio GPU"
+@@ -312,13 +313,13 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
+ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
+ 					  uint32_t ctx_id,
+ 					  uint64_t offset, uint32_t level,
+-					  struct virtio_gpu_box *box,
++					  struct drm_virtgpu_3d_box *box,
+ 					  struct virtio_gpu_object_array *objs,
+ 					  struct virtio_gpu_fence *fence);
+ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+ 					uint32_t ctx_id,
+ 					uint64_t offset, uint32_t level,
+-					struct virtio_gpu_box *box,
++					struct drm_virtgpu_3d_box *box,
+ 					struct virtio_gpu_object_array *objs,
+ 					struct virtio_gpu_fence *fence);
+ void
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index 9af1ec62434f..205ec4abae2b 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -33,17 +33,6 @@
+ 
+ #include "virtgpu_drv.h"
+ 
+-static void convert_to_hw_box(struct virtio_gpu_box *dst,
+-			      const struct drm_virtgpu_3d_box *src)
+-{
+-	dst->x = cpu_to_le32(src->x);
+-	dst->y = cpu_to_le32(src->y);
+-	dst->z = cpu_to_le32(src->z);
+-	dst->w = cpu_to_le32(src->w);
+-	dst->h = cpu_to_le32(src->h);
+-	dst->d = cpu_to_le32(src->d);
+-}
+-
+ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
+ 				struct drm_file *file_priv)
+ {
+@@ -304,7 +293,6 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
+ 	struct virtio_gpu_fence *fence;
+ 	int ret;
+ 	u32 offset = args->offset;
+-	struct virtio_gpu_box box;
+ 
+ 	if (vgdev->has_virgl_3d == false)
+ 		return -ENOSYS;
+@@ -317,8 +305,6 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
+ 	if (ret != 0)
+ 		goto err_put_free;
+ 
+-	convert_to_hw_box(&box, &args->box);
+-
+ 	fence = virtio_gpu_fence_alloc(vgdev);
+ 	if (!fence) {
+ 		ret = -ENOMEM;
+@@ -326,7 +312,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
+ 	}
+ 	virtio_gpu_cmd_transfer_from_host_3d
+ 		(vgdev, vfpriv->ctx_id, offset, args->level,
+-		 &box, objs, fence);
++		 &args->box, objs, fence);
+ 	dma_fence_put(&fence->f);
+ 	return 0;
+ 
+@@ -345,7 +331,6 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_virtgpu_3d_transfer_to_host *args = data;
+ 	struct virtio_gpu_object_array *objs;
+ 	struct virtio_gpu_fence *fence;
+-	struct virtio_gpu_box box;
+ 	int ret;
+ 	u32 offset = args->offset;
+ 
+@@ -353,11 +338,10 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
+ 	if (objs == NULL)
+ 		return -ENOENT;
+ 
+-	convert_to_hw_box(&box, &args->box);
+ 	if (!vgdev->has_virgl_3d) {
+ 		virtio_gpu_cmd_transfer_to_host_2d
+ 			(vgdev, offset,
+-			 box.w, box.h, box.x, box.y,
++			 args->box.w, args->box.h, args->box.x, args->box.y,
+ 			 objs, NULL);
+ 	} else {
+ 		ret = virtio_gpu_array_lock_resv(objs);
+@@ -372,7 +356,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
+ 		virtio_gpu_cmd_transfer_to_host_3d
+ 			(vgdev,
+ 			 vfpriv ? vfpriv->ctx_id : 0, offset,
+-			 args->level, &box, objs, fence);
++			 args->level, &args->box, objs, fence);
+ 		dma_fence_put(&fence->f);
+ 	}
+ 	return 0;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
+index 74ad3bc3ebe8..9274c4063c70 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
+@@ -40,6 +40,17 @@
+ 			       + MAX_INLINE_CMD_SIZE		 \
+ 			       + MAX_INLINE_RESP_SIZE)
+ 
++static void convert_to_hw_box(struct virtio_gpu_box *dst,
++			      const struct drm_virtgpu_3d_box *src)
++{
++	dst->x = cpu_to_le32(src->x);
++	dst->y = cpu_to_le32(src->y);
++	dst->z = cpu_to_le32(src->z);
++	dst->w = cpu_to_le32(src->w);
++	dst->h = cpu_to_le32(src->h);
++	dst->d = cpu_to_le32(src->d);
++}
++
+ void virtio_gpu_ctrl_ack(struct virtqueue *vq)
+ {
+ 	struct drm_device *dev = vq->vdev->priv;
+@@ -965,7 +976,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
+ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+ 					uint32_t ctx_id,
+ 					uint64_t offset, uint32_t level,
+-					struct virtio_gpu_box *box,
++					struct drm_virtgpu_3d_box *box,
+ 					struct virtio_gpu_object_array *objs,
+ 					struct virtio_gpu_fence *fence)
+ {
+@@ -987,7 +998,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+ 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
+ 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+ 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+-	cmd_p->box = *box;
++	convert_to_hw_box(&cmd_p->box, box);
+ 	cmd_p->offset = cpu_to_le64(offset);
+ 	cmd_p->level = cpu_to_le32(level);
+ 
+@@ -997,7 +1008,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
+ 					  uint32_t ctx_id,
+ 					  uint64_t offset, uint32_t level,
+-					  struct virtio_gpu_box *box,
++					  struct drm_virtgpu_3d_box *box,
+ 					  struct virtio_gpu_object_array *objs,
+ 					  struct virtio_gpu_fence *fence)
+ {
+@@ -1013,7 +1024,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
+ 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
+ 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+ 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+-	cmd_p->box = *box;
++	convert_to_hw_box(&cmd_p->box, box);
+ 	cmd_p->offset = cpu_to_le64(offset);
+ 	cmd_p->level = cpu_to_le32(level);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+index 4ac55fc2bf97..44d858ce4ce7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+@@ -209,8 +209,10 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+ 
+ 	cres->hash.key = user_key | (res_type << 24);
+ 	ret = drm_ht_insert_item(&man->resources, &cres->hash);
+-	if (unlikely(ret != 0))
++	if (unlikely(ret != 0)) {
++		kfree(cres);
+ 		goto out_invalid_key;
++	}
+ 
+ 	cres->state = VMW_CMDBUF_RES_ADD;
+ 	cres->res = vmw_resource_reference(res);
+diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
+index a1898e11b04e..943bf944bf72 100644
+--- a/drivers/ide/cmd64x.c
++++ b/drivers/ide/cmd64x.c
+@@ -66,6 +66,9 @@ static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
+ 	struct ide_timing t;
+ 	u8 arttim = 0;
+ 
++	if (drive->dn >= ARRAY_SIZE(drwtim_regs))
++		return;
++
+ 	ide_timing_compute(drive, mode, &t, T, 0);
+ 
+ 	/*
+diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
+index ac6fc3fffa0d..458e72e034b0 100644
+--- a/drivers/ide/serverworks.c
++++ b/drivers/ide/serverworks.c
+@@ -115,6 +115,9 @@ static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
+ 	struct pci_dev *dev = to_pci_dev(hwif->dev);
+ 	const u8 pio = drive->pio_mode - XFER_PIO_0;
+ 
++	if (drive->dn >= ARRAY_SIZE(drive_pci))
++		return;
++
+ 	pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
+ 
+ 	if (svwks_csb_check(dev)) {
+@@ -141,6 +144,9 @@ static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
+ 
+ 	u8 ultra_enable	 = 0, ultra_timing = 0, dma_timing = 0;
+ 
++	if (drive->dn >= ARRAY_SIZE(drive_pci2))
++		return;
++
+ 	pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
+ 	pci_read_config_byte(dev, 0x54, &ultra_enable);
+ 
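The IDE fixes above guard per-drive register table lookups against out-of-range drive numbers. A small sketch of the same bounds-check pattern with a hypothetical table:

/* Reject drive numbers that would index past the end of a fixed
 * register table; ARRAY_SIZE() gives the element count of the array.
 */
static const u8 drive_regs[] = { 0x41, 0x40, 0x43, 0x42 };

static int drive_reg_for(unsigned int dn, u8 *reg)
{
	if (dn >= ARRAY_SIZE(drive_regs))
		return -EINVAL;

	*reg = drive_regs[dn];
	return 0;
}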
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+index b921dd9e108f..e45123d8d281 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+@@ -1506,8 +1506,11 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (!hw->enable_event)
+-		st_lsm6dsx_sensor_set_enable(sensor, false);
++	if (!hw->enable_event) {
++		err = st_lsm6dsx_sensor_set_enable(sensor, false);
++		if (err < 0)
++			return err;
++	}
+ 
+ 	*val = (s16)le16_to_cpu(data);
+ 
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index d535995711c3..e55f345799e4 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -51,9 +51,8 @@ struct ib_pkey_cache {
+ 
+ struct ib_update_work {
+ 	struct work_struct work;
+-	struct ib_device  *device;
+-	u8                 port_num;
+-	bool		   enforce_security;
++	struct ib_event event;
++	bool enforce_security;
+ };
+ 
+ union ib_gid zgid;
+@@ -130,7 +129,7 @@ static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
+ 	event.element.port_num	= port;
+ 	event.event		= IB_EVENT_GID_CHANGE;
+ 
+-	ib_dispatch_event(&event);
++	ib_dispatch_event_clients(&event);
+ }
+ 
+ static const char * const gid_type_str[] = {
+@@ -1381,9 +1380,8 @@ err:
+ 	return ret;
+ }
+ 
+-static void ib_cache_update(struct ib_device *device,
+-			    u8                port,
+-			    bool	      enforce_security)
++static int
++ib_cache_update(struct ib_device *device, u8 port, bool enforce_security)
+ {
+ 	struct ib_port_attr       *tprops = NULL;
+ 	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
+@@ -1391,11 +1389,11 @@ static void ib_cache_update(struct ib_device *device,
+ 	int                        ret;
+ 
+ 	if (!rdma_is_port_valid(device, port))
+-		return;
++		return -EINVAL;
+ 
+ 	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
+ 	if (!tprops)
+-		return;
++		return -ENOMEM;
+ 
+ 	ret = ib_query_port(device, port, tprops);
+ 	if (ret) {
+@@ -1413,8 +1411,10 @@ static void ib_cache_update(struct ib_device *device,
+ 	pkey_cache = kmalloc(struct_size(pkey_cache, table,
+ 					 tprops->pkey_tbl_len),
+ 			     GFP_KERNEL);
+-	if (!pkey_cache)
++	if (!pkey_cache) {
++		ret = -ENOMEM;
+ 		goto err;
++	}
+ 
+ 	pkey_cache->table_len = tprops->pkey_tbl_len;
+ 
+@@ -1446,50 +1446,84 @@ static void ib_cache_update(struct ib_device *device,
+ 
+ 	kfree(old_pkey_cache);
+ 	kfree(tprops);
+-	return;
++	return 0;
+ 
+ err:
+ 	kfree(pkey_cache);
+ 	kfree(tprops);
++	return ret;
++}
++
++static void ib_cache_event_task(struct work_struct *_work)
++{
++	struct ib_update_work *work =
++		container_of(_work, struct ib_update_work, work);
++	int ret;
++
++	/* Before distributing the cache update event, first sync
++	 * the cache.
++	 */
++	ret = ib_cache_update(work->event.device, work->event.element.port_num,
++			      work->enforce_security);
++
++	/* GID event is notified already for individual GID entries by
++	 * dispatch_gid_change_event(). Hence, notify for the rest of the
++	 * events.
++	 */
++	if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
++		ib_dispatch_event_clients(&work->event);
++
++	kfree(work);
+ }
+ 
+-static void ib_cache_task(struct work_struct *_work)
++static void ib_generic_event_task(struct work_struct *_work)
+ {
+ 	struct ib_update_work *work =
+ 		container_of(_work, struct ib_update_work, work);
+ 
+-	ib_cache_update(work->device,
+-			work->port_num,
+-			work->enforce_security);
++	ib_dispatch_event_clients(&work->event);
+ 	kfree(work);
+ }
+ 
+-static void ib_cache_event(struct ib_event_handler *handler,
+-			   struct ib_event *event)
++static bool is_cache_update_event(const struct ib_event *event)
++{
++	return (event->event == IB_EVENT_PORT_ERR    ||
++		event->event == IB_EVENT_PORT_ACTIVE ||
++		event->event == IB_EVENT_LID_CHANGE  ||
++		event->event == IB_EVENT_PKEY_CHANGE ||
++		event->event == IB_EVENT_CLIENT_REREGISTER ||
++		event->event == IB_EVENT_GID_CHANGE);
++}
++
++/**
++ * ib_dispatch_event - Dispatch an asynchronous event
++ * @event:Event to dispatch
++ *
++ * Low-level drivers must call ib_dispatch_event() to dispatch the
++ * event to all registered event handlers when an asynchronous event
++ * occurs.
++ */
++void ib_dispatch_event(const struct ib_event *event)
+ {
+ 	struct ib_update_work *work;
+ 
+-	if (event->event == IB_EVENT_PORT_ERR    ||
+-	    event->event == IB_EVENT_PORT_ACTIVE ||
+-	    event->event == IB_EVENT_LID_CHANGE  ||
+-	    event->event == IB_EVENT_PKEY_CHANGE ||
+-	    event->event == IB_EVENT_CLIENT_REREGISTER ||
+-	    event->event == IB_EVENT_GID_CHANGE) {
+-		work = kmalloc(sizeof *work, GFP_ATOMIC);
+-		if (work) {
+-			INIT_WORK(&work->work, ib_cache_task);
+-			work->device   = event->device;
+-			work->port_num = event->element.port_num;
+-			if (event->event == IB_EVENT_PKEY_CHANGE ||
+-			    event->event == IB_EVENT_GID_CHANGE)
+-				work->enforce_security = true;
+-			else
+-				work->enforce_security = false;
+-
+-			queue_work(ib_wq, &work->work);
+-		}
+-	}
++	work = kzalloc(sizeof(*work), GFP_ATOMIC);
++	if (!work)
++		return;
++
++	if (is_cache_update_event(event))
++		INIT_WORK(&work->work, ib_cache_event_task);
++	else
++		INIT_WORK(&work->work, ib_generic_event_task);
++
++	work->event = *event;
++	if (event->event == IB_EVENT_PKEY_CHANGE ||
++	    event->event == IB_EVENT_GID_CHANGE)
++		work->enforce_security = true;
++
++	queue_work(ib_wq, &work->work);
+ }
++EXPORT_SYMBOL(ib_dispatch_event);
+ 
+ int ib_cache_setup_one(struct ib_device *device)
+ {
+@@ -1505,9 +1539,6 @@ int ib_cache_setup_one(struct ib_device *device)
+ 	rdma_for_each_port (device, p)
+ 		ib_cache_update(device, p, true);
+ 
+-	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
+-			      device, ib_cache_event);
+-	ib_register_event_handler(&device->cache.event_handler);
+ 	return 0;
+ }
+ 
+@@ -1529,14 +1560,12 @@ void ib_cache_release_one(struct ib_device *device)
+ 
+ void ib_cache_cleanup_one(struct ib_device *device)
+ {
+-	/* The cleanup function unregisters the event handler,
+-	 * waits for all in-progress workqueue elements and cleans
+-	 * up the GID cache. This function should be called after
+-	 * the device was removed from the devices list and all
+-	 * clients were removed, so the cache exists but is
++	/* The cleanup function waits for all in-progress workqueue
++	 * elements and cleans up the GID cache. This function should be
++	 * called after the device was removed from the devices list and
++	 * all clients were removed, so the cache exists but is
+ 	 * non-functional and shouldn't be updated anymore.
+ 	 */
+-	ib_unregister_event_handler(&device->cache.event_handler);
+ 	flush_workqueue(ib_wq);
+ 	gid_table_cleanup_one(device);
+ 
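The cache.c rework above copies the event into a work item so the cache can be synced before handlers run, and so handlers execute in workqueue context rather than in the caller's (possibly atomic) context. A simplified sketch of that defer-to-workqueue pattern, with invented names:

/* Copy the event into a heap-allocated work item and let the handlers
 * run later in workqueue context, where sleeping locks are allowed.
 */
struct deferred_event {
	struct work_struct work;
	int code;
};

static void deferred_event_task(struct work_struct *_work)
{
	struct deferred_event *ev =
		container_of(_work, struct deferred_event, work);

	/* walk registered handlers here, then free the work item */
	kfree(ev);
}

static void dispatch_deferred_event(int code)
{
	struct deferred_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;

	INIT_WORK(&ev->work, deferred_event_task);
	ev->code = code;
	schedule_work(&ev->work);
}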
+diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
+index 3645e092e1c7..d657d90e618b 100644
+--- a/drivers/infiniband/core/core_priv.h
++++ b/drivers/infiniband/core/core_priv.h
+@@ -149,6 +149,7 @@ unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
+ int ib_cache_setup_one(struct ib_device *device);
+ void ib_cache_cleanup_one(struct ib_device *device);
+ void ib_cache_release_one(struct ib_device *device);
++void ib_dispatch_event_clients(struct ib_event *event);
+ 
+ #ifdef CONFIG_CGROUP_RDMA
+ void ib_device_register_rdmacg(struct ib_device *device);
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 84dd74fe13b8..c38b2b0b078a 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -588,6 +588,7 @@ struct ib_device *_ib_alloc_device(size_t size)
+ 
+ 	INIT_LIST_HEAD(&device->event_handler_list);
+ 	spin_lock_init(&device->event_handler_lock);
++	init_rwsem(&device->event_handler_rwsem);
+ 	mutex_init(&device->unregistration_lock);
+ 	/*
+ 	 * client_data needs to be alloc because we don't want our mark to be
+@@ -1931,17 +1932,15 @@ EXPORT_SYMBOL(ib_set_client_data);
+  *
+  * ib_register_event_handler() registers an event handler that will be
+  * called back when asynchronous IB events occur (as defined in
+- * chapter 11 of the InfiniBand Architecture Specification).  This
+- * callback may occur in interrupt context.
++ * chapter 11 of the InfiniBand Architecture Specification). This
++ * callback occurs in workqueue context.
+  */
+ void ib_register_event_handler(struct ib_event_handler *event_handler)
+ {
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
++	down_write(&event_handler->device->event_handler_rwsem);
+ 	list_add_tail(&event_handler->list,
+ 		      &event_handler->device->event_handler_list);
+-	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
++	up_write(&event_handler->device->event_handler_rwsem);
+ }
+ EXPORT_SYMBOL(ib_register_event_handler);
+ 
+@@ -1954,35 +1953,23 @@ EXPORT_SYMBOL(ib_register_event_handler);
+  */
+ void ib_unregister_event_handler(struct ib_event_handler *event_handler)
+ {
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
++	down_write(&event_handler->device->event_handler_rwsem);
+ 	list_del(&event_handler->list);
+-	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
++	up_write(&event_handler->device->event_handler_rwsem);
+ }
+ EXPORT_SYMBOL(ib_unregister_event_handler);
+ 
+-/**
+- * ib_dispatch_event - Dispatch an asynchronous event
+- * @event:Event to dispatch
+- *
+- * Low-level drivers must call ib_dispatch_event() to dispatch the
+- * event to all registered event handlers when an asynchronous event
+- * occurs.
+- */
+-void ib_dispatch_event(struct ib_event *event)
++void ib_dispatch_event_clients(struct ib_event *event)
+ {
+-	unsigned long flags;
+ 	struct ib_event_handler *handler;
+ 
+-	spin_lock_irqsave(&event->device->event_handler_lock, flags);
++	down_read(&event->device->event_handler_rwsem);
+ 
+ 	list_for_each_entry(handler, &event->device->event_handler_list, list)
+ 		handler->handler(handler, event);
+ 
+-	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
++	up_read(&event->device->event_handler_rwsem);
+ }
+-EXPORT_SYMBOL(ib_dispatch_event);
+ 
+ static int iw_query_port(struct ib_device *device,
+ 			   u8 port_num,
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 9b1fb84a3d45..10924f122072 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -1685,6 +1685,14 @@ static u64 access_sw_pio_drain(const struct cntr_entry *entry,
+ 	return dd->verbs_dev.n_piodrain;
+ }
+ 
++static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
++				   void *context, int vl, int mode, u64 data)
++{
++	struct hfi1_devdata *dd = context;
++
++	return dd->ctx0_seq_drop;
++}
++
+ static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
+ 			      void *context, int vl, int mode, u64 data)
+ {
+@@ -4106,6 +4114,7 @@ def_access_ibp_counter(rc_crwaits);
+ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
+ [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
+ [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
++[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
+ [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
+ [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
+ [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
+@@ -4249,6 +4258,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
+ 			    access_sw_cpu_intr),
+ [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
+ 			    access_sw_cpu_rcv_limit),
++[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
++			    access_sw_ctx0_seq_drop),
+ [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
+ 			    access_sw_vtx_wait),
+ [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
+diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
+index 4ca5ac8d7e9e..af0061936c66 100644
+--- a/drivers/infiniband/hw/hfi1/chip.h
++++ b/drivers/infiniband/hw/hfi1/chip.h
+@@ -859,6 +859,7 @@ static inline int idx_from_vl(int vl)
+ enum {
+ 	C_RCV_OVF = 0,
+ 	C_RX_LEN_ERR,
++	C_RX_SHORT_ERR,
+ 	C_RX_ICRC_ERR,
+ 	C_RX_EBP,
+ 	C_RX_TID_FULL,
+@@ -926,6 +927,7 @@ enum {
+ 	C_DC_PG_STS_TX_MBE_CNT,
+ 	C_SW_CPU_INTR,
+ 	C_SW_CPU_RCV_LIM,
++	C_SW_CTX0_SEQ_DROP,
+ 	C_SW_VTX_WAIT,
+ 	C_SW_PIO_WAIT,
+ 	C_SW_PIO_DRAIN,
+diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
+index ab3589d17aee..fb3ec9bff7a2 100644
+--- a/drivers/infiniband/hw/hfi1/chip_registers.h
++++ b/drivers/infiniband/hw/hfi1/chip_registers.h
+@@ -381,6 +381,7 @@
+ #define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
+ #define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
+ #define RCV_LENGTH_ERR_CNT 0
++#define RCV_SHORT_ERR_CNT 2
+ #define RCV_ICRC_ERR_CNT 6
+ #define RCV_EBP_CNT 9
+ #define RCV_BUF_OVFL_CNT 10
+diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
+index 01aa1f132f55..941b465244ab 100644
+--- a/drivers/infiniband/hw/hfi1/driver.c
++++ b/drivers/infiniband/hw/hfi1/driver.c
+@@ -734,6 +734,7 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
+ {
+ 	int ret;
+ 
++	packet->rcd->dd->ctx0_seq_drop++;
+ 	/* Set up for the next packet */
+ 	packet->rhqoff += packet->rsize;
+ 	if (packet->rhqoff >= packet->maxcnt)
+diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
+index 27dea5e1e201..9edfd3e56f61 100644
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1153,6 +1153,8 @@ struct hfi1_devdata {
+ 
+ 	char *boardname; /* human readable board info */
+ 
++	u64 ctx0_seq_drop;
++
+ 	/* reset value */
+ 	u64 z_int_counter;
+ 	u64 z_rcv_limit;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 9ad19170c3f9..95765560c1cf 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -1064,8 +1064,8 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
+ 		if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
+ 			if (page_addr & ((1 << mtt->page_shift) - 1)) {
+ 				dev_err(dev,
+-					"page_addr 0x%llx is not page_shift %d alignment!\n",
+-					page_addr, mtt->page_shift);
++					"page_addr is not page_shift %d alignment!\n",
++					mtt->page_shift);
+ 				ret = -EINVAL;
+ 				goto out;
+ 			}
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 997cbfe4b90c..760630c7aae7 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -815,6 +815,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 				struct ib_device_attr *props,
+ 				struct ib_udata *uhw)
+ {
++	size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
+ 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ 	struct mlx5_core_dev *mdev = dev->mdev;
+ 	int err = -ENOMEM;
+@@ -828,12 +829,12 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 	u64 max_tso;
+ 
+ 	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
+-	if (uhw->outlen && uhw->outlen < resp_len)
++	if (uhw_outlen && uhw_outlen < resp_len)
+ 		return -EINVAL;
+ 
+ 	resp.response_length = resp_len;
+ 
+-	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
++	if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
+ 		return -EINVAL;
+ 
+ 	memset(props, 0, sizeof(*props));
+@@ -897,7 +898,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 			props->raw_packet_caps |=
+ 				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
+ 
+-		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
++		if (field_avail(typeof(resp), tso_caps, uhw_outlen)) {
+ 			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
+ 			if (max_tso) {
+ 				resp.tso_caps.max_tso = 1 << max_tso;
+@@ -907,7 +908,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 			}
+ 		}
+ 
+-		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
++		if (field_avail(typeof(resp), rss_caps, uhw_outlen)) {
+ 			resp.rss_caps.rx_hash_function =
+ 						MLX5_RX_HASH_FUNC_TOEPLITZ;
+ 			resp.rss_caps.rx_hash_fields_mask =
+@@ -927,9 +928,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 			resp.response_length += sizeof(resp.rss_caps);
+ 		}
+ 	} else {
+-		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
++		if (field_avail(typeof(resp), tso_caps, uhw_outlen))
+ 			resp.response_length += sizeof(resp.tso_caps);
+-		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
++		if (field_avail(typeof(resp), rss_caps, uhw_outlen))
+ 			resp.response_length += sizeof(resp.rss_caps);
+ 	}
+ 
+@@ -1054,7 +1055,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 						MLX5_MAX_CQ_PERIOD;
+ 	}
+ 
+-	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
++	if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) {
+ 		resp.response_length += sizeof(resp.cqe_comp_caps);
+ 
+ 		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
+@@ -1072,7 +1073,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 		}
+ 	}
+ 
+-	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
++	if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) &&
+ 	    raw_support) {
+ 		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
+ 		    MLX5_CAP_GEN(mdev, qos)) {
+@@ -1091,7 +1092,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 	}
+ 
+ 	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
+-			uhw->outlen)) {
++			uhw_outlen)) {
+ 		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
+ 			resp.mlx5_ib_support_multi_pkt_send_wqes =
+ 				MLX5_IB_ALLOW_MPW;
+@@ -1104,7 +1105,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+ 	}
+ 
+-	if (field_avail(typeof(resp), flags, uhw->outlen)) {
++	if (field_avail(typeof(resp), flags, uhw_outlen)) {
+ 		resp.response_length += sizeof(resp.flags);
+ 
+ 		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
+@@ -1120,8 +1121,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
+ 	}
+ 
+-	if (field_avail(typeof(resp), sw_parsing_caps,
+-			uhw->outlen)) {
++	if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) {
+ 		resp.response_length += sizeof(resp.sw_parsing_caps);
+ 		if (MLX5_CAP_ETH(mdev, swp)) {
+ 			resp.sw_parsing_caps.sw_parsing_offloads |=
+@@ -1141,7 +1141,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 		}
+ 	}
+ 
+-	if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
++	if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) &&
+ 	    raw_support) {
+ 		resp.response_length += sizeof(resp.striding_rq_caps);
+ 		if (MLX5_CAP_GEN(mdev, striding_rq)) {
+@@ -1164,8 +1164,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 		}
+ 	}
+ 
+-	if (field_avail(typeof(resp), tunnel_offloads_caps,
+-			uhw->outlen)) {
++	if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) {
+ 		resp.response_length += sizeof(resp.tunnel_offloads_caps);
+ 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
+ 			resp.tunnel_offloads_caps |=
+@@ -1186,7 +1185,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
+ 	}
+ 
+-	if (uhw->outlen) {
++	if (uhw_outlen) {
+ 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
+ 
+ 		if (err)
+@@ -4771,7 +4770,6 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
+ 	struct ib_device_attr *dprops = NULL;
+ 	struct ib_port_attr *pprops = NULL;
+ 	int err = -ENOMEM;
+-	struct ib_udata uhw = {.inlen = 0, .outlen = 0};
+ 
+ 	pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
+ 	if (!pprops)
+@@ -4781,7 +4779,7 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
+ 	if (!dprops)
+ 		goto out;
+ 
+-	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
++	err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL);
+ 	if (err) {
+ 		mlx5_ib_warn(dev, "query_device failed %d\n", err);
+ 		goto out;
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
+index 95834206c80c..92de39c4a7c1 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
+@@ -408,7 +408,7 @@ struct rxe_dev {
+ 	struct list_head	pending_mmaps;
+ 
+ 	spinlock_t		mmap_offset_lock; /* guard mmap_offset */
+-	int			mmap_offset;
++	u64			mmap_offset;
+ 
+ 	atomic64_t		stats_counters[RXE_NUM_OF_COUNTERS];
+ 
+diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
+index d61731c0037d..b87b1e074f62 100644
+--- a/drivers/input/touchscreen/edt-ft5x06.c
++++ b/drivers/input/touchscreen/edt-ft5x06.c
+@@ -1050,6 +1050,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
+ {
+ 	const struct edt_i2c_chip_data *chip_data;
+ 	struct edt_ft5x06_ts_data *tsdata;
++	u8 buf[2] = { 0xfc, 0x00 };
+ 	struct input_dev *input;
+ 	unsigned long irq_flags;
+ 	int error;
+@@ -1140,6 +1141,12 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
+ 		return error;
+ 	}
+ 
++	/*
++	 * Dummy read access. EP0700MLP1 returns bogus data on the first
++	 * register read access and ignores writes.
++	 */
++	edt_ft5x06_ts_readwrite(tsdata->client, 2, buf, 2, buf);
++
+ 	edt_ft5x06_ts_set_regs(tsdata);
+ 	edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
+ 	edt_ft5x06_ts_get_parameters(tsdata);
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index bd25674ee4db..7a6c056b9b9c 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -230,11 +230,8 @@ static struct pci_dev *setup_aliases(struct device *dev)
+ 	 */
+ 	ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+ 	if (ivrs_alias != pci_dev_id(pdev) &&
+-	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+-		pci_add_dma_alias(pdev, ivrs_alias & 0xff);
+-		pci_info(pdev, "Added PCI DMA alias %02x.%d\n",
+-			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
+-	}
++	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
++		pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
+ 
+ 	clone_aliases(pdev);
+ 
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 483f7bc379fa..d7cbca8bf2cd 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -147,7 +147,7 @@ bool amd_iommu_dump;
+ bool amd_iommu_irq_remap __read_mostly;
+ 
+ int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
+-static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
++static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+ 
+ static bool amd_iommu_detected;
+ static bool __initdata amd_iommu_disabled;
+@@ -1523,8 +1523,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+ 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+ 		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+ 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+-		if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
+-			amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+ 		break;
+ 	case 0x11:
+ 	case 0x40:
+@@ -1534,8 +1532,15 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+ 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+ 		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
+ 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+-		if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
+-			amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
++		/*
++		 * Note: Since iommu_update_intcapxt() leverages
++		 * the IOMMU MMIO access to MSI capability block registers
++		 * for MSI address lo/hi/data, we need to check both
++		 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
++		 */
++		if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
++		    (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
++			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -1996,8 +2001,8 @@ static int iommu_init_intcapxt(struct amd_iommu *iommu)
+ 	struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
+ 
+ 	/**
+-	 * IntCapXT requires XTSup=1, which can be inferred
+-	 * amd_iommu_xt_mode.
++	 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
++	 * which can be inferred from amd_iommu_xt_mode.
+ 	 */
+ 	if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
+ 		return 0;
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index f52f59d5c6bd..798e1533a147 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -377,12 +377,12 @@
+ #define IOMMU_CAP_EFR     27
+ 
+ /* IOMMU Feature Reporting Field (for IVHD type 10h */
+-#define IOMMU_FEAT_XTSUP_SHIFT	0
+ #define IOMMU_FEAT_GASUP_SHIFT	6
+ 
+ /* IOMMU Extended Feature Register (EFR) */
+ #define IOMMU_EFR_XTSUP_SHIFT	2
+ #define IOMMU_EFR_GASUP_SHIFT	7
++#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT	46
+ 
+ #define MAX_DOMAIN_ID 65536
+ 
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index 2f7680faba49..6bd6a3f3f471 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -1643,7 +1643,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
+ 						 STRTAB_STE_1_EATS_TRANS));
+ 
+ 	arm_smmu_sync_ste_for_sid(smmu, sid);
+-	dst[0] = cpu_to_le64(val);
++	/* See comment in arm_smmu_write_ctx_desc() */
++	WRITE_ONCE(dst[0], cpu_to_le64(val));
+ 	arm_smmu_sync_ste_for_sid(smmu, sid);
+ 
+ 	/* It's likely that we'll want to use the new STE soon */
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 3acfa6a25fa2..fb66f717127d 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1354,7 +1354,6 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ 	struct qi_desc desc;
+ 
+ 	if (mask) {
+-		WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
+ 		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ 		desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+ 	} else
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 932267f49f9a..dfedbb04f647 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3406,7 +3406,8 @@ static unsigned long intel_alloc_iova(struct device *dev,
+ 	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
+ 				   IOVA_PFN(dma_mask), true);
+ 	if (unlikely(!iova_pfn)) {
+-		dev_err(dev, "Allocating %ld-page iova failed", nrpages);
++		dev_err_once(dev, "Allocating %ld-page iova failed\n",
++			     nrpages);
+ 		return 0;
+ 	}
+ 
+@@ -4319,12 +4320,16 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
+ {
+ 	struct acpi_dmar_reserved_memory *rmrr;
+ 	struct dmar_rmrr_unit *rmrru;
+-	int ret;
+ 
+ 	rmrr = (struct acpi_dmar_reserved_memory *)header;
+-	ret = arch_rmrr_sanity_check(rmrr);
+-	if (ret)
+-		return ret;
++	if (arch_rmrr_sanity_check(rmrr))
++		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
++			   "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
++			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
++			   rmrr->base_address, rmrr->end_address,
++			   dmi_get_system_info(DMI_BIOS_VENDOR),
++			   dmi_get_system_info(DMI_BIOS_VERSION),
++			   dmi_get_system_info(DMI_PRODUCT_VERSION));
+ 
+ 	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
+ 	if (!rmrru)
+diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
+index 040a445be300..e7cb0b8a7332 100644
+--- a/drivers/iommu/intel-pasid.c
++++ b/drivers/iommu/intel-pasid.c
+@@ -499,8 +499,16 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
+ 	}
+ 
+ #ifdef CONFIG_X86
+-	if (cpu_feature_enabled(X86_FEATURE_LA57))
+-		pasid_set_flpm(pte, 1);
++	/* Both CPU and IOMMU paging mode need to match */
++	if (cpu_feature_enabled(X86_FEATURE_LA57)) {
++		if (cap_5lp_support(iommu->cap)) {
++			pasid_set_flpm(pte, 1);
++		} else {
++			pr_err("VT-d has no 5-level paging support for CPU\n");
++			pasid_clear_entry(pte);
++			return -EINVAL;
++		}
++	}
+ #endif /* CONFIG_X86 */
+ 
+ 	pasid_set_domain_id(pte, did);
+diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
+index dca88f9fdf29..518d0b2d12af 100644
+--- a/drivers/iommu/intel-svm.c
++++ b/drivers/iommu/intel-svm.c
+@@ -317,7 +317,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
+ 		/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
+ 		ret = intel_pasid_alloc_id(svm,
+ 					   !!cap_caching_mode(iommu->cap),
+-					   pasid_max - 1, GFP_KERNEL);
++					   pasid_max, GFP_KERNEL);
+ 		if (ret < 0) {
+ 			kfree(svm);
+ 			kfree(sdev);
+@@ -654,11 +654,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
+ 			if (req->priv_data_present)
+ 				memcpy(&resp.qw2, req->priv_data,
+ 				       sizeof(req->priv_data));
++			resp.qw2 = 0;
++			resp.qw3 = 0;
++			qi_submit_sync(&resp, iommu);
+ 		}
+-		resp.qw2 = 0;
+-		resp.qw3 = 0;
+-		qi_submit_sync(&resp, iommu);
+-
+ 		head = (head + sizeof(*req)) & PRQ_RING_MASK;
+ 	}
+ 
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index c7a914b9bbbc..0e6a9536eca6 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex);
+ 
+ struct iova *alloc_iova_mem(void)
+ {
+-	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
++	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
+ }
+ EXPORT_SYMBOL(alloc_iova_mem);
+ 
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index e05673bcd52b..50f89056c16b 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -598,7 +598,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
+ 						   struct its_cmd_desc *desc)
+ {
+ 	its_encode_cmd(cmd, GITS_CMD_INVALL);
+-	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
++	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
+ 
+ 	its_fixup_cmd(cmd);
+ 
+@@ -1170,13 +1170,14 @@ static void its_send_vclear(struct its_device *dev, u32 event_id)
+  */
+ static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
+ {
+-	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+-	u32 event = its_get_event_id(d);
++	if (irqd_is_forwarded_to_vcpu(d)) {
++		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
++		u32 event = its_get_event_id(d);
+ 
+-	if (!irqd_is_forwarded_to_vcpu(d))
+-		return NULL;
++		return dev_event_to_vlpi_map(its_dev, event);
++	}
+ 
+-	return dev_event_to_vlpi_map(its_dev, event);
++	return NULL;
+ }
+ 
+ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index d6218012097b..3f5baa5043db 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -1821,6 +1821,7 @@ static struct
+ 	struct redist_region *redist_regs;
+ 	u32 nr_redist_regions;
+ 	bool single_redist;
++	int enabled_rdists;
+ 	u32 maint_irq;
+ 	int maint_irq_mode;
+ 	phys_addr_t vcpu_base;
+@@ -1915,8 +1916,10 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
+ 	 * If GICC is enabled and has valid gicr base address, then it means
+ 	 * GICR base is presented via GICC
+ 	 */
+-	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
++	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
++		acpi_data.enabled_rdists++;
+ 		return 0;
++	}
+ 
+ 	/*
+ 	 * It's perfectly valid firmware can pass disabled GICC entry, driver
+@@ -1946,8 +1949,10 @@ static int __init gic_acpi_count_gicr_regions(void)
+ 
+ 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ 				      gic_acpi_match_gicc, 0);
+-	if (count > 0)
++	if (count > 0) {
+ 		acpi_data.single_redist = true;
++		count = acpi_data.enabled_rdists;
++	}
+ 
+ 	return count;
+ }
+diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
+index 3f09f658e8e2..6b566bba263b 100644
+--- a/drivers/irqchip/irq-mbigen.c
++++ b/drivers/irqchip/irq-mbigen.c
+@@ -374,6 +374,7 @@ static struct platform_driver mbigen_platform_driver = {
+ 		.name		= "Hisilicon MBIGEN-V2",
+ 		.of_match_table	= mbigen_of_match,
+ 		.acpi_match_table = ACPI_PTR(mbigen_acpi_match),
++		.suppress_bind_attrs = true,
+ 	},
+ 	.probe			= mbigen_device_probe,
+ };
+diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
+index 4afc317901a8..66cdc003b8f4 100644
+--- a/drivers/leds/leds-pca963x.c
++++ b/drivers/leds/leds-pca963x.c
+@@ -40,6 +40,8 @@
+ #define PCA963X_LED_PWM		0x2	/* Controlled through PWM */
+ #define PCA963X_LED_GRP_PWM	0x3	/* Controlled through PWM/GRPPWM */
+ 
++#define PCA963X_MODE2_OUTDRV	0x04	/* Open-drain or totem pole */
++#define PCA963X_MODE2_INVRT	0x10	/* Normal or inverted direction */
+ #define PCA963X_MODE2_DMBLNK	0x20	/* Enable blinking */
+ 
+ #define PCA963X_MODE1		0x00
+@@ -438,12 +440,12 @@ static int pca963x_probe(struct i2c_client *client,
+ 						    PCA963X_MODE2);
+ 		/* Configure output: open-drain or totem pole (push-pull) */
+ 		if (pdata->outdrv == PCA963X_OPEN_DRAIN)
+-			mode2 |= 0x01;
++			mode2 &= ~PCA963X_MODE2_OUTDRV;
+ 		else
+-			mode2 |= 0x05;
++			mode2 |= PCA963X_MODE2_OUTDRV;
+ 		/* Configure direction: normal or inverted */
+ 		if (pdata->dir == PCA963X_INVERTED)
+-			mode2 |= 0x10;
++			mode2 |= PCA963X_MODE2_INVRT;
+ 		i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_MODE2,
+ 					  mode2);
+ 	}
+diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
+index c71365e7c1fa..a50dcfda656f 100644
+--- a/drivers/md/bcache/bset.h
++++ b/drivers/md/bcache/bset.h
+@@ -397,7 +397,8 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
+ 
+ /* Bkey utility code */
+ 
+-#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys)
++#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, \
++					 (unsigned int)(i)->keys)
+ 
+ static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
+ {
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 33ddc5269e8d..6730820780b0 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -422,7 +422,8 @@ err:
+ static void btree_flush_write(struct cache_set *c)
+ {
+ 	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
+-	unsigned int i, nr, ref_nr;
++	unsigned int i, nr;
++	int ref_nr;
+ 	atomic_t *fifo_front_p, *now_fifo_front_p;
+ 	size_t mask;
+ 
+diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
+index ba1c93791d8d..503aafe188dc 100644
+--- a/drivers/md/bcache/stats.c
++++ b/drivers/md/bcache/stats.c
+@@ -109,9 +109,13 @@ int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
+ 
+ void bch_cache_accounting_clear(struct cache_accounting *acc)
+ {
+-	memset(&acc->total.cache_hits,
+-	       0,
+-	       sizeof(struct cache_stats));
++	acc->total.cache_hits = 0;
++	acc->total.cache_misses = 0;
++	acc->total.cache_bypass_hits = 0;
++	acc->total.cache_bypass_misses = 0;
++	acc->total.cache_readaheads = 0;
++	acc->total.cache_miss_collisions = 0;
++	acc->total.sectors_bypassed = 0;
+ }
+ 
+ void bch_cache_accounting_destroy(struct cache_accounting *acc)
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 77e9869345e7..3b3724285d90 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1275,6 +1275,9 @@ static void cached_dev_free(struct closure *cl)
+ 
+ 	mutex_unlock(&bch_register_lock);
+ 
++	if (dc->sb_bio.bi_inline_vecs[0].bv_page)
++		put_page(bio_first_page_all(&dc->sb_bio));
++
+ 	if (!IS_ERR_OR_NULL(dc->bdev))
+ 		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ 
+@@ -2372,29 +2375,35 @@ static bool bch_is_open(struct block_device *bdev)
+ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ 			       const char *buffer, size_t size)
+ {
+-	ssize_t ret = -EINVAL;
+-	const char *err = "cannot allocate memory";
++	const char *err;
+ 	char *path = NULL;
+-	struct cache_sb *sb = NULL;
++	struct cache_sb *sb;
+ 	struct block_device *bdev = NULL;
+-	struct page *sb_page = NULL;
++	struct page *sb_page;
++	ssize_t ret;
+ 
++	ret = -EBUSY;
++	err = "failed to reference bcache module";
+ 	if (!try_module_get(THIS_MODULE))
+-		return -EBUSY;
++		goto out;
+ 
+ 	/* For latest state of bcache_is_reboot */
+ 	smp_mb();
++	err = "bcache is in reboot";
+ 	if (bcache_is_reboot)
+-		return -EBUSY;
++		goto out_module_put;
+ 
++	ret = -ENOMEM;
++	err = "cannot allocate memory";
+ 	path = kstrndup(buffer, size, GFP_KERNEL);
+ 	if (!path)
+-		goto err;
++		goto out_module_put;
+ 
+ 	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
+ 	if (!sb)
+-		goto err;
++		goto out_free_path;
+ 
++	ret = -EINVAL;
+ 	err = "failed to open device";
+ 	bdev = blkdev_get_by_path(strim(path),
+ 				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+@@ -2411,57 +2420,69 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ 			if (!IS_ERR(bdev))
+ 				bdput(bdev);
+ 			if (attr == &ksysfs_register_quiet)
+-				goto quiet_out;
++				goto done;
+ 		}
+-		goto err;
++		goto out_free_sb;
+ 	}
+ 
+ 	err = "failed to set blocksize";
+ 	if (set_blocksize(bdev, 4096))
+-		goto err_close;
++		goto out_blkdev_put;
+ 
+ 	err = read_super(sb, bdev, &sb_page);
+ 	if (err)
+-		goto err_close;
++		goto out_blkdev_put;
+ 
+ 	err = "failed to register device";
+ 	if (SB_IS_BDEV(sb)) {
+ 		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ 
+ 		if (!dc)
+-			goto err_close;
++			goto out_put_sb_page;
+ 
+ 		mutex_lock(&bch_register_lock);
+ 		ret = register_bdev(sb, sb_page, bdev, dc);
+ 		mutex_unlock(&bch_register_lock);
+ 		/* blkdev_put() will be called in cached_dev_free() */
+-		if (ret < 0)
+-			goto err;
++		if (ret < 0) {
++			bdev = NULL;
++			goto out_put_sb_page;
++		}
+ 	} else {
+ 		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ 
+ 		if (!ca)
+-			goto err_close;
++			goto out_put_sb_page;
+ 
+ 		/* blkdev_put() will be called in bch_cache_release() */
+-		if (register_cache(sb, sb_page, bdev, ca) != 0)
+-			goto err;
++		if (register_cache(sb, sb_page, bdev, ca) != 0) {
++			bdev = NULL;
++			goto out_put_sb_page;
++		}
+ 	}
+-quiet_out:
+-	ret = size;
+-out:
+-	if (sb_page)
+-		put_page(sb_page);
++
++	put_page(sb_page);
++done:
++	kfree(sb);
++	kfree(path);
++	module_put(THIS_MODULE);
++	return size;
++
++out_put_sb_page:
++	put_page(sb_page);
++out_blkdev_put:
++	if (bdev)
++		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
++out_free_sb:
+ 	kfree(sb);
++out_free_path:
+ 	kfree(path);
++	path = NULL;
++out_module_put:
+ 	module_put(THIS_MODULE);
++out:
++	pr_info("error %s: %s", path?path:"", err);
+ 	return ret;
+-
+-err_close:
+-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+-err:
+-	pr_info("error %s: %s", path, err);
+-	goto out;
+ }
+ 
+ 
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index c412eaa975fc..9a18bef0a5ff 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -129,7 +129,9 @@ struct raid_dev {
+ 				  CTR_FLAG_RAID10_COPIES | \
+ 				  CTR_FLAG_RAID10_FORMAT | \
+ 				  CTR_FLAG_DELTA_DISKS | \
+-				  CTR_FLAG_DATA_OFFSET)
++				  CTR_FLAG_DATA_OFFSET | \
++				  CTR_FLAG_JOURNAL_DEV | \
++				  CTR_FLAG_JOURNAL_MODE)
+ 
+ /* Valid options definitions per raid level... */
+ 
+@@ -3001,7 +3003,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 		{ 1, 254, "Cannot understand number of raid devices parameters" }
+ 	};
+ 
+-	/* Must have <raid_type> */
+ 	arg = dm_shift_arg(&as);
+ 	if (!arg) {
+ 		ti->error = "No arguments";
+@@ -3508,8 +3509,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
+ 	unsigned long recovery;
+ 	unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
+ 	unsigned int sz = 0;
+-	unsigned int rebuild_disks;
+-	unsigned int write_mostly_params = 0;
++	unsigned int rebuild_writemostly_count = 0;
+ 	sector_t progress, resync_max_sectors, resync_mismatches;
+ 	enum sync_state state;
+ 	struct raid_type *rt;
+@@ -3593,18 +3593,20 @@ static void raid_status(struct dm_target *ti, status_type_t type,
+ 	case STATUSTYPE_TABLE:
+ 		/* Report the table line string you would use to construct this raid set */
+ 
+-		/* Calculate raid parameter count */
+-		for (i = 0; i < rs->raid_disks; i++)
+-			if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+-				write_mostly_params += 2;
+-		rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks));
+-		raid_param_cnt += rebuild_disks * 2 +
+-				  write_mostly_params +
++		/*
++		 * Count any rebuild or writemostly argument pairs and subtract the
++		 * hweight count being added below of any rebuild and writemostly ctr flags.
++		 */
++		for (i = 0; i < rs->raid_disks; i++) {
++			rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) +
++						     (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
++		}
++		rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) +
++					     (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ? 2 : 0);
++		/* Calculate raid parameter count based on ^ rebuild/writemostly argument counts and ctr flags set. */
++		raid_param_cnt += rebuild_writemostly_count +
+ 				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
+-				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2 +
+-				  (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? 2 : 0) +
+-				  (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags) ? 2 : 0);
+-
++				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
+ 		/* Emit table line */
+ 		/* This has to be in the documented order for userspace! */
+ 		DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
+@@ -3612,11 +3614,10 @@ static void raid_status(struct dm_target *ti, status_type_t type,
+ 			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
+ 		if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
+ 			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
+-		if (rebuild_disks)
++		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags))
+ 			for (i = 0; i < rs->raid_disks; i++)
+-				if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks))
+-					DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
+-							 rs->dev[i].rdev.raid_disk);
++				if (test_bit(i, (void *) rs->rebuild_disks))
++					DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD), i);
+ 		if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
+ 			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
+ 					  mddev->bitmap_info.daemon_sleep);
+@@ -3626,7 +3627,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
+ 		if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
+ 			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
+ 					 mddev->sync_speed_max);
+-		if (write_mostly_params)
++		if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags))
+ 			for (i = 0; i < rs->raid_disks; i++)
+ 				if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ 					DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
+@@ -4029,7 +4030,7 @@ static void raid_resume(struct dm_target *ti)
+ 
+ static struct target_type raid_target = {
+ 	.name = "raid",
+-	.version = {1, 15, 0},
++	.version = {1, 15, 1},
+ 	.module = THIS_MODULE,
+ 	.ctr = raid_ctr,
+ 	.dtr = raid_dtr,
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index a2bb2622cdbd..4fb6e89c8786 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -231,6 +231,7 @@ struct pool {
+ 	struct dm_target *ti;	/* Only set if a pool target is bound */
+ 
+ 	struct mapped_device *pool_md;
++	struct block_device *data_dev;
+ 	struct block_device *md_dev;
+ 	struct dm_pool_metadata *pmd;
+ 
+@@ -2933,6 +2934,7 @@ static struct kmem_cache *_new_mapping_cache;
+ 
+ static struct pool *pool_create(struct mapped_device *pool_md,
+ 				struct block_device *metadata_dev,
++				struct block_device *data_dev,
+ 				unsigned long block_size,
+ 				int read_only, char **error)
+ {
+@@ -3040,6 +3042,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
+ 	pool->last_commit_jiffies = jiffies;
+ 	pool->pool_md = pool_md;
+ 	pool->md_dev = metadata_dev;
++	pool->data_dev = data_dev;
+ 	__pool_table_insert(pool);
+ 
+ 	return pool;
+@@ -3081,6 +3084,7 @@ static void __pool_dec(struct pool *pool)
+ 
+ static struct pool *__pool_find(struct mapped_device *pool_md,
+ 				struct block_device *metadata_dev,
++				struct block_device *data_dev,
+ 				unsigned long block_size, int read_only,
+ 				char **error, int *created)
+ {
+@@ -3091,19 +3095,23 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
+ 			*error = "metadata device already in use by a pool";
+ 			return ERR_PTR(-EBUSY);
+ 		}
++		if (pool->data_dev != data_dev) {
++			*error = "data device already in use by a pool";
++			return ERR_PTR(-EBUSY);
++		}
+ 		__pool_inc(pool);
+ 
+ 	} else {
+ 		pool = __pool_table_lookup(pool_md);
+ 		if (pool) {
+-			if (pool->md_dev != metadata_dev) {
++			if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) {
+ 				*error = "different pool cannot replace a pool";
+ 				return ERR_PTR(-EINVAL);
+ 			}
+ 			__pool_inc(pool);
+ 
+ 		} else {
+-			pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
++			pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error);
+ 			*created = 1;
+ 		}
+ 	}
+@@ -3356,7 +3364,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 		goto out;
+ 	}
+ 
+-	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
++	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
+ 			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
+ 	if (IS_ERR(pool)) {
+ 		r = PTR_ERR(pool);
+@@ -4098,7 +4106,7 @@ static struct target_type pool_target = {
+ 	.name = "thin-pool",
+ 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
+ 		    DM_TARGET_IMMUTABLE,
+-	.version = {1, 21, 0},
++	.version = {1, 22, 0},
+ 	.module = THIS_MODULE,
+ 	.ctr = pool_ctr,
+ 	.dtr = pool_dtr,
+@@ -4475,7 +4483,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ 
+ static struct target_type thin_target = {
+ 	.name = "thin",
+-	.version = {1, 21, 0},
++	.version = {1, 22, 0},
+ 	.module	= THIS_MODULE,
+ 	.ctr = thin_ctr,
+ 	.dtr = thin_dtr,
+diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
+index 4b9b98cf6674..5bd3ae82992f 100644
+--- a/drivers/media/i2c/mt9v032.c
++++ b/drivers/media/i2c/mt9v032.c
+@@ -428,10 +428,12 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
+ 				  struct v4l2_subdev_pad_config *cfg,
+ 				  struct v4l2_subdev_mbus_code_enum *code)
+ {
++	struct mt9v032 *mt9v032 = to_mt9v032(subdev);
++
+ 	if (code->index > 0)
+ 		return -EINVAL;
+ 
+-	code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
++	code->code = mt9v032->format.code;
+ 	return 0;
+ }
+ 
+@@ -439,7 +441,11 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
+ 				   struct v4l2_subdev_pad_config *cfg,
+ 				   struct v4l2_subdev_frame_size_enum *fse)
+ {
+-	if (fse->index >= 3 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
++	struct mt9v032 *mt9v032 = to_mt9v032(subdev);
++
++	if (fse->index >= 3)
++		return -EINVAL;
++	if (mt9v032->format.code != fse->code)
+ 		return -EINVAL;
+ 
+ 	fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 5e495c833d32..bb968e764f31 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -874,7 +874,7 @@ static unsigned long ov5640_calc_sys_clk(struct ov5640_dev *sensor,
+ 			 * We have reached the maximum allowed PLL1 output,
+ 			 * increase sysdiv.
+ 			 */
+-			if (!rate)
++			if (!_rate)
+ 				break;
+ 
+ 			/*
+diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
+index 8644205d3cd3..8e5a2c580821 100644
+--- a/drivers/media/pci/cx23885/cx23885-cards.c
++++ b/drivers/media/pci/cx23885/cx23885-cards.c
+@@ -801,6 +801,25 @@ struct cx23885_board cx23885_boards[] = {
+ 		.name		= "Hauppauge WinTV-Starburst2",
+ 		.portb		= CX23885_MPEG_DVB,
+ 	},
++	[CX23885_BOARD_AVERMEDIA_CE310B] = {
++		.name		= "AVerMedia CE310B",
++		.porta		= CX23885_ANALOG_VIDEO,
++		.force_bff	= 1,
++		.input          = {{
++			.type   = CX23885_VMUX_COMPOSITE1,
++			.vmux   = CX25840_VIN1_CH1 |
++				  CX25840_NONE_CH2 |
++				  CX25840_NONE0_CH3,
++			.amux   = CX25840_AUDIO7,
++		}, {
++			.type   = CX23885_VMUX_SVIDEO,
++			.vmux   = CX25840_VIN8_CH1 |
++				  CX25840_NONE_CH2 |
++				  CX25840_VIN7_CH3 |
++				  CX25840_SVIDEO_ON,
++			.amux   = CX25840_AUDIO7,
++		} },
++	},
+ };
+ const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
+ 
+@@ -1124,6 +1143,10 @@ struct cx23885_subid cx23885_subids[] = {
+ 		.subvendor = 0x0070,
+ 		.subdevice = 0xf02a,
+ 		.card      = CX23885_BOARD_HAUPPAUGE_STARBURST2,
++	}, {
++		.subvendor = 0x1461,
++		.subdevice = 0x3100,
++		.card      = CX23885_BOARD_AVERMEDIA_CE310B,
+ 	},
+ };
+ const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
+@@ -2348,6 +2371,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
+ 	case CX23885_BOARD_DVBSKY_T982:
+ 	case CX23885_BOARD_VIEWCAST_260E:
+ 	case CX23885_BOARD_VIEWCAST_460E:
++	case CX23885_BOARD_AVERMEDIA_CE310B:
+ 		dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
+ 				&dev->i2c_bus[2].i2c_adap,
+ 				"cx25840", 0x88 >> 1, NULL);
+diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
+index 8098b15493de..7fc408ee4934 100644
+--- a/drivers/media/pci/cx23885/cx23885-video.c
++++ b/drivers/media/pci/cx23885/cx23885-video.c
+@@ -257,7 +257,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
+ 		(dev->board == CX23885_BOARD_MYGICA_X8507) ||
+ 		(dev->board == CX23885_BOARD_AVERMEDIA_HC81R) ||
+ 		(dev->board == CX23885_BOARD_VIEWCAST_260E) ||
+-		(dev->board == CX23885_BOARD_VIEWCAST_460E)) {
++		(dev->board == CX23885_BOARD_VIEWCAST_460E) ||
++		(dev->board == CX23885_BOARD_AVERMEDIA_CE310B)) {
+ 		/* Configure audio routing */
+ 		v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
+ 			INPUT(input)->amux, 0, 0);
+diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
+index a95a2e4c6a0d..c472498e57c4 100644
+--- a/drivers/media/pci/cx23885/cx23885.h
++++ b/drivers/media/pci/cx23885/cx23885.h
+@@ -101,6 +101,7 @@
+ #define CX23885_BOARD_HAUPPAUGE_STARBURST2     59
+ #define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885 60
+ #define CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885 61
++#define CX23885_BOARD_AVERMEDIA_CE310B         62
+ 
+ #define GPIO_0 0x00000001
+ #define GPIO_1 0x00000002
+diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
+index 4372abbb5950..a74e9fd65238 100644
+--- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
++++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
+@@ -14,8 +14,8 @@
+ #define MAX_SRC_WIDTH           2048
+ 
+ /* Reset & boot poll config */
+-#define POLL_RST_MAX            50
+-#define POLL_RST_DELAY_MS       20
++#define POLL_RST_MAX            500
++#define POLL_RST_DELAY_MS       2
+ 
+ enum bdisp_target_plan {
+ 	BDISP_RGB,
+@@ -382,7 +382,7 @@ int bdisp_hw_reset(struct bdisp_dev *bdisp)
+ 	for (i = 0; i < POLL_RST_MAX; i++) {
+ 		if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE)
+ 			break;
+-		msleep(POLL_RST_DELAY_MS);
++		udelay(POLL_RST_DELAY_MS * 1000);
+ 	}
+ 	if (i == POLL_RST_MAX)
+ 		dev_err(bdisp->dev, "Reset timeout\n");
+diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+index f36dc6258900..b8b07c1de2a8 100644
+--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
++++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+@@ -11,6 +11,7 @@
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
++#include <linux/of_device.h>
+ #include <linux/of_graph.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+@@ -155,6 +156,27 @@ static int sun4i_csi_probe(struct platform_device *pdev)
+ 	subdev = &csi->subdev;
+ 	vdev = &csi->vdev;
+ 
++	/*
++	 * On Allwinner SoCs, some high memory bandwidth devices do DMA
++	 * directly over the memory bus (called MBUS), instead of the
++	 * system bus. The memory bus has a different addressing scheme
++	 * without the DRAM starting offset.
++	 *
++	 * In some cases this can be described by an interconnect in
++	 * the device tree. In other cases where the hardware is not
++	 * fully understood and the interconnect is left out of the
++	 * device tree, fall back to a default offset.
++	 */
++	if (of_find_property(csi->dev->of_node, "interconnects", NULL)) {
++		ret = of_dma_configure(csi->dev, csi->dev->of_node, true);
++		if (ret)
++			return ret;
++	} else {
++#ifdef PHYS_PFN_OFFSET
++		csi->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
++#endif
++	}
++
+ 	csi->mdev.dev = csi->dev;
+ 	strscpy(csi->mdev.model, "Allwinner Video Capture Device",
+ 		sizeof(csi->mdev.model));
+diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
+index 001c8bde006c..88d39b3554c4 100644
+--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
++++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
+@@ -22,8 +22,8 @@
+ #define CSI_CFG_INPUT_FMT(fmt)			((fmt) << 20)
+ #define CSI_CFG_OUTPUT_FMT(fmt)			((fmt) << 16)
+ #define CSI_CFG_YUV_DATA_SEQ(seq)		((seq) << 8)
+-#define CSI_CFG_VSYNC_POL(pol)			((pol) << 2)
+-#define CSI_CFG_HSYNC_POL(pol)			((pol) << 1)
++#define CSI_CFG_VREF_POL(pol)			((pol) << 2)
++#define CSI_CFG_HREF_POL(pol)			((pol) << 1)
+ #define CSI_CFG_PCLK_POL(pol)			((pol) << 0)
+ 
+ #define CSI_CPT_CTRL_REG		0x08
+diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
+index d6979e11a67b..78fa1c535ac6 100644
+--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
++++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
+@@ -228,7 +228,7 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
+ 	struct sun4i_csi *csi = vb2_get_drv_priv(vq);
+ 	struct v4l2_fwnode_bus_parallel *bus = &csi->bus;
+ 	const struct sun4i_csi_format *csi_fmt;
+-	unsigned long hsync_pol, pclk_pol, vsync_pol;
++	unsigned long href_pol, pclk_pol, vref_pol;
+ 	unsigned long flags;
+ 	unsigned int i;
+ 	int ret;
+@@ -278,13 +278,21 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
+ 	writel(CSI_WIN_CTRL_H_ACTIVE(csi->fmt.height),
+ 	       csi->regs + CSI_WIN_CTRL_H_REG);
+ 
+-	hsync_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
+-	pclk_pol = !!(bus->flags & V4L2_MBUS_DATA_ACTIVE_HIGH);
+-	vsync_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);
++	/*
++	 * This hardware uses [HV]REF instead of [HV]SYNC. Based on the
++	 * provided timing diagrams in the manual, positive polarity
++	 * equals active high [HV]REF.
++	 *
++	 * When the back porch is 0, [HV]REF is more or less equivalent
++	 * to [HV]SYNC inverted.
++	 */
++	href_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
++	vref_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
++	pclk_pol = !!(bus->flags & V4L2_MBUS_PCLK_SAMPLE_RISING);
+ 	writel(CSI_CFG_INPUT_FMT(csi_fmt->input) |
+ 	       CSI_CFG_OUTPUT_FMT(csi_fmt->output) |
+-	       CSI_CFG_VSYNC_POL(vsync_pol) |
+-	       CSI_CFG_HSYNC_POL(hsync_pol) |
++	       CSI_CFG_VREF_POL(vref_pol) |
++	       CSI_CFG_HREF_POL(href_pol) |
+ 	       CSI_CFG_PCLK_POL(pclk_pol),
+ 	       csi->regs + CSI_CFG_REG);
+ 
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 2b688cc39bb8..99883550375e 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -497,6 +497,22 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 			}
+ 		}
+ 
++		/* Some devices report bpp that doesn't match the format. */
++		if (dev->quirks & UVC_QUIRK_FORCE_BPP) {
++			const struct v4l2_format_info *info =
++				v4l2_format_info(format->fcc);
++
++			if (info) {
++				unsigned int div = info->hdiv * info->vdiv;
++
++				n = info->bpp[0] * div;
++				for (i = 1; i < info->comp_planes; i++)
++					n += info->bpp[i];
++
++				format->bpp = DIV_ROUND_UP(8 * n, div);
++			}
++		}
++
+ 		if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) {
+ 			ftype = UVC_VS_FRAME_UNCOMPRESSED;
+ 		} else {
+@@ -2874,6 +2890,15 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceSubClass	= 1,
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= (kernel_ulong_t)&uvc_quirk_force_y8 },
++	/* GEO Semiconductor GC6500 */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x29fe,
++	  .idProduct		= 0x4d53,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) },
+ 	/* Intel RealSense D4M */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index f773dc5d802c..6ab972c643e3 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -198,6 +198,7 @@
+ #define UVC_QUIRK_RESTRICT_FRAME_RATE	0x00000200
+ #define UVC_QUIRK_RESTORE_CTRLS_ON_INIT	0x00000400
+ #define UVC_QUIRK_FORCE_Y8		0x00000800
++#define UVC_QUIRK_FORCE_BPP		0x00001000
+ 
+ /* Format flags */
+ #define UVC_FMT_FLAG_COMPRESSED		0x00000001
+diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
+index 11835969e982..48ba7e02bed7 100644
+--- a/drivers/misc/xilinx_sdfec.c
++++ b/drivers/misc/xilinx_sdfec.c
+@@ -1025,25 +1025,25 @@ static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
+ }
+ #endif
+ 
+-static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
++static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
+ {
+-	unsigned int mask = 0;
++	__poll_t mask = 0;
+ 	struct xsdfec_dev *xsdfec;
+ 
+ 	xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
+ 
+ 	if (!xsdfec)
+-		return POLLNVAL | POLLHUP;
++		return EPOLLNVAL | EPOLLHUP;
+ 
+ 	poll_wait(file, &xsdfec->waitq, wait);
+ 
+ 	/* XSDFEC ISR detected an error */
+ 	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ 	if (xsdfec->state_updated)
+-		mask |= POLLIN | POLLPRI;
++		mask |= EPOLLIN | EPOLLPRI;
+ 
+ 	if (xsdfec->stats_updated)
+-		mask |= POLLIN | POLLRDNORM;
++		mask |= EPOLLIN | EPOLLRDNORM;
+ 	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+ 
+ 	return mask;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 01b603c5e76a..9d62200b6c33 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -944,6 +944,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+ 	dma_addr -= bp->rx_dma_offset;
+ 	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+ 			     DMA_ATTR_WEAK_ORDERING);
++	page_pool_release_page(rxr->page_pool, page);
+ 
+ 	if (unlikely(!payload))
+ 		payload = eth_get_headlen(bp->dev, data_ptr, len);
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index acb2856936d2..6e2ab10ad2e6 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -2013,10 +2013,10 @@ static int enic_stop(struct net_device *netdev)
+ 		napi_disable(&enic->napi[i]);
+ 
+ 	netif_carrier_off(netdev);
+-	netif_tx_disable(netdev);
+ 	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+ 		for (i = 0; i < enic->wq_count; i++)
+ 			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
++	netif_tx_disable(netdev);
+ 
+ 	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
+ 		enic_dev_del_station_addr(enic);
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+index 2e99438cb1bf..de52686b1d46 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+@@ -36,7 +36,6 @@ void enetc_sched_speed_set(struct net_device *ndev)
+ 	case SPEED_10:
+ 	default:
+ 		pspeed = ENETC_PMR_PSPEED_10M;
+-		netdev_err(ndev, "Qbv PSPEED set speed link down.\n");
+ 	}
+ 
+ 	priv->speed = speed;
+@@ -192,7 +191,6 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
+ 	u32 hi_credit_bit, hi_credit_reg;
+ 	u32 max_interference_size;
+ 	u32 port_frame_max_size;
+-	u32 tc_max_sized_frame;
+ 	u8 tc = cbs->queue;
+ 	u8 prio_top, prio_next;
+ 	int bw_sum = 0;
+@@ -250,7 +248,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
+ 		return -EINVAL;
+ 	}
+ 
+-	tc_max_sized_frame = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
++	enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ 
+ 	/* For top prio TC, the max_interfrence_size is maxSizedFrame.
+ 	 *
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 72868a28b621..7d08bf6370ae 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -2205,13 +2205,17 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+ 	skb_dirtytx = tx_queue->skb_dirtytx;
+ 
+ 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
++		bool do_tstamp;
++
++		do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
++			    priv->hwts_tx_en;
+ 
+ 		frags = skb_shinfo(skb)->nr_frags;
+ 
+ 		/* When time stamping, one additional TxBD must be freed.
+ 		 * Also, we need to dma_unmap_single() the TxPAL.
+ 		 */
+-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
++		if (unlikely(do_tstamp))
+ 			nr_txbds = frags + 2;
+ 		else
+ 			nr_txbds = frags + 1;
+@@ -2225,7 +2229,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+ 		    (lstatus & BD_LENGTH_MASK))
+ 			break;
+ 
+-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
++		if (unlikely(do_tstamp)) {
+ 			next = next_txbd(bdp, base, tx_ring_size);
+ 			buflen = be16_to_cpu(next->length) +
+ 				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+@@ -2235,7 +2239,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+ 		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
+ 				 buflen, DMA_TO_DEVICE);
+ 
+-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
++		if (unlikely(do_tstamp)) {
+ 			struct skb_shared_hwtstamps shhwtstamps;
+ 			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+ 					  ~0x7UL);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index f73cd917c44f..3156de786d95 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -791,7 +791,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+ 	struct i40e_ring *ring;
+ 
+ 	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
+-		return -ENETDOWN;
++		return -EAGAIN;
+ 
+ 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ 		return -ENETDOWN;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 2c212f64d99f..8b2b9e254d28 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -1071,13 +1071,16 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+ 		ice_put_rx_buf(rx_ring, rx_buf);
+ 		continue;
+ construct_skb:
+-		if (skb)
++		if (skb) {
+ 			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
+-		else if (ice_ring_uses_build_skb(rx_ring))
+-			skb = ice_build_skb(rx_ring, rx_buf, &xdp);
+-		else
++		} else if (likely(xdp.data)) {
++			if (ice_ring_uses_build_skb(rx_ring))
++				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
++			else
++				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
++		} else {
+ 			skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
+-
++		}
+ 		/* exit if we failed to retrieve a buffer */
+ 		if (!skb) {
+ 			rx_ring->rx_stats.alloc_buf_failed++;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+index 9f09253f9f46..a05158472ed1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+@@ -297,6 +297,9 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+ 			s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
+ #endif
+ 			s->tx_cqes		+= sq_stats->cqes;
++
++			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
++			barrier();
+ 		}
+ 	}
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+index f2a0e72285ba..02f7e4a39578 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+@@ -89,7 +89,7 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
+ 	len = nstrides << wq->fbc.log_stride;
+ 	wqe = mlx5_wq_cyc_get_wqe(wq, ix);
+ 
+-	pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n",
++	pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n",
+ 		mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
+ 	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+index 9bf8da5f6daf..3fe878d7c94c 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+@@ -573,6 +573,7 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
+ 
+ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
+ {
++	enum mlxsw_reg_mgpir_device_type device_type;
+ 	int index, max_index, sensor_index;
+ 	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ 	char mtmp_pl[MLXSW_REG_MTMP_LEN];
+@@ -584,8 +585,9 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
+ 	if (err)
+ 		return err;
+ 
+-	mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL);
+-	if (!gbox_num)
++	mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
++	if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
++	    !gbox_num)
+ 		return 0;
+ 
+ 	index = mlxsw_hwmon->module_sensor_max;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+index c721b171bd8d..ce0a6837daa3 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+@@ -895,8 +895,10 @@ static int
+ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
+ 			     struct mlxsw_thermal *thermal)
+ {
++	enum mlxsw_reg_mgpir_device_type device_type;
+ 	struct mlxsw_thermal_module *gearbox_tz;
+ 	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
++	u8 gbox_num;
+ 	int i;
+ 	int err;
+ 
+@@ -908,11 +910,13 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
+ 	if (err)
+ 		return err;
+ 
+-	mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL,
++	mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
+ 			       NULL);
+-	if (!thermal->tz_gearbox_num)
++	if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
++	    !gbox_num)
+ 		return 0;
+ 
++	thermal->tz_gearbox_num = gbox_num;
+ 	thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
+ 					  sizeof(*thermal->tz_gearbox_arr),
+ 					  GFP_KERNEL);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+index 49933818c6f5..2dc0978428e6 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+@@ -215,7 +215,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
+ start_again:
+ 	err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
+ 	if (err)
+-		return err;
++		goto err_ctx_prepare;
+ 	j = 0;
+ 	for (; i < rif_count; i++) {
+ 		struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+@@ -247,6 +247,7 @@ start_again:
+ 	return 0;
+ err_entry_append:
+ err_entry_get:
++err_ctx_prepare:
+ 	rtnl_unlock();
+ 	devlink_dpipe_entry_clear(&entry);
+ 	return err;
+diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
+index 9f8a1f69c0c4..23ebddfb9532 100644
+--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
++++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
+@@ -176,10 +176,8 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
+ 	u8 mask, val;
+ 	int err;
+ 
+-	if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
+-		err = -EOPNOTSUPP;
++	if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
+ 		goto err_delete;
+-	}
+ 
+ 	tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
+ 
+@@ -200,18 +198,14 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
+ 		if ((iter->val & cmask) == (val & cmask) &&
+ 		    iter->band != knode->res->classid) {
+ 			NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
+-			err = -EOPNOTSUPP;
+ 			goto err_delete;
+ 		}
+ 	}
+ 
+ 	if (!match) {
+ 		match = kzalloc(sizeof(*match), GFP_KERNEL);
+-		if (!match) {
+-			err = -ENOMEM;
+-			goto err_delete;
+-		}
+-
++		if (!match)
++			return -ENOMEM;
+ 		list_add(&match->list, &alink->dscp_map);
+ 	}
+ 	match->handle = knode->handle;
+@@ -227,7 +221,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
+ 
+ err_delete:
+ 	nfp_abm_u32_knode_delete(alink, knode);
+-	return err;
++	return -EOPNOTSUPP;
+ }
+ 
+ static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 92a590154bb9..2d2d22f86dc6 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -6831,6 +6831,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	int chipset, region;
+ 	int jumbo_max, rc;
+ 
++	/* Some tools for creating an initramfs don't consider softdeps, then
++	 * r8169.ko may be in initramfs, but realtek.ko not. Then the generic
++	 * PHY driver is used that doesn't work with most chip versions.
++	 */
++	if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) {
++		dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
++		return -ENOENT;
++	}
++
+ 	dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
+ 	if (!dev)
+ 		return -ENOMEM;
+diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
+index 6fc04ffb22c2..d4e095d0e8f1 100644
+--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
++++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
+@@ -517,25 +517,14 @@ static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+ 	return ret;
+ }
+ 
+-static int ixp4xx_mdio_register(void)
++static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
+ {
+ 	int err;
+ 
+ 	if (!(mdio_bus = mdiobus_alloc()))
+ 		return -ENOMEM;
+ 
+-	if (cpu_is_ixp43x()) {
+-		/* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
+-		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
+-			return -ENODEV;
+-		mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
+-	} else {
+-		/* All MII PHY accesses use NPE-B Ethernet registers */
+-		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
+-			return -ENODEV;
+-		mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+-	}
+-
++	mdio_regs = regs;
+ 	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+ 	spin_lock_init(&mdio_lock);
+ 	mdio_bus->name = "IXP4xx MII Bus";
+@@ -1374,7 +1363,7 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
+ 	.ndo_validate_addr = eth_validate_addr,
+ };
+ 
+-static int eth_init_one(struct platform_device *pdev)
++static int ixp4xx_eth_probe(struct platform_device *pdev)
+ {
+ 	struct port *port;
+ 	struct net_device *dev;
+@@ -1384,7 +1373,7 @@ static int eth_init_one(struct platform_device *pdev)
+ 	char phy_id[MII_BUS_ID_SIZE + 3];
+ 	int err;
+ 
+-	if (!(dev = alloc_etherdev(sizeof(struct port))))
++	if (!(dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct port))))
+ 		return -ENOMEM;
+ 
+ 	SET_NETDEV_DEV(dev, &pdev->dev);
+@@ -1394,20 +1383,51 @@ static int eth_init_one(struct platform_device *pdev)
+ 
+ 	switch (port->id) {
+ 	case IXP4XX_ETH_NPEA:
++		/* If the MDIO bus is not up yet, defer probe */
++		if (!mdio_bus)
++			return -EPROBE_DEFER;
+ 		port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
+ 		regs_phys  = IXP4XX_EthA_BASE_PHYS;
+ 		break;
+ 	case IXP4XX_ETH_NPEB:
++		/*
++		 * On all except IXP43x, NPE-B is used for the MDIO bus.
++		 * If there is no NPE-B in the feature set, bail out, else
++		 * register the MDIO bus.
++		 */
++		if (!cpu_is_ixp43x()) {
++			if (!(ixp4xx_read_feature_bits() &
++			      IXP4XX_FEATURE_NPEB_ETH0))
++				return -ENODEV;
++			/* Else register the MDIO bus on NPE-B */
++			if ((err = ixp4xx_mdio_register(IXP4XX_EthC_BASE_VIRT)))
++				return err;
++		}
++		if (!mdio_bus)
++			return -EPROBE_DEFER;
+ 		port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+ 		regs_phys  = IXP4XX_EthB_BASE_PHYS;
+ 		break;
+ 	case IXP4XX_ETH_NPEC:
++		/*
++		 * IXP43x lacks NPE-B and uses NPE-C for the MDIO bus access,
++		 * of there is no NPE-C, no bus, nothing works, so bail out.
++		 */
++		if (cpu_is_ixp43x()) {
++			if (!(ixp4xx_read_feature_bits() &
++			      IXP4XX_FEATURE_NPEC_ETH))
++				return -ENODEV;
++			/* Else register the MDIO bus on NPE-C */
++			if ((err = ixp4xx_mdio_register(IXP4XX_EthC_BASE_VIRT)))
++				return err;
++		}
++		if (!mdio_bus)
++			return -EPROBE_DEFER;
+ 		port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
+ 		regs_phys  = IXP4XX_EthC_BASE_PHYS;
+ 		break;
+ 	default:
+-		err = -ENODEV;
+-		goto err_free;
++		return -ENODEV;
+ 	}
+ 
+ 	dev->netdev_ops = &ixp4xx_netdev_ops;
+@@ -1416,10 +1436,8 @@ static int eth_init_one(struct platform_device *pdev)
+ 
+ 	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
+ 
+-	if (!(port->npe = npe_request(NPE_ID(port->id)))) {
+-		err = -EIO;
+-		goto err_free;
+-	}
++	if (!(port->npe = npe_request(NPE_ID(port->id))))
++		return -EIO;
+ 
+ 	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
+ 	if (!port->mem_res) {
+@@ -1465,12 +1483,10 @@ err_free_mem:
+ 	release_resource(port->mem_res);
+ err_npe_rel:
+ 	npe_release(port->npe);
+-err_free:
+-	free_netdev(dev);
+ 	return err;
+ }
+ 
+-static int eth_remove_one(struct platform_device *pdev)
++static int ixp4xx_eth_remove(struct platform_device *pdev)
+ {
+ 	struct net_device *dev = platform_get_drvdata(pdev);
+ 	struct phy_device *phydev = dev->phydev;
+@@ -1478,45 +1494,21 @@ static int eth_remove_one(struct platform_device *pdev)
+ 
+ 	unregister_netdev(dev);
+ 	phy_disconnect(phydev);
++	ixp4xx_mdio_remove();
+ 	npe_port_tab[NPE_ID(port->id)] = NULL;
+ 	npe_release(port->npe);
+ 	release_resource(port->mem_res);
+-	free_netdev(dev);
+ 	return 0;
+ }
+ 
+ static struct platform_driver ixp4xx_eth_driver = {
+ 	.driver.name	= DRV_NAME,
+-	.probe		= eth_init_one,
+-	.remove		= eth_remove_one,
++	.probe		= ixp4xx_eth_probe,
++	.remove		= ixp4xx_eth_remove,
+ };
+-
+-static int __init eth_init_module(void)
+-{
+-	int err;
+-
+-	/*
+-	 * FIXME: we bail out on device tree boot but this really needs
+-	 * to be fixed in a nicer way: this registers the MDIO bus before
+-	 * even matching the driver infrastructure, we should only probe
+-	 * detected hardware.
+-	 */
+-	if (of_have_populated_dt())
+-		return -ENODEV;
+-	if ((err = ixp4xx_mdio_register()))
+-		return err;
+-	return platform_driver_register(&ixp4xx_eth_driver);
+-}
+-
+-static void __exit eth_cleanup_module(void)
+-{
+-	platform_driver_unregister(&ixp4xx_eth_driver);
+-	ixp4xx_mdio_remove();
+-}
++module_platform_driver(ixp4xx_eth_driver);
+ 
+ MODULE_AUTHOR("Krzysztof Halasa");
+ MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:ixp4xx_eth");
+-module_init(eth_init_module);
+-module_exit(eth_cleanup_module);
+diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
+index 7c5265fd2b94..4190f9ed5313 100644
+--- a/drivers/net/phy/fixed_phy.c
++++ b/drivers/net/phy/fixed_phy.c
+@@ -212,16 +212,13 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
+ 	 */
+ 	gpiod = gpiod_get_from_of_node(fixed_link_node, "link-gpios", 0,
+ 				       GPIOD_IN, "mdio");
+-	of_node_put(fixed_link_node);
+-	if (IS_ERR(gpiod)) {
+-		if (PTR_ERR(gpiod) == -EPROBE_DEFER)
+-			return gpiod;
+-
++	if (IS_ERR(gpiod) && PTR_ERR(gpiod) != -EPROBE_DEFER) {
+ 		if (PTR_ERR(gpiod) != -ENOENT)
+ 			pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
+ 			       fixed_link_node);
+ 		gpiod = NULL;
+ 	}
++	of_node_put(fixed_link_node);
+ 
+ 	return gpiod;
+ }
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index 476db5345e1a..879ca37c8508 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -171,7 +171,9 @@ static int rtl8211c_config_init(struct phy_device *phydev)
+ 
+ static int rtl8211f_config_init(struct phy_device *phydev)
+ {
++	struct device *dev = &phydev->mdio.dev;
+ 	u16 val;
++	int ret;
+ 
+ 	/* enable TX-delay for rgmii-{id,txid}, and disable it for rgmii and
+ 	 * rgmii-rxid. The RX-delay can be enabled by the external RXDLY pin.
+@@ -189,7 +191,22 @@ static int rtl8211f_config_init(struct phy_device *phydev)
+ 		return 0;
+ 	}
+ 
+-	return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val);
++	ret = phy_modify_paged_changed(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY,
++				       val);
++	if (ret < 0) {
++		dev_err(dev, "Failed to update the TX delay register\n");
++		return ret;
++	} else if (ret) {
++		dev_dbg(dev,
++			"%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n",
++			val ? "Enabling" : "Disabling");
++	} else {
++		dev_dbg(dev,
++			"2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n",
++			val ? "enabled" : "disabled");
++	}
++
++	return 0;
+ }
+ 
+ static int rtl8211e_config_init(struct phy_device *phydev)
+diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
+index aef7de225783..4ad0a0c33d85 100644
+--- a/drivers/net/wan/fsl_ucc_hdlc.c
++++ b/drivers/net/wan/fsl_ucc_hdlc.c
+@@ -245,6 +245,11 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
+ 		ret = -ENOMEM;
+ 		goto free_riptr;
+ 	}
++	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
++		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
++		ret = -ENOMEM;
++		goto free_tiptr;
++	}
+ 
+ 	/* Set RIPTR, TIPTR */
+ 	iowrite16be(riptr, &priv->ucc_pram->riptr);
+diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
+index 5643675ff724..bf78073ee7fd 100644
+--- a/drivers/net/wan/hdlc_x25.c
++++ b/drivers/net/wan/hdlc_x25.c
+@@ -62,11 +62,12 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	unsigned char *ptr;
+ 
+-	skb_push(skb, 1);
+-
+ 	if (skb_cow(skb, 1))
+ 		return NET_RX_DROP;
+ 
++	skb_push(skb, 1);
++	skb_reset_network_header(skb);
++
+ 	ptr  = skb->data;
+ 	*ptr = X25_IFACE_DATA;
+ 
+@@ -79,6 +80,13 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
+ static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	hdlc_device *hdlc = dev_to_hdlc(dev);
++
++	skb_reset_network_header(skb);
++	skb->protocol = hdlc_type_trans(skb, dev);
++
++	if (dev_nit_active(dev))
++		dev_queue_xmit_nit(skb, dev);
++
+ 	hdlc->xmit(skb, dev); /* Ignore return value :-( */
+ }
+ 
+@@ -93,6 +101,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	switch (skb->data[0]) {
+ 	case X25_IFACE_DATA:	/* Data to be transmitted */
+ 		skb_pull(skb, 1);
++		skb_reset_network_header(skb);
+ 		if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
+ 			dev_kfree_skb(skb);
+ 		return NETDEV_TX_OK;
+diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
+index ea6ee6a608ce..e7619cec978a 100644
+--- a/drivers/net/wan/ixp4xx_hss.c
++++ b/drivers/net/wan/ixp4xx_hss.c
+@@ -258,7 +258,7 @@ struct port {
+ 	struct hss_plat_info *plat;
+ 	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
+ 	struct desc *desc_tab;	/* coherent */
+-	u32 desc_tab_phys;
++	dma_addr_t desc_tab_phys;
+ 	unsigned int id;
+ 	unsigned int clock_type, clock_rate, loopback;
+ 	unsigned int initialized, carrier;
+@@ -858,7 +858,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		dev->stats.tx_dropped++;
+ 		return NETDEV_TX_OK;
+ 	}
+-	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
++	memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
+ 	dev_kfree_skb(skb);
+ #endif
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index 16177497bba7..7e85c4916e7f 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -1563,13 +1563,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
+ 	ret = ath10k_qmi_init(ar, msa_size);
+ 	if (ret) {
+ 		ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
+-		goto err_core_destroy;
++		goto err_power_off;
+ 	}
+ 
+ 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
+ 
+ 	return 0;
+ 
++err_power_off:
++	ath10k_hw_power_off(ar);
++
+ err_free_irq:
+ 	ath10k_snoc_free_irq(ar);
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index 69a1ec53df29..7b358484940e 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -3707,6 +3707,7 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
+ 	struct wmi_tlv *tlv;
+ 	struct sk_buff *skb;
+ 	__le32 *channel_list;
++	u16 tlv_len;
+ 	size_t len;
+ 	void *ptr;
+ 	u32 i;
+@@ -3764,10 +3765,12 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
+ 	/* nlo_configured_parameters(nlo_list) */
+ 	cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
+ 					       WMI_NLO_MAX_SSIDS));
++	tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
++		sizeof(struct nlo_configured_parameters);
+ 
+ 	tlv = ptr;
+ 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+-	tlv->len = __cpu_to_le16(len);
++	tlv->len = __cpu_to_le16(tlv_len);
+ 
+ 	ptr += sizeof(*tlv);
+ 	nlo_list = ptr;
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 9f564e2b7a14..214d65108b29 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -9476,7 +9476,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
+ 
+ 	msdu = pkt_addr->vaddr;
+ 	dma_unmap_single(ar->dev, pkt_addr->paddr,
+-			 msdu->len, DMA_FROM_DEVICE);
++			 msdu->len, DMA_TO_DEVICE);
+ 	ieee80211_free_txskb(ar->hw, msdu);
+ 
+ 	return 0;
+diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
+index 778b63be6a9a..02548d40253c 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
++++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
+@@ -869,6 +869,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
+ 	u8 data_offset;
+ 	struct wil_rx_status_extended *s;
+ 	u16 sring_idx = sring - wil->srings;
++	int invalid_buff_id_retry;
+ 
+ 	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
+ 
+@@ -882,9 +883,9 @@ again:
+ 	/* Extract the buffer ID from the status message */
+ 	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+ 
++	invalid_buff_id_retry = 0;
+ 	while (!buff_id) {
+ 		struct wil_rx_status_extended *s;
+-		int invalid_buff_id_retry = 0;
+ 
+ 		wil_dbg_txrx(wil,
+ 			     "buff_id is not updated yet by HW, (swhead 0x%x)\n",
+diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
+index 4325e91736eb..8b6b657c4b85 100644
+--- a/drivers/net/wireless/broadcom/b43legacy/main.c
++++ b/drivers/net/wireless/broadcom/b43legacy/main.c
+@@ -1275,8 +1275,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
+ }
+ 
+ /* Interrupt handler bottom-half */
+-static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
++static void b43legacy_interrupt_tasklet(unsigned long data)
+ {
++	struct b43legacy_wldev *dev = (struct b43legacy_wldev *)data;
+ 	u32 reason;
+ 	u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
+ 	u32 merged_dma_reason = 0;
+@@ -3741,7 +3742,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
+ 	b43legacy_set_status(wldev, B43legacy_STAT_UNINIT);
+ 	wldev->bad_frames_preempt = modparam_bad_frames_preempt;
+ 	tasklet_init(&wldev->isr_tasklet,
+-		     (void (*)(unsigned long))b43legacy_interrupt_tasklet,
++		     b43legacy_interrupt_tasklet,
+ 		     (unsigned long)wldev);
+ 	if (modparam_pio)
+ 		wldev->__using_pio = true;
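
The b43legacy hunk above, and the matching ipw2x00, iwlegacy and rtlwifi hunks later in this patch, all apply the same fix: rather than casting a handler that takes a typed pointer to void (*)(unsigned long) at tasklet_init() time, the handler is declared with the prototype the tasklet core actually calls and casts its unsigned long argument back to the private structure itself. A freestanding sketch of that pattern (the dispatcher and structures below are stand-ins, not the kernel tasklet API):

#include <stdio.h>

struct wldev {
	int irq_count;
};

/* The callback type the (hypothetical) dispatcher expects. */
typedef void (*handler_fn)(unsigned long data);

/* Correct pattern: match the expected prototype, cast the argument inside. */
static void irq_handler(unsigned long data)
{
	struct wldev *dev = (struct wldev *)data;

	dev->irq_count++;
	printf("handled irq, count=%d\n", dev->irq_count);
}

/* Stand-in for tasklet_init()/tasklet_schedule(). */
static void dispatch(handler_fn fn, unsigned long data)
{
	fn(data);
}

int main(void)
{
	struct wldev dev = { 0 };

	/* No function-pointer cast needed; the prototypes already agree.
	 * unsigned long is wide enough for a pointer on the Linux targets
	 * this mirrors, which is the kernel's tasklet convention. */
	dispatch(irq_handler, (unsigned long)&dev);
	return 0;
}
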
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+index 7ba9f6a68645..1f5deea5a288 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+@@ -2092,7 +2092,8 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
+ 	/* firmware requires unique mac address for p2pdev interface */
+ 	if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) {
+ 		bphy_err(drvr, "discovery vif must be different from primary interface\n");
+-		return ERR_PTR(-EINVAL);
++		err = -EINVAL;
++		goto fail;
+ 	}
+ 
+ 	brcmf_p2p_generate_bss_mac(p2p, addr);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index 264ad63232f8..a935993a3c51 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -1935,6 +1935,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
+ 					       BRCMF_SDIO_FT_NORMAL)) {
+ 				rd->len = 0;
+ 				brcmu_pkt_buf_free_skb(pkt);
++				continue;
+ 			}
+ 			bus->sdcnt.rx_readahead_cnt++;
+ 			if (rd->len != roundup(rd_new.len, 16)) {
+@@ -4225,6 +4226,12 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
+ 	}
+ 
+ 	if (err == 0) {
++		/* Assign bus interface call back */
++		sdiod->bus_if->dev = sdiod->dev;
++		sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
++		sdiod->bus_if->chip = bus->ci->chip;
++		sdiod->bus_if->chiprev = bus->ci->chiprev;
++
+ 		/* Allow full data communication using DPC from now on. */
+ 		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
+ 
+@@ -4241,12 +4248,6 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
+ 
+ 	sdio_release_host(sdiod->func1);
+ 
+-	/* Assign bus interface call back */
+-	sdiod->bus_if->dev = sdiod->dev;
+-	sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
+-	sdiod->bus_if->chip = bus->ci->chip;
+-	sdiod->bus_if->chiprev = bus->ci->chiprev;
+-
+ 	err = brcmf_alloc(sdiod->dev, sdiod->settings);
+ 	if (err) {
+ 		brcmf_err("brcmf_alloc failed\n");
+diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+index c4c83ab60cbc..0579554ed4b3 100644
+--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
++++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+@@ -3206,8 +3206,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
+ 	}
+ }
+ 
+-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
++static void ipw2100_irq_tasklet(unsigned long data)
+ {
++	struct ipw2100_priv *priv = (struct ipw2100_priv *)data;
+ 	struct net_device *dev = priv->net_dev;
+ 	unsigned long flags;
+ 	u32 inta, tmp;
+@@ -6006,7 +6007,7 @@ static void ipw2100_rf_kill(struct work_struct *work)
+ 	spin_unlock_irqrestore(&priv->low_lock, flags);
+ }
+ 
+-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
++static void ipw2100_irq_tasklet(unsigned long data);
+ 
+ static const struct net_device_ops ipw2100_netdev_ops = {
+ 	.ndo_open		= ipw2100_open,
+@@ -6136,7 +6137,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
+ 	INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
+ 	INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
+ 
+-	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
++	tasklet_init(&priv->irq_tasklet,
+ 		     ipw2100_irq_tasklet, (unsigned long)priv);
+ 
+ 	/* NOTE:  We do not start the deferred work for status checks yet */
+diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+index 31e43fc1d12b..5ef6f87a48ac 100644
+--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
++++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+@@ -1945,8 +1945,9 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
+ 	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+ }
+ 
+-static void ipw_irq_tasklet(struct ipw_priv *priv)
++static void ipw_irq_tasklet(unsigned long data)
+ {
++	struct ipw_priv *priv = (struct ipw_priv *)data;
+ 	u32 inta, inta_mask, handled = 0;
+ 	unsigned long flags;
+ 	int rc = 0;
+@@ -10677,7 +10678,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
+ 	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
+ #endif				/* CONFIG_IPW2200_QOS */
+ 
+-	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
++	tasklet_init(&priv->irq_tasklet,
+ 		     ipw_irq_tasklet, (unsigned long)priv);
+ 
+ 	return ret;
+diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+index 1168055da182..206b43b9dff8 100644
+--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
++++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+@@ -1376,8 +1376,9 @@ il3945_dump_nic_error_log(struct il_priv *il)
+ }
+ 
+ static void
+-il3945_irq_tasklet(struct il_priv *il)
++il3945_irq_tasklet(unsigned long data)
+ {
++	struct il_priv *il = (struct il_priv *)data;
+ 	u32 inta, handled = 0;
+ 	u32 inta_fh;
+ 	unsigned long flags;
+@@ -3401,7 +3402,7 @@ il3945_setup_deferred_work(struct il_priv *il)
+ 	timer_setup(&il->watchdog, il_bg_watchdog, 0);
+ 
+ 	tasklet_init(&il->irq_tasklet,
+-		     (void (*)(unsigned long))il3945_irq_tasklet,
++		     il3945_irq_tasklet,
+ 		     (unsigned long)il);
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+index 3664f56f8cbd..d1e17589dbeb 100644
+--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
++++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+@@ -4343,8 +4343,9 @@ il4965_synchronize_irq(struct il_priv *il)
+ }
+ 
+ static void
+-il4965_irq_tasklet(struct il_priv *il)
++il4965_irq_tasklet(unsigned long data)
+ {
++	struct il_priv *il = (struct il_priv *)data;
+ 	u32 inta, handled = 0;
+ 	u32 inta_fh;
+ 	unsigned long flags;
+@@ -6237,7 +6238,7 @@ il4965_setup_deferred_work(struct il_priv *il)
+ 	timer_setup(&il->watchdog, il_bg_watchdog, 0);
+ 
+ 	tasklet_init(&il->irq_tasklet,
+-		     (void (*)(unsigned long))il4965_irq_tasklet,
++		     il4965_irq_tasklet,
+ 		     (unsigned long)il);
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
+index d966b29b45ee..348c17ce72f5 100644
+--- a/drivers/net/wireless/intel/iwlegacy/common.c
++++ b/drivers/net/wireless/intel/iwlegacy/common.c
+@@ -699,7 +699,7 @@ il_eeprom_init(struct il_priv *il)
+ 	u32 gp = _il_rd(il, CSR_EEPROM_GP);
+ 	int sz;
+ 	int ret;
+-	u16 addr;
++	int addr;
+ 
+ 	/* allocate eeprom */
+ 	sz = il->cfg->eeprom_size;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index efdf15f57f16..02df603b6400 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -5,10 +5,9 @@
+  *
+  * GPL LICENSE SUMMARY
+  *
+- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 - 2019 Intel Corporation
++ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -28,10 +27,9 @@
+  *
+  * BSD LICENSE
+  *
+- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 - 2019 Intel Corporation
++ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -2037,7 +2035,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
+ 	rcu_read_lock();
+ 
+ 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
+-	if (IS_ERR(sta)) {
++	if (IS_ERR_OR_NULL(sta)) {
+ 		rcu_read_unlock();
+ 		WARN(1, "Can't find STA to configure HE\n");
+ 		return;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+index b5a16f00bada..fcad25ffd811 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+@@ -734,7 +734,8 @@ static  struct thermal_zone_device_ops tzone_ops = {
+ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
+ {
+ 	int i;
+-	char name[] = "iwlwifi";
++	char name[16];
++	static atomic_t counter = ATOMIC_INIT(0);
+ 
+ 	if (!iwl_mvm_is_tt_in_fw(mvm)) {
+ 		mvm->tz_device.tzone = NULL;
+@@ -744,6 +745,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
+ 
+ 	BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+ 
++	sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
+ 	mvm->tz_device.tzone = thermal_zone_device_register(name,
+ 							IWL_MAX_DTS_TRIPS,
+ 							IWL_WRITABLE_TRIPS_MSK,
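
The tt.c hunk above gives each registered thermal zone a unique name by appending a per-driver counter (iwlwifi_1, iwlwifi_2, ...), so a second NIC no longer tries to register under the same fixed "iwlwifi" name. A trivial standalone version of the name generation; the atomic counter is replaced by a plain one here, purely for illustration:

#include <stdio.h>

/* Hypothetical instance counter; the kernel patch uses atomic_inc_return()
 * so concurrent probes still get distinct values. */
static unsigned int counter;

static void make_tz_name(char *buf, size_t len)
{
	snprintf(buf, len, "iwlwifi_%u", ++counter & 0xFF);
}

int main(void)
{
	char a[16], b[16];

	make_tz_name(a, sizeof(a));
	make_tz_name(b, sizeof(b));
	printf("%s %s\n", a, b);   /* iwlwifi_1 iwlwifi_2 */
	return 0;
}
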
+diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
+index 0094b1d2b577..3ec46f48cfde 100644
+--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
++++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
+@@ -2508,7 +2508,7 @@ static int prism2_hostapd_add_sta(struct ap_data *ap,
+ 		sta->supported_rates[0] = 2;
+ 	if (sta->tx_supp_rates & WLAN_RATE_2M)
+ 		sta->supported_rates[1] = 4;
+- 	if (sta->tx_supp_rates & WLAN_RATE_5M5)
++	if (sta->tx_supp_rates & WLAN_RATE_5M5)
+ 		sta->supported_rates[2] = 11;
+ 	if (sta->tx_supp_rates & WLAN_RATE_11M)
+ 		sta->supported_rates[3] = 22;
+diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+index 8c79b963bcff..e753f43e0162 100644
+--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+@@ -1361,7 +1361,8 @@ static int ezusb_init(struct hermes *hw)
+ 	int retval;
+ 
+ 	BUG_ON(in_interrupt());
+-	BUG_ON(!upriv);
++	if (!upriv)
++		return -EINVAL;
+ 
+ 	upriv->reply_count = 0;
+ 	/* Write the MAGIC number on the simulated registers to keep
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index f88d26535978..25335bd2873b 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -1061,13 +1061,15 @@ done:
+ 	return ret;
+ }
+ 
+-static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
++static void _rtl_pci_irq_tasklet(unsigned long data)
+ {
++	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ 	_rtl_pci_tx_chk_waitq(hw);
+ }
+ 
+-static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
++static void _rtl_pci_prepare_bcn_tasklet(unsigned long data)
+ {
++	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+@@ -1193,10 +1195,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
+ 
+ 	/*task */
+ 	tasklet_init(&rtlpriv->works.irq_tasklet,
+-		     (void (*)(unsigned long))_rtl_pci_irq_tasklet,
++		     _rtl_pci_irq_tasklet,
+ 		     (unsigned long)hw);
+ 	tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
+-		     (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
++		     _rtl_pci_prepare_bcn_tasklet,
+ 		     (unsigned long)hw);
+ 	INIT_WORK(&rtlpriv->works.lps_change_work,
+ 		  rtl_lps_change_work_callback);
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index ae61415e1665..f369ddca953a 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -706,8 +706,8 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+ 		if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ 			is_support_sgi = true;
+ 	} else if (sta->ht_cap.ht_supported) {
+-		ra_mask |= (sta->ht_cap.mcs.rx_mask[NL80211_BAND_5GHZ] << 20) |
+-			   (sta->ht_cap.mcs.rx_mask[NL80211_BAND_2GHZ] << 12);
++		ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) |
++			   (sta->ht_cap.mcs.rx_mask[0] << 12);
+ 		if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ 			stbc_en = HT_STBC_EN;
+ 		if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+@@ -717,6 +717,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+ 			is_support_sgi = true;
+ 	}
+ 
++	if (efuse->hw_cap.nss == 1)
++		ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
++
+ 	if (hal->current_band_type == RTW_BAND_5G) {
+ 		ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ 		if (sta->vht_cap.vht_supported) {
+@@ -750,11 +753,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+ 		wireless_set = 0;
+ 	}
+ 
+-	if (efuse->hw_cap.nss == 1) {
+-		ra_mask &= RA_MASK_VHT_RATES_1SS;
+-		ra_mask &= RA_MASK_HT_RATES_1SS;
+-	}
+-
+ 	switch (sta->bandwidth) {
+ 	case IEEE80211_STA_RX_BW_80:
+ 		bw_mode = RTW_CHANNEL_WIDTH_80;
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
+index a58e8276a41a..a6746b5a9ff2 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.c
++++ b/drivers/net/wireless/realtek/rtw88/pci.c
+@@ -832,6 +832,11 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
+ 
+ 	while (count--) {
+ 		skb = skb_dequeue(&ring->queue);
++		if (!skb) {
++			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
++				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
++			break;
++		}
+ 		tx_data = rtw_pci_get_tx_data(skb);
+ 		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
+ 				 PCI_DMA_TODEVICE);
+diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
+index 604dba4f18af..8e4d355dc3ae 100644
+--- a/drivers/nfc/port100.c
++++ b/drivers/nfc/port100.c
+@@ -565,7 +565,7 @@ static void port100_tx_update_payload_len(void *_frame, int len)
+ {
+ 	struct port100_frame *frame = _frame;
+ 
+-	frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len);
++	le16_add_cpu(&frame->datalen, len);
+ }
+ 
+ static bool port100_rx_frame_is_valid(void *_frame)
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 365a2ddbeaa7..da392b50f73e 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -167,7 +167,6 @@ struct nvme_queue {
+ 	 /* only used for poll queues: */
+ 	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
+ 	volatile struct nvme_completion *cqes;
+-	struct blk_mq_tags **tags;
+ 	dma_addr_t sq_dma_addr;
+ 	dma_addr_t cq_dma_addr;
+ 	u32 __iomem *q_db;
+@@ -376,29 +375,17 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ 
+ 	WARN_ON(hctx_idx != 0);
+ 	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
+-	WARN_ON(nvmeq->tags);
+ 
+ 	hctx->driver_data = nvmeq;
+-	nvmeq->tags = &dev->admin_tagset.tags[0];
+ 	return 0;
+ }
+ 
+-static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+-{
+-	struct nvme_queue *nvmeq = hctx->driver_data;
+-
+-	nvmeq->tags = NULL;
+-}
+-
+ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ 			  unsigned int hctx_idx)
+ {
+ 	struct nvme_dev *dev = data;
+ 	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
+ 
+-	if (!nvmeq->tags)
+-		nvmeq->tags = &dev->tagset.tags[hctx_idx];
+-
+ 	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
+ 	hctx->driver_data = nvmeq;
+ 	return 0;
+@@ -948,6 +935,13 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
+ 		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
+ }
+ 
++static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
++{
++	if (!nvmeq->qid)
++		return nvmeq->dev->admin_tagset.tags[0];
++	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
++}
++
+ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
+ {
+ 	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
+@@ -972,7 +966,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
+ 		return;
+ 	}
+ 
+-	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
++	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+ 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
+ 	nvme_end_request(req, cqe->status, cqe->result);
+ }
+@@ -1572,7 +1566,6 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
+ 	.queue_rq	= nvme_queue_rq,
+ 	.complete	= nvme_pci_complete_rq,
+ 	.init_hctx	= nvme_admin_init_hctx,
+-	.exit_hctx      = nvme_admin_exit_hctx,
+ 	.init_request	= nvme_init_request,
+ 	.timeout	= nvme_timeout,
+ };
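
The nvme-pci hunks above drop the cached per-queue tags pointer and derive the tag set from the queue ID on demand: qid 0 maps to the admin tag set, everything else indexes the I/O tag sets, which also removes the need for the exit_hctx teardown hook. A toy version of that lookup, with invented structures standing in for the blk-mq types:

#include <stdio.h>

#define NR_IO_QUEUES 4

struct tagset { const char *name; };

struct dev {
	struct tagset admin_tagset;
	struct tagset io_tagsets[NR_IO_QUEUES];
};

struct queue {
	struct dev *dev;
	unsigned int qid;   /* 0 = admin queue, 1..N = I/O queues */
};

/* Derive the tag set from the queue ID instead of caching a pointer. */
static struct tagset *queue_tagset(struct queue *q)
{
	if (!q->qid)
		return &q->dev->admin_tagset;
	return &q->dev->io_tagsets[q->qid - 1];
}

int main(void)
{
	struct dev d = { .admin_tagset = { "admin" } };

	for (int i = 0; i < NR_IO_QUEUES; i++)
		d.io_tagsets[i].name = "io";

	struct queue admin = { &d, 0 }, io1 = { &d, 1 };

	printf("qid 0 -> %s tags\n", queue_tagset(&admin)->name);
	printf("qid 1 -> %s tags\n", queue_tagset(&io1)->name);
	return 0;
}
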
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 28438b833c1b..461987f669c5 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -555,7 +555,8 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
+ 	} else {
+ 		struct nvmet_ns *old;
+ 
+-		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
++		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
++					lockdep_is_held(&subsys->lock)) {
+ 			BUG_ON(ns->nsid == old->nsid);
+ 			if (ns->nsid < old->nsid)
+ 				break;
+@@ -938,6 +939,17 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
+ }
+ EXPORT_SYMBOL_GPL(nvmet_check_data_len);
+ 
++bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
++{
++	if (unlikely(data_len > req->transfer_len)) {
++		req->error_loc = offsetof(struct nvme_common_command, dptr);
++		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
++		return false;
++	}
++
++	return true;
++}
++
+ int nvmet_req_alloc_sgl(struct nvmet_req *req)
+ {
+ 	struct pci_dev *p2p_dev = NULL;
+@@ -1172,7 +1184,8 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
+ 
+ 	ctrl->p2p_client = get_device(req->p2p_client);
+ 
+-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
++	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
++				lockdep_is_held(&ctrl->subsys->lock))
+ 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+ }
+ 
+diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
+index b6fca0e421ef..ea0e596be15d 100644
+--- a/drivers/nvme/target/io-cmd-bdev.c
++++ b/drivers/nvme/target/io-cmd-bdev.c
+@@ -280,7 +280,7 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
+ 
+ static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
+ {
+-	if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
++	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+ 		return;
+ 
+ 	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
+diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
+index caebfce06605..cd5670b83118 100644
+--- a/drivers/nvme/target/io-cmd-file.c
++++ b/drivers/nvme/target/io-cmd-file.c
+@@ -336,7 +336,7 @@ static void nvmet_file_dsm_work(struct work_struct *w)
+ 
+ static void nvmet_file_execute_dsm(struct nvmet_req *req)
+ {
+-	if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
++	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+ 		return;
+ 	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
+ 	schedule_work(&req->f.work);
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 46df45e837c9..eda28b22a2c8 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -374,6 +374,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
+ void nvmet_req_uninit(struct nvmet_req *req);
+ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
++bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
+ void nvmet_req_complete(struct nvmet_req *req, u16 status);
+ int nvmet_req_alloc_sgl(struct nvmet_req *req);
+ void nvmet_req_free_sgl(struct nvmet_req *req);
+diff --git a/drivers/opp/of.c b/drivers/opp/of.c
+index 1cbb58240b80..1e5fcdee043c 100644
+--- a/drivers/opp/of.c
++++ b/drivers/opp/of.c
+@@ -678,15 +678,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
+ 			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+ 				ret);
+ 			of_node_put(np);
+-			return ret;
++			goto put_list_kref;
+ 		} else if (opp) {
+ 			count++;
+ 		}
+ 	}
+ 
+ 	/* There should be one or more OPPs defined */
+-	if (WARN_ON(!count))
+-		return -ENOENT;
++	if (WARN_ON(!count)) {
++		ret = -ENOENT;
++		goto put_list_kref;
++	}
+ 
+ 	list_for_each_entry(opp, &opp_table->opp_list, node)
+ 		pstate_count += !!opp->pstate;
+@@ -695,7 +697,8 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
+ 	if (pstate_count && pstate_count != count) {
+ 		dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
+ 			count, pstate_count);
+-		return -ENOENT;
++		ret = -ENOENT;
++		goto put_list_kref;
+ 	}
+ 
+ 	if (pstate_count)
+@@ -704,6 +707,11 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
+ 	opp_table->parsed_static_opps = true;
+ 
+ 	return 0;
++
++put_list_kref:
++	_put_opp_list_kref(opp_table);
++
++	return ret;
+ }
+ 
+ /* Initializes OPP tables based on old-deprecated bindings */
+@@ -738,6 +746,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
+ 		if (ret) {
+ 			dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
+ 				__func__, freq, ret);
++			_put_opp_list_kref(opp_table);
+ 			return ret;
+ 		}
+ 		nr -= 2;
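
Both OPP hunks above convert early returns on parse failure into a path that drops the reference taken on the OPP list, so an error no longer leaks the list. A compact standalone sketch of that goto-unwind shape; the refcount and parser below are invented for illustration:

#include <stdio.h>

struct table { int refs; };

static void get_ref(struct table *t) { t->refs++; }
static void put_ref(struct table *t) { t->refs--; }

static int parse_entry(int i) { return (i == 3) ? -1 : 0; }  /* fail on entry 3 */

static int add_table(struct table *t, int nr_entries)
{
	int ret = 0;

	get_ref(t);                       /* taken while populating the table */

	for (int i = 0; i < nr_entries; i++) {
		ret = parse_entry(i);
		if (ret)
			goto err_put;     /* error: drop the reference we took */
	}

	return 0;                         /* success: caller keeps the reference */

err_put:
	put_ref(t);
	return ret;
}

int main(void)
{
	struct table t = { 0 };

	add_table(&t, 5);
	printf("refs after failed add: %d\n", t.refs);   /* 0, not leaked */
	return 0;
}
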
+diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
+index b6f064c885c3..3ef0bb281e7c 100644
+--- a/drivers/pci/ats.c
++++ b/drivers/pci/ats.c
+@@ -69,6 +69,7 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
+ 	dev->ats_enabled = 1;
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(pci_enable_ats);
+ 
+ /**
+  * pci_disable_ats - disable the ATS capability
+@@ -87,6 +88,7 @@ void pci_disable_ats(struct pci_dev *dev)
+ 
+ 	dev->ats_enabled = 0;
+ }
++EXPORT_SYMBOL_GPL(pci_disable_ats);
+ 
+ void pci_restore_ats_state(struct pci_dev *dev)
+ {
+diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
+index 0a468c73bae3..8c7f875acf7f 100644
+--- a/drivers/pci/controller/pcie-iproc.c
++++ b/drivers/pci/controller/pcie-iproc.c
+@@ -1588,6 +1588,30 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
+ 			quirk_paxc_disable_msi_parsing);
+ 
++static void quirk_paxc_bridge(struct pci_dev *pdev)
++{
++	/*
++	 * The PCI config space is shared with the PAXC root port and the first
++	 * Ethernet device.  So, we need to workaround this by telling the PCI
++	 * Ethernet device.  So, we need to work around this by telling the PCI
++	 */
++	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
++		pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
++
++	/*
++	 * MPSS is not being set properly (as it is currently 0).  This is
++	 * because that area of the PCI config space is hard coded to zero, and
++	 * is not modifiable by firmware.  Set this to 2 (e.g., 512 byte MPS)
++	 * so that the MPS can be set to the real max value.
++	 */
++	pdev->pcie_mpss = 2;
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
++
+ MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+ MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index e87196cc1a7f..951099279192 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5998,7 +5998,8 @@ EXPORT_SYMBOL_GPL(pci_pr3_present);
+ /**
+  * pci_add_dma_alias - Add a DMA devfn alias for a device
+  * @dev: the PCI device for which alias is added
+- * @devfn: alias slot and function
++ * @devfn_from: alias slot and function
++ * @nr_devfns: number of subsequent devfns to alias
+  *
+  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
+  * which is used to program permissible bus-devfn source addresses for DMA
+@@ -6014,18 +6015,29 @@ EXPORT_SYMBOL_GPL(pci_pr3_present);
+  * cannot be left as a userspace activity).  DMA aliases should therefore
+  * be configured via quirks, such as the PCI fixup header quirk.
+  */
+-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
++void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
+ {
++	int devfn_to;
++
++	nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
++	devfn_to = devfn_from + nr_devfns - 1;
++
+ 	if (!dev->dma_alias_mask)
+-		dev->dma_alias_mask = bitmap_zalloc(U8_MAX, GFP_KERNEL);
++		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
+ 	if (!dev->dma_alias_mask) {
+ 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
+ 		return;
+ 	}
+ 
+-	set_bit(devfn, dev->dma_alias_mask);
+-	pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
+-		 PCI_SLOT(devfn), PCI_FUNC(devfn));
++	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
++
++	if (nr_devfns == 1)
++		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
++				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
++	else if (nr_devfns > 1)
++		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
++				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
++				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
+ }
+ 
+ bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index a0a53bd05a0b..6394e7746fb5 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -4,6 +4,9 @@
+ 
+ #include <linux/pci.h>
+ 
++/* Number of possible devfns: 0.0 to 1f.7 inclusive */
++#define MAX_NR_DEVFNS 256
++
+ #define PCI_FIND_CAP_TTL	48
+ 
+ #define PCI_VSEC_ID_INTEL_TBT	0x1234	/* Thunderbolt */
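
pci_add_dma_alias() above now takes a starting devfn plus a count and marks the whole range in a 256-bit bitmap (one bit per possible slot.function, see MAX_NR_DEVFNS), which lets quirk_pex_vca_alias and the new PLX NTB quirk alias many functions with a single call. A freestanding sketch of the same bitmap bookkeeping in plain C; the helper names are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define MAX_NR_DEVFNS 256            /* devfns 00.0 through 1f.7 */

static uint8_t alias_mask[MAX_NR_DEVFNS / 8];

/* Mark nr_devfns consecutive devfns, starting at devfn_from, as DMA aliases. */
static void add_dma_alias(unsigned devfn_from, unsigned nr_devfns)
{
	if (nr_devfns > MAX_NR_DEVFNS - devfn_from)
		nr_devfns = MAX_NR_DEVFNS - devfn_from;   /* clamp, as the patch does */

	for (unsigned d = devfn_from; d < devfn_from + nr_devfns; d++)
		alias_mask[d / 8] |= 1u << (d % 8);
}

int main(void)
{
	/* PCI_DEVFN(slot, fn) encodes slot * 8 + fn. */
	add_dma_alias(0x10 * 8 + 0, 1);   /* single alias, like quirk_mic_x200 */
	add_dma_alias(0, 256);            /* every devfn, like the PLX NTB quirk */

	unsigned set = 0;
	for (unsigned d = 0; d < MAX_NR_DEVFNS; d++)
		if (alias_mask[d / 8] & (1u << (d % 8)))
			set++;
	printf("%u devfns aliased\n", set);
	return 0;
}
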
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index fbeb9f73ef28..83953752337c 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1871,19 +1871,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2609, quirk_intel_pcie_pm);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260a, quirk_intel_pcie_pm);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260b, quirk_intel_pcie_pm);
+ 
++static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
++{
++	if (dev->d3_delay >= delay)
++		return;
++
++	dev->d3_delay = delay;
++	pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
++		 dev->d3_delay);
++}
++
+ static void quirk_radeon_pm(struct pci_dev *dev)
+ {
+ 	if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+-	    dev->subsystem_device == 0x00e2) {
+-		if (dev->d3_delay < 20) {
+-			dev->d3_delay = 20;
+-			pci_info(dev, "extending delay after power-on from D3 to %d msec\n",
+-				 dev->d3_delay);
+-		}
+-	}
++	    dev->subsystem_device == 0x00e2)
++		quirk_d3hot_delay(dev, 20);
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
+ 
++/*
++ * Ryzen5/7 XHCI controllers fail upon resume from runtime suspend or s2idle.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=205587
++ *
++ * The kernel attempts to transition these devices to D3cold, but that seems
++ * to be ineffective on the platforms in question; the PCI device appears to
++ * remain on in D3hot state. The D3hot-to-D0 transition then requires an
++ * extended delay in order to succeed.
++ */
++static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
++{
++	quirk_d3hot_delay(dev, 20);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
++
+ #ifdef CONFIG_X86_IO_APIC
+ static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
+ {
+@@ -2381,32 +2402,6 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
+ 			 PCI_DEVICE_ID_TIGON3_5719,
+ 			 quirk_brcm_5719_limit_mrrs);
+ 
+-#ifdef CONFIG_PCIE_IPROC_PLATFORM
+-static void quirk_paxc_bridge(struct pci_dev *pdev)
+-{
+-	/*
+-	 * The PCI config space is shared with the PAXC root port and the first
+-	 * Ethernet device.  So, we need to workaround this by telling the PCI
+-	 * code that the bridge is not an Ethernet device.
+-	 */
+-	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+-		pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
+-
+-	/*
+-	 * MPSS is not being set properly (as it is currently 0).  This is
+-	 * because that area of the PCI config space is hard coded to zero, and
+-	 * is not modifiable by firmware.  Set this to 2 (e.g., 512 byte MPS)
+-	 * so that the MPS can be set to the real max value.
+-	 */
+-	pdev->pcie_mpss = 2;
+-}
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
+-#endif
+-
+ /*
+  * Originally in EDAC sources for i82875P: Intel tells BIOS developers to
+  * hide device 6 which configures the overflow device access containing the
+@@ -3932,7 +3927,7 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+ static void quirk_dma_func0_alias(struct pci_dev *dev)
+ {
+ 	if (PCI_FUNC(dev->devfn) != 0)
+-		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
++		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
+ }
+ 
+ /*
+@@ -3946,7 +3941,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
+ static void quirk_dma_func1_alias(struct pci_dev *dev)
+ {
+ 	if (PCI_FUNC(dev->devfn) != 1)
+-		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
++		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
+ }
+ 
+ /*
+@@ -4031,7 +4026,7 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev)
+ 
+ 	id = pci_match_id(fixed_dma_alias_tbl, dev);
+ 	if (id)
+-		pci_add_dma_alias(dev, id->driver_data);
++		pci_add_dma_alias(dev, id->driver_data, 1);
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
+ 
+@@ -4072,9 +4067,9 @@ DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
+  */
+ static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
+ {
+-	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
+-	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
+-	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
++	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
++	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
++	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
+@@ -4098,13 +4093,8 @@ static void quirk_pex_vca_alias(struct pci_dev *pdev)
+ 	const unsigned int num_pci_slots = 0x20;
+ 	unsigned int slot;
+ 
+-	for (slot = 0; slot < num_pci_slots; slot++) {
+-		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0));
+-		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x1));
+-		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x2));
+-		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x3));
+-		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x4));
+-	}
++	for (slot = 0; slot < num_pci_slots; slot++)
++		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
+@@ -5339,7 +5329,7 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
+ 			pci_dbg(pdev,
+ 				"Aliasing Partition %d Proxy ID %02x.%d\n",
+ 				pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
+-			pci_add_dma_alias(pdev, devfn);
++			pci_add_dma_alias(pdev, devfn, 1);
+ 		}
+ 	}
+ 
+@@ -5381,6 +5371,21 @@ SWITCHTEC_QUIRK(0x8574);  /* PFXI 64XG3 */
+ SWITCHTEC_QUIRK(0x8575);  /* PFXI 80XG3 */
+ SWITCHTEC_QUIRK(0x8576);  /* PFXI 96XG3 */
+ 
++/*
++ * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
++ * These IDs are used to forward responses to the originator on the other
++ * side of the NTB.  Alias all possible IDs to the NTB to permit access when
++ * the IOMMU is turned on.
++ */
++static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
++{
++	pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");
++	/* PLX NTB may use all 256 devfns */
++	pci_add_dma_alias(pdev, 0, 256);
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
++
+ /*
+  * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does
+  * not always reset the secondary Nvidia GPU between reboots if the system
+diff --git a/drivers/pci/search.c b/drivers/pci/search.c
+index bade14002fd8..e4dbdef5aef0 100644
+--- a/drivers/pci/search.c
++++ b/drivers/pci/search.c
+@@ -41,9 +41,9 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
+ 	 * DMA, iterate over that too.
+ 	 */
+ 	if (unlikely(pdev->dma_alias_mask)) {
+-		u8 devfn;
++		unsigned int devfn;
+ 
+-		for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) {
++		for_each_set_bit(devfn, pdev->dma_alias_mask, MAX_NR_DEVFNS) {
+ 			ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn),
+ 				 data);
+ 			if (ret)
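
The search.c hunk widens the iterator from u8 to unsigned int alongside the bitmap growing from U8_MAX to MAX_NR_DEVFNS (256) bits: an 8-bit counter cannot represent the terminating index 256, so it would wrap back to 0 instead of ending the walk. The arithmetic, reproduced standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* An 8-bit index cannot hold the terminating value 256. */
	uint8_t narrow = 255;
	unsigned int wide = 255;

	narrow++;   /* wraps to 0             */
	wide++;     /* reaches 256, loop ends */

	printf("u8:           255 + 1 = %u\n", narrow);
	printf("unsigned int: 255 + 1 = %u\n", wide);
	return 0;
}
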
+diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
+index 55083c67b2bb..95dca2cb5265 100644
+--- a/drivers/perf/fsl_imx8_ddr_perf.c
++++ b/drivers/perf/fsl_imx8_ddr_perf.c
+@@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev)
+ 
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
+-		goto ddr_perf_err;
++		goto cpuhp_state_err;
+ 	}
+ 
+ 	pmu->cpuhp_state = ret;
+ 
+ 	/* Register the pmu instance for cpu hotplug */
+-	cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
++	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
++	if (ret) {
++		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
++		goto cpuhp_instance_err;
++	}
+ 
+ 	/* Request irq */
+ 	irq = of_irq_get(np, 0);
+@@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ ddr_perf_err:
+-	if (pmu->cpuhp_state)
+-		cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+-
++	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
++cpuhp_instance_err:
++	cpuhp_remove_multi_state(pmu->cpuhp_state);
++cpuhp_state_err:
+ 	ida_simple_remove(&ddr_ida, pmu->id);
+ 	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
+ 	return ret;
+@@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
+ 	struct ddr_pmu *pmu = platform_get_drvdata(pdev);
+ 
+ 	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
++	cpuhp_remove_multi_state(pmu->cpuhp_state);
+ 	irq_set_affinity_hint(pmu->irq, NULL);
+ 
+ 	perf_pmu_unregister(&pmu->pmu);
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index 72ffd19448e5..ce9cf50121bd 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -753,7 +753,13 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
+ 
+ 	raw_spin_lock_irqsave(&byt_lock, flags);
+ 	value = readl(reg);
+-	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
++
++	/* Do not clear direct-irq enabled IRQs (from gpio_disable_free) */
++	if (value & BYT_DIRECT_IRQ_EN)
++		/* nothing to do */ ;
++	else
++		value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
++
+ 	writel(value, reg);
+ 	raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
+index 58572b15b3ce..08a86f6fdea6 100644
+--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
++++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
+@@ -2,7 +2,7 @@
+ /*
+  * Intel Tiger Lake PCH pinctrl/GPIO driver
+  *
+- * Copyright (C) 2019, Intel Corporation
++ * Copyright (C) 2019 - 2020, Intel Corporation
+  * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+  *          Mika Westerberg <mika.westerberg@linux.intel.com>
+  */
+@@ -21,15 +21,19 @@
+ #define TGL_GPI_IS	0x100
+ #define TGL_GPI_IE	0x120
+ 
+-#define TGL_GPP(r, s, e)				\
++#define TGL_NO_GPIO	-1
++
++#define TGL_GPP(r, s, e, g)				\
+ 	{						\
+ 		.reg_num = (r),				\
+ 		.base = (s),				\
+ 		.size = ((e) - (s) + 1),		\
++		.gpio_base = (g),			\
+ 	}
+ 
+-#define TGL_COMMUNITY(s, e, g)				\
++#define TGL_COMMUNITY(b, s, e, g)			\
+ 	{						\
++		.barno = (b),				\
+ 		.padown_offset = TGL_PAD_OWN,		\
+ 		.padcfglock_offset = TGL_PADCFGLOCK,	\
+ 		.hostown_offset = TGL_HOSTSW_OWN,	\
+@@ -42,7 +46,7 @@
+ 	}
+ 
+ /* Tiger Lake-LP */
+-static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
++static const struct pinctrl_pin_desc tgllp_pins[] = {
+ 	/* GPP_B */
+ 	PINCTRL_PIN(0, "CORE_VID_0"),
+ 	PINCTRL_PIN(1, "CORE_VID_1"),
+@@ -113,324 +117,273 @@ static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
+ 	PINCTRL_PIN(64, "GPPC_A_22"),
+ 	PINCTRL_PIN(65, "I2S1_SCLK"),
+ 	PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+-};
+-
+-static const struct intel_padgroup tgllp_community0_gpps[] = {
+-	TGL_GPP(0, 0, 25),	/* GPP_B */
+-	TGL_GPP(1, 26, 41),	/* GPP_T */
+-	TGL_GPP(2, 42, 66),	/* GPP_A */
+-};
+-
+-static const struct intel_community tgllp_community0[] = {
+-	TGL_COMMUNITY(0, 66, tgllp_community0_gpps),
+-};
+-
+-static const struct intel_pinctrl_soc_data tgllp_community0_soc_data = {
+-	.uid = "0",
+-	.pins = tgllp_community0_pins,
+-	.npins = ARRAY_SIZE(tgllp_community0_pins),
+-	.communities = tgllp_community0,
+-	.ncommunities = ARRAY_SIZE(tgllp_community0),
+-};
+-
+-static const struct pinctrl_pin_desc tgllp_community1_pins[] = {
+ 	/* GPP_S */
+-	PINCTRL_PIN(0, "SNDW0_CLK"),
+-	PINCTRL_PIN(1, "SNDW0_DATA"),
+-	PINCTRL_PIN(2, "SNDW1_CLK"),
+-	PINCTRL_PIN(3, "SNDW1_DATA"),
+-	PINCTRL_PIN(4, "SNDW2_CLK"),
+-	PINCTRL_PIN(5, "SNDW2_DATA"),
+-	PINCTRL_PIN(6, "SNDW3_CLK"),
+-	PINCTRL_PIN(7, "SNDW3_DATA"),
++	PINCTRL_PIN(67, "SNDW0_CLK"),
++	PINCTRL_PIN(68, "SNDW0_DATA"),
++	PINCTRL_PIN(69, "SNDW1_CLK"),
++	PINCTRL_PIN(70, "SNDW1_DATA"),
++	PINCTRL_PIN(71, "SNDW2_CLK"),
++	PINCTRL_PIN(72, "SNDW2_DATA"),
++	PINCTRL_PIN(73, "SNDW3_CLK"),
++	PINCTRL_PIN(74, "SNDW3_DATA"),
+ 	/* GPP_H */
+-	PINCTRL_PIN(8, "GPPC_H_0"),
+-	PINCTRL_PIN(9, "GPPC_H_1"),
+-	PINCTRL_PIN(10, "GPPC_H_2"),
+-	PINCTRL_PIN(11, "SX_EXIT_HOLDOFFB"),
+-	PINCTRL_PIN(12, "I2C2_SDA"),
+-	PINCTRL_PIN(13, "I2C2_SCL"),
+-	PINCTRL_PIN(14, "I2C3_SDA"),
+-	PINCTRL_PIN(15, "I2C3_SCL"),
+-	PINCTRL_PIN(16, "I2C4_SDA"),
+-	PINCTRL_PIN(17, "I2C4_SCL"),
+-	PINCTRL_PIN(18, "SRCCLKREQB_4"),
+-	PINCTRL_PIN(19, "SRCCLKREQB_5"),
+-	PINCTRL_PIN(20, "M2_SKT2_CFG_0"),
+-	PINCTRL_PIN(21, "M2_SKT2_CFG_1"),
+-	PINCTRL_PIN(22, "M2_SKT2_CFG_2"),
+-	PINCTRL_PIN(23, "M2_SKT2_CFG_3"),
+-	PINCTRL_PIN(24, "DDPB_CTRLCLK"),
+-	PINCTRL_PIN(25, "DDPB_CTRLDATA"),
+-	PINCTRL_PIN(26, "CPU_C10_GATEB"),
+-	PINCTRL_PIN(27, "TIME_SYNC_0"),
+-	PINCTRL_PIN(28, "IMGCLKOUT_1"),
+-	PINCTRL_PIN(29, "IMGCLKOUT_2"),
+-	PINCTRL_PIN(30, "IMGCLKOUT_3"),
+-	PINCTRL_PIN(31, "IMGCLKOUT_4"),
++	PINCTRL_PIN(75, "GPPC_H_0"),
++	PINCTRL_PIN(76, "GPPC_H_1"),
++	PINCTRL_PIN(77, "GPPC_H_2"),
++	PINCTRL_PIN(78, "SX_EXIT_HOLDOFFB"),
++	PINCTRL_PIN(79, "I2C2_SDA"),
++	PINCTRL_PIN(80, "I2C2_SCL"),
++	PINCTRL_PIN(81, "I2C3_SDA"),
++	PINCTRL_PIN(82, "I2C3_SCL"),
++	PINCTRL_PIN(83, "I2C4_SDA"),
++	PINCTRL_PIN(84, "I2C4_SCL"),
++	PINCTRL_PIN(85, "SRCCLKREQB_4"),
++	PINCTRL_PIN(86, "SRCCLKREQB_5"),
++	PINCTRL_PIN(87, "M2_SKT2_CFG_0"),
++	PINCTRL_PIN(88, "M2_SKT2_CFG_1"),
++	PINCTRL_PIN(89, "M2_SKT2_CFG_2"),
++	PINCTRL_PIN(90, "M2_SKT2_CFG_3"),
++	PINCTRL_PIN(91, "DDPB_CTRLCLK"),
++	PINCTRL_PIN(92, "DDPB_CTRLDATA"),
++	PINCTRL_PIN(93, "CPU_C10_GATEB"),
++	PINCTRL_PIN(94, "TIME_SYNC_0"),
++	PINCTRL_PIN(95, "IMGCLKOUT_1"),
++	PINCTRL_PIN(96, "IMGCLKOUT_2"),
++	PINCTRL_PIN(97, "IMGCLKOUT_3"),
++	PINCTRL_PIN(98, "IMGCLKOUT_4"),
+ 	/* GPP_D */
+-	PINCTRL_PIN(32, "ISH_GP_0"),
+-	PINCTRL_PIN(33, "ISH_GP_1"),
+-	PINCTRL_PIN(34, "ISH_GP_2"),
+-	PINCTRL_PIN(35, "ISH_GP_3"),
+-	PINCTRL_PIN(36, "IMGCLKOUT_0"),
+-	PINCTRL_PIN(37, "SRCCLKREQB_0"),
+-	PINCTRL_PIN(38, "SRCCLKREQB_1"),
+-	PINCTRL_PIN(39, "SRCCLKREQB_2"),
+-	PINCTRL_PIN(40, "SRCCLKREQB_3"),
+-	PINCTRL_PIN(41, "ISH_SPI_CSB"),
+-	PINCTRL_PIN(42, "ISH_SPI_CLK"),
+-	PINCTRL_PIN(43, "ISH_SPI_MISO"),
+-	PINCTRL_PIN(44, "ISH_SPI_MOSI"),
+-	PINCTRL_PIN(45, "ISH_UART0_RXD"),
+-	PINCTRL_PIN(46, "ISH_UART0_TXD"),
+-	PINCTRL_PIN(47, "ISH_UART0_RTSB"),
+-	PINCTRL_PIN(48, "ISH_UART0_CTSB"),
+-	PINCTRL_PIN(49, "ISH_GP_4"),
+-	PINCTRL_PIN(50, "ISH_GP_5"),
+-	PINCTRL_PIN(51, "I2S_MCLK1_OUT"),
+-	PINCTRL_PIN(52, "GSPI2_CLK_LOOPBK"),
++	PINCTRL_PIN(99, "ISH_GP_0"),
++	PINCTRL_PIN(100, "ISH_GP_1"),
++	PINCTRL_PIN(101, "ISH_GP_2"),
++	PINCTRL_PIN(102, "ISH_GP_3"),
++	PINCTRL_PIN(103, "IMGCLKOUT_0"),
++	PINCTRL_PIN(104, "SRCCLKREQB_0"),
++	PINCTRL_PIN(105, "SRCCLKREQB_1"),
++	PINCTRL_PIN(106, "SRCCLKREQB_2"),
++	PINCTRL_PIN(107, "SRCCLKREQB_3"),
++	PINCTRL_PIN(108, "ISH_SPI_CSB"),
++	PINCTRL_PIN(109, "ISH_SPI_CLK"),
++	PINCTRL_PIN(110, "ISH_SPI_MISO"),
++	PINCTRL_PIN(111, "ISH_SPI_MOSI"),
++	PINCTRL_PIN(112, "ISH_UART0_RXD"),
++	PINCTRL_PIN(113, "ISH_UART0_TXD"),
++	PINCTRL_PIN(114, "ISH_UART0_RTSB"),
++	PINCTRL_PIN(115, "ISH_UART0_CTSB"),
++	PINCTRL_PIN(116, "ISH_GP_4"),
++	PINCTRL_PIN(117, "ISH_GP_5"),
++	PINCTRL_PIN(118, "I2S_MCLK1_OUT"),
++	PINCTRL_PIN(119, "GSPI2_CLK_LOOPBK"),
+ 	/* GPP_U */
+-	PINCTRL_PIN(53, "UART3_RXD"),
+-	PINCTRL_PIN(54, "UART3_TXD"),
+-	PINCTRL_PIN(55, "UART3_RTSB"),
+-	PINCTRL_PIN(56, "UART3_CTSB"),
+-	PINCTRL_PIN(57, "GSPI3_CS0B"),
+-	PINCTRL_PIN(58, "GSPI3_CLK"),
+-	PINCTRL_PIN(59, "GSPI3_MISO"),
+-	PINCTRL_PIN(60, "GSPI3_MOSI"),
+-	PINCTRL_PIN(61, "GSPI4_CS0B"),
+-	PINCTRL_PIN(62, "GSPI4_CLK"),
+-	PINCTRL_PIN(63, "GSPI4_MISO"),
+-	PINCTRL_PIN(64, "GSPI4_MOSI"),
+-	PINCTRL_PIN(65, "GSPI5_CS0B"),
+-	PINCTRL_PIN(66, "GSPI5_CLK"),
+-	PINCTRL_PIN(67, "GSPI5_MISO"),
+-	PINCTRL_PIN(68, "GSPI5_MOSI"),
+-	PINCTRL_PIN(69, "GSPI6_CS0B"),
+-	PINCTRL_PIN(70, "GSPI6_CLK"),
+-	PINCTRL_PIN(71, "GSPI6_MISO"),
+-	PINCTRL_PIN(72, "GSPI6_MOSI"),
+-	PINCTRL_PIN(73, "GSPI3_CLK_LOOPBK"),
+-	PINCTRL_PIN(74, "GSPI4_CLK_LOOPBK"),
+-	PINCTRL_PIN(75, "GSPI5_CLK_LOOPBK"),
+-	PINCTRL_PIN(76, "GSPI6_CLK_LOOPBK"),
++	PINCTRL_PIN(120, "UART3_RXD"),
++	PINCTRL_PIN(121, "UART3_TXD"),
++	PINCTRL_PIN(122, "UART3_RTSB"),
++	PINCTRL_PIN(123, "UART3_CTSB"),
++	PINCTRL_PIN(124, "GSPI3_CS0B"),
++	PINCTRL_PIN(125, "GSPI3_CLK"),
++	PINCTRL_PIN(126, "GSPI3_MISO"),
++	PINCTRL_PIN(127, "GSPI3_MOSI"),
++	PINCTRL_PIN(128, "GSPI4_CS0B"),
++	PINCTRL_PIN(129, "GSPI4_CLK"),
++	PINCTRL_PIN(130, "GSPI4_MISO"),
++	PINCTRL_PIN(131, "GSPI4_MOSI"),
++	PINCTRL_PIN(132, "GSPI5_CS0B"),
++	PINCTRL_PIN(133, "GSPI5_CLK"),
++	PINCTRL_PIN(134, "GSPI5_MISO"),
++	PINCTRL_PIN(135, "GSPI5_MOSI"),
++	PINCTRL_PIN(136, "GSPI6_CS0B"),
++	PINCTRL_PIN(137, "GSPI6_CLK"),
++	PINCTRL_PIN(138, "GSPI6_MISO"),
++	PINCTRL_PIN(139, "GSPI6_MOSI"),
++	PINCTRL_PIN(140, "GSPI3_CLK_LOOPBK"),
++	PINCTRL_PIN(141, "GSPI4_CLK_LOOPBK"),
++	PINCTRL_PIN(142, "GSPI5_CLK_LOOPBK"),
++	PINCTRL_PIN(143, "GSPI6_CLK_LOOPBK"),
+ 	/* vGPIO */
+-	PINCTRL_PIN(77, "CNV_BTEN"),
+-	PINCTRL_PIN(78, "CNV_BT_HOST_WAKEB"),
+-	PINCTRL_PIN(79, "CNV_BT_IF_SELECT"),
+-	PINCTRL_PIN(80, "vCNV_BT_UART_TXD"),
+-	PINCTRL_PIN(81, "vCNV_BT_UART_RXD"),
+-	PINCTRL_PIN(82, "vCNV_BT_UART_CTS_B"),
+-	PINCTRL_PIN(83, "vCNV_BT_UART_RTS_B"),
+-	PINCTRL_PIN(84, "vCNV_MFUART1_TXD"),
+-	PINCTRL_PIN(85, "vCNV_MFUART1_RXD"),
+-	PINCTRL_PIN(86, "vCNV_MFUART1_CTS_B"),
+-	PINCTRL_PIN(87, "vCNV_MFUART1_RTS_B"),
+-	PINCTRL_PIN(88, "vUART0_TXD"),
+-	PINCTRL_PIN(89, "vUART0_RXD"),
+-	PINCTRL_PIN(90, "vUART0_CTS_B"),
+-	PINCTRL_PIN(91, "vUART0_RTS_B"),
+-	PINCTRL_PIN(92, "vISH_UART0_TXD"),
+-	PINCTRL_PIN(93, "vISH_UART0_RXD"),
+-	PINCTRL_PIN(94, "vISH_UART0_CTS_B"),
+-	PINCTRL_PIN(95, "vISH_UART0_RTS_B"),
+-	PINCTRL_PIN(96, "vCNV_BT_I2S_BCLK"),
+-	PINCTRL_PIN(97, "vCNV_BT_I2S_WS_SYNC"),
+-	PINCTRL_PIN(98, "vCNV_BT_I2S_SDO"),
+-	PINCTRL_PIN(99, "vCNV_BT_I2S_SDI"),
+-	PINCTRL_PIN(100, "vI2S2_SCLK"),
+-	PINCTRL_PIN(101, "vI2S2_SFRM"),
+-	PINCTRL_PIN(102, "vI2S2_TXD"),
+-	PINCTRL_PIN(103, "vI2S2_RXD"),
+-};
+-
+-static const struct intel_padgroup tgllp_community1_gpps[] = {
+-	TGL_GPP(0, 0, 7),	/* GPP_S */
+-	TGL_GPP(1, 8, 31),	/* GPP_H */
+-	TGL_GPP(2, 32, 52),	/* GPP_D */
+-	TGL_GPP(3, 53, 76),	/* GPP_U */
+-	TGL_GPP(4, 77, 103),	/* vGPIO */
+-};
+-
+-static const struct intel_community tgllp_community1[] = {
+-	TGL_COMMUNITY(0, 103, tgllp_community1_gpps),
+-};
+-
+-static const struct intel_pinctrl_soc_data tgllp_community1_soc_data = {
+-	.uid = "1",
+-	.pins = tgllp_community1_pins,
+-	.npins = ARRAY_SIZE(tgllp_community1_pins),
+-	.communities = tgllp_community1,
+-	.ncommunities = ARRAY_SIZE(tgllp_community1),
+-};
+-
+-static const struct pinctrl_pin_desc tgllp_community4_pins[] = {
++	PINCTRL_PIN(144, "CNV_BTEN"),
++	PINCTRL_PIN(145, "CNV_BT_HOST_WAKEB"),
++	PINCTRL_PIN(146, "CNV_BT_IF_SELECT"),
++	PINCTRL_PIN(147, "vCNV_BT_UART_TXD"),
++	PINCTRL_PIN(148, "vCNV_BT_UART_RXD"),
++	PINCTRL_PIN(149, "vCNV_BT_UART_CTS_B"),
++	PINCTRL_PIN(150, "vCNV_BT_UART_RTS_B"),
++	PINCTRL_PIN(151, "vCNV_MFUART1_TXD"),
++	PINCTRL_PIN(152, "vCNV_MFUART1_RXD"),
++	PINCTRL_PIN(153, "vCNV_MFUART1_CTS_B"),
++	PINCTRL_PIN(154, "vCNV_MFUART1_RTS_B"),
++	PINCTRL_PIN(155, "vUART0_TXD"),
++	PINCTRL_PIN(156, "vUART0_RXD"),
++	PINCTRL_PIN(157, "vUART0_CTS_B"),
++	PINCTRL_PIN(158, "vUART0_RTS_B"),
++	PINCTRL_PIN(159, "vISH_UART0_TXD"),
++	PINCTRL_PIN(160, "vISH_UART0_RXD"),
++	PINCTRL_PIN(161, "vISH_UART0_CTS_B"),
++	PINCTRL_PIN(162, "vISH_UART0_RTS_B"),
++	PINCTRL_PIN(163, "vCNV_BT_I2S_BCLK"),
++	PINCTRL_PIN(164, "vCNV_BT_I2S_WS_SYNC"),
++	PINCTRL_PIN(165, "vCNV_BT_I2S_SDO"),
++	PINCTRL_PIN(166, "vCNV_BT_I2S_SDI"),
++	PINCTRL_PIN(167, "vI2S2_SCLK"),
++	PINCTRL_PIN(168, "vI2S2_SFRM"),
++	PINCTRL_PIN(169, "vI2S2_TXD"),
++	PINCTRL_PIN(170, "vI2S2_RXD"),
+ 	/* GPP_C */
+-	PINCTRL_PIN(0, "SMBCLK"),
+-	PINCTRL_PIN(1, "SMBDATA"),
+-	PINCTRL_PIN(2, "SMBALERTB"),
+-	PINCTRL_PIN(3, "SML0CLK"),
+-	PINCTRL_PIN(4, "SML0DATA"),
+-	PINCTRL_PIN(5, "SML0ALERTB"),
+-	PINCTRL_PIN(6, "SML1CLK"),
+-	PINCTRL_PIN(7, "SML1DATA"),
+-	PINCTRL_PIN(8, "UART0_RXD"),
+-	PINCTRL_PIN(9, "UART0_TXD"),
+-	PINCTRL_PIN(10, "UART0_RTSB"),
+-	PINCTRL_PIN(11, "UART0_CTSB"),
+-	PINCTRL_PIN(12, "UART1_RXD"),
+-	PINCTRL_PIN(13, "UART1_TXD"),
+-	PINCTRL_PIN(14, "UART1_RTSB"),
+-	PINCTRL_PIN(15, "UART1_CTSB"),
+-	PINCTRL_PIN(16, "I2C0_SDA"),
+-	PINCTRL_PIN(17, "I2C0_SCL"),
+-	PINCTRL_PIN(18, "I2C1_SDA"),
+-	PINCTRL_PIN(19, "I2C1_SCL"),
+-	PINCTRL_PIN(20, "UART2_RXD"),
+-	PINCTRL_PIN(21, "UART2_TXD"),
+-	PINCTRL_PIN(22, "UART2_RTSB"),
+-	PINCTRL_PIN(23, "UART2_CTSB"),
++	PINCTRL_PIN(171, "SMBCLK"),
++	PINCTRL_PIN(172, "SMBDATA"),
++	PINCTRL_PIN(173, "SMBALERTB"),
++	PINCTRL_PIN(174, "SML0CLK"),
++	PINCTRL_PIN(175, "SML0DATA"),
++	PINCTRL_PIN(176, "SML0ALERTB"),
++	PINCTRL_PIN(177, "SML1CLK"),
++	PINCTRL_PIN(178, "SML1DATA"),
++	PINCTRL_PIN(179, "UART0_RXD"),
++	PINCTRL_PIN(180, "UART0_TXD"),
++	PINCTRL_PIN(181, "UART0_RTSB"),
++	PINCTRL_PIN(182, "UART0_CTSB"),
++	PINCTRL_PIN(183, "UART1_RXD"),
++	PINCTRL_PIN(184, "UART1_TXD"),
++	PINCTRL_PIN(185, "UART1_RTSB"),
++	PINCTRL_PIN(186, "UART1_CTSB"),
++	PINCTRL_PIN(187, "I2C0_SDA"),
++	PINCTRL_PIN(188, "I2C0_SCL"),
++	PINCTRL_PIN(189, "I2C1_SDA"),
++	PINCTRL_PIN(190, "I2C1_SCL"),
++	PINCTRL_PIN(191, "UART2_RXD"),
++	PINCTRL_PIN(192, "UART2_TXD"),
++	PINCTRL_PIN(193, "UART2_RTSB"),
++	PINCTRL_PIN(194, "UART2_CTSB"),
+ 	/* GPP_F */
+-	PINCTRL_PIN(24, "CNV_BRI_DT"),
+-	PINCTRL_PIN(25, "CNV_BRI_RSP"),
+-	PINCTRL_PIN(26, "CNV_RGI_DT"),
+-	PINCTRL_PIN(27, "CNV_RGI_RSP"),
+-	PINCTRL_PIN(28, "CNV_RF_RESET_B"),
+-	PINCTRL_PIN(29, "GPPC_F_5"),
+-	PINCTRL_PIN(30, "CNV_PA_BLANKING"),
+-	PINCTRL_PIN(31, "GPPC_F_7"),
+-	PINCTRL_PIN(32, "I2S_MCLK2_INOUT"),
+-	PINCTRL_PIN(33, "BOOTMPC"),
+-	PINCTRL_PIN(34, "GPPC_F_10"),
+-	PINCTRL_PIN(35, "GPPC_F_11"),
+-	PINCTRL_PIN(36, "GSXDOUT"),
+-	PINCTRL_PIN(37, "GSXSLOAD"),
+-	PINCTRL_PIN(38, "GSXDIN"),
+-	PINCTRL_PIN(39, "GSXSRESETB"),
+-	PINCTRL_PIN(40, "GSXCLK"),
+-	PINCTRL_PIN(41, "GMII_MDC"),
+-	PINCTRL_PIN(42, "GMII_MDIO"),
+-	PINCTRL_PIN(43, "SRCCLKREQB_6"),
+-	PINCTRL_PIN(44, "EXT_PWR_GATEB"),
+-	PINCTRL_PIN(45, "EXT_PWR_GATE2B"),
+-	PINCTRL_PIN(46, "VNN_CTRL"),
+-	PINCTRL_PIN(47, "V1P05_CTRL"),
+-	PINCTRL_PIN(48, "GPPF_CLK_LOOPBACK"),
++	PINCTRL_PIN(195, "CNV_BRI_DT"),
++	PINCTRL_PIN(196, "CNV_BRI_RSP"),
++	PINCTRL_PIN(197, "CNV_RGI_DT"),
++	PINCTRL_PIN(198, "CNV_RGI_RSP"),
++	PINCTRL_PIN(199, "CNV_RF_RESET_B"),
++	PINCTRL_PIN(200, "GPPC_F_5"),
++	PINCTRL_PIN(201, "CNV_PA_BLANKING"),
++	PINCTRL_PIN(202, "GPPC_F_7"),
++	PINCTRL_PIN(203, "I2S_MCLK2_INOUT"),
++	PINCTRL_PIN(204, "BOOTMPC"),
++	PINCTRL_PIN(205, "GPPC_F_10"),
++	PINCTRL_PIN(206, "GPPC_F_11"),
++	PINCTRL_PIN(207, "GSXDOUT"),
++	PINCTRL_PIN(208, "GSXSLOAD"),
++	PINCTRL_PIN(209, "GSXDIN"),
++	PINCTRL_PIN(210, "GSXSRESETB"),
++	PINCTRL_PIN(211, "GSXCLK"),
++	PINCTRL_PIN(212, "GMII_MDC"),
++	PINCTRL_PIN(213, "GMII_MDIO"),
++	PINCTRL_PIN(214, "SRCCLKREQB_6"),
++	PINCTRL_PIN(215, "EXT_PWR_GATEB"),
++	PINCTRL_PIN(216, "EXT_PWR_GATE2B"),
++	PINCTRL_PIN(217, "VNN_CTRL"),
++	PINCTRL_PIN(218, "V1P05_CTRL"),
++	PINCTRL_PIN(219, "GPPF_CLK_LOOPBACK"),
+ 	/* HVCMOS */
+-	PINCTRL_PIN(49, "L_BKLTEN"),
+-	PINCTRL_PIN(50, "L_BKLTCTL"),
+-	PINCTRL_PIN(51, "L_VDDEN"),
+-	PINCTRL_PIN(52, "SYS_PWROK"),
+-	PINCTRL_PIN(53, "SYS_RESETB"),
+-	PINCTRL_PIN(54, "MLK_RSTB"),
++	PINCTRL_PIN(220, "L_BKLTEN"),
++	PINCTRL_PIN(221, "L_BKLTCTL"),
++	PINCTRL_PIN(222, "L_VDDEN"),
++	PINCTRL_PIN(223, "SYS_PWROK"),
++	PINCTRL_PIN(224, "SYS_RESETB"),
++	PINCTRL_PIN(225, "MLK_RSTB"),
+ 	/* GPP_E */
+-	PINCTRL_PIN(55, "SATAXPCIE_0"),
+-	PINCTRL_PIN(56, "SPI1_IO_2"),
+-	PINCTRL_PIN(57, "SPI1_IO_3"),
+-	PINCTRL_PIN(58, "CPU_GP_0"),
+-	PINCTRL_PIN(59, "SATA_DEVSLP_0"),
+-	PINCTRL_PIN(60, "SATA_DEVSLP_1"),
+-	PINCTRL_PIN(61, "GPPC_E_6"),
+-	PINCTRL_PIN(62, "CPU_GP_1"),
+-	PINCTRL_PIN(63, "SPI1_CS1B"),
+-	PINCTRL_PIN(64, "USB2_OCB_0"),
+-	PINCTRL_PIN(65, "SPI1_CSB"),
+-	PINCTRL_PIN(66, "SPI1_CLK"),
+-	PINCTRL_PIN(67, "SPI1_MISO_IO_1"),
+-	PINCTRL_PIN(68, "SPI1_MOSI_IO_0"),
+-	PINCTRL_PIN(69, "DDSP_HPD_A"),
+-	PINCTRL_PIN(70, "ISH_GP_6"),
+-	PINCTRL_PIN(71, "ISH_GP_7"),
+-	PINCTRL_PIN(72, "GPPC_E_17"),
+-	PINCTRL_PIN(73, "DDP1_CTRLCLK"),
+-	PINCTRL_PIN(74, "DDP1_CTRLDATA"),
+-	PINCTRL_PIN(75, "DDP2_CTRLCLK"),
+-	PINCTRL_PIN(76, "DDP2_CTRLDATA"),
+-	PINCTRL_PIN(77, "DDPA_CTRLCLK"),
+-	PINCTRL_PIN(78, "DDPA_CTRLDATA"),
+-	PINCTRL_PIN(79, "SPI1_CLK_LOOPBK"),
++	PINCTRL_PIN(226, "SATAXPCIE_0"),
++	PINCTRL_PIN(227, "SPI1_IO_2"),
++	PINCTRL_PIN(228, "SPI1_IO_3"),
++	PINCTRL_PIN(229, "CPU_GP_0"),
++	PINCTRL_PIN(230, "SATA_DEVSLP_0"),
++	PINCTRL_PIN(231, "SATA_DEVSLP_1"),
++	PINCTRL_PIN(232, "GPPC_E_6"),
++	PINCTRL_PIN(233, "CPU_GP_1"),
++	PINCTRL_PIN(234, "SPI1_CS1B"),
++	PINCTRL_PIN(235, "USB2_OCB_0"),
++	PINCTRL_PIN(236, "SPI1_CSB"),
++	PINCTRL_PIN(237, "SPI1_CLK"),
++	PINCTRL_PIN(238, "SPI1_MISO_IO_1"),
++	PINCTRL_PIN(239, "SPI1_MOSI_IO_0"),
++	PINCTRL_PIN(240, "DDSP_HPD_A"),
++	PINCTRL_PIN(241, "ISH_GP_6"),
++	PINCTRL_PIN(242, "ISH_GP_7"),
++	PINCTRL_PIN(243, "GPPC_E_17"),
++	PINCTRL_PIN(244, "DDP1_CTRLCLK"),
++	PINCTRL_PIN(245, "DDP1_CTRLDATA"),
++	PINCTRL_PIN(246, "DDP2_CTRLCLK"),
++	PINCTRL_PIN(247, "DDP2_CTRLDATA"),
++	PINCTRL_PIN(248, "DDPA_CTRLCLK"),
++	PINCTRL_PIN(249, "DDPA_CTRLDATA"),
++	PINCTRL_PIN(250, "SPI1_CLK_LOOPBK"),
+ 	/* JTAG */
+-	PINCTRL_PIN(80, "JTAG_TDO"),
+-	PINCTRL_PIN(81, "JTAGX"),
+-	PINCTRL_PIN(82, "PRDYB"),
+-	PINCTRL_PIN(83, "PREQB"),
+-	PINCTRL_PIN(84, "CPU_TRSTB"),
+-	PINCTRL_PIN(85, "JTAG_TDI"),
+-	PINCTRL_PIN(86, "JTAG_TMS"),
+-	PINCTRL_PIN(87, "JTAG_TCK"),
+-	PINCTRL_PIN(88, "DBG_PMODE"),
+-};
+-
+-static const struct intel_padgroup tgllp_community4_gpps[] = {
+-	TGL_GPP(0, 0, 23),	/* GPP_C */
+-	TGL_GPP(1, 24, 48),	/* GPP_F */
+-	TGL_GPP(2, 49, 54),	/* HVCMOS */
+-	TGL_GPP(3, 55, 79),	/* GPP_E */
+-	TGL_GPP(4, 80, 88),	/* JTAG */
++	PINCTRL_PIN(251, "JTAG_TDO"),
++	PINCTRL_PIN(252, "JTAGX"),
++	PINCTRL_PIN(253, "PRDYB"),
++	PINCTRL_PIN(254, "PREQB"),
++	PINCTRL_PIN(255, "CPU_TRSTB"),
++	PINCTRL_PIN(256, "JTAG_TDI"),
++	PINCTRL_PIN(257, "JTAG_TMS"),
++	PINCTRL_PIN(258, "JTAG_TCK"),
++	PINCTRL_PIN(259, "DBG_PMODE"),
++	/* GPP_R */
++	PINCTRL_PIN(260, "HDA_BCLK"),
++	PINCTRL_PIN(261, "HDA_SYNC"),
++	PINCTRL_PIN(262, "HDA_SDO"),
++	PINCTRL_PIN(263, "HDA_SDI_0"),
++	PINCTRL_PIN(264, "HDA_RSTB"),
++	PINCTRL_PIN(265, "HDA_SDI_1"),
++	PINCTRL_PIN(266, "GPP_R_6"),
++	PINCTRL_PIN(267, "GPP_R_7"),
++	/* SPI */
++	PINCTRL_PIN(268, "SPI0_IO_2"),
++	PINCTRL_PIN(269, "SPI0_IO_3"),
++	PINCTRL_PIN(270, "SPI0_MOSI_IO_0"),
++	PINCTRL_PIN(271, "SPI0_MISO_IO_1"),
++	PINCTRL_PIN(272, "SPI0_TPM_CSB"),
++	PINCTRL_PIN(273, "SPI0_FLASH_0_CSB"),
++	PINCTRL_PIN(274, "SPI0_FLASH_1_CSB"),
++	PINCTRL_PIN(275, "SPI0_CLK"),
++	PINCTRL_PIN(276, "SPI0_CLK_LOOPBK"),
+ };
+ 
+-static const struct intel_community tgllp_community4[] = {
+-	TGL_COMMUNITY(0, 88, tgllp_community4_gpps),
++static const struct intel_padgroup tgllp_community0_gpps[] = {
++	TGL_GPP(0, 0, 25, 0),			/* GPP_B */
++	TGL_GPP(1, 26, 41, 32),			/* GPP_T */
++	TGL_GPP(2, 42, 66, 64),			/* GPP_A */
+ };
+ 
+-static const struct intel_pinctrl_soc_data tgllp_community4_soc_data = {
+-	.uid = "4",
+-	.pins = tgllp_community4_pins,
+-	.npins = ARRAY_SIZE(tgllp_community4_pins),
+-	.communities = tgllp_community4,
+-	.ncommunities = ARRAY_SIZE(tgllp_community4),
++static const struct intel_padgroup tgllp_community1_gpps[] = {
++	TGL_GPP(0, 67, 74, 96),			/* GPP_S */
++	TGL_GPP(1, 75, 98, 128),		/* GPP_H */
++	TGL_GPP(2, 99, 119, 160),		/* GPP_D */
++	TGL_GPP(3, 120, 143, 192),		/* GPP_U */
++	TGL_GPP(4, 144, 170, 224),		/* vGPIO */
+ };
+ 
+-static const struct pinctrl_pin_desc tgllp_community5_pins[] = {
+-	/* GPP_R */
+-	PINCTRL_PIN(0, "HDA_BCLK"),
+-	PINCTRL_PIN(1, "HDA_SYNC"),
+-	PINCTRL_PIN(2, "HDA_SDO"),
+-	PINCTRL_PIN(3, "HDA_SDI_0"),
+-	PINCTRL_PIN(4, "HDA_RSTB"),
+-	PINCTRL_PIN(5, "HDA_SDI_1"),
+-	PINCTRL_PIN(6, "GPP_R_6"),
+-	PINCTRL_PIN(7, "GPP_R_7"),
+-	/* SPI */
+-	PINCTRL_PIN(8, "SPI0_IO_2"),
+-	PINCTRL_PIN(9, "SPI0_IO_3"),
+-	PINCTRL_PIN(10, "SPI0_MOSI_IO_0"),
+-	PINCTRL_PIN(11, "SPI0_MISO_IO_1"),
+-	PINCTRL_PIN(12, "SPI0_TPM_CSB"),
+-	PINCTRL_PIN(13, "SPI0_FLASH_0_CSB"),
+-	PINCTRL_PIN(14, "SPI0_FLASH_1_CSB"),
+-	PINCTRL_PIN(15, "SPI0_CLK"),
+-	PINCTRL_PIN(16, "SPI0_CLK_LOOPBK"),
++static const struct intel_padgroup tgllp_community4_gpps[] = {
++	TGL_GPP(0, 171, 194, 256),		/* GPP_C */
++	TGL_GPP(1, 195, 219, 288),		/* GPP_F */
++	TGL_GPP(2, 220, 225, TGL_NO_GPIO),	/* HVCMOS */
++	TGL_GPP(3, 226, 250, 320),		/* GPP_E */
++	TGL_GPP(4, 251, 259, TGL_NO_GPIO),	/* JTAG */
+ };
+ 
+ static const struct intel_padgroup tgllp_community5_gpps[] = {
+-	TGL_GPP(0, 0, 7),	/* GPP_R */
+-	TGL_GPP(1, 8, 16),	/* SPI */
+-};
+-
+-static const struct intel_community tgllp_community5[] = {
+-	TGL_COMMUNITY(0, 16, tgllp_community5_gpps),
++	TGL_GPP(0, 260, 267, 352),		/* GPP_R */
++	TGL_GPP(1, 268, 276, TGL_NO_GPIO),	/* SPI */
+ };
+ 
+-static const struct intel_pinctrl_soc_data tgllp_community5_soc_data = {
+-	.uid = "5",
+-	.pins = tgllp_community5_pins,
+-	.npins = ARRAY_SIZE(tgllp_community5_pins),
+-	.communities = tgllp_community5,
+-	.ncommunities = ARRAY_SIZE(tgllp_community5),
++static const struct intel_community tgllp_communities[] = {
++	TGL_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
++	TGL_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
++	TGL_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
++	TGL_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
+ };
+ 
+-static const struct intel_pinctrl_soc_data *tgllp_soc_data_array[] = {
+-	&tgllp_community0_soc_data,
+-	&tgllp_community1_soc_data,
+-	&tgllp_community4_soc_data,
+-	&tgllp_community5_soc_data,
+-	NULL
++static const struct intel_pinctrl_soc_data tgllp_soc_data = {
++	.pins = tgllp_pins,
++	.npins = ARRAY_SIZE(tgllp_pins),
++	.communities = tgllp_communities,
++	.ncommunities = ARRAY_SIZE(tgllp_communities),
+ };
+ 
+ static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
+-	{ "INT34C5", (kernel_ulong_t)tgllp_soc_data_array },
++	{ "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
+@@ -438,7 +391,7 @@ MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
+ static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
+ 
+ static struct platform_driver tgl_pinctrl_driver = {
+-	.probe = intel_pinctrl_probe_by_uid,
++	.probe = intel_pinctrl_probe_by_hid,
+ 	.driver = {
+ 		.name = "tigerlake-pinctrl",
+ 		.acpi_match_table = tgl_pinctrl_acpi_match,
+diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+index 4a95867deb8a..5a026601d4f9 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
++++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+@@ -497,17 +497,15 @@ enum {
+ 	SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK,
+ 	CRX0_MARK, CRX1_MARK,
+ 	CTX0_MARK, CTX1_MARK,
++	CRX0_CRX1_MARK, CTX0_CTX1_MARK,
+ 
+ 	PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
+ 	PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
+ 	PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
+ 	PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
+ 	IERXD_MARK, IETXD_MARK,
+-	CRX0_CRX1_MARK,
+ 	WDTOVF_MARK,
+ 
+-	CRX0X1_MARK,
+-
+ 	/* DMAC */
+ 	TEND0_MARK, DACK0_MARK, DREQ0_MARK,
+ 	TEND1_MARK, DACK1_MARK, DREQ1_MARK,
+@@ -995,12 +993,12 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_DATA(PJ3_DATA, PJ3MD_00),
+ 	PINMUX_DATA(CRX1_MARK, PJ3MD_01),
+-	PINMUX_DATA(CRX0X1_MARK, PJ3MD_10),
++	PINMUX_DATA(CRX0_CRX1_MARK, PJ3MD_10),
+ 	PINMUX_DATA(IRQ1_PJ_MARK, PJ3MD_11),
+ 
+ 	PINMUX_DATA(PJ2_DATA, PJ2MD_000),
+ 	PINMUX_DATA(CTX1_MARK, PJ2MD_001),
+-	PINMUX_DATA(CRX0_CRX1_MARK, PJ2MD_010),
++	PINMUX_DATA(CTX0_CTX1_MARK, PJ2MD_010),
+ 	PINMUX_DATA(CS2_MARK, PJ2MD_011),
+ 	PINMUX_DATA(SCK0_MARK, PJ2MD_100),
+ 	PINMUX_DATA(LCD_M_DISP_MARK, PJ2MD_101),
+@@ -1245,6 +1243,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
+ 	GPIO_FN(CTX1),
+ 	GPIO_FN(CRX1),
+ 	GPIO_FN(CTX0),
++	GPIO_FN(CTX0_CTX1),
+ 	GPIO_FN(CRX0),
+ 	GPIO_FN(CRX0_CRX1),
+ 
+diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+index 6cbb18ef77dc..d20974a55d93 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
++++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+@@ -737,13 +737,12 @@ enum {
+ 	CRX0_MARK, CTX0_MARK,
+ 	CRX1_MARK, CTX1_MARK,
+ 	CRX2_MARK, CTX2_MARK,
+-	CRX0_CRX1_MARK,
+-	CRX0_CRX1_CRX2_MARK,
+-	CTX0CTX1CTX2_MARK,
++	CRX0_CRX1_MARK, CTX0_CTX1_MARK,
++	CRX0_CRX1_CRX2_MARK, CTX0_CTX1_CTX2_MARK,
+ 	CRX1_PJ22_MARK, CTX1_PJ23_MARK,
+ 	CRX2_PJ20_MARK, CTX2_PJ21_MARK,
+-	CRX0CRX1_PJ22_MARK,
+-	CRX0CRX1CRX2_PJ20_MARK,
++	CRX0_CRX1_PJ22_MARK, CTX0_CTX1_PJ23_MARK,
++	CRX0_CRX1_CRX2_PJ20_MARK, CTX0_CTX1_CTX2_PJ21_MARK,
+ 
+ 	/* VDC */
+ 	DV_CLK_MARK,
+@@ -821,6 +820,7 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_DATA(CS3_MARK, PC8MD_001),
+ 	PINMUX_DATA(TXD7_MARK, PC8MD_010),
+ 	PINMUX_DATA(CTX1_MARK, PC8MD_011),
++	PINMUX_DATA(CTX0_CTX1_MARK, PC8MD_100),
+ 
+ 	PINMUX_DATA(PC7_DATA, PC7MD_000),
+ 	PINMUX_DATA(CKE_MARK, PC7MD_001),
+@@ -833,11 +833,12 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_DATA(CAS_MARK, PC6MD_001),
+ 	PINMUX_DATA(SCK7_MARK, PC6MD_010),
+ 	PINMUX_DATA(CTX0_MARK, PC6MD_011),
++	PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC6MD_100),
+ 
+ 	PINMUX_DATA(PC5_DATA, PC5MD_000),
+ 	PINMUX_DATA(RAS_MARK, PC5MD_001),
+ 	PINMUX_DATA(CRX0_MARK, PC5MD_011),
+-	PINMUX_DATA(CTX0CTX1CTX2_MARK, PC5MD_100),
++	PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC5MD_100),
+ 	PINMUX_DATA(IRQ0_PC_MARK, PC5MD_101),
+ 
+ 	PINMUX_DATA(PC4_DATA, PC4MD_00),
+@@ -1289,30 +1290,32 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
+ 	PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
+ 	PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
+-	PINMUX_DATA(CTX1_MARK, PJ23MD_101),
++	PINMUX_DATA(CTX1_PJ23_MARK, PJ23MD_101),
++	PINMUX_DATA(CTX0_CTX1_PJ23_MARK, PJ23MD_110),
+ 
+ 	PINMUX_DATA(PJ22_DATA, PJ22MD_000),
+ 	PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
+ 	PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
+ 	PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
+ 	PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
+-	PINMUX_DATA(CRX1_MARK, PJ22MD_101),
+-	PINMUX_DATA(CRX0_CRX1_MARK, PJ22MD_110),
++	PINMUX_DATA(CRX1_PJ22_MARK, PJ22MD_101),
++	PINMUX_DATA(CRX0_CRX1_PJ22_MARK, PJ22MD_110),
+ 
+ 	PINMUX_DATA(PJ21_DATA, PJ21MD_000),
+ 	PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
+ 	PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
+ 	PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
+ 	PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
+-	PINMUX_DATA(CTX2_MARK, PJ21MD_101),
++	PINMUX_DATA(CTX2_PJ21_MARK, PJ21MD_101),
++	PINMUX_DATA(CTX0_CTX1_CTX2_PJ21_MARK, PJ21MD_110),
+ 
+ 	PINMUX_DATA(PJ20_DATA, PJ20MD_000),
+ 	PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
+ 	PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
+ 	PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
+ 	PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
+-	PINMUX_DATA(CRX2_MARK, PJ20MD_101),
+-	PINMUX_DATA(CRX0CRX1CRX2_PJ20_MARK, PJ20MD_110),
++	PINMUX_DATA(CRX2_PJ20_MARK, PJ20MD_101),
++	PINMUX_DATA(CRX0_CRX1_CRX2_PJ20_MARK, PJ20MD_110),
+ 
+ 	PINMUX_DATA(PJ19_DATA, PJ19MD_000),
+ 	PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
+@@ -1663,12 +1666,24 @@ static const struct pinmux_func pinmux_func_gpios[] = {
+ 	GPIO_FN(WDTOVF),
+ 
+ 	/* CAN */
++	GPIO_FN(CTX2),
++	GPIO_FN(CRX2),
+ 	GPIO_FN(CTX1),
+ 	GPIO_FN(CRX1),
+ 	GPIO_FN(CTX0),
+ 	GPIO_FN(CRX0),
++	GPIO_FN(CTX0_CTX1),
+ 	GPIO_FN(CRX0_CRX1),
++	GPIO_FN(CTX0_CTX1_CTX2),
+ 	GPIO_FN(CRX0_CRX1_CRX2),
++	GPIO_FN(CTX2_PJ21),
++	GPIO_FN(CRX2_PJ20),
++	GPIO_FN(CTX1_PJ23),
++	GPIO_FN(CRX1_PJ22),
++	GPIO_FN(CTX0_CTX1_PJ23),
++	GPIO_FN(CRX0_CRX1_PJ22),
++	GPIO_FN(CTX0_CTX1_CTX2_PJ21),
++	GPIO_FN(CRX0_CRX1_CRX2_PJ20),
+ 
+ 	/* DMAC */
+ 	GPIO_FN(TEND0),
+diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
+index 00772fc53490..e36fcad668a6 100644
+--- a/drivers/pwm/pwm-omap-dmtimer.c
++++ b/drivers/pwm/pwm-omap-dmtimer.c
+@@ -298,15 +298,10 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
+ 		goto put;
+ 	}
+ 
+-put:
+-	of_node_put(timer);
+-	if (ret < 0)
+-		return ret;
+-
+ 	omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
+ 	if (!omap) {
+-		pdata->free(dm_timer);
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto err_alloc_omap;
+ 	}
+ 
+ 	omap->pdata = pdata;
+@@ -339,18 +334,38 @@ put:
+ 	ret = pwmchip_add(&omap->chip);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to register PWM\n");
+-		omap->pdata->free(omap->dm_timer);
+-		return ret;
++		goto err_pwmchip_add;
+ 	}
+ 
++	of_node_put(timer);
++
+ 	platform_set_drvdata(pdev, omap);
+ 
+ 	return 0;
++
++err_pwmchip_add:
++
++	/*
++	 * *omap is allocated using devm_kzalloc,
++	 * so no free necessary here
++	 */
++err_alloc_omap:
++
++	pdata->free(dm_timer);
++put:
++	of_node_put(timer);
++
++	return ret;
+ }
+ 
+ static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
+ {
+ 	struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
++	int ret;
++
++	ret = pwmchip_remove(&omap->chip);
++	if (ret)
++		return ret;
+ 
+ 	if (pm_runtime_active(&omap->dm_timer_pdev->dev))
+ 		omap->pdata->stop(omap->dm_timer);
+@@ -359,7 +374,7 @@ static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
+ 
+ 	mutex_destroy(&omap->mutex);
+ 
+-	return pwmchip_remove(&omap->chip);
++	return 0;
+ }
+ 
+ static const struct of_device_id pwm_omap_dmtimer_of_match[] = {
+diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
+index 168684b02ebc..b07bdca3d510 100644
+--- a/drivers/pwm/pwm-pca9685.c
++++ b/drivers/pwm/pwm-pca9685.c
+@@ -159,13 +159,9 @@ static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
+ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
+ {
+ 	struct pca9685 *pca = gpiochip_get_data(gpio);
+-	struct pwm_device *pwm;
+ 
+ 	pca9685_pwm_gpio_set(gpio, offset, 0);
+ 	pm_runtime_put(pca->chip.dev);
+-	mutex_lock(&pca->lock);
+-	pwm = &pca->chip.pwms[offset];
+-	mutex_unlock(&pca->lock);
+ }
+ 
+ static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 03d79fee2987..d015d99cb59d 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -3470,6 +3470,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
+ out:
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev);
+ 
+ static int regulator_limit_voltage_step(struct regulator_dev *rdev,
+ 					int *current_uV, int *min_uV)
+@@ -4034,6 +4035,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
+ 		return ret;
+ 	return ret - rdev->constraints->uV_offset;
+ }
++EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev);
+ 
+ /**
+  * regulator_get_voltage - get regulator output voltage
+diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
+index 5b4003226484..31f79fda3238 100644
+--- a/drivers/regulator/rk808-regulator.c
++++ b/drivers/regulator/rk808-regulator.c
+@@ -1282,7 +1282,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
+ 		}
+ 
+ 		if (!pdata->dvs_gpio[i]) {
+-			dev_warn(dev, "there is no dvs%d gpio\n", i);
++			dev_info(dev, "there is no dvs%d gpio\n", i);
+ 			continue;
+ 		}
+ 
+diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
+index 9a9ee8188109..cbadb1c99679 100644
+--- a/drivers/regulator/vctrl-regulator.c
++++ b/drivers/regulator/vctrl-regulator.c
+@@ -11,10 +11,13 @@
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
++#include <linux/regulator/coupler.h>
+ #include <linux/regulator/driver.h>
+ #include <linux/regulator/of_regulator.h>
+ #include <linux/sort.h>
+ 
++#include "internal.h"
++
+ struct vctrl_voltage_range {
+ 	int min_uV;
+ 	int max_uV;
+@@ -79,7 +82,7 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
+ static int vctrl_get_voltage(struct regulator_dev *rdev)
+ {
+ 	struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
+-	int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
++	int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
+ 
+ 	return vctrl_calc_output_voltage(vctrl, ctrl_uV);
+ }
+@@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
+ {
+ 	struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
+ 	struct regulator *ctrl_reg = vctrl->ctrl_reg;
+-	int orig_ctrl_uV = regulator_get_voltage(ctrl_reg);
++	int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
+ 	int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
+ 	int ret;
+ 
+ 	if (req_min_uV >= uV || !vctrl->ovp_threshold)
+ 		/* voltage rising or no OVP */
+-		return regulator_set_voltage(
+-			ctrl_reg,
++		return regulator_set_voltage_rdev(ctrl_reg->rdev,
+ 			vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
+-			vctrl_calc_ctrl_voltage(vctrl, req_max_uV));
++			vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
++			PM_SUSPEND_ON);
+ 
+ 	while (uV > req_min_uV) {
+ 		int max_drop_uV = (uV * vctrl->ovp_threshold) / 100;
+@@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
+ 		next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
+ 		next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
+ 
+-		ret = regulator_set_voltage(ctrl_reg,
++		ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
++					    next_ctrl_uV,
+ 					    next_ctrl_uV,
+-					    next_ctrl_uV);
++					    PM_SUSPEND_ON);
+ 		if (ret)
+ 			goto err;
+ 
+@@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
+ 
+ err:
+ 	/* Try to go back to original voltage */
+-	regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV);
++	regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
++				   PM_SUSPEND_ON);
+ 
+ 	return ret;
+ }
+@@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
+ 
+ 	if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
+ 		/* voltage rising or no OVP */
+-		ret = regulator_set_voltage(ctrl_reg,
++		ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
++					    vctrl->vtable[selector].ctrl,
+ 					    vctrl->vtable[selector].ctrl,
+-					    vctrl->vtable[selector].ctrl);
++					    PM_SUSPEND_ON);
+ 		if (!ret)
+ 			vctrl->sel = selector;
+ 
+@@ -173,9 +179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
+ 		else
+ 			next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
+ 
+-		ret = regulator_set_voltage(ctrl_reg,
++		ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+ 					    vctrl->vtable[next_sel].ctrl,
+-					    vctrl->vtable[next_sel].ctrl);
++					    vctrl->vtable[next_sel].ctrl,
++					    PM_SUSPEND_ON);
+ 		if (ret) {
+ 			dev_err(&rdev->dev,
+ 				"failed to set control voltage to %duV\n",
+@@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
+ err:
+ 	if (vctrl->sel != orig_sel) {
+ 		/* Try to go back to original voltage */
+-		if (!regulator_set_voltage(ctrl_reg,
++		if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
++					   vctrl->vtable[orig_sel].ctrl,
+ 					   vctrl->vtable[orig_sel].ctrl,
+-					   vctrl->vtable[orig_sel].ctrl))
++					   PM_SUSPEND_ON))
+ 			vctrl->sel = orig_sel;
+ 		else
+ 			dev_warn(&rdev->dev,
+@@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev)
+ 		if (ret)
+ 			return ret;
+ 
+-		ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
++		ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
+ 		if (ctrl_uV < 0) {
+ 			dev_err(&pdev->dev, "failed to get control voltage\n");
+ 			return ctrl_uV;
+diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
+index 471128a2e723..164fc2a53ef1 100644
+--- a/drivers/remoteproc/qcom_q6v5_mss.c
++++ b/drivers/remoteproc/qcom_q6v5_mss.c
+@@ -1594,7 +1594,6 @@ static const struct rproc_hexagon_res msm8998_mss = {
+ 	.active_clk_names = (char*[]){
+ 			"iface",
+ 			"bus",
+-			"mem",
+ 			"gpll0_mss",
+ 			"mnoc_axi",
+ 			"snoc_axi",
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index 307df98347ba..8115f945151b 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -2223,7 +2223,7 @@ static int __init remoteproc_init(void)
+ 
+ 	return 0;
+ }
+-module_init(remoteproc_init);
++subsys_initcall(remoteproc_init);
+ 
+ static void __exit remoteproc_exit(void)
+ {
+diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
+index 74e589f5dd6a..279e535bf5d8 100644
+--- a/drivers/reset/reset-uniphier.c
++++ b/drivers/reset/reset-uniphier.c
+@@ -193,8 +193,8 @@ static const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
+ #define UNIPHIER_PERI_RESET_FI2C(id, ch)		\
+ 	UNIPHIER_RESETX((id), 0x114, 24 + (ch))
+ 
+-#define UNIPHIER_PERI_RESET_SCSSI(id)			\
+-	UNIPHIER_RESETX((id), 0x110, 17)
++#define UNIPHIER_PERI_RESET_SCSSI(id, ch)		\
++	UNIPHIER_RESETX((id), 0x110, 17 + (ch))
+ 
+ #define UNIPHIER_PERI_RESET_MCSSI(id)			\
+ 	UNIPHIER_RESETX((id), 0x114, 14)
+@@ -209,7 +209,7 @@ static const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
+ 	UNIPHIER_PERI_RESET_I2C(6, 2),
+ 	UNIPHIER_PERI_RESET_I2C(7, 3),
+ 	UNIPHIER_PERI_RESET_I2C(8, 4),
+-	UNIPHIER_PERI_RESET_SCSSI(11),
++	UNIPHIER_PERI_RESET_SCSSI(11, 0),
+ 	UNIPHIER_RESET_END,
+ };
+ 
+@@ -225,8 +225,11 @@ static const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
+ 	UNIPHIER_PERI_RESET_FI2C(8, 4),
+ 	UNIPHIER_PERI_RESET_FI2C(9, 5),
+ 	UNIPHIER_PERI_RESET_FI2C(10, 6),
+-	UNIPHIER_PERI_RESET_SCSSI(11),
+-	UNIPHIER_PERI_RESET_MCSSI(12),
++	UNIPHIER_PERI_RESET_SCSSI(11, 0),
++	UNIPHIER_PERI_RESET_SCSSI(12, 1),
++	UNIPHIER_PERI_RESET_SCSSI(13, 2),
++	UNIPHIER_PERI_RESET_SCSSI(14, 3),
++	UNIPHIER_PERI_RESET_MCSSI(15),
+ 	UNIPHIER_RESET_END,
+ };
+ 
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index d77515d8382c..0f46e4a42889 100644
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -240,6 +240,7 @@ config RTC_DRV_AS3722
+ 
+ config RTC_DRV_DS1307
+ 	tristate "Dallas/Maxim DS1307/37/38/39/40/41, ST M41T00, EPSON RX-8025, ISL12057"
++	select REGMAP_I2C
+ 	help
+ 	  If you say yes here you get support for various compatible RTC
+ 	  chips (often with battery backup) connected with I2C. This driver
+@@ -621,6 +622,7 @@ config RTC_DRV_RX8010
+ 
+ config RTC_DRV_RX8581
+ 	tristate "Epson RX-8571/RX-8581"
++	select REGMAP_I2C
+ 	help
+ 	  If you say yes here you will get support for the Epson RX-8571/
+ 	  RX-8581.
+@@ -648,6 +650,7 @@ config RTC_DRV_EM3027
+ 
+ config RTC_DRV_RV3028
+ 	tristate "Micro Crystal RV3028"
++	select REGMAP_I2C
+ 	help
+ 	  If you say yes here you get support for the Micro Crystal
+ 	  RV3028.
+@@ -677,6 +680,7 @@ config RTC_DRV_S5M
+ 
+ config RTC_DRV_SD3078
+     tristate "ZXW Shenzhen whwave SD3078"
++    select REGMAP_I2C
+     help
+       If you say yes here you get support for the ZXW Shenzhen whwave
+       SD3078 RTC chips.
+@@ -848,14 +852,14 @@ config RTC_I2C_AND_SPI
+ 	default m if I2C=m
+ 	default y if I2C=y
+ 	default y if SPI_MASTER=y
+-	select REGMAP_I2C if I2C
+-	select REGMAP_SPI if SPI_MASTER
+ 
+ comment "SPI and I2C RTC drivers"
+ 
+ config RTC_DRV_DS3232
+ 	tristate "Dallas/Maxim DS3232/DS3234"
+ 	depends on RTC_I2C_AND_SPI
++	select REGMAP_I2C if I2C
++	select REGMAP_SPI if SPI_MASTER
+ 	help
+ 	  If you say yes here you get support for Dallas Semiconductor
+ 	  DS3232 and DS3234 real-time clock chips. If an interrupt is associated
+@@ -875,6 +879,8 @@ config RTC_DRV_DS3232_HWMON
+ config RTC_DRV_PCF2127
+ 	tristate "NXP PCF2127"
+ 	depends on RTC_I2C_AND_SPI
++	select REGMAP_I2C if I2C
++	select REGMAP_SPI if SPI_MASTER
+ 	select WATCHDOG_CORE if WATCHDOG
+ 	help
+ 	  If you say yes here you get support for the NXP PCF2127/29 RTC
+@@ -891,6 +897,8 @@ config RTC_DRV_PCF2127
+ config RTC_DRV_RV3029C2
+ 	tristate "Micro Crystal RV3029/3049"
+ 	depends on RTC_I2C_AND_SPI
++	select REGMAP_I2C if I2C
++	select REGMAP_SPI if SPI_MASTER
+ 	help
+ 	  If you say yes here you get support for the Micro Crystal
+ 	  RV3029 and RV3049 RTC chips.
+diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
+index a9d40d3b90ef..4190a025381a 100644
+--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
++++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
+@@ -2314,7 +2314,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
+ 			 * At some speeds, we only support
+ 			 * ST transfers.
+ 			 */
+-		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
++			if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
+ 				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+ 			break;
+ 		}
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 0bc63a7ab41c..b5dd1caae5e9 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -887,6 +887,10 @@ free_host:
+ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++	struct iscsi_session *session = cls_session->dd_data;
++
++	if (WARN_ON_ONCE(session->leadconn))
++		return;
+ 
+ 	iscsi_tcp_r2tpool_free(cls_session->dd_data);
+ 	iscsi_session_teardown(cls_session);
+diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
+index 99c9bb249758..1b4dbb28fb41 100644
+--- a/drivers/scsi/lpfc/lpfc_ct.c
++++ b/drivers/scsi/lpfc/lpfc_ct.c
+@@ -1493,33 +1493,35 @@ int
+ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
+ 	size_t size)
+ {
+-	char fwrev[FW_REV_STR_SIZE];
+-	int n;
++	char fwrev[FW_REV_STR_SIZE] = {0};
++	char tmp[MAXHOSTNAMELEN] = {0};
+ 
+-	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
++	memset(symbol, 0, size);
+ 
+-	n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
+-	if (size < n)
+-		return n;
++	scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
++	if (strlcat(symbol, tmp, size) >= size)
++		goto buffer_done;
+ 
+-	n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
+-	if (size < n)
+-		return n;
++	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
++	scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
++	if (strlcat(symbol, tmp, size) >= size)
++		goto buffer_done;
+ 
+-	n += scnprintf(symbol + n, size - n, " DV%s.",
+-		      lpfc_release_version);
+-	if (size < n)
+-		return n;
++	scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
++	if (strlcat(symbol, tmp, size) >= size)
++		goto buffer_done;
+ 
+-	n += scnprintf(symbol + n, size - n, " HN:%s.",
+-		      init_utsname()->nodename);
+-	if (size < n)
+-		return n;
++	scnprintf(tmp, sizeof(tmp), " HN:%s", init_utsname()->nodename);
++	if (strlcat(symbol, tmp, size) >= size)
++		goto buffer_done;
+ 
+ 	/* Note :- OS name is "Linux" */
+-	n += scnprintf(symbol + n, size - n, " OS:%s",
+-		      init_utsname()->sysname);
+-	return n;
++	scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
++	strlcat(symbol, tmp, size);
++
++buffer_done:
++	return strnlen(symbol, size);
++
+ }
+ 
+ static uint32_t
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index ed8d9709b9b9..271afea654e2 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -2947,6 +2947,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ 	return err;
+ }
+ 
++static int iscsi_session_has_conns(int sid)
++{
++	struct iscsi_cls_conn *conn;
++	unsigned long flags;
++	int found = 0;
++
++	spin_lock_irqsave(&connlock, flags);
++	list_for_each_entry(conn, &connlist, conn_list) {
++		if (iscsi_conn_get_sid(conn) == sid) {
++			found = 1;
++			break;
++		}
++	}
++	spin_unlock_irqrestore(&connlock, flags);
++
++	return found;
++}
++
+ static int
+ iscsi_set_iface_params(struct iscsi_transport *transport,
+ 		       struct iscsi_uevent *ev, uint32_t len)
+@@ -3524,10 +3542,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 		break;
+ 	case ISCSI_UEVENT_DESTROY_SESSION:
+ 		session = iscsi_session_lookup(ev->u.d_session.sid);
+-		if (session)
+-			transport->destroy_session(session);
+-		else
++		if (!session)
+ 			err = -EINVAL;
++		else if (iscsi_session_has_conns(ev->u.d_session.sid))
++			err = -EBUSY;
++		else
++			transport->destroy_session(session);
+ 		break;
+ 	case ISCSI_UEVENT_UNBIND_SESSION:
+ 		session = iscsi_session_lookup(ev->u.d_session.sid);
+diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
+index 83e28edc3ac5..8a21f49caf0d 100644
+--- a/drivers/scsi/ufs/ufs-mediatek.c
++++ b/drivers/scsi/ufs/ufs-mediatek.c
+@@ -13,6 +13,7 @@
+ 
+ #include "ufshcd.h"
+ #include "ufshcd-pltfrm.h"
++#include "ufs_quirks.h"
+ #include "unipro.h"
+ #include "ufs-mediatek.h"
+ 
+@@ -289,6 +290,15 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ 	return 0;
+ }
+ 
++static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba,
++				    struct ufs_dev_desc *card)
++{
++	if (card->wmanufacturerid == UFS_VENDOR_SAMSUNG)
++		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
++
++	return 0;
++}
++
+ /**
+  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
+  *
+@@ -301,6 +311,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
+ 	.setup_clocks        = ufs_mtk_setup_clocks,
+ 	.link_startup_notify = ufs_mtk_link_startup_notify,
+ 	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
++	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
+ 	.suspend             = ufs_mtk_suspend,
+ 	.resume              = ufs_mtk_resume,
+ };
+diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
+index c69c29a1ceb9..ebb5c66e069f 100644
+--- a/drivers/scsi/ufs/ufs-qcom.c
++++ b/drivers/scsi/ufs/ufs-qcom.c
+@@ -949,7 +949,8 @@ out:
+ 	return err;
+ }
+ 
+-static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
++static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba,
++				     struct ufs_dev_desc *card)
+ {
+ 	int err = 0;
+ 
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 3fbf9ea16c64..5340a980d24b 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -4799,7 +4799,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ 		break;
+ 	} /* end of switch */
+ 
+-	if (host_byte(result) != DID_OK)
++	if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
+ 		ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
+ 	return result;
+ }
+@@ -5351,8 +5351,8 @@ static void ufshcd_err_handler(struct work_struct *work)
+ 
+ 	/*
+ 	 * if host reset is required then skip clearing the pending
+-	 * transfers forcefully because they will automatically get
+-	 * cleared after link startup.
++	 * transfers forcefully because they will get cleared during
++	 * host reset and restore
+ 	 */
+ 	if (needs_reset)
+ 		goto skip_pending_xfer_clear;
+@@ -6282,9 +6282,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+ 	int err;
+ 	unsigned long flags;
+ 
+-	/* Reset the host controller */
++	/*
++	 * Stop the host controller and complete the requests
++	 * cleared by h/w
++	 */
+ 	spin_lock_irqsave(hba->host->host_lock, flags);
+ 	ufshcd_hba_stop(hba, false);
++	hba->silence_err_logs = true;
++	ufshcd_complete_requests(hba);
++	hba->silence_err_logs = false;
+ 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ 
+ 	/* scale up clocks to max frequency before full reinitialization */
+@@ -6318,7 +6324,6 @@ out:
+ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
+ {
+ 	int err = 0;
+-	unsigned long flags;
+ 	int retries = MAX_HOST_RESET_RETRIES;
+ 
+ 	do {
+@@ -6328,15 +6333,6 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
+ 		err = ufshcd_host_reset_and_restore(hba);
+ 	} while (err && --retries);
+ 
+-	/*
+-	 * After reset the door-bell might be cleared, complete
+-	 * outstanding requests in s/w here.
+-	 */
+-	spin_lock_irqsave(hba->host->host_lock, flags);
+-	ufshcd_transfer_req_compl(hba);
+-	ufshcd_tmc_handler(hba);
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+-
+ 	return err;
+ }
+ 
+@@ -6802,7 +6798,8 @@ out:
+ 	return ret;
+ }
+ 
+-static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
++static void ufshcd_tune_unipro_params(struct ufs_hba *hba,
++				      struct ufs_dev_desc *card)
+ {
+ 	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
+ 		ufshcd_tune_pa_tactivate(hba);
+@@ -6816,7 +6813,7 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+ 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ 		ufshcd_quirk_tune_host_pa_tactivate(hba);
+ 
+-	ufshcd_vops_apply_dev_quirks(hba);
++	ufshcd_vops_apply_dev_quirks(hba, card);
+ }
+ 
+ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+@@ -6979,10 +6976,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
+ 	}
+ 
+ 	ufs_fixup_device_setup(hba, &card);
++	ufshcd_tune_unipro_params(hba, &card);
+ 	ufs_put_device_desc(&card);
+ 
+-	ufshcd_tune_unipro_params(hba);
+-
+ 	/* UFS device is also active now */
+ 	ufshcd_set_ufs_dev_active(hba);
+ 	ufshcd_force_reset_auto_bkops(hba);
+diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
+index 2740f6941ec6..46bec0e18c72 100644
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -322,7 +322,7 @@ struct ufs_hba_variant_ops {
+ 	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
+ 	void    (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
+ 					enum ufs_notify_change_status);
+-	int	(*apply_dev_quirks)(struct ufs_hba *);
++	int	(*apply_dev_quirks)(struct ufs_hba *, struct ufs_dev_desc *);
+ 	int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
+ 	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
+ 	void	(*dbg_register_dump)(struct ufs_hba *hba);
+@@ -513,6 +513,7 @@ struct ufs_stats {
+  * @uic_error: UFS interconnect layer error status
+  * @saved_err: sticky error mask
+  * @saved_uic_err: sticky UIC error mask
++ * @silence_err_logs: flag to silence error logs
+  * @dev_cmd: ufs device management command information
+  * @last_dme_cmd_tstamp: time stamp of the last completed DME command
+  * @auto_bkops_enabled: to track whether bkops is enabled in device
+@@ -670,6 +671,7 @@ struct ufs_hba {
+ 	u32 saved_err;
+ 	u32 saved_uic_err;
+ 	struct ufs_stats ufs_stats;
++	bool silence_err_logs;
+ 
+ 	/* Device management request data */
+ 	struct ufs_dev_cmd dev_cmd;
+@@ -1055,10 +1057,11 @@ static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
+ 		return hba->vops->hibern8_notify(hba, cmd, status);
+ }
+ 
+-static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
++static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba,
++					       struct ufs_dev_desc *card)
+ {
+ 	if (hba->vops && hba->vops->apply_dev_quirks)
+-		return hba->vops->apply_dev_quirks(hba);
++		return hba->vops->apply_dev_quirks(hba, card);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
+index df76778af601..f8b9c4058926 100644
+--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
++++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
+@@ -123,7 +123,7 @@ void __init tegra_init_apbmisc(void)
+ 			apbmisc.flags = IORESOURCE_MEM;
+ 
+ 			/* strapping options */
+-			if (tegra_get_chip_id() == TEGRA124) {
++			if (of_machine_is_compatible("nvidia,tegra124")) {
+ 				straps.start = 0x7000e864;
+ 				straps.end = 0x7000e867;
+ 			} else {
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 2cc0ddb4a988..1375bdfc587b 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ 	fsl_lpspi->dev = &pdev->dev;
+ 	fsl_lpspi->is_slave = is_slave;
+ 
++	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
++	controller->transfer_one = fsl_lpspi_transfer_one;
++	controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
++	controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
++	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
++	controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
++	controller->dev.of_node = pdev->dev.of_node;
++	controller->bus_num = pdev->id;
++	controller->slave_abort = fsl_lpspi_slave_abort;
++
++	ret = devm_spi_register_controller(&pdev->dev, controller);
++	if (ret < 0) {
++		dev_err(&pdev->dev, "spi_register_controller error.\n");
++		goto out_controller_put;
++	}
++
+ 	if (!fsl_lpspi->is_slave) {
+ 		for (i = 0; i < controller->num_chipselect; i++) {
+ 			int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+@@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ 		controller->prepare_message = fsl_lpspi_prepare_message;
+ 	}
+ 
+-	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+-	controller->transfer_one = fsl_lpspi_transfer_one;
+-	controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+-	controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+-	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+-	controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+-	controller->dev.of_node = pdev->dev.of_node;
+-	controller->bus_num = pdev->id;
+-	controller->slave_abort = fsl_lpspi_slave_abort;
+-
+ 	init_completion(&fsl_lpspi->xfer_done);
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+@@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
+ 
+-	ret = devm_spi_register_controller(&pdev->dev, controller);
+-	if (ret < 0) {
+-		dev_err(&pdev->dev, "spi_register_controller error.\n");
+-		goto out_controller_put;
+-	}
+-
+ 	return 0;
+ 
+ out_controller_put:
+diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
+index 79b1558b74b8..e8a499cd1f13 100644
+--- a/drivers/spi/spi-fsl-qspi.c
++++ b/drivers/spi/spi-fsl-qspi.c
+@@ -410,7 +410,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem,
+ 	    op->data.nbytes > q->devtype_data->txfifo)
+ 		return false;
+ 
+-	return true;
++	return spi_mem_default_supports_op(mem, op);
+ }
+ 
+ static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
+diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
+index 0a1a04fd5d13..8dd1396909d7 100644
+--- a/drivers/staging/media/meson/vdec/vdec.c
++++ b/drivers/staging/media/meson/vdec/vdec.c
+@@ -133,6 +133,8 @@ vdec_queue_recycle(struct amvdec_session *sess, struct vb2_buffer *vb)
+ 	struct amvdec_buffer *new_buf;
+ 
+ 	new_buf = kmalloc(sizeof(*new_buf), GFP_KERNEL);
++	if (!new_buf)
++		return;
+ 	new_buf->vb = vb;
+ 
+ 	mutex_lock(&sess->bufs_recycle_lock);
+diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+index 710c33fd4965..47f4cc6a19a9 100644
+--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+@@ -222,18 +222,21 @@ static char *translate_scan(struct adapter *padapter,
+ 
+ 	/* parsing WPA/WPA2 IE */
+ 	{
+-		u8 buf[MAX_WPA_IE_LEN];
++		u8 *buf;
+ 		u8 wpa_ie[255], rsn_ie[255];
+ 		u16 wpa_len = 0, rsn_len = 0;
+ 		u8 *p;
+ 
++		buf = kzalloc(MAX_WPA_IE_LEN, GFP_ATOMIC);
++		if (!buf)
++			return start;
++
+ 		rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
+ 		RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.ssid.ssid));
+ 		RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
+ 
+ 		if (wpa_len > 0) {
+ 			p = buf;
+-			memset(buf, 0, MAX_WPA_IE_LEN);
+ 			p += sprintf(p, "wpa_ie=");
+ 			for (i = 0; i < wpa_len; i++)
+ 				p += sprintf(p, "%02x", wpa_ie[i]);
+@@ -250,7 +253,6 @@ static char *translate_scan(struct adapter *padapter,
+ 		}
+ 		if (rsn_len > 0) {
+ 			p = buf;
+-			memset(buf, 0, MAX_WPA_IE_LEN);
+ 			p += sprintf(p, "rsn_ie=");
+ 			for (i = 0; i < rsn_len; i++)
+ 				p += sprintf(p, "%02x", rsn_ie[i]);
+@@ -264,6 +266,7 @@ static char *translate_scan(struct adapter *padapter,
+ 			iwe.u.data.length = rsn_len;
+ 			start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
+ 		}
++		kfree(buf);
+ 	}
+ 
+ 	{/* parsing WPS IE */
+diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
+index b13d7341f8bb..0c6a3a1a1ddf 100644
+--- a/drivers/staging/wfx/data_tx.c
++++ b/drivers/staging/wfx/data_tx.c
+@@ -282,8 +282,7 @@ void wfx_tx_policy_init(struct wfx_vif *wvif)
+ static int wfx_alloc_link_id(struct wfx_vif *wvif, const u8 *mac)
+ {
+ 	int i, ret = 0;
+-	unsigned long max_inactivity = 0;
+-	unsigned long now = jiffies;
++	unsigned long oldest;
+ 
+ 	spin_lock_bh(&wvif->ps_state_lock);
+ 	for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+@@ -292,13 +291,10 @@ static int wfx_alloc_link_id(struct wfx_vif *wvif, const u8 *mac)
+ 			break;
+ 		} else if (wvif->link_id_db[i].status != WFX_LINK_HARD &&
+ 			   !wvif->wdev->tx_queue_stats.link_map_cache[i + 1]) {
+-			unsigned long inactivity =
+-				now - wvif->link_id_db[i].timestamp;
+-
+-			if (inactivity < max_inactivity)
+-				continue;
+-			max_inactivity = inactivity;
+-			ret = i + 1;
++			if (!ret || time_after(oldest, wvif->link_id_db[i].timestamp)) {
++				oldest = wvif->link_id_db[i].timestamp;
++				ret = i + 1;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
+index e8a9047de451..36f1a4d870eb 100644
+--- a/drivers/tty/synclink_gt.c
++++ b/drivers/tty/synclink_gt.c
+@@ -1334,10 +1334,10 @@ static void throttle(struct tty_struct * tty)
+ 	DBGINFO(("%s throttle\n", info->device_name));
+ 	if (I_IXOFF(tty))
+ 		send_xchar(tty, STOP_CHAR(tty));
+- 	if (C_CRTSCTS(tty)) {
++	if (C_CRTSCTS(tty)) {
+ 		spin_lock_irqsave(&info->lock,flags);
+ 		info->signals &= ~SerialSignal_RTS;
+-	 	set_signals(info);
++		set_signals(info);
+ 		spin_unlock_irqrestore(&info->lock,flags);
+ 	}
+ }
+@@ -1359,10 +1359,10 @@ static void unthrottle(struct tty_struct * tty)
+ 		else
+ 			send_xchar(tty, START_CHAR(tty));
+ 	}
+- 	if (C_CRTSCTS(tty)) {
++	if (C_CRTSCTS(tty)) {
+ 		spin_lock_irqsave(&info->lock,flags);
+ 		info->signals |= SerialSignal_RTS;
+-	 	set_signals(info);
++		set_signals(info);
+ 		spin_unlock_irqrestore(&info->lock,flags);
+ 	}
+ }
+@@ -2560,8 +2560,8 @@ static void change_params(struct slgt_info *info)
+ 	info->read_status_mask = IRQ_RXOVER;
+ 	if (I_INPCK(info->port.tty))
+ 		info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
+- 	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+- 		info->read_status_mask |= MASK_BREAK;
++	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
++		info->read_status_mask |= MASK_BREAK;
+ 	if (I_IGNPAR(info->port.tty))
+ 		info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
+ 	if (I_IGNBRK(info->port.tty)) {
+@@ -3192,7 +3192,7 @@ static int tiocmset(struct tty_struct *tty,
+ 		info->signals &= ~SerialSignal_DTR;
+ 
+ 	spin_lock_irqsave(&info->lock,flags);
+- 	set_signals(info);
++	set_signals(info);
+ 	spin_unlock_irqrestore(&info->lock,flags);
+ 	return 0;
+ }
+@@ -3203,7 +3203,7 @@ static int carrier_raised(struct tty_port *port)
+ 	struct slgt_info *info = container_of(port, struct slgt_info, port);
+ 
+ 	spin_lock_irqsave(&info->lock,flags);
+- 	get_signals(info);
++	get_signals(info);
+ 	spin_unlock_irqrestore(&info->lock,flags);
+ 	return (info->signals & SerialSignal_DCD) ? 1 : 0;
+ }
+@@ -3218,7 +3218,7 @@ static void dtr_rts(struct tty_port *port, int on)
+ 		info->signals |= SerialSignal_RTS | SerialSignal_DTR;
+ 	else
+ 		info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+- 	set_signals(info);
++	set_signals(info);
+ 	spin_unlock_irqrestore(&info->lock,flags);
+ }
+ 
+diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
+index fcb91bf7a15b..54b897a646d0 100644
+--- a/drivers/tty/synclinkmp.c
++++ b/drivers/tty/synclinkmp.c
+@@ -1453,10 +1453,10 @@ static void throttle(struct tty_struct * tty)
+ 	if (I_IXOFF(tty))
+ 		send_xchar(tty, STOP_CHAR(tty));
+ 
+- 	if (C_CRTSCTS(tty)) {
++	if (C_CRTSCTS(tty)) {
+ 		spin_lock_irqsave(&info->lock,flags);
+ 		info->serial_signals &= ~SerialSignal_RTS;
+-	 	set_signals(info);
++		set_signals(info);
+ 		spin_unlock_irqrestore(&info->lock,flags);
+ 	}
+ }
+@@ -1482,10 +1482,10 @@ static void unthrottle(struct tty_struct * tty)
+ 			send_xchar(tty, START_CHAR(tty));
+ 	}
+ 
+- 	if (C_CRTSCTS(tty)) {
++	if (C_CRTSCTS(tty)) {
+ 		spin_lock_irqsave(&info->lock,flags);
+ 		info->serial_signals |= SerialSignal_RTS;
+-	 	set_signals(info);
++		set_signals(info);
+ 		spin_unlock_irqrestore(&info->lock,flags);
+ 	}
+ }
+@@ -2470,7 +2470,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
+ 					if (status & SerialSignal_CTS) {
+ 						if ( debug_level >= DEBUG_LEVEL_ISR )
+ 							printk("CTS tx start...");
+-			 			info->port.tty->hw_stopped = 0;
++						info->port.tty->hw_stopped = 0;
+ 						tx_start(info);
+ 						info->pending_bh |= BH_TRANSMIT;
+ 						return;
+@@ -2479,7 +2479,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
+ 					if (!(status & SerialSignal_CTS)) {
+ 						if ( debug_level >= DEBUG_LEVEL_ISR )
+ 							printk("CTS tx stop...");
+-			 			info->port.tty->hw_stopped = 1;
++						info->port.tty->hw_stopped = 1;
+ 						tx_stop(info);
+ 					}
+ 				}
+@@ -2806,8 +2806,8 @@ static void change_params(SLMP_INFO *info)
+ 	info->read_status_mask2 = OVRN;
+ 	if (I_INPCK(info->port.tty))
+ 		info->read_status_mask2 |= PE | FRME;
+- 	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+- 		info->read_status_mask1 |= BRKD;
++	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
++		info->read_status_mask1 |= BRKD;
+ 	if (I_IGNPAR(info->port.tty))
+ 		info->ignore_status_mask2 |= PE | FRME;
+ 	if (I_IGNBRK(info->port.tty)) {
+@@ -3177,7 +3177,7 @@ static int tiocmget(struct tty_struct *tty)
+  	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&info->lock,flags);
+- 	get_signals(info);
++	get_signals(info);
+ 	spin_unlock_irqrestore(&info->lock,flags);
+ 
+ 	result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
+@@ -3215,7 +3215,7 @@ static int tiocmset(struct tty_struct *tty,
+ 		info->serial_signals &= ~SerialSignal_DTR;
+ 
+ 	spin_lock_irqsave(&info->lock,flags);
+- 	set_signals(info);
++	set_signals(info);
+ 	spin_unlock_irqrestore(&info->lock,flags);
+ 
+ 	return 0;
+@@ -3227,7 +3227,7 @@ static int carrier_raised(struct tty_port *port)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&info->lock,flags);
+- 	get_signals(info);
++	get_signals(info);
+ 	spin_unlock_irqrestore(&info->lock,flags);
+ 
+ 	return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
+@@ -3243,7 +3243,7 @@ static void dtr_rts(struct tty_port *port, int on)
+ 		info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
+ 	else
+ 		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+- 	set_signals(info);
++	set_signals(info);
+ 	spin_unlock_irqrestore(&info->lock,flags);
+ }
+ 
+diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
+index 81c88f7bbbcb..f6ab3f28c838 100644
+--- a/drivers/uio/uio_dmem_genirq.c
++++ b/drivers/uio/uio_dmem_genirq.c
+@@ -132,11 +132,13 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
+ 	if (irq_on) {
+ 		if (test_and_clear_bit(0, &priv->flags))
+ 			enable_irq(dev_info->irq);
++		spin_unlock_irqrestore(&priv->lock, flags);
+ 	} else {
+-		if (!test_and_set_bit(0, &priv->flags))
++		if (!test_and_set_bit(0, &priv->flags)) {
++			spin_unlock_irqrestore(&priv->lock, flags);
+ 			disable_irq(dev_info->irq);
++		}
+ 	}
+-	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 6be10e496e10..a9133773b89e 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -4056,11 +4056,12 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
+ 	 * a unique tx-fifo even if it is non-periodic.
+ 	 */
+ 	if (dir_in && hsotg->dedicated_fifos) {
++		unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+ 		u32 fifo_index = 0;
+ 		u32 fifo_size = UINT_MAX;
+ 
+ 		size = hs_ep->ep.maxpacket * hs_ep->mc;
+-		for (i = 1; i < hsotg->num_of_eps; ++i) {
++		for (i = 1; i <= fifo_count; ++i) {
+ 			if (hsotg->fifo_map & (1 << i))
+ 				continue;
+ 			val = dwc2_readl(hsotg, DPTXFSIZN(i));
+diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
+index 5567ed2cddbe..fa252870c926 100644
+--- a/drivers/usb/dwc3/host.c
++++ b/drivers/usb/dwc3/host.c
+@@ -88,10 +88,10 @@ int dwc3_host_init(struct dwc3 *dwc)
+ 	memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
+ 
+ 	if (dwc->usb3_lpm_capable)
+-		props[prop_idx++].name = "usb3-lpm-capable";
++		props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
+ 
+ 	if (dwc->usb2_lpm_disable)
+-		props[prop_idx++].name = "usb2-lpm-disable";
++		props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
+ 
+ 	/**
+ 	 * WORKAROUND: dwc3 revisions <=3.00a have a limitation
+@@ -103,7 +103,7 @@ int dwc3_host_init(struct dwc3 *dwc)
+ 	 * This following flag tells XHCI to do just that.
+ 	 */
+ 	if (dwc->revision <= DWC3_REVISION_300A)
+-		props[prop_idx++].name = "quirk-broken-port-ped";
++		props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
+ 
+ 	if (prop_idx) {
+ 		ret = platform_device_add_properties(xhci, props);
+diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
+index 64d80c65bb96..aaf975c809bf 100644
+--- a/drivers/usb/gadget/udc/gr_udc.c
++++ b/drivers/usb/gadget/udc/gr_udc.c
+@@ -2175,8 +2175,6 @@ static int gr_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 	}
+ 
+-	spin_lock(&dev->lock);
+-
+ 	/* Inside lock so that no gadget can use this udc until probe is done */
+ 	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
+ 	if (retval) {
+@@ -2185,15 +2183,21 @@ static int gr_probe(struct platform_device *pdev)
+ 	}
+ 	dev->added = 1;
+ 
++	spin_lock(&dev->lock);
++
+ 	retval = gr_udc_init(dev);
+-	if (retval)
++	if (retval) {
++		spin_unlock(&dev->lock);
+ 		goto out;
+-
+-	gr_dfs_create(dev);
++	}
+ 
+ 	/* Clear all interrupt enables that might be left on since last boot */
+ 	gr_disable_interrupts_and_pullup(dev);
+ 
++	spin_unlock(&dev->lock);
++
++	gr_dfs_create(dev);
++
+ 	retval = gr_request_irq(dev, dev->irq);
+ 	if (retval) {
+ 		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
+@@ -2222,8 +2226,6 @@ static int gr_probe(struct platform_device *pdev)
+ 		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
+ 
+ out:
+-	spin_unlock(&dev->lock);
+-
+ 	if (retval)
+ 		gr_remove(pdev);
+ 
+diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
+index a3d2fef67746..5c93226e0e20 100644
+--- a/drivers/usb/musb/omap2430.c
++++ b/drivers/usb/musb/omap2430.c
+@@ -361,8 +361,6 @@ static const struct musb_platform_ops omap2430_ops = {
+ 	.init		= omap2430_musb_init,
+ 	.exit		= omap2430_musb_exit,
+ 
+-	.set_vbus	= omap2430_musb_set_vbus,
+-
+ 	.enable		= omap2430_musb_enable,
+ 	.disable	= omap2430_musb_disable,
+ 
+diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
+index f2983f0f84be..3f5f8198a6bb 100644
+--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
++++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
+@@ -97,8 +97,10 @@ static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
+ 
+ 	/* If there were any mappings at all... */
+ 	if (data->mm) {
+-		ret = mm_iommu_put(data->mm, data->mem);
+-		WARN_ON(ret);
++		if (data->mem) {
++			ret = mm_iommu_put(data->mm, data->mem);
++			WARN_ON(ret);
++		}
+ 
+ 		mmdrop(data->mm);
+ 	}
+diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
+index d46052d8ff41..3d276b30a78c 100644
+--- a/drivers/video/backlight/qcom-wled.c
++++ b/drivers/video/backlight/qcom-wled.c
+@@ -956,8 +956,8 @@ static int wled_configure(struct wled *wled, int version)
+ 	struct wled_config *cfg = &wled->cfg;
+ 	struct device *dev = wled->dev;
+ 	const __be32 *prop_addr;
+-	u32 size, val, c, string_len;
+-	int rc, i, j;
++	u32 size, val, c;
++	int rc, i, j, string_len;
+ 
+ 	const struct wled_u32_opts *u32_opts = NULL;
+ 	const struct wled_u32_opts wled3_opts[] = {
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index aa9541bf964b..f65991a67af2 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -2215,6 +2215,7 @@ config FB_HYPERV
+ 	select FB_CFB_COPYAREA
+ 	select FB_CFB_IMAGEBLIT
+ 	select FB_DEFERRED_IO
++	select DMA_CMA if HAVE_DMA_CONTIGUOUS && CMA
+ 	help
+ 	  This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
+ 
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 4cd27e5172a1..8cf39d98b2bd 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -31,6 +31,16 @@
+  * "set-vmvideo" command. For example
+  *     set-vmvideo -vmname name -horizontalresolution:1920 \
+  * -verticalresolution:1200 -resolutiontype single
++ *
++ * Gen 1 VMs also support direct using VM's physical memory for framebuffer.
++ * It could improve the efficiency and performance for framebuffer and VM.
++ * This requires to allocate contiguous physical memory from Linux kernel's
++ * CMA memory allocator. To enable this, supply a kernel parameter to give
++ * enough memory space to CMA allocator for framebuffer. For example:
++ *    cma=130m
++ * This gives 130MB memory to CMA allocator that can be allocated to
++ * framebuffer. For reference, 8K resolution (7680x4320) takes about
++ * 127MB memory.
+  */
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -228,7 +238,6 @@ struct synthvid_msg {
+ } __packed;
+ 
+ 
+-
+ /* FB driver definitions and structures */
+ #define HVFB_WIDTH 1152 /* default screen width */
+ #define HVFB_HEIGHT 864 /* default screen height */
+@@ -258,12 +267,15 @@ struct hvfb_par {
+ 	/* If true, the VSC notifies the VSP on every framebuffer change */
+ 	bool synchronous_fb;
+ 
++	/* If true, need to copy from deferred IO mem to framebuffer mem */
++	bool need_docopy;
++
+ 	struct notifier_block hvfb_panic_nb;
+ 
+ 	/* Memory for deferred IO and frame buffer itself */
+ 	unsigned char *dio_vp;
+ 	unsigned char *mmio_vp;
+-	unsigned long mmio_pp;
++	phys_addr_t mmio_pp;
+ 
+ 	/* Dirty rectangle, protected by delayed_refresh_lock */
+ 	int x1, y1, x2, y2;
+@@ -434,7 +446,7 @@ static void synthvid_deferred_io(struct fb_info *p,
+ 		maxy = max_t(int, maxy, y2);
+ 
+ 		/* Copy from dio space to mmio address */
+-		if (par->fb_ready)
++		if (par->fb_ready && par->need_docopy)
+ 			hvfb_docopy(par, start, PAGE_SIZE);
+ 	}
+ 
+@@ -751,12 +763,12 @@ static void hvfb_update_work(struct work_struct *w)
+ 		return;
+ 
+ 	/* Copy the dirty rectangle to frame buffer memory */
+-	for (j = y1; j < y2; j++) {
+-		hvfb_docopy(par,
+-			    j * info->fix.line_length +
+-			    (x1 * screen_depth / 8),
+-			    (x2 - x1) * screen_depth / 8);
+-	}
++	if (par->need_docopy)
++		for (j = y1; j < y2; j++)
++			hvfb_docopy(par,
++				    j * info->fix.line_length +
++				    (x1 * screen_depth / 8),
++				    (x2 - x1) * screen_depth / 8);
+ 
+ 	/* Refresh */
+ 	if (par->fb_ready && par->update)
+@@ -801,7 +813,8 @@ static int hvfb_on_panic(struct notifier_block *nb,
+ 	par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
+ 	par->synchronous_fb = true;
+ 	info = par->info;
+-	hvfb_docopy(par, 0, dio_fb_size);
++	if (par->need_docopy)
++		hvfb_docopy(par, 0, dio_fb_size);
+ 	synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
+ 
+ 	return NOTIFY_DONE;
+@@ -940,6 +953,62 @@ static void hvfb_get_option(struct fb_info *info)
+ 	return;
+ }
+ 
++/*
++ * Allocate enough contiguous physical memory.
++ * Return physical address if succeeded or -1 if failed.
++ */
++static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
++				   unsigned int request_size)
++{
++	struct page *page = NULL;
++	dma_addr_t dma_handle;
++	void *vmem;
++	phys_addr_t paddr = 0;
++	unsigned int order = get_order(request_size);
++
++	if (request_size == 0)
++		return -1;
++
++	if (order < MAX_ORDER) {
++		/* Call alloc_pages if the size is less than 2^MAX_ORDER */
++		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
++		if (!page)
++			return -1;
++
++		paddr = (page_to_pfn(page) << PAGE_SHIFT);
++	} else {
++		/* Allocate from CMA */
++		hdev->device.coherent_dma_mask = DMA_BIT_MASK(64);
++
++		vmem = dma_alloc_coherent(&hdev->device,
++					  round_up(request_size, PAGE_SIZE),
++					  &dma_handle,
++					  GFP_KERNEL | __GFP_NOWARN);
++
++		if (!vmem)
++			return -1;
++
++		paddr = virt_to_phys(vmem);
++	}
++
++	return paddr;
++}
++
++/* Release contiguous physical memory */
++static void hvfb_release_phymem(struct hv_device *hdev,
++				phys_addr_t paddr, unsigned int size)
++{
++	unsigned int order = get_order(size);
++
++	if (order < MAX_ORDER)
++		__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
++	else
++		dma_free_coherent(&hdev->device,
++				  round_up(size, PAGE_SIZE),
++				  phys_to_virt(paddr),
++				  paddr);
++}
++
+ 
+ /* Get framebuffer memory from Hyper-V video pci space */
+ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+@@ -949,22 +1018,61 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 	void __iomem *fb_virt;
+ 	int gen2vm = efi_enabled(EFI_BOOT);
+ 	resource_size_t pot_start, pot_end;
++	phys_addr_t paddr;
+ 	int ret;
+ 
+-	dio_fb_size =
+-		screen_width * screen_height * screen_depth / 8;
++	info->apertures = alloc_apertures(1);
++	if (!info->apertures)
++		return -ENOMEM;
+ 
+-	if (gen2vm) {
+-		pot_start = 0;
+-		pot_end = -1;
+-	} else {
++	if (!gen2vm) {
+ 		pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+-			      PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
++			PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ 		if (!pdev) {
+ 			pr_err("Unable to find PCI Hyper-V video\n");
++			kfree(info->apertures);
+ 			return -ENODEV;
+ 		}
+ 
++		info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
++		info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
++
++		/*
++		 * For Gen 1 VM, we can directly use the contiguous memory
++		 * from VM. If we succeed, deferred IO happens directly
++		 * on this allocated framebuffer memory, avoiding extra
++		 * memory copy.
++		 */
++		paddr = hvfb_get_phymem(hdev, screen_fb_size);
++		if (paddr != (phys_addr_t) -1) {
++			par->mmio_pp = paddr;
++			par->mmio_vp = par->dio_vp = __va(paddr);
++
++			info->fix.smem_start = paddr;
++			info->fix.smem_len = screen_fb_size;
++			info->screen_base = par->mmio_vp;
++			info->screen_size = screen_fb_size;
++
++			par->need_docopy = false;
++			goto getmem_done;
++		}
++		pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
++	} else {
++		info->apertures->ranges[0].base = screen_info.lfb_base;
++		info->apertures->ranges[0].size = screen_info.lfb_size;
++	}
++
++	/*
++	 * Cannot use the contiguous physical memory.
++	 * Allocate mmio space for framebuffer.
++	 */
++	dio_fb_size =
++		screen_width * screen_height * screen_depth / 8;
++
++	if (gen2vm) {
++		pot_start = 0;
++		pot_end = -1;
++	} else {
+ 		if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+ 		    pci_resource_len(pdev, 0) < screen_fb_size) {
+ 			pr_err("Resource not available or (0x%lx < 0x%lx)\n",
+@@ -993,20 +1101,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 	if (par->dio_vp == NULL)
+ 		goto err3;
+ 
+-	info->apertures = alloc_apertures(1);
+-	if (!info->apertures)
+-		goto err4;
+-
+-	if (gen2vm) {
+-		info->apertures->ranges[0].base = screen_info.lfb_base;
+-		info->apertures->ranges[0].size = screen_info.lfb_size;
+-		remove_conflicting_framebuffers(info->apertures,
+-						KBUILD_MODNAME, false);
+-	} else {
+-		info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
+-		info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
+-	}
+-
+ 	/* Physical address of FB device */
+ 	par->mmio_pp = par->mem->start;
+ 	/* Virtual address of FB device */
+@@ -1017,13 +1111,15 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 	info->screen_base = par->dio_vp;
+ 	info->screen_size = dio_fb_size;
+ 
++getmem_done:
++	remove_conflicting_framebuffers(info->apertures,
++					KBUILD_MODNAME, false);
+ 	if (!gen2vm)
+ 		pci_dev_put(pdev);
++	kfree(info->apertures);
+ 
+ 	return 0;
+ 
+-err4:
+-	vfree(par->dio_vp);
+ err3:
+ 	iounmap(fb_virt);
+ err2:
+@@ -1032,18 +1128,25 @@ err2:
+ err1:
+ 	if (!gen2vm)
+ 		pci_dev_put(pdev);
++	kfree(info->apertures);
+ 
+ 	return -ENOMEM;
+ }
+ 
+ /* Release the framebuffer */
+-static void hvfb_putmem(struct fb_info *info)
++static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
+ {
+ 	struct hvfb_par *par = info->par;
+ 
+-	vfree(par->dio_vp);
+-	iounmap(info->screen_base);
+-	vmbus_free_mmio(par->mem->start, screen_fb_size);
++	if (par->need_docopy) {
++		vfree(par->dio_vp);
++		iounmap(info->screen_base);
++		vmbus_free_mmio(par->mem->start, screen_fb_size);
++	} else {
++		hvfb_release_phymem(hdev, info->fix.smem_start,
++				    screen_fb_size);
++	}
++
+ 	par->mem = NULL;
+ }
+ 
+@@ -1062,6 +1165,7 @@ static int hvfb_probe(struct hv_device *hdev,
+ 	par = info->par;
+ 	par->info = info;
+ 	par->fb_ready = false;
++	par->need_docopy = true;
+ 	init_completion(&par->wait);
+ 	INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);
+ 
+@@ -1147,7 +1251,7 @@ static int hvfb_probe(struct hv_device *hdev,
+ 
+ error:
+ 	fb_deferred_io_cleanup(info);
+-	hvfb_putmem(info);
++	hvfb_putmem(hdev, info);
+ error2:
+ 	vmbus_close(hdev->channel);
+ error1:
+@@ -1177,7 +1281,7 @@ static int hvfb_remove(struct hv_device *hdev)
+ 	vmbus_close(hdev->channel);
+ 	hv_set_drvdata(hdev, NULL);
+ 
+-	hvfb_putmem(info);
++	hvfb_putmem(hdev, info);
+ 	framebuffer_release(info);
+ 
+ 	return 0;
+diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
+index 1410f476e135..1fc50fc0694b 100644
+--- a/drivers/video/fbdev/pxa168fb.c
++++ b/drivers/video/fbdev/pxa168fb.c
+@@ -766,8 +766,8 @@ failed_free_cmap:
+ failed_free_clk:
+ 	clk_disable_unprepare(fbi->clk);
+ failed_free_fbmem:
+-	dma_free_coherent(fbi->dev, info->fix.smem_len,
+-			info->screen_base, fbi->fb_start_dma);
++	dma_free_wc(fbi->dev, info->fix.smem_len,
++		    info->screen_base, fbi->fb_start_dma);
+ failed_free_info:
+ 	kfree(info);
+ 
+@@ -801,7 +801,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
+ 
+ 	irq = platform_get_irq(pdev, 0);
+ 
+-	dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
++	dma_free_wc(fbi->dev, info->fix.smem_len,
+ 		    info->screen_base, info->fix.smem_start);
+ 
+ 	clk_disable_unprepare(fbi->clk);
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 7e5d84caeb94..7bfe365d9372 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -158,6 +158,8 @@ static void set_page_pfns(struct virtio_balloon *vb,
+ {
+ 	unsigned int i;
+ 
++	BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);
++
+ 	/*
+ 	 * Set balloon pfns pointing at this page.
+ 	 * Note that the first pfn points at start of the page.
+diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
+index ca752b8f495f..cb1eb7e05f87 100644
+--- a/drivers/visorbus/visorchipset.c
++++ b/drivers/visorbus/visorchipset.c
+@@ -1210,14 +1210,17 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
+ {
+ 	struct controlvm_message local_crash_bus_msg;
+ 	struct controlvm_message local_crash_dev_msg;
+-	struct controlvm_message msg;
++	struct controlvm_message msg = {
++		.hdr.id = CONTROLVM_CHIPSET_INIT,
++		.cmd.init_chipset = {
++			.bus_count = 23,
++			.switch_count = 0,
++		},
++	};
+ 	u32 local_crash_msg_offset;
+ 	u16 local_crash_msg_count;
+ 
+ 	/* send init chipset msg */
+-	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
+-	msg.cmd.init_chipset.bus_count = 23;
+-	msg.cmd.init_chipset.switch_count = 0;
+ 	chipset_init(&msg);
+ 	/* get saved message count */
+ 	if (visorchannel_read(chipset_dev->controlvm_channel,
+diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
+index 3208a4409e44..6a1bc284f297 100644
+--- a/drivers/vme/bridges/vme_fake.c
++++ b/drivers/vme/bridges/vme_fake.c
+@@ -414,8 +414,9 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
+ 	}
+ }
+ 
+-static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
+-		u32 aspace, u32 cycle)
++static noinline_for_stack u8 fake_vmeread8(struct fake_driver *bridge,
++					   unsigned long long addr,
++					   u32 aspace, u32 cycle)
+ {
+ 	u8 retval = 0xff;
+ 	int i;
+@@ -446,8 +447,9 @@ static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
+ 	return retval;
+ }
+ 
+-static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
+-		u32 aspace, u32 cycle)
++static noinline_for_stack u16 fake_vmeread16(struct fake_driver *bridge,
++					     unsigned long long addr,
++					     u32 aspace, u32 cycle)
+ {
+ 	u16 retval = 0xffff;
+ 	int i;
+@@ -478,8 +480,9 @@ static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
+ 	return retval;
+ }
+ 
+-static u32 fake_vmeread32(struct fake_driver *bridge, unsigned long long addr,
+-		u32 aspace, u32 cycle)
++static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge,
++					     unsigned long long addr,
++					     u32 aspace, u32 cycle)
+ {
+ 	u32 retval = 0xffffffff;
+ 	int i;
+@@ -609,8 +612,9 @@ out:
+ 	return retval;
+ }
+ 
+-static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
+-			   unsigned long long addr, u32 aspace, u32 cycle)
++static noinline_for_stack void fake_vmewrite8(struct fake_driver *bridge,
++					      u8 *buf, unsigned long long addr,
++					      u32 aspace, u32 cycle)
+ {
+ 	int i;
+ 	unsigned long long start, end, offset;
+@@ -639,8 +643,9 @@ static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
+ 
+ }
+ 
+-static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
+-			    unsigned long long addr, u32 aspace, u32 cycle)
++static noinline_for_stack void fake_vmewrite16(struct fake_driver *bridge,
++					       u16 *buf, unsigned long long addr,
++					       u32 aspace, u32 cycle)
+ {
+ 	int i;
+ 	unsigned long long start, end, offset;
+@@ -669,8 +674,9 @@ static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
+ 
+ }
+ 
+-static void fake_vmewrite32(struct fake_driver *bridge, u32 *buf,
+-			    unsigned long long addr, u32 aspace, u32 cycle)
++static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge,
++					       u32 *buf, unsigned long long addr,
++					       u32 aspace, u32 cycle)
+ {
+ 	int i;
+ 	unsigned long long start, end, offset;
+diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
+index 0b52ab4cb964..72c70f59fc60 100644
+--- a/fs/btrfs/check-integrity.c
++++ b/fs/btrfs/check-integrity.c
+@@ -629,7 +629,6 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev,
+ static int btrfsic_process_superblock(struct btrfsic_state *state,
+ 				      struct btrfs_fs_devices *fs_devices)
+ {
+-	struct btrfs_fs_info *fs_info = state->fs_info;
+ 	struct btrfs_super_block *selected_super;
+ 	struct list_head *dev_head = &fs_devices->devices;
+ 	struct btrfs_device *device;
+@@ -700,7 +699,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
+ 			break;
+ 		}
+ 
+-		num_copies = btrfs_num_copies(fs_info, next_bytenr,
++		num_copies = btrfs_num_copies(state->fs_info, next_bytenr,
+ 					      state->metablock_size);
+ 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+ 			pr_info("num_copies(log_bytenr=%llu) = %d\n",
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index ba7292435c14..2e9f938508e9 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3108,17 +3108,21 @@ do {								\
+ 	rcu_read_unlock();					\
+ } while (0)
+ 
+-__cold
+-static inline void assfail(const char *expr, const char *file, int line)
++#ifdef CONFIG_BTRFS_ASSERT
++__cold __noreturn
++static inline void assertfail(const char *expr, const char *file, int line)
+ {
+-	if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
+-		pr_err("assertion failed: %s, in %s:%d\n", expr, file, line);
+-		BUG();
+-	}
++	pr_err("assertion failed: %s, in %s:%d\n", expr, file, line);
++	BUG();
+ }
+ 
+-#define ASSERT(expr)	\
+-	(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
++#define ASSERT(expr)						\
++	(likely(expr) ? (void)0 : assertfail(#expr, __FILE__, __LINE__))
++
++#else
++static inline void assertfail(const char *expr, const char *file, int line) { }
++#define ASSERT(expr)	(void)(expr)
++#endif
+ 
+ /*
+  * Use that for functions that are conditionally exported for sanity tests but
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index b1bfdc5c1387..6f18333e83c3 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -274,7 +274,8 @@ found:
+ 		csum += count * csum_size;
+ 		nblocks -= count;
+ next:
+-		while (count--) {
++		while (count > 0) {
++			count--;
+ 			disk_bytenr += fs_info->sectorsize;
+ 			offset += fs_info->sectorsize;
+ 			page_bytes_left -= fs_info->sectorsize;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index c70baafb2a39..537b4c563f09 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2191,6 +2191,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+ /* see btrfs_writepage_start_hook for details on why this is required */
+ struct btrfs_writepage_fixup {
+ 	struct page *page;
++	struct inode *inode;
+ 	struct btrfs_work work;
+ };
+ 
+@@ -2204,27 +2205,71 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
+ 	struct inode *inode;
+ 	u64 page_start;
+ 	u64 page_end;
+-	int ret;
++	int ret = 0;
++	bool free_delalloc_space = true;
+ 
+ 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
+ 	page = fixup->page;
++	inode = fixup->inode;
++	page_start = page_offset(page);
++	page_end = page_offset(page) + PAGE_SIZE - 1;
++
++	/*
++	 * This is similar to page_mkwrite, we need to reserve the space before
++	 * we take the page lock.
++	 */
++	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
++					   PAGE_SIZE);
+ again:
+ 	lock_page(page);
++
++	/*
++	 * Before we queued this fixup, we took a reference on the page.
++	 * page->mapping may go NULL, but it shouldn't be moved to a different
++	 * address space.
++	 */
+ 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
+-		ClearPageChecked(page);
++		/*
++		 * Unfortunately this is a little tricky, either
++		 *
++		 * 1) We got here and our page had already been dealt with and
++		 *    we reserved our space, thus ret == 0, so we need to just
++		 *    drop our space reservation and bail.  This can happen the
++		 *    first time we come into the fixup worker, or could happen
++		 *    while waiting for the ordered extent.
++		 * 2) Our page was already dealt with, but we happened to get an
++		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
++		 *    this case we obviously don't have anything to release, but
++		 *    because the page was already dealt with we don't want to
++		 *    mark the page with an error, so make sure we're resetting
++		 *    ret to 0.  This is why we have this check _before_ the ret
++		 *    check, because we do not want to have a surprise ENOSPC
++		 *    when the page was already properly dealt with.
++		 */
++		if (!ret) {
++			btrfs_delalloc_release_extents(BTRFS_I(inode),
++						       PAGE_SIZE);
++			btrfs_delalloc_release_space(inode, data_reserved,
++						     page_start, PAGE_SIZE,
++						     true);
++		}
++		ret = 0;
+ 		goto out_page;
+ 	}
+ 
+-	inode = page->mapping->host;
+-	page_start = page_offset(page);
+-	page_end = page_offset(page) + PAGE_SIZE - 1;
++	/*
++	 * We can't mess with the page state unless it is locked, so now that
++	 * it is locked bail if we failed to make our space reservation.
++	 */
++	if (ret)
++		goto out_page;
+ 
+ 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
+ 			 &cached_state);
+ 
+ 	/* already ordered? We're done */
+ 	if (PagePrivate2(page))
+-		goto out;
++		goto out_reserved;
+ 
+ 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
+ 					PAGE_SIZE);
+@@ -2237,39 +2282,49 @@ again:
+ 		goto again;
+ 	}
+ 
+-	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
+-					   PAGE_SIZE);
+-	if (ret) {
+-		mapping_set_error(page->mapping, ret);
+-		end_extent_writepage(page, ret, page_start, page_end);
+-		ClearPageChecked(page);
+-		goto out;
+-	 }
+-
+ 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
+ 					&cached_state);
+-	if (ret) {
+-		mapping_set_error(page->mapping, ret);
+-		end_extent_writepage(page, ret, page_start, page_end);
+-		ClearPageChecked(page);
++	if (ret)
+ 		goto out_reserved;
+-	}
+ 
+-	ClearPageChecked(page);
+-	set_page_dirty(page);
++	/*
++	 * Everything went as planned, we're now the owner of a dirty page with
++	 * delayed allocation bits set and space reserved for our COW
++	 * destination.
++	 *
++	 * The page was dirty when we started, nothing should have cleaned it.
++	 */
++	BUG_ON(!PageDirty(page));
++	free_delalloc_space = false;
+ out_reserved:
+ 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
+-	if (ret)
++	if (free_delalloc_space)
+ 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
+ 					     PAGE_SIZE, true);
+-out:
+ 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
+ 			     &cached_state);
+ out_page:
++	if (ret) {
++		/*
++		 * We hit ENOSPC or other errors.  Update the mapping and page
++		 * to reflect the errors and clean the page.
++		 */
++		mapping_set_error(page->mapping, ret);
++		end_extent_writepage(page, ret, page_start, page_end);
++		clear_page_dirty_for_io(page);
++		SetPageError(page);
++	}
++	ClearPageChecked(page);
+ 	unlock_page(page);
+ 	put_page(page);
+ 	kfree(fixup);
+ 	extent_changeset_free(data_reserved);
++	/*
++	 * As a precaution, do a delayed iput in case it would be the last iput
++	 * that could need flushing space. Recursing back to fixup worker would
++	 * deadlock.
++	 */
++	btrfs_add_delayed_iput(inode);
+ }
+ 
+ /*
+@@ -2293,6 +2348,13 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
+ 	if (TestClearPagePrivate2(page))
+ 		return 0;
+ 
++	/*
++	 * PageChecked is set below when we create a fixup worker for this page,
++	 * don't try to create another one if we're already PageChecked()
++	 *
++	 * The extent_io writepage code will redirty the page if we send back
++	 * EAGAIN.
++	 */
+ 	if (PageChecked(page))
+ 		return -EAGAIN;
+ 
+@@ -2300,12 +2362,21 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
+ 	if (!fixup)
+ 		return -EAGAIN;
+ 
++	/*
++	 * We are already holding a reference to this inode from
++	 * write_cache_pages.  We need to hold it because the space reservation
++	 * takes place outside of the page lock, and we can't trust
++	 * page->mapping outside of the page lock.
++	 */
++	ihold(inode);
+ 	SetPageChecked(page);
+ 	get_page(page);
+ 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
+ 	fixup->page = page;
++	fixup->inode = inode;
+ 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
+-	return -EBUSY;
++
++	return -EAGAIN;
+ }
+ 
+ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 72ff80f7f24c..a8b71ded4d21 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -723,6 +723,32 @@ static struct btrfs_fs_devices *find_fsid_changed(
+ 
+ 	return NULL;
+ }
++
++static struct btrfs_fs_devices *find_fsid_reverted_metadata(
++				struct btrfs_super_block *disk_super)
++{
++	struct btrfs_fs_devices *fs_devices;
++
++	/*
++	 * Handle the case where the scanned device is part of an fs whose last
++	 * metadata UUID change reverted it to the original FSID. At the same
++	 * time fs_devices was first created by another constituent device
++	 * which didn't fully observe the operation. This results in a
++	 * btrfs_fs_devices created with metadata/fsid different AND
++	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
++	 * fs_devices equal to the FSID of the disk.
++	 */
++	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
++		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
++			   BTRFS_FSID_SIZE) != 0 &&
++		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
++			   BTRFS_FSID_SIZE) == 0 &&
++		    fs_devices->fsid_change)
++			return fs_devices;
++	}
++
++	return NULL;
++}
+ /*
+  * Add new device to list of registered devices
+  *
+@@ -762,7 +788,9 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 		fs_devices = find_fsid(disk_super->fsid,
+ 				       disk_super->metadata_uuid);
+ 	} else {
+-		fs_devices = find_fsid(disk_super->fsid, NULL);
++		fs_devices = find_fsid_reverted_metadata(disk_super);
++		if (!fs_devices)
++			fs_devices = find_fsid(disk_super->fsid, NULL);
+ 	}
+ 
+ 
+@@ -792,12 +820,18 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 		 * a device which had the CHANGING_FSID_V2 flag then replace the
+ 		 * metadata_uuid/fsid values of the fs_devices.
+ 		 */
+-		if (has_metadata_uuid && fs_devices->fsid_change &&
++		if (fs_devices->fsid_change &&
+ 		    found_transid > fs_devices->latest_generation) {
+ 			memcpy(fs_devices->fsid, disk_super->fsid,
+ 					BTRFS_FSID_SIZE);
+-			memcpy(fs_devices->metadata_uuid,
+-					disk_super->metadata_uuid, BTRFS_FSID_SIZE);
++
++			if (has_metadata_uuid)
++				memcpy(fs_devices->metadata_uuid,
++				       disk_super->metadata_uuid,
++				       BTRFS_FSID_SIZE);
++			else
++				memcpy(fs_devices->metadata_uuid,
++				       disk_super->fsid, BTRFS_FSID_SIZE);
+ 
+ 			fs_devices->fsid_change = false;
+ 		}
+@@ -7342,6 +7376,8 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
+ 			else
+ 				btrfs_dev_stat_set(dev, i, 0);
+ 		}
++		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
++			   current->comm, task_pid_nr(current));
+ 	} else {
+ 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+ 			if (stats->nr_items > i)
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 145d46ba25ae..816d49aed96b 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -2558,8 +2558,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
+ 		if (!(mdsc->fsc->mount_options->flags &
+ 		      CEPH_MOUNT_OPT_MOUNTWAIT) &&
+ 		    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
+-			err = -ENOENT;
+-			pr_info("probably no mds server is up\n");
++			err = -EHOSTUNREACH;
+ 			goto finish;
+ 		}
+ 	}
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 9b5536451528..5a708ac9a54c 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -1066,6 +1066,11 @@ static int ceph_get_tree(struct fs_context *fc)
+ 	return 0;
+ 
+ out_splat:
++	if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) {
++		pr_info("No mds server is up or the cluster is laggy\n");
++		err = -EHOSTUNREACH;
++	}
++
+ 	ceph_mdsc_close_sessions(fsc->mdsc);
+ 	deactivate_locked_super(sb);
+ 	goto out_final;
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index 41957b82d796..606f26d862dc 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -120,17 +120,17 @@ cifs_build_devname(char *nodename, const char *prepath)
+ 
+ 
+ /**
+- * cifs_compose_mount_options	-	creates mount options for refferral
++ * cifs_compose_mount_options	-	creates mount options for referral
+  * @sb_mountdata:	parent/root DFS mount options (template)
+  * @fullpath:		full path in UNC format
+- * @ref:		server's referral
++ * @ref:		optional server's referral
+  * @devname:		optional pointer for saving device name
+  *
+  * creates mount options for submount based on template options sb_mountdata
+  * and replacing unc,ip,prefixpath options with ones we've got form ref_unc.
+  *
+  * Returns: pointer to new mount options or ERR_PTR.
+- * Caller is responcible for freeing retunrned value if it is not error.
++ * Caller is responsible for freeing returned value if it is not error.
+  */
+ char *cifs_compose_mount_options(const char *sb_mountdata,
+ 				   const char *fullpath,
+@@ -150,18 +150,27 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
+ 	if (sb_mountdata == NULL)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	if (strlen(fullpath) - ref->path_consumed) {
+-		prepath = fullpath + ref->path_consumed;
+-		/* skip initial delimiter */
+-		if (*prepath == '/' || *prepath == '\\')
+-			prepath++;
+-	}
++	if (ref) {
++		if (strlen(fullpath) - ref->path_consumed) {
++			prepath = fullpath + ref->path_consumed;
++			/* skip initial delimiter */
++			if (*prepath == '/' || *prepath == '\\')
++				prepath++;
++		}
+ 
+-	name = cifs_build_devname(ref->node_name, prepath);
+-	if (IS_ERR(name)) {
+-		rc = PTR_ERR(name);
+-		name = NULL;
+-		goto compose_mount_options_err;
++		name = cifs_build_devname(ref->node_name, prepath);
++		if (IS_ERR(name)) {
++			rc = PTR_ERR(name);
++			name = NULL;
++			goto compose_mount_options_err;
++		}
++	} else {
++		name = cifs_build_devname((char *)fullpath, NULL);
++		if (IS_ERR(name)) {
++			rc = PTR_ERR(name);
++			name = NULL;
++			goto compose_mount_options_err;
++		}
+ 	}
+ 
+ 	rc = dns_resolve_server_name_to_ip(name, &srvIP);
+@@ -225,6 +234,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
+ 
+ 	if (devname)
+ 		*devname = name;
++	else
++		kfree(name);
+ 
+ 	/*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
+ 	/*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
+@@ -241,23 +252,23 @@ compose_mount_options_err:
+ }
+ 
+ /**
+- * cifs_dfs_do_refmount - mounts specified path using provided refferal
++ * cifs_dfs_do_mount - mounts specified path using DFS full path
++ *
++ * Always pass down @fullpath to smb3_do_mount() so we can use the root server
++ * to perform failover in case we failed to connect to the first target in the
++ * referral.
++ *
+  * @cifs_sb:		parent/root superblock
+  * @fullpath:		full path in UNC format
+- * @ref:		server's referral
+  */
+-static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
+-		struct cifs_sb_info *cifs_sb,
+-		const char *fullpath, const struct dfs_info3_param *ref)
++static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
++					  struct cifs_sb_info *cifs_sb,
++					  const char *fullpath)
+ {
+ 	struct vfsmount *mnt;
+ 	char *mountdata;
+ 	char *devname;
+ 
+-	/*
+-	 * Always pass down the DFS full path to smb3_do_mount() so we
+-	 * can use it later for failover.
+-	 */
+ 	devname = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
+ 	if (!devname)
+ 		return ERR_PTR(-ENOMEM);
+@@ -266,7 +277,7 @@ static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
+ 
+ 	/* strip first '\' from fullpath */
+ 	mountdata = cifs_compose_mount_options(cifs_sb->mountdata,
+-					       fullpath + 1, ref, NULL);
++					       fullpath + 1, NULL, NULL);
+ 	if (IS_ERR(mountdata)) {
+ 		kfree(devname);
+ 		return (struct vfsmount *)mountdata;
+@@ -278,28 +289,16 @@ static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
+ 	return mnt;
+ }
+ 
+-static void dump_referral(const struct dfs_info3_param *ref)
+-{
+-	cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name);
+-	cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name);
+-	cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n",
+-		 ref->flags, ref->server_type);
+-	cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n",
+-		 ref->ref_flag, ref->path_consumed);
+-}
+-
+ /*
+  * Create a vfsmount that we can automount
+  */
+ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
+ {
+-	struct dfs_info3_param referral = {0};
+ 	struct cifs_sb_info *cifs_sb;
+ 	struct cifs_ses *ses;
+ 	struct cifs_tcon *tcon;
+ 	char *full_path, *root_path;
+ 	unsigned int xid;
+-	int len;
+ 	int rc;
+ 	struct vfsmount *mnt;
+ 
+@@ -357,7 +356,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
+ 	if (!rc) {
+ 		rc = dfs_cache_find(xid, ses, cifs_sb->local_nls,
+ 				    cifs_remap(cifs_sb), full_path + 1,
+-				    &referral, NULL);
++				    NULL, NULL);
+ 	}
+ 
+ 	free_xid(xid);
+@@ -366,26 +365,16 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
+ 		mnt = ERR_PTR(rc);
+ 		goto free_root_path;
+ 	}
+-
+-	dump_referral(&referral);
+-
+-	len = strlen(referral.node_name);
+-	if (len < 2) {
+-		cifs_dbg(VFS, "%s: Net Address path too short: %s\n",
+-			 __func__, referral.node_name);
+-		mnt = ERR_PTR(-EINVAL);
+-		goto free_dfs_ref;
+-	}
+ 	/*
+-	 * cifs_mount() will retry every available node server in case
+-	 * of failures.
++	 * OK - we were able to get and cache a referral for @full_path.
++	 *
++	 * Now, pass it down to cifs_mount() and it will retry every available
++	 * node server in case of failures - no need to do it here.
+ 	 */
+-	mnt = cifs_dfs_do_refmount(mntpt, cifs_sb, full_path, &referral);
+-	cifs_dbg(FYI, "%s: cifs_dfs_do_refmount:%s , mnt:%p\n", __func__,
+-		 referral.node_name, mnt);
++	mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
++	cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__,
++		 full_path + 1, mnt);
+ 
+-free_dfs_ref:
+-	free_dfs_info_param(&referral);
+ free_root_path:
+ 	kfree(root_path);
+ free_full_path:
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 05ea0e2b7e0e..0aa3623ae0e1 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3709,8 +3709,10 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+ {
+ 	struct cifs_sb_info *old = CIFS_SB(sb);
+ 	struct cifs_sb_info *new = mnt_data->cifs_sb;
+-	bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
+-	bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
++	bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
++		old->prepath;
++	bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
++		new->prepath;
+ 
+ 	if (old_set && new_set && !strcmp(new->prepath, old->prepath))
+ 		return 1;
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 2faa05860a48..cf6cec59696c 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -1319,7 +1319,7 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
+ 	char *mdata = NULL, *devname = NULL;
+ 	struct TCP_Server_Info *server;
+ 	struct cifs_ses *ses;
+-	struct smb_vol vol;
++	struct smb_vol vol = {NULL};
+ 
+ 	rpath = get_dfs_root(path);
+ 	if (IS_ERR(rpath))
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 0a3b37abc5e1..6c9497c18f0b 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -4029,6 +4029,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
+ 				     wdata->cfile->fid.persistent_fid,
+ 				     tcon->tid, tcon->ses->Suid, wdata->offset,
+ 				     wdata->bytes, wdata->result);
++		if (wdata->result == -ENOSPC)
++			printk_once(KERN_WARNING "Out of space writing to %s\n",
++				    tcon->treeName);
+ 	} else
+ 		trace_smb3_write_done(0 /* no xid */,
+ 				      wdata->cfile->fid.persistent_fid,
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 6a7293a5cda2..977ac58dc718 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -88,9 +88,10 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+ 	ssize_t ret;
+ 
+-	if (!inode_trylock_shared(inode)) {
+-		if (iocb->ki_flags & IOCB_NOWAIT)
++	if (iocb->ki_flags & IOCB_NOWAIT) {
++		if (!inode_trylock_shared(inode))
+ 			return -EAGAIN;
++	} else {
+ 		inode_lock_shared(inode);
+ 	}
+ 	/*
+@@ -487,9 +488,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 	bool extend = false;
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+ 
+-	if (!inode_trylock(inode)) {
+-		if (iocb->ki_flags & IOCB_NOWAIT)
++	if (iocb->ki_flags & IOCB_NOWAIT) {
++		if (!inode_trylock(inode))
+ 			return -EAGAIN;
++	} else {
+ 		inode_lock(inode);
+ 	}
+ 
+diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
+index fef7755300c3..410c904cf59b 100644
+--- a/fs/ext4/readpage.c
++++ b/fs/ext4/readpage.c
+@@ -57,6 +57,7 @@ enum bio_post_read_step {
+ 	STEP_INITIAL = 0,
+ 	STEP_DECRYPT,
+ 	STEP_VERITY,
++	STEP_MAX,
+ };
+ 
+ struct bio_post_read_ctx {
+@@ -106,10 +107,22 @@ static void verity_work(struct work_struct *work)
+ {
+ 	struct bio_post_read_ctx *ctx =
+ 		container_of(work, struct bio_post_read_ctx, work);
++	struct bio *bio = ctx->bio;
+ 
+-	fsverity_verify_bio(ctx->bio);
++	/*
++	 * fsverity_verify_bio() may call readpages() again, and although verity
++	 * will be disabled for that, decryption may still be needed, causing
++	 * another bio_post_read_ctx to be allocated.  So to guarantee that
++	 * mempool_alloc() never deadlocks we must free the current ctx first.
++	 * This is safe because verity is the last post-read step.
++	 */
++	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
++	mempool_free(ctx, bio_post_read_ctx_pool);
++	bio->bi_private = NULL;
+ 
+-	bio_post_read_processing(ctx);
++	fsverity_verify_bio(bio);
++
++	__read_end_io(bio);
+ }
+ 
+ static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index a034cd0ce021..fc40a72f7827 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1180,19 +1180,6 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
+ 	int err = 0;
+ 	bool direct_io = iocb->ki_flags & IOCB_DIRECT;
+ 
+-	/* convert inline data for Direct I/O*/
+-	if (direct_io) {
+-		err = f2fs_convert_inline_inode(inode);
+-		if (err)
+-			return err;
+-	}
+-
+-	if (direct_io && allow_outplace_dio(inode, iocb, from))
+-		return 0;
+-
+-	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
+-		return 0;
+-
+ 	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
+ 	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
+ 	if (map.m_len > map.m_lblk)
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 13aef5f28fa8..6c4436a5ce79 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -50,7 +50,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
+ 	struct page *page = vmf->page;
+ 	struct inode *inode = file_inode(vmf->vma->vm_file);
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	struct dnode_of_data dn = { .node_changed = false };
++	struct dnode_of_data dn;
+ 	int err;
+ 
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+@@ -63,6 +63,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
+ 		goto err;
+ 	}
+ 
++	/* should do out of any locked page */
++	f2fs_balance_fs(sbi, true);
++
+ 	sb_start_pagefault(inode->i_sb);
+ 
+ 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
+@@ -120,8 +123,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
+ out_sem:
+ 	up_read(&F2FS_I(inode)->i_mmap_sem);
+ 
+-	f2fs_balance_fs(sbi, dn.node_changed);
+-
+ 	sb_end_pagefault(inode->i_sb);
+ err:
+ 	return block_page_mkwrite_return(err);
+@@ -3383,18 +3384,41 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 				ret = -EAGAIN;
+ 				goto out;
+ 			}
+-		} else {
+-			preallocated = true;
+-			target_size = iocb->ki_pos + iov_iter_count(from);
++			goto write;
++		}
+ 
+-			err = f2fs_preallocate_blocks(iocb, from);
+-			if (err) {
+-				clear_inode_flag(inode, FI_NO_PREALLOC);
+-				inode_unlock(inode);
+-				ret = err;
+-				goto out;
+-			}
++		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
++			goto write;
++
++		if (iocb->ki_flags & IOCB_DIRECT) {
++			/*
++			 * Convert inline data for Direct I/O before entering
++			 * f2fs_direct_IO().
++			 */
++			err = f2fs_convert_inline_inode(inode);
++			if (err)
++				goto out_err;
++			/*
++			 * If f2fs_force_buffered_io() is true, we have to allocate
++			 * blocks all the time, since f2fs_direct_IO will fall
++			 * back to buffered IO.
++			 */
++			if (!f2fs_force_buffered_io(inode, iocb, from) &&
++					allow_outplace_dio(inode, iocb, from))
++				goto write;
++		}
++		preallocated = true;
++		target_size = iocb->ki_pos + iov_iter_count(from);
++
++		err = f2fs_preallocate_blocks(iocb, from);
++		if (err) {
++out_err:
++			clear_inode_flag(inode, FI_NO_PREALLOC);
++			inode_unlock(inode);
++			ret = err;
++			goto out;
+ 		}
++write:
+ 		ret = __generic_file_write_iter(iocb, from);
+ 		clear_inode_flag(inode, FI_NO_PREALLOC);
+ 
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index a1c507b0b4ac..5d9584281935 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -797,6 +797,7 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
+ 
+ 	if (whiteout) {
+ 		f2fs_i_links_write(inode, false);
++		inode->i_state |= I_LINKABLE;
+ 		*whiteout = inode;
+ 	} else {
+ 		d_tmpfile(dentry, inode);
+@@ -867,6 +868,12 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 			F2FS_I(old_dentry->d_inode)->i_projid)))
+ 		return -EXDEV;
+ 
++	if (flags & RENAME_WHITEOUT) {
++		err = f2fs_create_whiteout(old_dir, &whiteout);
++		if (err)
++			return err;
++	}
++
+ 	err = dquot_initialize(old_dir);
+ 	if (err)
+ 		goto out;
+@@ -898,17 +905,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 		}
+ 	}
+ 
+-	if (flags & RENAME_WHITEOUT) {
+-		err = f2fs_create_whiteout(old_dir, &whiteout);
+-		if (err)
+-			goto out_dir;
+-	}
+-
+ 	if (new_inode) {
+ 
+ 		err = -ENOTEMPTY;
+ 		if (old_dir_entry && !f2fs_empty_dir(new_inode))
+-			goto out_whiteout;
++			goto out_dir;
+ 
+ 		err = -ENOENT;
+ 		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
+@@ -916,7 +917,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 		if (!new_entry) {
+ 			if (IS_ERR(new_page))
+ 				err = PTR_ERR(new_page);
+-			goto out_whiteout;
++			goto out_dir;
+ 		}
+ 
+ 		f2fs_balance_fs(sbi, true);
+@@ -948,7 +949,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 		err = f2fs_add_link(new_dentry, old_inode);
+ 		if (err) {
+ 			f2fs_unlock_op(sbi);
+-			goto out_whiteout;
++			goto out_dir;
+ 		}
+ 
+ 		if (old_dir_entry)
+@@ -972,7 +973,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 				if (IS_ERR(old_page))
+ 					err = PTR_ERR(old_page);
+ 				f2fs_unlock_op(sbi);
+-				goto out_whiteout;
++				goto out_dir;
+ 			}
+ 		}
+ 	}
+@@ -991,7 +992,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 	f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
+ 
+ 	if (whiteout) {
+-		whiteout->i_state |= I_LINKABLE;
+ 		set_inode_flag(whiteout, FI_INC_LINK);
+ 		err = f2fs_add_link(old_dentry, whiteout);
+ 		if (err)
+@@ -1027,15 +1027,14 @@ put_out_dir:
+ 	f2fs_unlock_op(sbi);
+ 	if (new_page)
+ 		f2fs_put_page(new_page, 0);
+-out_whiteout:
+-	if (whiteout)
+-		iput(whiteout);
+ out_dir:
+ 	if (old_dir_entry)
+ 		f2fs_put_page(old_dir_page, 0);
+ out_old:
+ 	f2fs_put_page(old_page, 0);
+ out:
++	if (whiteout)
++		iput(whiteout);
+ 	return err;
+ }
+ 
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 70945ceb9c0c..e79c86b8553a 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -733,10 +733,12 @@ int __init f2fs_init_sysfs(void)
+ 
+ 	ret = kobject_init_and_add(&f2fs_feat, &f2fs_feat_ktype,
+ 				   NULL, "features");
+-	if (ret)
++	if (ret) {
++		kobject_put(&f2fs_feat);
+ 		kset_unregister(&f2fs_kset);
+-	else
++	} else {
+ 		f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
++	}
+ 	return ret;
+ }
+ 
+@@ -757,8 +759,11 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
+ 	init_completion(&sbi->s_kobj_unregister);
+ 	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_sb_ktype, NULL,
+ 				"%s", sb->s_id);
+-	if (err)
++	if (err) {
++		kobject_put(&sbi->s_kobj);
++		wait_for_completion(&sbi->s_kobj_unregister);
+ 		return err;
++	}
+ 
+ 	if (f2fs_proc_root)
+ 		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+@@ -786,4 +791,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
+ 		remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
+ 	}
+ 	kobject_del(&sbi->s_kobj);
++	kobject_put(&sbi->s_kobj);
+ }
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 695369f46f92..3dd37a998ea9 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -803,6 +803,10 @@ static int fuse_do_readpage(struct file *file, struct page *page)
+ 
+ 	attr_ver = fuse_get_attr_version(fc);
+ 
++	/* Don't overflow end offset */
++	if (pos + (desc.length - 1) == LLONG_MAX)
++		desc.length--;
++
+ 	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
+ 	res = fuse_simple_request(fc, &ia.ap.args);
+ 	if (res < 0)
+@@ -888,6 +892,14 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
+ 	ap->args.out_pages = true;
+ 	ap->args.page_zeroing = true;
+ 	ap->args.page_replace = true;
++
++	/* Don't overflow end offset */
++	if (pos + (count - 1) == LLONG_MAX) {
++		count--;
++		ap->descs[ap->num_pages - 1].length--;
++	}
++	WARN_ON((loff_t) (pos + count) < 0);
++
+ 	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
+ 	ia->read.attr_ver = fuse_get_attr_version(fc);
+ 	if (fc->async_read) {
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 8fff6677a5da..96bf33986d03 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -164,7 +164,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
+ 				       "journal space in %s\n", __func__,
+ 				       journal->j_devname);
+ 				WARN_ON(1);
+-				jbd2_journal_abort(journal, 0);
++				jbd2_journal_abort(journal, -EIO);
+ 			}
+ 			write_lock(&journal->j_state_lock);
+ 		} else {
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 3845750f70ec..27373f5792a4 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -782,7 +782,7 @@ start_journal_io:
+ 		err = journal_submit_commit_record(journal, commit_transaction,
+ 						 &cbh, crc32_sum);
+ 		if (err)
+-			__jbd2_journal_abort_hard(journal);
++			jbd2_journal_abort(journal, err);
+ 	}
+ 
+ 	blk_finish_plug(&plug);
+@@ -875,7 +875,7 @@ start_journal_io:
+ 		err = journal_submit_commit_record(journal, commit_transaction,
+ 						&cbh, crc32_sum);
+ 		if (err)
+-			__jbd2_journal_abort_hard(journal);
++			jbd2_journal_abort(journal, err);
+ 	}
+ 	if (cbh)
+ 		err = journal_wait_on_commit_record(journal, cbh);
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index b3e2433f94fe..0b4280fcad91 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1711,6 +1711,11 @@ int jbd2_journal_load(journal_t *journal)
+ 		       journal->j_devname);
+ 		return -EFSCORRUPTED;
+ 	}
++	/*
++	 * clear JBD2_ABORT flag initialized in journal_init_common
++	 * here to update log tail information with the newest seq.
++	 */
++	journal->j_flags &= ~JBD2_ABORT;
+ 
+ 	/* OK, we've finished with the dynamic journal bits:
+ 	 * reinitialise the dynamic contents of the superblock in memory
+@@ -1718,7 +1723,6 @@ int jbd2_journal_load(journal_t *journal)
+ 	if (journal_reset(journal))
+ 		goto recovery_error;
+ 
+-	journal->j_flags &= ~JBD2_ABORT;
+ 	journal->j_flags |= JBD2_LOADED;
+ 	return 0;
+ 
+@@ -2143,8 +2147,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
+ 
+ 	if (journal->j_flags & JBD2_ABORT) {
+ 		write_unlock(&journal->j_state_lock);
+-		if (!old_errno && old_errno != -ESHUTDOWN &&
+-		    errno == -ESHUTDOWN)
++		if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN)
+ 			jbd2_journal_update_sb_errno(journal);
+ 		return;
+ 	}
+@@ -2152,12 +2155,10 @@ static void __journal_abort_soft (journal_t *journal, int errno)
+ 
+ 	__jbd2_journal_abort_hard(journal);
+ 
+-	if (errno) {
+-		jbd2_journal_update_sb_errno(journal);
+-		write_lock(&journal->j_state_lock);
+-		journal->j_flags |= JBD2_REC_ERR;
+-		write_unlock(&journal->j_state_lock);
+-	}
++	jbd2_journal_update_sb_errno(journal);
++	write_lock(&journal->j_state_lock);
++	journal->j_flags |= JBD2_REC_ERR;
++	write_unlock(&journal->j_state_lock);
+ }
+ 
+ /**
+@@ -2199,11 +2200,6 @@ static void __journal_abort_soft (journal_t *journal, int errno)
+  * failure to disk.  ext3_error, for example, now uses this
+  * functionality.
+  *
+- * Errors which originate from within the journaling layer will NOT
+- * supply an errno; a null errno implies that absolutely no further
+- * writes are done to the journal (unless there are any already in
+- * progress).
+- *
+  */
+ 
+ void jbd2_journal_abort(journal_t *journal, int errno)
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 9637aad36bdc..e2ae54b35dfe 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -343,14 +343,14 @@ static ssize_t _nfs42_proc_copy(struct file *src,
+ 		status = handle_async_copy(res, dst_server, src_server, src,
+ 				dst, &args->src_stateid, restart);
+ 		if (status)
+-			return status;
++			goto out;
+ 	}
+ 
+ 	if ((!res->synchronous || !args->sync) &&
+ 			res->write_res.verifier.committed != NFS_FILE_SYNC) {
+ 		status = process_copy_commit(dst, pos_dst, res);
+ 		if (status)
+-			return status;
++			goto out;
+ 	}
+ 
+ 	truncate_pagecache_range(dst_inode, pos_dst,
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index d0feef17db50..dc6b9c2f36b2 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -4097,7 +4097,7 @@ static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, str
+ 			status = NFS_ATTR_FATTR_ATIME;
+ 		bitmap[1] &= ~FATTR4_WORD1_TIME_ACCESS;
+ 	}
+-	dprintk("%s: atime=%ld\n", __func__, (long)time->tv_sec);
++	dprintk("%s: atime=%lld\n", __func__, time->tv_sec);
+ 	return status;
+ }
+ 
+@@ -4115,7 +4115,7 @@ static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, s
+ 			status = NFS_ATTR_FATTR_CTIME;
+ 		bitmap[1] &= ~FATTR4_WORD1_TIME_METADATA;
+ 	}
+-	dprintk("%s: ctime=%ld\n", __func__, (long)time->tv_sec);
++	dprintk("%s: ctime=%lld\n", __func__, time->tv_sec);
+ 	return status;
+ }
+ 
+@@ -4132,8 +4132,8 @@ static int decode_attr_time_delta(struct xdr_stream *xdr, uint32_t *bitmap,
+ 		status = decode_attr_time(xdr, time);
+ 		bitmap[1] &= ~FATTR4_WORD1_TIME_DELTA;
+ 	}
+-	dprintk("%s: time_delta=%ld %ld\n", __func__, (long)time->tv_sec,
+-		(long)time->tv_nsec);
++	dprintk("%s: time_delta=%lld %ld\n", __func__, time->tv_sec,
++		time->tv_nsec);
+ 	return status;
+ }
+ 
+@@ -4197,7 +4197,7 @@ static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, str
+ 			status = NFS_ATTR_FATTR_MTIME;
+ 		bitmap[1] &= ~FATTR4_WORD1_TIME_MODIFY;
+ 	}
+-	dprintk("%s: mtime=%ld\n", __func__, (long)time->tv_sec);
++	dprintk("%s: mtime=%lld\n", __func__, time->tv_sec);
+ 	return status;
+ }
+ 
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index f0bca0e87d0c..82cf80dde5c7 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -280,19 +280,25 @@ out:
+  * Commit metadata changes to stable storage.
+  */
+ static int
+-commit_metadata(struct svc_fh *fhp)
++commit_inode_metadata(struct inode *inode)
+ {
+-	struct inode *inode = d_inode(fhp->fh_dentry);
+ 	const struct export_operations *export_ops = inode->i_sb->s_export_op;
+ 
+-	if (!EX_ISSYNC(fhp->fh_export))
+-		return 0;
+-
+ 	if (export_ops->commit_metadata)
+ 		return export_ops->commit_metadata(inode);
+ 	return sync_inode_metadata(inode, 1);
+ }
+ 
++static int
++commit_metadata(struct svc_fh *fhp)
++{
++	struct inode *inode = d_inode(fhp->fh_dentry);
++
++	if (!EX_ISSYNC(fhp->fh_export))
++		return 0;
++	return commit_inode_metadata(inode);
++}
++
+ /*
+  * Go over the attributes and take care of the small differences between
+  * NFS semantics and what Linux expects.
+@@ -537,6 +543,9 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
+ 	if (sync) {
+ 		loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
+ 		int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
++
++		if (!status)
++			status = commit_inode_metadata(file_inode(src));
+ 		if (status < 0)
+ 			return nfserrno(status);
+ 	}
+diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
+index 38b224372776..5e700b45d32d 100644
+--- a/fs/ocfs2/dlm/Makefile
++++ b/fs/ocfs2/dlm/Makefile
+@@ -1,6 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-ccflags-y := -I $(srctree)/$(src)/..
+-
+ obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
+ 
+ ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
+diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
+index 4de89af96abf..6abaded3ff6b 100644
+--- a/fs/ocfs2/dlm/dlmast.c
++++ b/fs/ocfs2/dlm/dlmast.c
+@@ -23,15 +23,15 @@
+ #include <linux/spinlock.h>
+ 
+ 
+-#include "cluster/heartbeat.h"
+-#include "cluster/nodemanager.h"
+-#include "cluster/tcp.h"
++#include "../cluster/heartbeat.h"
++#include "../cluster/nodemanager.h"
++#include "../cluster/tcp.h"
+ 
+ #include "dlmapi.h"
+ #include "dlmcommon.h"
+ 
+ #define MLOG_MASK_PREFIX ML_DLM
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ 			   struct dlm_lock *lock);
+diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
+index 965f45dbe17b..6051edc33aef 100644
+--- a/fs/ocfs2/dlm/dlmconvert.c
++++ b/fs/ocfs2/dlm/dlmconvert.c
+@@ -23,9 +23,9 @@
+ #include <linux/spinlock.h>
+ 
+ 
+-#include "cluster/heartbeat.h"
+-#include "cluster/nodemanager.h"
+-#include "cluster/tcp.h"
++#include "../cluster/heartbeat.h"
++#include "../cluster/nodemanager.h"
++#include "../cluster/tcp.h"
+ 
+ #include "dlmapi.h"
+ #include "dlmcommon.h"
+@@ -33,7 +33,7 @@
+ #include "dlmconvert.h"
+ 
+ #define MLOG_MASK_PREFIX ML_DLM
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ /* NOTE: __dlmconvert_master is the only function in here that
+  * needs a spinlock held on entry (res->spinlock) and it is the
+diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
+index 4d0b452012b2..c5c6efba7b5e 100644
+--- a/fs/ocfs2/dlm/dlmdebug.c
++++ b/fs/ocfs2/dlm/dlmdebug.c
+@@ -17,9 +17,9 @@
+ #include <linux/debugfs.h>
+ #include <linux/export.h>
+ 
+-#include "cluster/heartbeat.h"
+-#include "cluster/nodemanager.h"
+-#include "cluster/tcp.h"
++#include "../cluster/heartbeat.h"
++#include "../cluster/nodemanager.h"
++#include "../cluster/tcp.h"
+ 
+ #include "dlmapi.h"
+ #include "dlmcommon.h"
+@@ -27,7 +27,7 @@
+ #include "dlmdebug.h"
+ 
+ #define MLOG_MASK_PREFIX ML_DLM
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ static int stringify_lockname(const char *lockname, int locklen, char *buf,
+ 			      int len);
+diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
+index ee6f459f9770..357cfc702ce3 100644
+--- a/fs/ocfs2/dlm/dlmdomain.c
++++ b/fs/ocfs2/dlm/dlmdomain.c
+@@ -20,9 +20,9 @@
+ #include <linux/debugfs.h>
+ #include <linux/sched/signal.h>
+ 
+-#include "cluster/heartbeat.h"
+-#include "cluster/nodemanager.h"
+-#include "cluster/tcp.h"
++#include "../cluster/heartbeat.h"
++#include "../cluster/nodemanager.h"
++#include "../cluster/tcp.h"
+ 
+ #include "dlmapi.h"
+ #include "dlmcommon.h"
+@@ -30,7 +30,7 @@
+ #include "dlmdebug.h"
+ 
+ #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ /*
+  * ocfs2 node maps are array of long int, which limits to send them freely
+diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
+index baff087f3863..83f0760e4fba 100644
+--- a/fs/ocfs2/dlm/dlmlock.c
++++ b/fs/ocfs2/dlm/dlmlock.c
+@@ -25,9 +25,9 @@
+ #include <linux/delay.h>
+ 
+ 
+-#include "cluster/heartbeat.h"
+-#include "cluster/nodemanager.h"
+-#include "cluster/tcp.h"
++#include "../cluster/heartbeat.h"
++#include "../cluster/nodemanager.h"
++#include "../cluster/tcp.h"
+ 
+ #include "dlmapi.h"
+ #include "dlmcommon.h"
+@@ -35,7 +35,7 @@
+ #include "dlmconvert.h"
+ 
+ #define MLOG_MASK_PREFIX ML_DLM
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ static struct kmem_cache *dlm_lock_cache;
+ 
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index 74b768ca1cd8..c9d7037b6793 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -25,9 +25,9 @@
+ #include <linux/delay.h>
+ 
+ 
+-#include "cluster/heartbeat.h"
+-#include "cluster/nodemanager.h"
+-#include "cluster/tcp.h"
++#include "../cluster/heartbeat.h"
++#include "../cluster/nodemanager.h"
++#include "../cluster/tcp.h"
+ 
+ #include "dlmapi.h"
+ #include "dlmcommon.h"
+@@ -35,7 +35,7 @@
+ #include "dlmdebug.h"
+ 
+ #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ static void dlm_mle_node_down(struct dlm_ctxt *dlm,
+ 			      struct dlm_master_list_entry *mle,
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 064ce5bbc3f6..bcaaca5112d6 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -26,16 +26,16 @@
+ #include <linux/delay.h>
+ 
+ 
+-#include "cluster/heartbeat.h"
+-#include "cluster/nodemanager.h"
+-#include "cluster/tcp.h"
++#include "../cluster/heartbeat.h"
++#include "../cluster/nodemanager.h"
++#include "../cluster/tcp.h"
+ 
+ #include "dlmapi.h"
+ #include "dlmcommon.h"
+ #include "dlmdomain.h"
+ 
+ #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
+ 
+diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
+index 61c51c268460..fd40c17cd022 100644
+--- a/fs/ocfs2/dlm/dlmthread.c
++++ b/fs/ocfs2/dlm/dlmthread.c
+@@ -25,16 +25,16 @@
+ #include <linux/delay.h>
+ 
+ 
+-#include "cluster/heartbeat.h"
+-#include "cluster/nodemanager.h"
+-#include "cluster/tcp.h"
++#include "../cluster/heartbeat.h"
++#include "../cluster/nodemanager.h"
++#include "../cluster/tcp.h"
+ 
+ #include "dlmapi.h"
+ #include "dlmcommon.h"
+ #include "dlmdomain.h"
+ 
+ #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ static int dlm_thread(void *data);
+ static void dlm_flush_asts(struct dlm_ctxt *dlm);
+diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
+index 3883633e82eb..dcb17ca8ae74 100644
+--- a/fs/ocfs2/dlm/dlmunlock.c
++++ b/fs/ocfs2/dlm/dlmunlock.c
+@@ -23,15 +23,15 @@
+ #include <linux/spinlock.h>
+ #include <linux/delay.h>
+ 
+-#include "cluster/heartbeat.h"
+-#include "cluster/nodemanager.h"
+-#include "cluster/tcp.h"
++#include "../cluster/heartbeat.h"
++#include "../cluster/nodemanager.h"
++#include "../cluster/tcp.h"
+ 
+ #include "dlmapi.h"
+ #include "dlmcommon.h"
+ 
+ #define MLOG_MASK_PREFIX ML_DLM
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ #define DLM_UNLOCK_FREE_LOCK           0x00000001
+ #define DLM_UNLOCK_CALL_AST            0x00000002
+diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
+index a9874e441bd4..c7895f65be0e 100644
+--- a/fs/ocfs2/dlmfs/Makefile
++++ b/fs/ocfs2/dlmfs/Makefile
+@@ -1,6 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-ccflags-y := -I $(srctree)/$(src)/..
+-
+ obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
+ 
+ ocfs2_dlmfs-objs := userdlm.o dlmfs.o
+diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
+index 4f1668c81e1f..8e4f1ace467c 100644
+--- a/fs/ocfs2/dlmfs/dlmfs.c
++++ b/fs/ocfs2/dlmfs/dlmfs.c
+@@ -33,11 +33,11 @@
+ 
+ #include <linux/uaccess.h>
+ 
+-#include "stackglue.h"
++#include "../stackglue.h"
+ #include "userdlm.h"
+ 
+ #define MLOG_MASK_PREFIX ML_DLMFS
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ 
+ static const struct super_operations dlmfs_ops;
+diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
+index 525b14ddfba5..3df5be25bfb1 100644
+--- a/fs/ocfs2/dlmfs/userdlm.c
++++ b/fs/ocfs2/dlmfs/userdlm.c
+@@ -21,12 +21,12 @@
+ #include <linux/types.h>
+ #include <linux/crc32.h>
+ 
+-#include "ocfs2_lockingver.h"
+-#include "stackglue.h"
++#include "../ocfs2_lockingver.h"
++#include "../stackglue.h"
+ #include "userdlm.h"
+ 
+ #define MLOG_MASK_PREFIX ML_DLMFS
+-#include "cluster/masklog.h"
++#include "../cluster/masklog.h"
+ 
+ 
+ static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
+diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
+index 3103ba7f97a2..bfe611ed1b1d 100644
+--- a/fs/ocfs2/journal.h
++++ b/fs/ocfs2/journal.h
+@@ -597,9 +597,11 @@ static inline void ocfs2_update_inode_fsync_trans(handle_t *handle,
+ {
+ 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ 
+-	oi->i_sync_tid = handle->h_transaction->t_tid;
+-	if (datasync)
+-		oi->i_datasync_tid = handle->h_transaction->t_tid;
++	if (!is_handle_aborted(handle)) {
++		oi->i_sync_tid = handle->h_transaction->t_tid;
++		if (datasync)
++			oi->i_datasync_tid = handle->h_transaction->t_tid;
++	}
+ }
+ 
+ #endif /* OCFS2_JOURNAL_H */
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
+index 25543a966c48..29eaa4544372 100644
+--- a/fs/orangefs/orangefs-debugfs.c
++++ b/fs/orangefs/orangefs-debugfs.c
+@@ -273,6 +273,7 @@ static void *help_start(struct seq_file *m, loff_t *pos)
+ 
+ static void *help_next(struct seq_file *m, void *v, loff_t *pos)
+ {
++	(*pos)++;
+ 	gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n");
+ 
+ 	return NULL;
+diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
+index da9ebe33882b..bb4973aefbb1 100644
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -2246,7 +2246,8 @@ error_out:
+ 	/* also releases the path */
+ 	unfix_nodes(&s_ins_balance);
+ #ifdef REISERQUOTA_DEBUG
+-	reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
++	if (inode)
++		reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
+ 		       "reiserquota insert_item(): freeing %u id=%u type=%c",
+ 		       quota_bytes, inode->i_uid, head2type(ih));
+ #endif
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index d127af64283e..a6bce5b1fb1d 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1948,7 +1948,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
+ 		if (!sbi->s_jdev) {
+ 			SWARN(silent, s, "", "Cannot allocate memory for "
+ 				"journal device name");
+-			goto error;
++			goto error_unlocked;
+ 		}
+ 	}
+ #ifdef CONFIG_QUOTA
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 8c28e93e9b73..4baa1ca91e9b 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1035,7 +1035,6 @@ static int check_partition_desc(struct super_block *sb,
+ 	switch (le32_to_cpu(p->accessType)) {
+ 	case PD_ACCESS_TYPE_READ_ONLY:
+ 	case PD_ACCESS_TYPE_WRITE_ONCE:
+-	case PD_ACCESS_TYPE_REWRITABLE:
+ 	case PD_ACCESS_TYPE_NONE:
+ 		goto force_ro;
+ 	}
+@@ -2492,17 +2491,29 @@ static unsigned int udf_count_free_table(struct super_block *sb,
+ static unsigned int udf_count_free(struct super_block *sb)
+ {
+ 	unsigned int accum = 0;
+-	struct udf_sb_info *sbi;
++	struct udf_sb_info *sbi = UDF_SB(sb);
+ 	struct udf_part_map *map;
++	unsigned int part = sbi->s_partition;
++	int ptype = sbi->s_partmaps[part].s_partition_type;
++
++	if (ptype == UDF_METADATA_MAP25) {
++		part = sbi->s_partmaps[part].s_type_specific.s_metadata.
++							s_phys_partition_ref;
++	} else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
++		/*
++		 * Filesystems with VAT are append-only and we cannot write to
++ 		 * them. Let's just report 0 here.
++		 */
++		return 0;
++	}
+ 
+-	sbi = UDF_SB(sb);
+ 	if (sbi->s_lvid_bh) {
+ 		struct logicalVolIntegrityDesc *lvid =
+ 			(struct logicalVolIntegrityDesc *)
+ 			sbi->s_lvid_bh->b_data;
+-		if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
++		if (le32_to_cpu(lvid->numOfPartitions) > part) {
+ 			accum = le32_to_cpu(
+-					lvid->freeSpaceTable[sbi->s_partition]);
++					lvid->freeSpaceTable[part]);
+ 			if (accum == 0xFFFFFFFF)
+ 				accum = 0;
+ 		}
+@@ -2511,7 +2522,7 @@ static unsigned int udf_count_free(struct super_block *sb)
+ 	if (accum)
+ 		return accum;
+ 
+-	map = &sbi->s_partmaps[sbi->s_partition];
++	map = &sbi->s_partmaps[part];
+ 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
+ 		accum += udf_count_free_bitmap(sb,
+ 					       map->s_uspace.s_bitmap);
+diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
+index de1d8a1f5966..63e02dc32a0b 100644
+--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
++++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
+@@ -182,6 +182,7 @@
+ #define GCC_MSS_GPLL0_DIV_CLK_SRC				173
+ #define GCC_MSS_SNOC_AXI_CLK					174
+ #define GCC_MSS_MNOC_BIMC_AXI_CLK				175
++#define GCC_BIMC_GFX_CLK					176
+ 
+ #define PCIE_0_GDSC						0
+ #define UFS_GDSC						1
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index e51ee772b9f5..def48a583670 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -59,6 +59,7 @@ enum cpuhp_state {
+ 	CPUHP_IOMMU_INTEL_DEAD,
+ 	CPUHP_LUSTRE_CFS_DEAD,
+ 	CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
++	CPUHP_PADATA_DEAD,
+ 	CPUHP_WORKQUEUE_PREP,
+ 	CPUHP_POWER_NUMA_PREPARE,
+ 	CPUHP_HRTIMERS_PREPARE,
+diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
+index fb376b5b7281..95816a8e3d26 100644
+--- a/include/linux/devfreq.h
++++ b/include/linux/devfreq.h
+@@ -177,8 +177,8 @@ struct devfreq {
+ 	/* information for device frequency transition */
+ 	unsigned int total_trans;
+ 	unsigned int *trans_table;
+-	unsigned long *time_in_state;
+-	unsigned long last_stat_updated;
++	u64 *time_in_state;
++	u64 last_stat_updated;
+ 
+ 	struct srcu_notifier_head transition_notifier_list;
+ 
+diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
+index dad4a68fa009..8013562751a5 100644
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -674,6 +674,7 @@ struct dma_filter {
+  * @fill_align: alignment shift for memset operations
+  * @dev_id: unique device ID
+  * @dev: struct device reference for dma mapping api
++ * @owner: owner module (automatically set based on the provided dev)
+  * @src_addr_widths: bit mask of src addr widths the device supports
+  *	Width is specified in bytes, e.g. for a device supporting
+  *	a width of 4 the mask should have BIT(4) set.
+@@ -737,6 +738,7 @@ struct dma_device {
+ 
+ 	int dev_id;
+ 	struct device *dev;
++	struct module *owner;
+ 
+ 	u32 src_addr_widths;
+ 	u32 dst_addr_widths;
+diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
+index 3ef96743db8d..1ecd35664e0d 100644
+--- a/include/linux/list_nulls.h
++++ b/include/linux/list_nulls.h
+@@ -72,10 +72,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
+ 	struct hlist_nulls_node *first = h->first;
+ 
+ 	n->next = first;
+-	n->pprev = &h->first;
++	WRITE_ONCE(n->pprev, &h->first);
+ 	h->first = n;
+ 	if (!is_a_nulls(first))
+-		first->pprev = &n->next;
++		WRITE_ONCE(first->pprev, &n->next);
+ }
+ 
+ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
+@@ -85,13 +85,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
+ 
+ 	WRITE_ONCE(*pprev, next);
+ 	if (!is_a_nulls(next))
+-		next->pprev = pprev;
++		WRITE_ONCE(next->pprev, pprev);
+ }
+ 
+ static inline void hlist_nulls_del(struct hlist_nulls_node *n)
+ {
+ 	__hlist_nulls_del(n);
+-	n->pprev = LIST_POISON2;
++	WRITE_ONCE(n->pprev, LIST_POISON2);
+ }
+ 
+ /**
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index c393dff2d66f..930fab293073 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -2310,7 +2310,7 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
+ }
+ #endif
+ 
+-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
++void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
+ bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
+ int pci_for_each_dma_alias(struct pci_dev *pdev,
+ 			   int (*fn)(struct pci_dev *pdev,
+diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
+index 8cfe570fdece..2cbde6542849 100644
+--- a/include/linux/platform_data/ti-sysc.h
++++ b/include/linux/platform_data/ti-sysc.h
+@@ -49,6 +49,7 @@ struct sysc_regbits {
+ 	s8 emufree_shift;
+ };
+ 
++#define SYSC_QUIRK_CLKDM_NOAUTO		BIT(21)
+ #define SYSC_QUIRK_FORCE_MSTANDBY	BIT(20)
+ #define SYSC_MODULE_QUIRK_AESS		BIT(19)
+ #define SYSC_MODULE_QUIRK_SGX		BIT(18)
+diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
+index 0832c9b66852..e0ddb47f4402 100644
+--- a/include/linux/raid/pq.h
++++ b/include/linux/raid/pq.h
+@@ -27,7 +27,6 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
+ 
+ #include <errno.h>
+ #include <inttypes.h>
+-#include <limits.h>
+ #include <stddef.h>
+ #include <sys/mman.h>
+ #include <sys/time.h>
+@@ -59,7 +58,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
+ #define enable_kernel_altivec()
+ #define disable_kernel_altivec()
+ 
++#undef	EXPORT_SYMBOL
+ #define EXPORT_SYMBOL(sym)
++#undef	EXPORT_SYMBOL_GPL
+ #define EXPORT_SYMBOL_GPL(sym)
+ #define MODULE_LICENSE(licence)
+ #define MODULE_DESCRIPTION(desc)
+diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
+index 61974c4c566b..90f2e2232c6d 100644
+--- a/include/linux/rculist_nulls.h
++++ b/include/linux/rculist_nulls.h
+@@ -34,7 +34,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
+ {
+ 	if (!hlist_nulls_unhashed(n)) {
+ 		__hlist_nulls_del(n);
+-		n->pprev = NULL;
++		WRITE_ONCE(n->pprev, NULL);
+ 	}
+ }
+ 
+@@ -66,7 +66,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
+ static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
+ {
+ 	__hlist_nulls_del(n);
+-	n->pprev = LIST_POISON2;
++	WRITE_ONCE(n->pprev, LIST_POISON2);
+ }
+ 
+ /**
+@@ -94,10 +94,10 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
+ 	struct hlist_nulls_node *first = h->first;
+ 
+ 	n->next = first;
+-	n->pprev = &h->first;
++	WRITE_ONCE(n->pprev, &h->first);
+ 	rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
+ 	if (!is_a_nulls(first))
+-		first->pprev = &n->next;
++		WRITE_ONCE(first->pprev, &n->next);
+ }
+ 
+ /**
+diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
+index 5f36e0d2ede6..95353ae476a1 100644
+--- a/include/media/v4l2-device.h
++++ b/include/media/v4l2-device.h
+@@ -371,7 +371,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
+ 		struct v4l2_subdev *__sd;				\
+ 									\
+ 		__v4l2_device_call_subdevs_p(v4l2_dev, __sd,		\
+-			!(grpid) || __sd->grp_id == (grpid), o, f ,	\
++			(grpid) == 0 || __sd->grp_id == (grpid), o, f ,	\
+ 			##args);					\
+ 	} while (0)
+ 
+@@ -403,7 +403,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
+ ({									\
+ 	struct v4l2_subdev *__sd;					\
+ 	__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd,		\
+-			!(grpid) || __sd->grp_id == (grpid), o, f ,	\
++			(grpid) == 0 || __sd->grp_id == (grpid), o, f ,	\
+ 			##args);					\
+ })
+ 
+@@ -431,8 +431,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
+ 		struct v4l2_subdev *__sd;				\
+ 									\
+ 		__v4l2_device_call_subdevs_p(v4l2_dev, __sd,		\
+-			!(grpmsk) || (__sd->grp_id & (grpmsk)), o, f ,	\
+-			##args);					\
++			(grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o,	\
++			f , ##args);					\
+ 	} while (0)
+ 
+ /**
+@@ -462,8 +462,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
+ ({									\
+ 	struct v4l2_subdev *__sd;					\
+ 	__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd,		\
+-			!(grpmsk) || (__sd->grp_id & (grpmsk)), o, f ,	\
+-			##args);					\
++			(grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o,	\
++			f , ##args);					\
+ })
+ 
+ 
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 8d0f447e1faa..a14f837fb1c8 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -2149,7 +2149,6 @@ struct ib_port_cache {
+ 
+ struct ib_cache {
+ 	rwlock_t                lock;
+-	struct ib_event_handler event_handler;
+ };
+ 
+ struct ib_port_immutable {
+@@ -2627,7 +2626,11 @@ struct ib_device {
+ 	struct rcu_head rcu_head;
+ 
+ 	struct list_head              event_handler_list;
+-	spinlock_t                    event_handler_lock;
++	/* Protects event_handler_list */
++	struct rw_semaphore event_handler_rwsem;
++
++	/* Protects QP's event_handler calls and open_qp list */
++	spinlock_t event_handler_lock;
+ 
+ 	struct rw_semaphore	      client_data_rwsem;
+ 	struct xarray                 client_data;
+@@ -2942,7 +2945,7 @@ bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
+ 
+ void ib_register_event_handler(struct ib_event_handler *event_handler);
+ void ib_unregister_event_handler(struct ib_event_handler *event_handler);
+-void ib_dispatch_event(struct ib_event *event);
++void ib_dispatch_event(const struct ib_event *event);
+ 
+ int ib_query_port(struct ib_device *device,
+ 		  u8 port_num, struct ib_port_attr *port_attr);
+diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
+index 66122602bd08..697e2c0624dc 100644
+--- a/include/trace/events/rcu.h
++++ b/include/trace/events/rcu.h
+@@ -449,7 +449,7 @@ TRACE_EVENT_RCU(rcu_fqs,
+  */
+ TRACE_EVENT_RCU(rcu_dyntick,
+ 
+-	TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
++	TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
+ 
+ 	TP_ARGS(polarity, oldnesting, newnesting, dynticks),
+ 
+@@ -464,7 +464,7 @@ TRACE_EVENT_RCU(rcu_dyntick,
+ 		__entry->polarity = polarity;
+ 		__entry->oldnesting = oldnesting;
+ 		__entry->newnesting = newnesting;
+-		__entry->dynticks = atomic_read(&dynticks);
++		__entry->dynticks = dynticks;
+ 	),
+ 
+ 	TP_printk("%s %lx %lx %#3x", __entry->polarity,
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index ecf42bec38c0..6f22e0e74ef2 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -196,6 +196,7 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
+ 	void *key = map_iter(m)->key;
+ 	void *prev_key;
+ 
++	(*pos)++;
+ 	if (map_iter(m)->done)
+ 		return NULL;
+ 
+@@ -208,8 +209,6 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
+ 		map_iter(m)->done = true;
+ 		return NULL;
+ 	}
+-
+-	++(*pos);
+ 	return key;
+ }
+ 
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 4dc279ed3b2d..9c706af713fb 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -525,8 +525,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
+ 	if (WARN_ON_ONCE((!cpu_online(cpu))))
+ 		return -ECANCELED;
+ 
+-	/* Unpark the stopper thread and the hotplug thread of the target cpu */
+-	stop_machine_unpark(cpu);
++	/* Unpark the hotplug thread of the target cpu */
+ 	kthread_unpark(st->thread);
+ 
+ 	/*
+@@ -1089,8 +1088,8 @@ void notify_cpu_starting(unsigned int cpu)
+ 
+ /*
+  * Called from the idle task. Wake up the controlling task which brings the
+- * stopper and the hotplug thread of the upcoming CPU up and then delegates
+- * the rest of the online bringup to the hotplug thread.
++ * hotplug thread of the upcoming CPU up and then delegates the rest of the
++ * online bringup to the hotplug thread.
+  */
+ void cpuhp_online_idle(enum cpuhp_state state)
+ {
+@@ -1100,6 +1099,12 @@ void cpuhp_online_idle(enum cpuhp_state state)
+ 	if (state != CPUHP_AP_ONLINE_IDLE)
+ 		return;
+ 
++	/*
++	 * Unpark the stopper thread before we start the idle loop (and start
++	 * scheduling); this ensures the stopper task is always available.
++	 */
++	stop_machine_unpark(smp_processor_id());
++
+ 	st->state = CPUHP_AP_ONLINE_IDLE;
+ 	complete_ap_thread(st, true);
+ }
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 53534aa258a6..fd81882f0521 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -610,6 +610,18 @@ void wait_for_kprobe_optimizer(void)
+ 	mutex_unlock(&kprobe_mutex);
+ }
+ 
++static bool optprobe_queued_unopt(struct optimized_kprobe *op)
++{
++	struct optimized_kprobe *_op;
++
++	list_for_each_entry(_op, &unoptimizing_list, list) {
++		if (op == _op)
++			return true;
++	}
++
++	return false;
++}
++
+ /* Optimize kprobe if p is ready to be optimized */
+ static void optimize_kprobe(struct kprobe *p)
+ {
+@@ -631,17 +643,21 @@ static void optimize_kprobe(struct kprobe *p)
+ 		return;
+ 
+ 	/* Check if it is already optimized. */
+-	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
++	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
++		if (optprobe_queued_unopt(op)) {
++			/* This is under unoptimizing. Just dequeue the probe */
++			list_del_init(&op->list);
++		}
+ 		return;
++	}
+ 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
+ 
+-	if (!list_empty(&op->list))
+-		/* This is under unoptimizing. Just dequeue the probe */
+-		list_del_init(&op->list);
+-	else {
+-		list_add(&op->list, &optimizing_list);
+-		kick_kprobe_optimizer();
+-	}
++	/* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
++	if (WARN_ON_ONCE(!list_empty(&op->list)))
++		return;
++
++	list_add(&op->list, &optimizing_list);
++	kick_kprobe_optimizer();
+ }
+ 
+ /* Short cut to direct unoptimizing */
+@@ -662,31 +678,34 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
+ 		return; /* This is not an optprobe nor optimized */
+ 
+ 	op = container_of(p, struct optimized_kprobe, kp);
+-	if (!kprobe_optimized(p)) {
+-		/* Unoptimized or unoptimizing case */
+-		if (force && !list_empty(&op->list)) {
+-			/*
+-			 * Only if this is unoptimizing kprobe and forced,
+-			 * forcibly unoptimize it. (No need to unoptimize
+-			 * unoptimized kprobe again :)
+-			 */
+-			list_del_init(&op->list);
+-			force_unoptimize_kprobe(op);
+-		}
++	if (!kprobe_optimized(p))
+ 		return;
+-	}
+ 
+ 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ 	if (!list_empty(&op->list)) {
+-		/* Dequeue from the optimization queue */
+-		list_del_init(&op->list);
++		if (optprobe_queued_unopt(op)) {
++			/* Queued in unoptimizing queue */
++			if (force) {
++				/*
++				 * Forcibly unoptimize the kprobe here, and queue it
++				 * in the freeing list for release afterwards.
++				 */
++				force_unoptimize_kprobe(op);
++				list_move(&op->list, &freeing_list);
++			}
++		} else {
++			/* Dequeue from the optimizing queue */
++			list_del_init(&op->list);
++			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
++		}
+ 		return;
+ 	}
++
+ 	/* Optimized kprobe case */
+-	if (force)
++	if (force) {
+ 		/* Forcibly update the code: this is a special case */
+ 		force_unoptimize_kprobe(op);
+-	else {
++	} else {
+ 		list_add(&op->list, &unoptimizing_list);
+ 		kick_kprobe_optimizer();
+ 	}
+diff --git a/kernel/module.c b/kernel/module.c
+index b56f3224b161..4810ce0fbbca 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -214,7 +214,8 @@ static struct module *mod_find(unsigned long addr)
+ {
+ 	struct module *mod;
+ 
+-	list_for_each_entry_rcu(mod, &modules, list) {
++	list_for_each_entry_rcu(mod, &modules, list,
++				lockdep_is_held(&module_mutex)) {
+ 		if (within_module(addr, mod))
+ 			return mod;
+ 	}
+@@ -448,7 +449,8 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+ 	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
+ 		return true;
+ 
+-	list_for_each_entry_rcu(mod, &modules, list) {
++	list_for_each_entry_rcu(mod, &modules, list,
++				lockdep_is_held(&module_mutex)) {
+ 		struct symsearch arr[] = {
+ 			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
+ 			  NOT_GPL_ONLY, false },
+@@ -616,7 +618,8 @@ static struct module *find_module_all(const char *name, size_t len,
+ 
+ 	module_assert_mutex_or_preempt();
+ 
+-	list_for_each_entry_rcu(mod, &modules, list) {
++	list_for_each_entry_rcu(mod, &modules, list,
++				lockdep_is_held(&module_mutex)) {
+ 		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
+ 			continue;
+ 		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
+@@ -1781,6 +1784,8 @@ static int module_add_modinfo_attrs(struct module *mod)
+ error_out:
+ 	if (i > 0)
+ 		module_remove_modinfo_attrs(mod, --i);
++	else
++		kfree(mod->modinfo_attrs);
+ 	return error;
+ }
+ 
+@@ -3054,9 +3059,7 @@ static int setup_load_info(struct load_info *info, int flags)
+ 
+ 	/* Try to find a name early so we can log errors with a module name */
+ 	info->index.info = find_sec(info, ".modinfo");
+-	if (!info->index.info)
+-		info->name = "(missing .modinfo section)";
+-	else
++	if (info->index.info)
+ 		info->name = get_modinfo(info, "name");
+ 
+ 	/* Find internal symbols and strings. */
+@@ -3071,14 +3074,15 @@ static int setup_load_info(struct load_info *info, int flags)
+ 	}
+ 
+ 	if (info->index.sym == 0) {
+-		pr_warn("%s: module has no symbols (stripped?)\n", info->name);
++		pr_warn("%s: module has no symbols (stripped?)\n",
++			info->name ?: "(missing .modinfo section or name field)");
+ 		return -ENOEXEC;
+ 	}
+ 
+ 	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
+ 	if (!info->index.mod) {
+ 		pr_warn("%s: No module found in object\n",
+-			info->name ?: "(missing .modinfo name field)");
++			info->name ?: "(missing .modinfo section or name field)");
+ 		return -ENOEXEC;
+ 	}
+ 	/* This is temporary: point mod into copy of data. */
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 9c82ee4a9732..fda7a7039422 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -512,7 +512,7 @@ static int padata_replace_one(struct padata_shell *ps)
+ 	return 0;
+ }
+ 
+-static int padata_replace(struct padata_instance *pinst, int cpu)
++static int padata_replace(struct padata_instance *pinst)
+ {
+ 	int notification_mask = 0;
+ 	struct padata_shell *ps;
+@@ -523,16 +523,12 @@ static int padata_replace(struct padata_instance *pinst, int cpu)
+ 	cpumask_copy(pinst->omask, pinst->rcpumask.pcpu);
+ 	cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
+ 		    cpu_online_mask);
+-	if (cpu >= 0)
+-		cpumask_clear_cpu(cpu, pinst->rcpumask.pcpu);
+ 	if (!cpumask_equal(pinst->omask, pinst->rcpumask.pcpu))
+ 		notification_mask |= PADATA_CPU_PARALLEL;
+ 
+ 	cpumask_copy(pinst->omask, pinst->rcpumask.cbcpu);
+ 	cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
+ 		    cpu_online_mask);
+-	if (cpu >= 0)
+-		cpumask_clear_cpu(cpu, pinst->rcpumask.cbcpu);
+ 	if (!cpumask_equal(pinst->omask, pinst->rcpumask.cbcpu))
+ 		notification_mask |= PADATA_CPU_SERIAL;
+ 
+@@ -624,7 +620,7 @@ out_replace:
+ 	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
+ 	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
+ 
+-	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst, -1);
++	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);
+ 
+ 	if (valid)
+ 		__padata_start(pinst);
+@@ -715,7 +711,7 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
+ 	int err = 0;
+ 
+ 	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+-		err = padata_replace(pinst, -1);
++		err = padata_replace(pinst);
+ 
+ 		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
+ 		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
+@@ -729,12 +725,12 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
+ {
+ 	int err = 0;
+ 
+-	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
++	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
+ 		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
+ 		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
+ 			__padata_stop(pinst);
+ 
+-		err = padata_replace(pinst, cpu);
++		err = padata_replace(pinst);
+ 	}
+ 
+ 	return err;
+@@ -796,7 +792,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
+ 	return ret;
+ }
+ 
+-static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
++static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
+ {
+ 	struct padata_instance *pinst;
+ 	int ret;
+@@ -817,6 +813,7 @@ static enum cpuhp_state hp_online;
+ static void __padata_free(struct padata_instance *pinst)
+ {
+ #ifdef CONFIG_HOTPLUG_CPU
++	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node);
+ 	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
+ #endif
+ 
+@@ -1024,6 +1021,8 @@ static struct padata_instance *padata_alloc(const char *name,
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+ 	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
++	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
++						    &pinst->node);
+ #endif
+ 
+ 	put_online_cpus();
+@@ -1136,17 +1135,24 @@ static __init int padata_driver_init(void)
+ 	int ret;
+ 
+ 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
+-				      padata_cpu_online,
+-				      padata_cpu_prep_down);
++				      padata_cpu_online, NULL);
+ 	if (ret < 0)
+ 		return ret;
+ 	hp_online = ret;
++
++	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
++				      NULL, padata_cpu_dead);
++	if (ret < 0) {
++		cpuhp_remove_multi_state(hp_online);
++		return ret;
++	}
+ 	return 0;
+ }
+ module_init(padata_driver_init);
+ 
+ static __exit void padata_driver_exit(void)
+ {
++	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
+ 	cpuhp_remove_multi_state(hp_online);
+ }
+ module_exit(padata_driver_exit);
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 1ef6f75d92f1..fada22dc4ab6 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2770,8 +2770,6 @@ void register_console(struct console *newcon)
+ 		 * for us.
+ 		 */
+ 		logbuf_lock_irqsave(flags);
+-		console_seq = syslog_seq;
+-		console_idx = syslog_idx;
+ 		/*
+ 		 * We're about to replay the log buffer.  Only do this to the
+ 		 * just-registered console to avoid excessive message spam to
+@@ -2783,6 +2781,8 @@ void register_console(struct console *newcon)
+ 		 */
+ 		exclusive_console = newcon;
+ 		exclusive_console_stop_seq = console_seq;
++		console_seq = syslog_seq;
++		console_idx = syslog_idx;
+ 		logbuf_unlock_irqrestore(flags);
+ 	}
+ 	console_unlock();
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 1694a6b57ad8..6145e08a1407 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -577,7 +577,7 @@ static void rcu_eqs_enter(bool user)
+ 	}
+ 
+ 	lockdep_assert_irqs_disabled();
+-	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks);
++	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
+ 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
+ 	rdp = this_cpu_ptr(&rcu_data);
+ 	do_nocb_deferred_wakeup(rdp);
+@@ -650,14 +650,15 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
+ 	 * leave it in non-RCU-idle state.
+ 	 */
+ 	if (rdp->dynticks_nmi_nesting != 1) {
+-		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks);
++		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
++				  atomic_read(&rdp->dynticks));
+ 		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
+ 			   rdp->dynticks_nmi_nesting - 2);
+ 		return;
+ 	}
+ 
+ 	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
+-	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks);
++	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
+ 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+ 
+ 	if (irq)
+@@ -744,7 +745,7 @@ static void rcu_eqs_exit(bool user)
+ 	rcu_dynticks_task_exit();
+ 	rcu_dynticks_eqs_exit();
+ 	rcu_cleanup_after_idle();
+-	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks);
++	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
+ 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
+ 	WRITE_ONCE(rdp->dynticks_nesting, 1);
+ 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
+@@ -833,7 +834,7 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
+ 	}
+ 	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
+ 			  rdp->dynticks_nmi_nesting,
+-			  rdp->dynticks_nmi_nesting + incby, rdp->dynticks);
++			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
+ 	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
+ 		   rdp->dynticks_nmi_nesting + incby);
+ 	barrier();
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index 69c5aa64fcfd..f504ac831779 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -558,7 +558,7 @@ static void rcu_exp_wait_wake(unsigned long s)
+ 			spin_unlock(&rnp->exp_lock);
+ 		}
+ 		smp_mb(); /* All above changes before wakeup. */
+-		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
++		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
+ 	}
+ 	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
+ 	mutex_unlock(&rcu_state.exp_wake_mutex);
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index f849e7429816..f7118842a2b8 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -2322,6 +2322,8 @@ static void __init rcu_organize_nocb_kthreads(void)
+ {
+ 	int cpu;
+ 	bool firsttime = true;
++	bool gotnocbs = false;
++	bool gotnocbscbs = true;
+ 	int ls = rcu_nocb_gp_stride;
+ 	int nl = 0;  /* Next GP kthread. */
+ 	struct rcu_data *rdp;
+@@ -2344,21 +2346,31 @@ static void __init rcu_organize_nocb_kthreads(void)
+ 		rdp = per_cpu_ptr(&rcu_data, cpu);
+ 		if (rdp->cpu >= nl) {
+ 			/* New GP kthread, set up for CBs & next GP. */
++			gotnocbs = true;
+ 			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
+ 			rdp->nocb_gp_rdp = rdp;
+ 			rdp_gp = rdp;
+-			if (!firsttime && dump_tree)
+-				pr_cont("\n");
+-			firsttime = false;
+-			pr_alert("%s: No-CB GP kthread CPU %d:", __func__, cpu);
++			if (dump_tree) {
++				if (!firsttime)
++					pr_cont("%s\n", gotnocbscbs
++							? "" : " (self only)");
++				gotnocbscbs = false;
++				firsttime = false;
++				pr_alert("%s: No-CB GP kthread CPU %d:",
++					 __func__, cpu);
++			}
+ 		} else {
+ 			/* Another CB kthread, link to previous GP kthread. */
++			gotnocbscbs = true;
+ 			rdp->nocb_gp_rdp = rdp_gp;
+ 			rdp_prev->nocb_next_cb_rdp = rdp;
+-			pr_alert(" %d", cpu);
++			if (dump_tree)
++				pr_cont(" %d", cpu);
+ 		}
+ 		rdp_prev = rdp;
+ 	}
++	if (gotnocbs && dump_tree)
++		pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
+ }
+ 
+ /*
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 894fb81313fd..b2564d62a0f7 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1253,7 +1253,8 @@ static void __init init_uclamp(void)
+ 	mutex_init(&uclamp_mutex);
+ 
+ 	for_each_possible_cpu(cpu) {
+-		memset(&cpu_rq(cpu)->uclamp, 0, sizeof(struct uclamp_rq));
++		memset(&cpu_rq(cpu)->uclamp, 0,
++				sizeof(struct uclamp_rq)*UCLAMP_CNT);
+ 		cpu_rq(cpu)->uclamp_flags = 0;
+ 	}
+ 
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 6ec1e595b1d4..dfb64c08a407 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -1879,6 +1879,42 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
+ 	return sd;
+ }
+ 
++/*
++ * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
++ * any two given CPUs at this (non-NUMA) topology level.
++ */
++static bool topology_span_sane(struct sched_domain_topology_level *tl,
++			      const struct cpumask *cpu_map, int cpu)
++{
++	int i;
++
++	/* NUMA levels are allowed to overlap */
++	if (tl->flags & SDTL_OVERLAP)
++		return true;
++
++	/*
++	 * Non-NUMA levels cannot partially overlap - they must be either
++	 * completely equal or completely disjoint. Otherwise we can end up
++	 * breaking the sched_group lists - i.e. a later get_group() pass
++	 * breaks the linking done for an earlier span.
++	 */
++	for_each_cpu(i, cpu_map) {
++		if (i == cpu)
++			continue;
++		/*
++		 * We should 'and' all those masks with 'cpu_map' to exactly
++		 * match the topology we're about to build, but that can only
++		 * remove CPUs, which only lessens our ability to detect
++		 * overlaps
++		 */
++		if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
++		    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
++			return false;
++	}
++
++	return true;
++}
++
+ /*
+  * Find the sched_domain_topology_level where all CPU capacities are visible
+  * for all CPUs.
+@@ -1975,6 +2011,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
+ 				has_asym = true;
+ 			}
+ 
++			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
++				goto error;
++
+ 			sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
+ 
+ 			if (tl == sched_domain_topology)
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 4b11f0309eee..b97401f6bc23 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -88,6 +88,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
+ 	unsigned long flags;
+ 	struct rtc_device *rtc = to_rtc_device(dev);
+ 	struct wakeup_source *__ws;
++	struct platform_device *pdev;
+ 	int ret = 0;
+ 
+ 	if (rtcdev)
+@@ -99,9 +100,11 @@ static int alarmtimer_rtc_add_device(struct device *dev,
+ 		return -1;
+ 
+ 	__ws = wakeup_source_register(dev, "alarmtimer");
++	pdev = platform_device_register_data(dev, "alarmtimer",
++					     PLATFORM_DEVID_AUTO, NULL, 0);
+ 
+ 	spin_lock_irqsave(&rtcdev_lock, flags);
+-	if (!rtcdev) {
++	if (__ws && !IS_ERR(pdev) && !rtcdev) {
+ 		if (!try_module_get(rtc->owner)) {
+ 			ret = -1;
+ 			goto unlock;
+@@ -112,10 +115,14 @@ static int alarmtimer_rtc_add_device(struct device *dev,
+ 		get_device(dev);
+ 		ws = __ws;
+ 		__ws = NULL;
++		pdev = NULL;
++	} else {
++		ret = -1;
+ 	}
+ unlock:
+ 	spin_unlock_irqrestore(&rtcdev_lock, flags);
+ 
++	platform_device_unregister(pdev);
+ 	wakeup_source_unregister(__ws);
+ 
+ 	return ret;
+@@ -876,8 +883,7 @@ static struct platform_driver alarmtimer_driver = {
+  */
+ static int __init alarmtimer_init(void)
+ {
+-	struct platform_device *pdev;
+-	int error = 0;
++	int error;
+ 	int i;
+ 
+ 	alarmtimer_rtc_timer_init();
+@@ -900,15 +906,7 @@ static int __init alarmtimer_init(void)
+ 	if (error)
+ 		goto out_if;
+ 
+-	pdev = platform_device_register_simple("alarmtimer", -1, NULL, 0);
+-	if (IS_ERR(pdev)) {
+-		error = PTR_ERR(pdev);
+-		goto out_drv;
+-	}
+ 	return 0;
+-
+-out_drv:
+-	platform_driver_unregister(&alarmtimer_driver);
+ out_if:
+ 	alarmtimer_rtc_interface_remove();
+ 	return error;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 3581bd96d6eb..ddb47a0af854 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -7038,9 +7038,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
+ 	struct trace_array *tr = m->private;
+ 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
+ 
+-	if (v == FTRACE_NO_PIDS)
++	if (v == FTRACE_NO_PIDS) {
++		(*pos)++;
+ 		return NULL;
+-
++	}
+ 	return trace_pid_next(pid_list, v, pos);
+ }
+ 
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 6ac35b9e195d..e10585ef00e1 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -470,11 +470,12 @@ struct action_data {
+ 	 * When a histogram trigger is hit, the values of any
+ 	 * references to variables, including variables being passed
+ 	 * as parameters to synthetic events, are collected into a
+-	 * var_ref_vals array.  This var_ref_idx is the index of the
+-	 * first param in the array to be passed to the synthetic
+-	 * event invocation.
++	 * var_ref_vals array.  This var_ref_idx array is an array of
++	 * indices into the var_ref_vals array, one for each synthetic
++	 * event param, and is passed to the synthetic event
++	 * invocation.
+ 	 */
+-	unsigned int		var_ref_idx;
++	unsigned int		var_ref_idx[TRACING_MAP_VARS_MAX];
+ 	struct synth_event	*synth_event;
+ 	bool			use_trace_keyword;
+ 	char			*synth_event_name;
+@@ -875,14 +876,14 @@ static struct trace_event_functions synth_event_funcs = {
+ 
+ static notrace void trace_event_raw_event_synth(void *__data,
+ 						u64 *var_ref_vals,
+-						unsigned int var_ref_idx)
++						unsigned int *var_ref_idx)
+ {
+ 	struct trace_event_file *trace_file = __data;
+ 	struct synth_trace_event *entry;
+ 	struct trace_event_buffer fbuffer;
+ 	struct ring_buffer *buffer;
+ 	struct synth_event *event;
+-	unsigned int i, n_u64;
++	unsigned int i, n_u64, val_idx;
+ 	int fields_size = 0;
+ 
+ 	event = trace_file->event_call->data;
+@@ -905,15 +906,16 @@ static notrace void trace_event_raw_event_synth(void *__data,
+ 		goto out;
+ 
+ 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
++		val_idx = var_ref_idx[i];
+ 		if (event->fields[i]->is_string) {
+-			char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
++			char *str_val = (char *)(long)var_ref_vals[val_idx];
+ 			char *str_field = (char *)&entry->fields[n_u64];
+ 
+ 			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+ 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ 		} else {
+ 			struct synth_field *field = event->fields[i];
+-			u64 val = var_ref_vals[var_ref_idx + i];
++			u64 val = var_ref_vals[val_idx];
+ 
+ 			switch (field->size) {
+ 			case 1:
+@@ -1113,10 +1115,10 @@ static struct tracepoint *alloc_synth_tracepoint(char *name)
+ }
+ 
+ typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
+-				    unsigned int var_ref_idx);
++				    unsigned int *var_ref_idx);
+ 
+ static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
+-			       unsigned int var_ref_idx)
++			       unsigned int *var_ref_idx)
+ {
+ 	struct tracepoint *tp = event->tp;
+ 
+@@ -2035,12 +2037,6 @@ static int parse_map_size(char *str)
+ 	unsigned long size, map_bits;
+ 	int ret;
+ 
+-	strsep(&str, "=");
+-	if (!str) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
+-
+ 	ret = kstrtoul(str, 0, &size);
+ 	if (ret)
+ 		goto out;
+@@ -2100,25 +2096,25 @@ static int parse_action(char *str, struct hist_trigger_attrs *attrs)
+ static int parse_assignment(struct trace_array *tr,
+ 			    char *str, struct hist_trigger_attrs *attrs)
+ {
+-	int ret = 0;
++	int len, ret = 0;
+ 
+-	if ((str_has_prefix(str, "key=")) ||
+-	    (str_has_prefix(str, "keys="))) {
+-		attrs->keys_str = kstrdup(str, GFP_KERNEL);
++	if ((len = str_has_prefix(str, "key=")) ||
++	    (len = str_has_prefix(str, "keys="))) {
++		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
+ 		if (!attrs->keys_str) {
+ 			ret = -ENOMEM;
+ 			goto out;
+ 		}
+-	} else if ((str_has_prefix(str, "val=")) ||
+-		   (str_has_prefix(str, "vals=")) ||
+-		   (str_has_prefix(str, "values="))) {
+-		attrs->vals_str = kstrdup(str, GFP_KERNEL);
++	} else if ((len = str_has_prefix(str, "val=")) ||
++		   (len = str_has_prefix(str, "vals=")) ||
++		   (len = str_has_prefix(str, "values="))) {
++		attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
+ 		if (!attrs->vals_str) {
+ 			ret = -ENOMEM;
+ 			goto out;
+ 		}
+-	} else if (str_has_prefix(str, "sort=")) {
+-		attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
++	} else if ((len = str_has_prefix(str, "sort="))) {
++		attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
+ 		if (!attrs->sort_key_str) {
+ 			ret = -ENOMEM;
+ 			goto out;
+@@ -2129,12 +2125,8 @@ static int parse_assignment(struct trace_array *tr,
+ 			ret = -ENOMEM;
+ 			goto out;
+ 		}
+-	} else if (str_has_prefix(str, "clock=")) {
+-		strsep(&str, "=");
+-		if (!str) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
++	} else if ((len = str_has_prefix(str, "clock="))) {
++		str += len;
+ 
+ 		str = strstrip(str);
+ 		attrs->clock = kstrdup(str, GFP_KERNEL);
+@@ -2142,8 +2134,8 @@ static int parse_assignment(struct trace_array *tr,
+ 			ret = -ENOMEM;
+ 			goto out;
+ 		}
+-	} else if (str_has_prefix(str, "size=")) {
+-		int map_bits = parse_map_size(str);
++	} else if ((len = str_has_prefix(str, "size="))) {
++		int map_bits = parse_map_size(str + len);
+ 
+ 		if (map_bits < 0) {
+ 			ret = map_bits;
+@@ -2183,8 +2175,14 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
+ 
+ 	while (trigger_str) {
+ 		char *str = strsep(&trigger_str, ":");
++		char *rhs;
+ 
+-		if (strchr(str, '=')) {
++		rhs = strchr(str, '=');
++		if (rhs) {
++			if (!strlen(++rhs)) {
++				ret = -EINVAL;
++				goto free;
++			}
+ 			ret = parse_assignment(tr, str, attrs);
+ 			if (ret)
+ 				goto free;
+@@ -2655,6 +2653,22 @@ static int init_var_ref(struct hist_field *ref_field,
+ 	goto out;
+ }
+ 
++static int find_var_ref_idx(struct hist_trigger_data *hist_data,
++			    struct hist_field *var_field)
++{
++	struct hist_field *ref_field;
++	int i;
++
++	for (i = 0; i < hist_data->n_var_refs; i++) {
++		ref_field = hist_data->var_refs[i];
++		if (ref_field->var.idx == var_field->var.idx &&
++		    ref_field->var.hist_data == var_field->hist_data)
++			return i;
++	}
++
++	return -ENOENT;
++}
++
+ /**
+  * create_var_ref - Create a variable reference and attach it to trigger
+  * @hist_data: The trigger that will be referencing the variable
+@@ -4228,11 +4242,11 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
+ 	struct trace_array *tr = hist_data->event_file->tr;
+ 	char *event_name, *param, *system = NULL;
+ 	struct hist_field *hist_field, *var_ref;
+-	unsigned int i, var_ref_idx;
++	unsigned int i;
+ 	unsigned int field_pos = 0;
+ 	struct synth_event *event;
+ 	char *synth_event_name;
+-	int ret = 0;
++	int var_ref_idx, ret = 0;
+ 
+ 	lockdep_assert_held(&event_mutex);
+ 
+@@ -4249,8 +4263,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
+ 
+ 	event->ref++;
+ 
+-	var_ref_idx = hist_data->n_var_refs;
+-
+ 	for (i = 0; i < data->n_params; i++) {
+ 		char *p;
+ 
+@@ -4299,6 +4311,14 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
+ 				goto err;
+ 			}
+ 
++			var_ref_idx = find_var_ref_idx(hist_data, var_ref);
++			if (WARN_ON(var_ref_idx < 0)) {
++				ret = var_ref_idx;
++				goto err;
++			}
++
++			data->var_ref_idx[i] = var_ref_idx;
++
+ 			field_pos++;
+ 			kfree(p);
+ 			continue;
+@@ -4317,7 +4337,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
+ 	}
+ 
+ 	data->synth_event = event;
+-	data->var_ref_idx = var_ref_idx;
+  out:
+ 	return ret;
+  err:
+@@ -4536,10 +4555,6 @@ static int create_val_fields(struct hist_trigger_data *hist_data,
+ 	if (!fields_str)
+ 		goto out;
+ 
+-	strsep(&fields_str, "=");
+-	if (!fields_str)
+-		goto out;
+-
+ 	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
+ 		     j < TRACING_MAP_VALS_MAX; i++) {
+ 		field_str = strsep(&fields_str, ",");
+@@ -4634,10 +4649,6 @@ static int create_key_fields(struct hist_trigger_data *hist_data,
+ 	if (!fields_str)
+ 		goto out;
+ 
+-	strsep(&fields_str, "=");
+-	if (!fields_str)
+-		goto out;
+-
+ 	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
+ 		field_str = strsep(&fields_str, ",");
+ 		if (!field_str)
+@@ -4795,12 +4806,6 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
+ 	if (!fields_str)
+ 		goto out;
+ 
+-	strsep(&fields_str, "=");
+-	if (!fields_str) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
+-
+ 	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
+ 		struct hist_field *hist_field;
+ 		char *field_str, *field_name;
+@@ -4809,9 +4814,11 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
+ 		sort_key = &hist_data->sort_keys[i];
+ 
+ 		field_str = strsep(&fields_str, ",");
+-		if (!field_str) {
+-			if (i == 0)
+-				ret = -EINVAL;
++		if (!field_str)
++			break;
++
++		if (!*field_str) {
++			ret = -EINVAL;
+ 			break;
+ 		}
+ 
+@@ -4821,7 +4828,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
+ 		}
+ 
+ 		field_name = strsep(&field_str, ".");
+-		if (!field_name) {
++		if (!field_name || !*field_name) {
+ 			ret = -EINVAL;
+ 			break;
+ 		}
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 40106fff06a4..287d77eae59b 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -116,9 +116,10 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
+ {
+ 	struct trace_event_file *event_file = event_file_data(m->private);
+ 
+-	if (t == SHOW_AVAILABLE_TRIGGERS)
++	if (t == SHOW_AVAILABLE_TRIGGERS) {
++		(*pos)++;
+ 		return NULL;
+-
++	}
+ 	return seq_list_next(t, &event_file->triggers, pos);
+ }
+ 
+diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
+index 874f1274cf99..d1fa19773cc8 100644
+--- a/kernel/trace/trace_stat.c
++++ b/kernel/trace/trace_stat.c
+@@ -280,18 +280,22 @@ static int tracing_stat_init(void)
+ 
+ 	d_tracing = tracing_init_dentry();
+ 	if (IS_ERR(d_tracing))
+-		return 0;
++		return -ENODEV;
+ 
+ 	stat_dir = tracefs_create_dir("trace_stat", d_tracing);
+-	if (!stat_dir)
++	if (!stat_dir) {
+ 		pr_warn("Could not create tracefs 'trace_stat' entry\n");
++		return -ENOMEM;
++	}
+ 	return 0;
+ }
+ 
+ static int init_stat_file(struct stat_session *session)
+ {
+-	if (!stat_dir && tracing_stat_init())
+-		return -ENODEV;
++	int ret;
++
++	if (!stat_dir && (ret = tracing_stat_init()))
++		return ret;
+ 
+ 	session->file = tracefs_create_file(session->ts->name, 0644,
+ 					    stat_dir,
+@@ -304,7 +308,7 @@ static int init_stat_file(struct stat_session *session)
+ int register_stat_tracer(struct tracer_stat *trace)
+ {
+ 	struct stat_session *session, *node;
+-	int ret;
++	int ret = -EINVAL;
+ 
+ 	if (!trace)
+ 		return -EINVAL;
+@@ -315,17 +319,15 @@ int register_stat_tracer(struct tracer_stat *trace)
+ 	/* Already registered? */
+ 	mutex_lock(&all_stat_sessions_mutex);
+ 	list_for_each_entry(node, &all_stat_sessions, session_list) {
+-		if (node->ts == trace) {
+-			mutex_unlock(&all_stat_sessions_mutex);
+-			return -EINVAL;
+-		}
++		if (node->ts == trace)
++			goto out;
+ 	}
+-	mutex_unlock(&all_stat_sessions_mutex);
+ 
++	ret = -ENOMEM;
+ 	/* Init the session */
+ 	session = kzalloc(sizeof(*session), GFP_KERNEL);
+ 	if (!session)
+-		return -ENOMEM;
++		goto out;
+ 
+ 	session->ts = trace;
+ 	INIT_LIST_HEAD(&session->session_list);
+@@ -334,15 +336,16 @@ int register_stat_tracer(struct tracer_stat *trace)
+ 	ret = init_stat_file(session);
+ 	if (ret) {
+ 		destroy_session(session);
+-		return ret;
++		goto out;
+ 	}
+ 
++	ret = 0;
+ 	/* Register */
+-	mutex_lock(&all_stat_sessions_mutex);
+ 	list_add_tail(&session->session_list, &all_stat_sessions);
++ out:
+ 	mutex_unlock(&all_stat_sessions_mutex);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ void unregister_stat_tracer(struct tracer_stat *trace)
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index f41334ef0971..cbd3cf503c90 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -161,6 +161,8 @@ static void lockup_detector_update_enable(void)
+ 
+ #ifdef CONFIG_SOFTLOCKUP_DETECTOR
+ 
++#define SOFTLOCKUP_RESET	ULONG_MAX
++
+ /* Global variables, exported for sysctl */
+ unsigned int __read_mostly softlockup_panic =
+ 			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
+@@ -274,7 +276,7 @@ notrace void touch_softlockup_watchdog_sched(void)
+ 	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
+ 	 * gets zeroed here, so use the raw_ operation.
+ 	 */
+-	raw_cpu_write(watchdog_touch_ts, 0);
++	raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
+ }
+ 
+ notrace void touch_softlockup_watchdog(void)
+@@ -298,14 +300,14 @@ void touch_all_softlockup_watchdogs(void)
+ 	 * the softlockup check.
+ 	 */
+ 	for_each_cpu(cpu, &watchdog_allowed_mask)
+-		per_cpu(watchdog_touch_ts, cpu) = 0;
++		per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
+ 	wq_watchdog_touch(-1);
+ }
+ 
+ void touch_softlockup_watchdog_sync(void)
+ {
+ 	__this_cpu_write(softlockup_touch_sync, true);
+-	__this_cpu_write(watchdog_touch_ts, 0);
++	__this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
+ }
+ 
+ static int is_softlockup(unsigned long touch_ts)
+@@ -383,7 +385,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ 	/* .. and repeat */
+ 	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
+ 
+-	if (touch_ts == 0) {
++	if (touch_ts == SOFTLOCKUP_RESET) {
+ 		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
+ 			/*
+ 			 * If the time stamp was touched atomically
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 61261195f5b6..48054dbf1b51 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -132,14 +132,18 @@ static void fill_pool(void)
+ 	struct debug_obj *obj;
+ 	unsigned long flags;
+ 
+-	if (likely(obj_pool_free >= debug_objects_pool_min_level))
++	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
+ 		return;
+ 
+ 	/*
+ 	 * Reuse objs from the global free list; they will be reinitialized
+ 	 * when allocating.
++	 *
++	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
++	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
++	 * sections.
+ 	 */
+-	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
++	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
+ 		raw_spin_lock_irqsave(&pool_lock, flags);
+ 		/*
+ 		 * Recheck with the lock held as the worker thread might have
+@@ -148,9 +152,9 @@ static void fill_pool(void)
+ 		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+ 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+ 			hlist_del(&obj->node);
+-			obj_nr_tofree--;
++			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
+ 			hlist_add_head(&obj->node, &obj_pool);
+-			obj_pool_free++;
++			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ 		}
+ 		raw_spin_unlock_irqrestore(&pool_lock, flags);
+ 	}
+@@ -158,7 +162,7 @@ static void fill_pool(void)
+ 	if (unlikely(!obj_cache))
+ 		return;
+ 
+-	while (obj_pool_free < debug_objects_pool_min_level) {
++	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
+ 		struct debug_obj *new[ODEBUG_BATCH_SIZE];
+ 		int cnt;
+ 
+@@ -174,7 +178,7 @@ static void fill_pool(void)
+ 		while (cnt) {
+ 			hlist_add_head(&new[--cnt]->node, &obj_pool);
+ 			debug_objects_allocated++;
+-			obj_pool_free++;
++			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ 		}
+ 		raw_spin_unlock_irqrestore(&pool_lock, flags);
+ 	}
+@@ -236,7 +240,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+ 	obj = __alloc_object(&obj_pool);
+ 	if (obj) {
+ 		obj_pool_used++;
+-		obj_pool_free--;
++		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+ 
+ 		/*
+ 		 * Looking ahead, allocate one batch of debug objects and
+@@ -255,7 +259,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+ 					       &percpu_pool->free_objs);
+ 				percpu_pool->obj_free++;
+ 				obj_pool_used++;
+-				obj_pool_free--;
++				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+ 			}
+ 		}
+ 
+@@ -309,8 +313,8 @@ static void free_obj_work(struct work_struct *work)
+ 		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+ 		hlist_del(&obj->node);
+ 		hlist_add_head(&obj->node, &obj_pool);
+-		obj_pool_free++;
+-		obj_nr_tofree--;
++		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
++		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
+ 	}
+ 	raw_spin_unlock_irqrestore(&pool_lock, flags);
+ 	return;
+@@ -324,7 +328,7 @@ free_objs:
+ 	if (obj_nr_tofree) {
+ 		hlist_move_list(&obj_to_free, &tofree);
+ 		debug_objects_freed += obj_nr_tofree;
+-		obj_nr_tofree = 0;
++		WRITE_ONCE(obj_nr_tofree, 0);
+ 	}
+ 	raw_spin_unlock_irqrestore(&pool_lock, flags);
+ 
+@@ -375,10 +379,10 @@ free_to_obj_pool:
+ 	obj_pool_used--;
+ 
+ 	if (work) {
+-		obj_nr_tofree++;
++		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
+ 		hlist_add_head(&obj->node, &obj_to_free);
+ 		if (lookahead_count) {
+-			obj_nr_tofree += lookahead_count;
++			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
+ 			obj_pool_used -= lookahead_count;
+ 			while (lookahead_count) {
+ 				hlist_add_head(&objs[--lookahead_count]->node,
+@@ -396,15 +400,15 @@ free_to_obj_pool:
+ 			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+ 				obj = __alloc_object(&obj_pool);
+ 				hlist_add_head(&obj->node, &obj_to_free);
+-				obj_pool_free--;
+-				obj_nr_tofree++;
++				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
++				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
+ 			}
+ 		}
+ 	} else {
+-		obj_pool_free++;
++		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ 		hlist_add_head(&obj->node, &obj_pool);
+ 		if (lookahead_count) {
+-			obj_pool_free += lookahead_count;
++			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
+ 			obj_pool_used -= lookahead_count;
+ 			while (lookahead_count) {
+ 				hlist_add_head(&objs[--lookahead_count]->node,
+@@ -423,7 +427,7 @@ free_to_obj_pool:
+ static void free_object(struct debug_obj *obj)
+ {
+ 	__free_object(obj);
+-	if (!obj_freeing && obj_nr_tofree) {
++	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
+ 		WRITE_ONCE(obj_freeing, true);
+ 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+ 	}
+@@ -982,7 +986,7 @@ repeat:
+ 		debug_objects_maxchecked = objs_checked;
+ 
+ 	/* Schedule work to actually kmem_cache_free() objects */
+-	if (!obj_freeing && obj_nr_tofree) {
++	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
+ 		WRITE_ONCE(obj_freeing, true);
+ 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+ 	}
+@@ -1008,12 +1012,12 @@ static int debug_stats_show(struct seq_file *m, void *v)
+ 	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
+ 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
+ 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
+-	seq_printf(m, "pool_free     :%d\n", obj_pool_free + obj_percpu_free);
++	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
+ 	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
+ 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
+ 	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
+ 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+-	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
++	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
+ 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
+ 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
+ 	return 0;
+diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
+index 55686839eb61..6b9c5242017f 100644
+--- a/lib/kunit/try-catch.c
++++ b/lib/kunit/try-catch.c
+@@ -12,7 +12,6 @@
+ #include <linux/completion.h>
+ #include <linux/kernel.h>
+ #include <linux/kthread.h>
+-#include <linux/sched/sysctl.h>
+ 
+ void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
+ {
+@@ -31,8 +30,6 @@ static int kunit_generic_run_threadfn_adapter(void *data)
+ 
+ static unsigned long kunit_test_timeout(void)
+ {
+-	unsigned long timeout_msecs;
+-
+ 	/*
+ 	 * TODO(brendanhiggins@google.com): We should probably have some type of
+ 	 * variable timeout here. The only question is what that timeout value
+@@ -49,22 +46,11 @@ static unsigned long kunit_test_timeout(void)
+ 	 *
+ 	 * For more background on this topic, see:
+ 	 * https://mike-bland.com/2011/11/01/small-medium-large.html
++	 *
++	 * If tests time out due to exceeding sysctl_hung_task_timeout_secs,
++	 * the task will be killed and an oops generated.
+ 	 */
+-	if (sysctl_hung_task_timeout_secs) {
+-		/*
+-		 * If sysctl_hung_task is active, just set the timeout to some
+-		 * value less than that.
+-		 *
+-		 * In regards to the above TODO, if we decide on variable
+-		 * timeouts, this logic will likely need to change.
+-		 */
+-		timeout_msecs = (sysctl_hung_task_timeout_secs - 1) *
+-				MSEC_PER_SEC;
+-	} else {
+-		timeout_msecs = 300 * MSEC_PER_SEC; /* 5 min */
+-	}
+-
+-	return timeout_msecs;
++	return 300 * MSEC_PER_SEC; /* 5 min */
+ }
+ 
+ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
+index 9c485df1308f..f02e10fa6238 100644
+--- a/lib/raid6/mktables.c
++++ b/lib/raid6/mktables.c
+@@ -56,8 +56,8 @@ int main(int argc, char *argv[])
+ 	uint8_t v;
+ 	uint8_t exptbl[256], invtbl[256];
+ 
+-	printf("#include <linux/raid/pq.h>\n");
+ 	printf("#include <linux/export.h>\n");
++	printf("#include <linux/raid/pq.h>\n");
+ 
+ 	/* Compute multiplication table */
+ 	printf("\nconst u8  __attribute__((aligned(256)))\n"
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index c2cf2c311b7d..5813072bc589 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -311,7 +311,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
+ 			if (prv)
+ 				table->nents = ++table->orig_nents;
+ 
+- 			return -ENOMEM;
++			return -ENOMEM;
+ 		}
+ 
+ 		sg_init_table(sg, alloc_size);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 81befd0c2510..466f2e4144b0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4477,14 +4477,14 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+ 	/* Reinjected packets coming from act_mirred or similar should
+ 	 * not get XDP generic processing.
+ 	 */
+-	if (skb_cloned(skb) || skb_is_tc_redirected(skb))
++	if (skb_is_tc_redirected(skb))
+ 		return XDP_PASS;
+ 
+ 	/* XDP packets must be linear and must have sufficient headroom
+ 	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
+ 	 * native XDP provides, thus we need to do it here as well.
+ 	 */
+-	if (skb_is_nonlinear(skb) ||
++	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
+ 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+ 		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
+ 		int troom = skb->tail + skb->data_len - skb->end;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 538f6a735a19..f797b1599c92 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3543,7 +3543,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
+ 		return err;
+ 	}
+ 	default:
+-		break;
++		return -EBADRQC;
+ 	}
+ 	return 0;
+ }
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 085cef5857bb..405397801bb0 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -881,6 +881,9 @@ static void sock_hash_free(struct bpf_map *map)
+ 	/* wait for psock readers accessing its map link */
+ 	synchronize_rcu();
+ 
++	/* wait for psock readers accessing its map link */
++	synchronize_rcu();
++
+ 	bpf_map_area_free(htab->buckets);
+ 	kfree(htab);
+ }
+diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
+index c8a128c9e5e0..70db7c909f74 100644
+--- a/net/dsa/tag_qca.c
++++ b/net/dsa/tag_qca.c
+@@ -33,7 +33,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	struct dsa_port *dp = dsa_slave_to_port(dev);
+ 	u16 *phdr, hdr;
+ 
+-	if (skb_cow_head(skb, 0) < 0)
++	if (skb_cow_head(skb, QCA_HDR_LEN) < 0)
+ 		return NULL;
+ 
+ 	skb_push(skb, QCA_HDR_LEN);
+diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
+index 5284fcf16be7..f8d2919cf9fd 100644
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -248,8 +248,9 @@ static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
+ }
+ 
+ static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
++	[NFTA_TUNNEL_KEY_ERSPAN_VERSION]	= { .type = NLA_U32 },
+ 	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
+-	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]	= { .type = NLA_U8 },
++	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]		= { .type = NLA_U8 },
+ 	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
+ };
+ 
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index f9c0d1e8d380..7e54d2ab5254 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -691,6 +691,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
+ 					    .len = 128 / BITS_PER_BYTE },
+ 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
+ 					    .len = 128 / BITS_PER_BYTE },
++	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
+ };
+ 
+ static const struct nla_policy
+diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
+index 039cc86974f4..610a0b728161 100644
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -157,6 +157,7 @@ static void *mall_get(struct tcf_proto *tp, u32 handle)
+ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
+ 	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
+ 	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
++	[TCA_MATCHALL_FLAGS]		= { .type = NLA_U32 },
+ };
+ 
+ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
+diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
+index f38727ecf8b2..e1f64f4ba236 100644
+--- a/net/smc/smc_diag.c
++++ b/net/smc/smc_diag.c
+@@ -39,16 +39,15 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
+ {
+ 	struct smc_sock *smc = smc_sk(sk);
+ 
++	memset(r, 0, sizeof(*r));
+ 	r->diag_family = sk->sk_family;
++	sock_diag_save_cookie(sk, r->id.idiag_cookie);
+ 	if (!smc->clcsock)
+ 		return;
+ 	r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
+ 	r->id.idiag_dport = smc->clcsock->sk->sk_dport;
+ 	r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
+-	sock_diag_save_cookie(sk, r->id.idiag_cookie);
+ 	if (sk->sk_protocol == SMCPROTO_SMC) {
+-		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+-		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+ 		r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
+ 		r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
+ #if IS_ENABLED(CONFIG_IPV6)
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index f740cb51802a..7ede1e52fd81 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1888,7 +1888,9 @@ void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
+ 	if (!hlist_unhashed(&h->cache_list)){
+ 		hlist_del_init_rcu(&h->cache_list);
+ 		cd->entries--;
++		set_bit(CACHE_CLEANED, &h->flags);
+ 		spin_unlock(&cd->hash_lock);
++		cache_fresh_unlocked(h, cd);
+ 		cache_put(h, cd);
+ 	} else
+ 		spin_unlock(&cd->hash_lock);
+diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
+index c0147a8cf188..06ebe3104cc0 100644
+--- a/samples/bpf/Makefile
++++ b/samples/bpf/Makefile
+@@ -236,6 +236,7 @@ BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
+ 			  readelf -S ./llvm_btf_verify.o | grep BTF; \
+ 			  /bin/rm -f ./llvm_btf_verify.o)
+ 
++BPF_EXTRA_CFLAGS += -fno-stack-protector
+ ifneq ($(BTF_LLVM_PROBE),)
+ 	BPF_EXTRA_CFLAGS += -g
+ else
+diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
+index bc5f25763c1b..f3155af04d85 100644
+--- a/scripts/Kbuild.include
++++ b/scripts/Kbuild.include
+@@ -55,14 +55,13 @@ kecho := $($(quiet)kecho)
+ # - stdin is piped in from the first prerequisite ($<) so one has
+ #   to specify a valid file as first prerequisite (often the kbuild file)
+ define filechk
+-	$(Q)set -e;				\
+-	mkdir -p $(dir $@);			\
+-	{ $(filechk_$(1)); } > $@.tmp;		\
+-	if [ -r $@ ] && cmp -s $@ $@.tmp; then	\
+-		rm -f $@.tmp;			\
+-	else					\
+-		$(kecho) '  UPD     $@';	\
+-		mv -f $@.tmp $@;		\
++	$(Q)set -e;						\
++	mkdir -p $(dir $@);					\
++	trap "rm -f $(dot-target).tmp" EXIT;			\
++	{ $(filechk_$(1)); } > $(dot-target).tmp;		\
++	if [ ! -r $@ ] || ! cmp -s $@ $(dot-target).tmp; then	\
++		$(kecho) '  UPD     $@';			\
++		mv -f $(dot-target).tmp $@;			\
+ 	fi
+ endef
+ 
+diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
+index d4adfbe42690..bfb44b265a94 100644
+--- a/scripts/Kconfig.include
++++ b/scripts/Kconfig.include
+@@ -25,7 +25,7 @@ failure = $(if-success,$(1),n,y)
+ 
+ # $(cc-option,<flag>)
+ # Return y if the compiler supports <flag>, n otherwise
+-cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
++cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -S -x c /dev/null -o /dev/null)
+ 
+ # $(ld-option,<flag>)
+ # Return y if the linker supports <flag>, n otherwise
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index 3569d2dec37c..17298239e363 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -1353,7 +1353,7 @@ bool conf_set_all_new_symbols(enum conf_def_mode mode)
+ 
+ 		sym_calc_value(csym);
+ 		if (mode == def_random)
+-			has_changed = randomize_choice_values(csym);
++			has_changed |= randomize_choice_values(csym);
+ 		else {
+ 			set_all_choice_values(csym);
+ 			has_changed = true;
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 436379940356..408b5c0b99b1 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -108,13 +108,13 @@ gen_btf()
+ 	local bin_arch
+ 
+ 	if ! [ -x "$(command -v ${PAHOLE})" ]; then
+-		info "BTF" "${1}: pahole (${PAHOLE}) is not available"
++		echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available"
+ 		return 1
+ 	fi
+ 
+ 	pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/')
+ 	if [ "${pahole_ver}" -lt "113" ]; then
+-		info "BTF" "${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.13"
++		echo >&2 "BTF: ${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.13"
+ 		return 1
+ 	fi
+ 
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index d7e987baf127..9b35db2fc777 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -655,6 +655,9 @@ void process_buffer_measurement(const void *buf, int size,
+ 	int action = 0;
+ 	u32 secid;
+ 
++	if (!ima_policy_flag)
++		return;
++
+ 	/*
+ 	 * Both LSM hooks and auxilary based buffer measurements are
+ 	 * based on policy.  To avoid code duplication, differentiate
+diff --git a/security/selinux/avc.c b/security/selinux/avc.c
+index 23dc888ae305..d18cb32a242a 100644
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -617,40 +617,37 @@ static struct avc_node *avc_insert(struct selinux_avc *avc,
+ 	struct avc_node *pos, *node = NULL;
+ 	int hvalue;
+ 	unsigned long flag;
++	spinlock_t *lock;
++	struct hlist_head *head;
+ 
+ 	if (avc_latest_notif_update(avc, avd->seqno, 1))
+-		goto out;
++		return NULL;
+ 
+ 	node = avc_alloc_node(avc);
+-	if (node) {
+-		struct hlist_head *head;
+-		spinlock_t *lock;
+-		int rc = 0;
+-
+-		hvalue = avc_hash(ssid, tsid, tclass);
+-		avc_node_populate(node, ssid, tsid, tclass, avd);
+-		rc = avc_xperms_populate(node, xp_node);
+-		if (rc) {
+-			kmem_cache_free(avc_node_cachep, node);
+-			return NULL;
+-		}
+-		head = &avc->avc_cache.slots[hvalue];
+-		lock = &avc->avc_cache.slots_lock[hvalue];
++	if (!node)
++		return NULL;
+ 
+-		spin_lock_irqsave(lock, flag);
+-		hlist_for_each_entry(pos, head, list) {
+-			if (pos->ae.ssid == ssid &&
+-			    pos->ae.tsid == tsid &&
+-			    pos->ae.tclass == tclass) {
+-				avc_node_replace(avc, node, pos);
+-				goto found;
+-			}
++	avc_node_populate(node, ssid, tsid, tclass, avd);
++	if (avc_xperms_populate(node, xp_node)) {
++		avc_node_kill(avc, node);
++		return NULL;
++	}
++
++	hvalue = avc_hash(ssid, tsid, tclass);
++	head = &avc->avc_cache.slots[hvalue];
++	lock = &avc->avc_cache.slots_lock[hvalue];
++	spin_lock_irqsave(lock, flag);
++	hlist_for_each_entry(pos, head, list) {
++		if (pos->ae.ssid == ssid &&
++			pos->ae.tsid == tsid &&
++			pos->ae.tclass == tclass) {
++			avc_node_replace(avc, node, pos);
++			goto found;
+ 		}
+-		hlist_add_head_rcu(&node->list, head);
+-found:
+-		spin_unlock_irqrestore(lock, flag);
+ 	}
+-out:
++	hlist_add_head_rcu(&node->list, head);
++found:
++	spin_unlock_irqrestore(lock, flag);
+ 	return node;
+ }
+ 
+@@ -894,7 +891,7 @@ static int avc_update_node(struct selinux_avc *avc,
+ 	if (orig->ae.xp_node) {
+ 		rc = avc_xperms_populate(node, orig->ae.xp_node);
+ 		if (rc) {
+-			kmem_cache_free(avc_node_cachep, node);
++			avc_node_kill(avc, node);
+ 			goto out_unlock;
+ 		}
+ 	}
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 7a4d8690ce41..08ca7666e84c 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1430,8 +1430,9 @@ static int call_tlv_handler(struct snd_ctl_file *file, int op_flag,
+ 	if (kctl->tlv.c == NULL)
+ 		return -ENXIO;
+ 
+-	/* When locked, this is unavailable. */
+-	if (vd->owner != NULL && vd->owner != file)
++	/* Write and command operations are not allowed for locked element. */
++	if (op_flag != SNDRV_CTL_TLV_OP_READ &&
++	    vd->owner != NULL && vd->owner != file)
+ 		return -EPERM;
+ 
+ 	return kctl->tlv.c(kctl, op_flag, size, buf);
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 90aa0f400a57..1e20e85e9b46 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -922,6 +922,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
++	SND_PCI_QUIRK(0x17aa, 0x21d2, "Lenovo T420s", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index bde50414029d..4f195c7d966a 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2862,9 +2862,12 @@ static int alloc_intel_hdmi(struct hda_codec *codec)
+ /* parse and post-process for Intel codecs */
+ static int parse_intel_hdmi(struct hda_codec *codec)
+ {
+-	int err;
++	int err, retries = 3;
++
++	do {
++		err = hdmi_parse_codec(codec);
++	} while (err < 0 && retries--);
+ 
+-	err = hdmi_parse_codec(codec);
+ 	if (err < 0) {
+ 		generic_spec_free(codec);
+ 		return err;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 128d81b4140b..c6b1581c6ffa 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5852,6 +5852,7 @@ enum {
+ 	ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC288_FIXUP_DELL_XPS_13,
+ 	ALC288_FIXUP_DISABLE_AAMIX,
++	ALC292_FIXUP_DELL_E7X_AAMIX,
+ 	ALC292_FIXUP_DELL_E7X,
+ 	ALC292_FIXUP_DISABLE_AAMIX,
+ 	ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
+@@ -6547,12 +6548,19 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE
+ 	},
+-	[ALC292_FIXUP_DELL_E7X] = {
++	[ALC292_FIXUP_DELL_E7X_AAMIX] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_dell_xps13,
+ 		.chained = true,
+ 		.chain_id = ALC292_FIXUP_DISABLE_AAMIX
+ 	},
++	[ALC292_FIXUP_DELL_E7X] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = snd_hda_gen_fixup_micmute_led,
++		/* micmute fixup must be applied at last */
++		.chained_before = true,
++		.chain_id = ALC292_FIXUP_DELL_E7X_AAMIX,
++	},
+ 	[ALC298_FIXUP_ALIENWARE_MIC_NO_PRESENCE] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+diff --git a/sound/sh/aica.c b/sound/sh/aica.c
+index bf1fb0d8a930..f69072d2564c 100644
+--- a/sound/sh/aica.c
++++ b/sound/sh/aica.c
+@@ -101,10 +101,10 @@ static void spu_memset(u32 toi, u32 what, int length)
+ }
+ 
+ /* spu_memload - write to SPU address space */
+-static void spu_memload(u32 toi, void *from, int length)
++static void spu_memload(u32 toi, const void *from, int length)
+ {
+ 	unsigned long flags;
+-	u32 *froml = from;
++	const u32 *froml = from;
+ 	u32 __iomem *to = (u32 __iomem *) (SPU_MEMORY_BASE + toi);
+ 	int i;
+ 	u32 val;
+diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c
+index f9e36abc98ac..725992937e8f 100644
+--- a/sound/sh/sh_dac_audio.c
++++ b/sound/sh/sh_dac_audio.c
+@@ -175,7 +175,6 @@ static int snd_sh_dac_pcm_copy(struct snd_pcm_substream *substream,
+ {
+ 	/* channel is not used (interleaved data) */
+ 	struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
+-	struct snd_pcm_runtime *runtime = substream->runtime;
+ 
+ 	if (copy_from_user_toio(chip->data_buffer + pos, src, count))
+ 		return -EFAULT;
+@@ -195,7 +194,6 @@ static int snd_sh_dac_pcm_copy_kernel(struct snd_pcm_substream *substream,
+ {
+ 	/* channel is not used (interleaved data) */
+ 	struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
+-	struct snd_pcm_runtime *runtime = substream->runtime;
+ 
+ 	memcpy_toio(chip->data_buffer + pos, src, count);
+ 	chip->buffer_end = chip->data_buffer + pos + count;
+@@ -214,7 +212,6 @@ static int snd_sh_dac_pcm_silence(struct snd_pcm_substream *substream,
+ {
+ 	/* channel is not used (interleaved data) */
+ 	struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
+-	struct snd_pcm_runtime *runtime = substream->runtime;
+ 
+ 	memset_io(chip->data_buffer + pos, 0, count);
+ 	chip->buffer_end = chip->data_buffer + pos + count;
+diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
+index f118c229ed82..d1dc8e6366dc 100644
+--- a/sound/soc/atmel/Kconfig
++++ b/sound/soc/atmel/Kconfig
+@@ -19,6 +19,8 @@ config SND_ATMEL_SOC_DMA
+ 
+ config SND_ATMEL_SOC_SSC
+ 	tristate
++	select SND_ATMEL_SOC_DMA
++	select SND_ATMEL_SOC_PDC
+ 
+ config SND_ATMEL_SOC_SSC_PDC
+ 	tristate "SoC PCM DAI support for AT91 SSC controller using PDC"
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 2a9b610f6d43..d3d32b501aca 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1030,8 +1030,8 @@ static int wm_coeff_write_acked_control(struct wm_coeff_ctl *ctl,
+ 	return -ETIMEDOUT;
+ }
+ 
+-static int wm_coeff_write_control(struct wm_coeff_ctl *ctl,
+-				  const void *buf, size_t len)
++static int wm_coeff_write_ctrl_raw(struct wm_coeff_ctl *ctl,
++				   const void *buf, size_t len)
+ {
+ 	struct wm_adsp *dsp = ctl->dsp;
+ 	void *scratch;
+@@ -1061,6 +1061,23 @@ static int wm_coeff_write_control(struct wm_coeff_ctl *ctl,
+ 	return 0;
+ }
+ 
++static int wm_coeff_write_ctrl(struct wm_coeff_ctl *ctl,
++			       const void *buf, size_t len)
++{
++	int ret = 0;
++
++	if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
++		ret = -EPERM;
++	else if (buf != ctl->cache)
++		memcpy(ctl->cache, buf, len);
++
++	ctl->set = 1;
++	if (ctl->enabled && ctl->dsp->running)
++		ret = wm_coeff_write_ctrl_raw(ctl, buf, len);
++
++	return ret;
++}
++
+ static int wm_coeff_put(struct snd_kcontrol *kctl,
+ 			struct snd_ctl_elem_value *ucontrol)
+ {
+@@ -1071,16 +1088,7 @@ static int wm_coeff_put(struct snd_kcontrol *kctl,
+ 	int ret = 0;
+ 
+ 	mutex_lock(&ctl->dsp->pwr_lock);
+-
+-	if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+-		ret = -EPERM;
+-	else
+-		memcpy(ctl->cache, p, ctl->len);
+-
+-	ctl->set = 1;
+-	if (ctl->enabled && ctl->dsp->running)
+-		ret = wm_coeff_write_control(ctl, p, ctl->len);
+-
++	ret = wm_coeff_write_ctrl(ctl, p, ctl->len);
+ 	mutex_unlock(&ctl->dsp->pwr_lock);
+ 
+ 	return ret;
+@@ -1096,15 +1104,10 @@ static int wm_coeff_tlv_put(struct snd_kcontrol *kctl,
+ 
+ 	mutex_lock(&ctl->dsp->pwr_lock);
+ 
+-	if (copy_from_user(ctl->cache, bytes, size)) {
++	if (copy_from_user(ctl->cache, bytes, size))
+ 		ret = -EFAULT;
+-	} else {
+-		ctl->set = 1;
+-		if (ctl->enabled && ctl->dsp->running)
+-			ret = wm_coeff_write_control(ctl, ctl->cache, size);
+-		else if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+-			ret = -EPERM;
+-	}
++	else
++		ret = wm_coeff_write_ctrl(ctl, ctl->cache, size);
+ 
+ 	mutex_unlock(&ctl->dsp->pwr_lock);
+ 
+@@ -1135,8 +1138,8 @@ static int wm_coeff_put_acked(struct snd_kcontrol *kctl,
+ 	return ret;
+ }
+ 
+-static int wm_coeff_read_control(struct wm_coeff_ctl *ctl,
+-				 void *buf, size_t len)
++static int wm_coeff_read_ctrl_raw(struct wm_coeff_ctl *ctl,
++				  void *buf, size_t len)
+ {
+ 	struct wm_adsp *dsp = ctl->dsp;
+ 	void *scratch;
+@@ -1166,29 +1169,37 @@ static int wm_coeff_read_control(struct wm_coeff_ctl *ctl,
+ 	return 0;
+ }
+ 
+-static int wm_coeff_get(struct snd_kcontrol *kctl,
+-			struct snd_ctl_elem_value *ucontrol)
++static int wm_coeff_read_ctrl(struct wm_coeff_ctl *ctl, void *buf, size_t len)
+ {
+-	struct soc_bytes_ext *bytes_ext =
+-		(struct soc_bytes_ext *)kctl->private_value;
+-	struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
+-	char *p = ucontrol->value.bytes.data;
+ 	int ret = 0;
+ 
+-	mutex_lock(&ctl->dsp->pwr_lock);
+-
+ 	if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
+ 		if (ctl->enabled && ctl->dsp->running)
+-			ret = wm_coeff_read_control(ctl, p, ctl->len);
++			return wm_coeff_read_ctrl_raw(ctl, buf, len);
+ 		else
+-			ret = -EPERM;
++			return -EPERM;
+ 	} else {
+ 		if (!ctl->flags && ctl->enabled && ctl->dsp->running)
+-			ret = wm_coeff_read_control(ctl, ctl->cache, ctl->len);
++			ret = wm_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len);
+ 
+-		memcpy(p, ctl->cache, ctl->len);
++		if (buf != ctl->cache)
++			memcpy(buf, ctl->cache, len);
+ 	}
+ 
++	return ret;
++}
++
++static int wm_coeff_get(struct snd_kcontrol *kctl,
++			struct snd_ctl_elem_value *ucontrol)
++{
++	struct soc_bytes_ext *bytes_ext =
++		(struct soc_bytes_ext *)kctl->private_value;
++	struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
++	char *p = ucontrol->value.bytes.data;
++	int ret;
++
++	mutex_lock(&ctl->dsp->pwr_lock);
++	ret = wm_coeff_read_ctrl(ctl, p, ctl->len);
+ 	mutex_unlock(&ctl->dsp->pwr_lock);
+ 
+ 	return ret;
+@@ -1204,15 +1215,7 @@ static int wm_coeff_tlv_get(struct snd_kcontrol *kctl,
+ 
+ 	mutex_lock(&ctl->dsp->pwr_lock);
+ 
+-	if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
+-		if (ctl->enabled && ctl->dsp->running)
+-			ret = wm_coeff_read_control(ctl, ctl->cache, size);
+-		else
+-			ret = -EPERM;
+-	} else {
+-		if (!ctl->flags && ctl->enabled && ctl->dsp->running)
+-			ret = wm_coeff_read_control(ctl, ctl->cache, size);
+-	}
++	ret = wm_coeff_read_ctrl_raw(ctl, ctl->cache, size);
+ 
+ 	if (!ret && copy_to_user(bytes, ctl->cache, size))
+ 		ret = -EFAULT;
+@@ -1340,7 +1343,7 @@ static int wm_coeff_init_control_caches(struct wm_adsp *dsp)
+ 		 * created so we don't need to do anything.
+ 		 */
+ 		if (!ctl->flags || (ctl->flags & WMFW_CTL_FLAG_READABLE)) {
+-			ret = wm_coeff_read_control(ctl, ctl->cache, ctl->len);
++			ret = wm_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len);
+ 			if (ret < 0)
+ 				return ret;
+ 		}
+@@ -1358,7 +1361,8 @@ static int wm_coeff_sync_controls(struct wm_adsp *dsp)
+ 		if (!ctl->enabled)
+ 			continue;
+ 		if (ctl->set && !(ctl->flags & WMFW_CTL_FLAG_VOLATILE)) {
+-			ret = wm_coeff_write_control(ctl, ctl->cache, ctl->len);
++			ret = wm_coeff_write_ctrl_raw(ctl, ctl->cache,
++						      ctl->len);
+ 			if (ret < 0)
+ 				return ret;
+ 		}
+@@ -2048,7 +2052,7 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ 	if (len > ctl->len)
+ 		return -EINVAL;
+ 
+-	ret = wm_coeff_write_control(ctl, buf, len);
++	ret = wm_coeff_write_ctrl(ctl, buf, len);
+ 
+ 	kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl->name);
+ 	snd_ctl_notify(dsp->component->card->snd_card,
+@@ -2070,7 +2074,7 @@ int wm_adsp_read_ctl(struct wm_adsp *dsp, const char *name, int type,
+ 	if (len > ctl->len)
+ 		return -EINVAL;
+ 
+-	return wm_coeff_read_control(ctl, buf, len);
++	return wm_coeff_read_ctrl(ctl, buf, len);
+ }
+ EXPORT_SYMBOL_GPL(wm_adsp_read_ctl);
+ 
+diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
+index 5873abb46441..749b1c4f1cee 100644
+--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
++++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
+@@ -617,12 +617,15 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
+ 		snd_soc_dapm_add_routes(&card->dapm, broxton_map,
+ 					ARRAY_SIZE(broxton_map));
+ 
+-	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
+-			       head);
+-	component = pcm->codec_dai->component;
++	if (list_empty(&ctx->hdmi_pcm_list))
++		return -EINVAL;
+ 
+-	if (ctx->common_hdmi_codec_drv)
++	if (ctx->common_hdmi_codec_drv) {
++		pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
++				       head);
++		component = pcm->codec_dai->component;
+ 		return hda_dsp_hdmi_build_controls(card, component);
++	}
+ 
+ 	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ 		component = pcm->codec_dai->component;
+@@ -643,9 +646,6 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
+ 		i++;
+ 	}
+ 
+-	if (!component)
+-		return -EINVAL;
+-
+ 	return hdac_hdmi_jack_port_init(component, &card->dapm);
+ }
+ 
+diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
+index eabf9d8468ae..becfc4fc1aff 100644
+--- a/sound/soc/intel/boards/bxt_rt298.c
++++ b/sound/soc/intel/boards/bxt_rt298.c
+@@ -529,12 +529,15 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
+ 	int err, i = 0;
+ 	char jack_name[NAME_SIZE];
+ 
+-	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
+-			       head);
+-	component = pcm->codec_dai->component;
++	if (list_empty(&ctx->hdmi_pcm_list))
++		return -EINVAL;
+ 
+-	if (ctx->common_hdmi_codec_drv)
++	if (ctx->common_hdmi_codec_drv) {
++		pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
++				       head);
++		component = pcm->codec_dai->component;
+ 		return hda_dsp_hdmi_build_controls(card, component);
++	}
+ 
+ 	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ 		component = pcm->codec_dai->component;
+@@ -555,9 +558,6 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
+ 		i++;
+ 	}
+ 
+-	if (!component)
+-		return -EINVAL;
+-
+ 	return hdac_hdmi_jack_port_init(component, &card->dapm);
+ }
+ 
+diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c
+index 5f1bf6d3800c..a54636f77c8e 100644
+--- a/sound/soc/intel/boards/cml_rt1011_rt5682.c
++++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c
+@@ -241,12 +241,15 @@ static int sof_card_late_probe(struct snd_soc_card *card)
+ 	struct hdmi_pcm *pcm;
+ 	int ret, i = 0;
+ 
+-	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct hdmi_pcm,
+-			       head);
+-	component = pcm->codec_dai->component;
++	if (list_empty(&ctx->hdmi_pcm_list))
++		return -EINVAL;
+ 
+-	if (ctx->common_hdmi_codec_drv)
++	if (ctx->common_hdmi_codec_drv) {
++		pcm = list_first_entry(&ctx->hdmi_pcm_list, struct hdmi_pcm,
++				       head);
++		component = pcm->codec_dai->component;
+ 		return hda_dsp_hdmi_build_controls(card, component);
++	}
+ 
+ 	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ 		component = pcm->codec_dai->component;
+@@ -265,8 +268,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)
+ 
+ 		i++;
+ 	}
+-	if (!component)
+-		return -EINVAL;
+ 
+ 	return hdac_hdmi_jack_port_init(component, &card->dapm);
+ }
+diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
+index b36264d1d1cd..94c6bdfab63b 100644
+--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
++++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
+@@ -544,15 +544,18 @@ static int glk_card_late_probe(struct snd_soc_card *card)
+ 	struct snd_soc_component *component = NULL;
+ 	char jack_name[NAME_SIZE];
+ 	struct glk_hdmi_pcm *pcm;
+-	int err = 0;
++	int err;
+ 	int i = 0;
+ 
+-	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct glk_hdmi_pcm,
+-			       head);
+-	component = pcm->codec_dai->component;
++	if (list_empty(&ctx->hdmi_pcm_list))
++		return -EINVAL;
+ 
+-	if (ctx->common_hdmi_codec_drv)
++	if (ctx->common_hdmi_codec_drv) {
++		pcm = list_first_entry(&ctx->hdmi_pcm_list, struct glk_hdmi_pcm,
++				       head);
++		component = pcm->codec_dai->component;
+ 		return hda_dsp_hdmi_build_controls(card, component);
++	}
+ 
+ 	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ 		component = pcm->codec_dai->component;
+@@ -573,9 +576,6 @@ static int glk_card_late_probe(struct snd_soc_card *card)
+ 		i++;
+ 	}
+ 
+-	if (!component)
+-		return -EINVAL;
+-
+ 	return hdac_hdmi_jack_port_init(component, &card->dapm);
+ }
+ 
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index 751b8ea6ae1f..5d878873a8e0 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -35,6 +35,10 @@
+ #define SOF_RT5682_SSP_AMP(quirk)	\
+ 	(((quirk) << SOF_RT5682_SSP_AMP_SHIFT) & SOF_RT5682_SSP_AMP_MASK)
+ #define SOF_RT5682_MCLK_BYTCHT_EN		BIT(9)
++#define SOF_RT5682_NUM_HDMIDEV_SHIFT		10
++#define SOF_RT5682_NUM_HDMIDEV_MASK		(GENMASK(12, 10))
++#define SOF_RT5682_NUM_HDMIDEV(quirk)	\
++	((quirk << SOF_RT5682_NUM_HDMIDEV_SHIFT) & SOF_RT5682_NUM_HDMIDEV_MASK)
+ 
+ /* Default: MCLK on, MCLK 19.2M, SSP0  */
+ static unsigned long sof_rt5682_quirk = SOF_RT5682_MCLK_EN |
+@@ -269,19 +273,22 @@ static int sof_card_late_probe(struct snd_soc_card *card)
+ 	struct snd_soc_component *component = NULL;
+ 	char jack_name[NAME_SIZE];
+ 	struct sof_hdmi_pcm *pcm;
+-	int err = 0;
++	int err;
+ 	int i = 0;
+ 
+ 	/* HDMI is not supported by SOF on Baytrail/CherryTrail */
+ 	if (is_legacy_cpu)
+ 		return 0;
+ 
+-	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm,
+-			       head);
+-	component = pcm->codec_dai->component;
++	if (list_empty(&ctx->hdmi_pcm_list))
++		return -EINVAL;
+ 
+-	if (ctx->common_hdmi_codec_drv)
++	if (ctx->common_hdmi_codec_drv) {
++		pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm,
++				       head);
++		component = pcm->codec_dai->component;
+ 		return hda_dsp_hdmi_build_controls(card, component);
++	}
+ 
+ 	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ 		component = pcm->codec_dai->component;
+@@ -301,8 +308,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)
+ 
+ 		i++;
+ 	}
+-	if (!component)
+-		return -EINVAL;
+ 
+ 	return hdac_hdmi_jack_port_init(component, &card->dapm);
+ }
+@@ -594,6 +599,19 @@ static int sof_audio_probe(struct platform_device *pdev)
+ 	if (!ctx)
+ 		return -ENOMEM;
+ 
++	if (pdev->id_entry && pdev->id_entry->driver_data)
++		sof_rt5682_quirk = (unsigned long)pdev->id_entry->driver_data;
++
++	dmi_check_system(sof_rt5682_quirk_table);
++
++	mach = (&pdev->dev)->platform_data;
++
++	/* A speaker amp might not be present when the quirk claims one is.
++	 * Detect this via whether the machine driver match includes quirk_data.
++	 */
++	if ((sof_rt5682_quirk & SOF_SPEAKER_AMP_PRESENT) && !mach->quirk_data)
++		sof_rt5682_quirk &= ~SOF_SPEAKER_AMP_PRESENT;
++
+ 	if (soc_intel_is_byt() || soc_intel_is_cht()) {
+ 		is_legacy_cpu = 1;
+ 		dmic_be_num = 0;
+@@ -604,11 +622,13 @@ static int sof_audio_probe(struct platform_device *pdev)
+ 						SOF_RT5682_SSP_CODEC(2);
+ 	} else {
+ 		dmic_be_num = 2;
+-		hdmi_num = 3;
++		hdmi_num = (sof_rt5682_quirk & SOF_RT5682_NUM_HDMIDEV_MASK) >>
++			 SOF_RT5682_NUM_HDMIDEV_SHIFT;
++		/* default number of HDMI DAI's */
++		if (!hdmi_num)
++			hdmi_num = 3;
+ 	}
+ 
+-	dmi_check_system(sof_rt5682_quirk_table);
+-
+ 	/* need to get main clock from pmc */
+ 	if (sof_rt5682_quirk & SOF_RT5682_MCLK_BYTCHT_EN) {
+ 		ctx->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+@@ -652,7 +672,6 @@ static int sof_audio_probe(struct platform_device *pdev)
+ 	INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+ 
+ 	sof_audio_card_rt5682.dev = &pdev->dev;
+-	mach = (&pdev->dev)->platform_data;
+ 
+ 	/* set platform name for each dailink */
+ 	ret = snd_soc_fixup_dai_links_platform_name(&sof_audio_card_rt5682,
+@@ -683,6 +702,21 @@ static int sof_rt5682_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static const struct platform_device_id board_ids[] = {
++	{
++		.name = "sof_rt5682",
++	},
++	{
++		.name = "tgl_max98357a_rt5682",
++		.driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
++					SOF_RT5682_SSP_CODEC(0) |
++					SOF_SPEAKER_AMP_PRESENT |
++					SOF_RT5682_SSP_AMP(1) |
++					SOF_RT5682_NUM_HDMIDEV(4)),
++	},
++	{ }
++};
++
+ static struct platform_driver sof_audio = {
+ 	.probe = sof_audio_probe,
+ 	.remove = sof_rt5682_remove,
+@@ -690,6 +724,7 @@ static struct platform_driver sof_audio = {
+ 		.name = "sof_rt5682",
+ 		.pm = &snd_soc_pm_ops,
+ 	},
++	.id_table = board_ids,
+ };
+ module_platform_driver(sof_audio)
+ 
+@@ -699,3 +734,4 @@ MODULE_AUTHOR("Bard Liao <bard.liao@intel.com>");
+ MODULE_AUTHOR("Sathya Prakash M R <sathya.prakash.m.r@intel.com>");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:sof_rt5682");
++MODULE_ALIAS("platform:tgl_max98357a_rt5682");
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 4e1fe623c390..0119f07cece6 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -604,9 +604,11 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
+ 		ext_ops = tplg->bytes_ext_ops;
+ 		num_ops = tplg->bytes_ext_ops_count;
+ 		for (i = 0; i < num_ops; i++) {
+-			if (!sbe->put && ext_ops[i].id == be->ext_ops.put)
++			if (!sbe->put &&
++			    ext_ops[i].id == le32_to_cpu(be->ext_ops.put))
+ 				sbe->put = ext_ops[i].put;
+-			if (!sbe->get && ext_ops[i].id == be->ext_ops.get)
++			if (!sbe->get &&
++			    ext_ops[i].id == le32_to_cpu(be->ext_ops.get))
+ 				sbe->get = ext_ops[i].get;
+ 		}
+ 
+@@ -621,11 +623,11 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
+ 	num_ops = tplg->io_ops_count;
+ 	for (i = 0; i < num_ops; i++) {
+ 
+-		if (k->put == NULL && ops[i].id == hdr->ops.put)
++		if (k->put == NULL && ops[i].id == le32_to_cpu(hdr->ops.put))
+ 			k->put = ops[i].put;
+-		if (k->get == NULL && ops[i].id == hdr->ops.get)
++		if (k->get == NULL && ops[i].id == le32_to_cpu(hdr->ops.get))
+ 			k->get = ops[i].get;
+-		if (k->info == NULL && ops[i].id == hdr->ops.info)
++		if (k->info == NULL && ops[i].id == le32_to_cpu(hdr->ops.info))
+ 			k->info = ops[i].info;
+ 	}
+ 
+@@ -638,11 +640,11 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
+ 	num_ops = ARRAY_SIZE(io_ops);
+ 	for (i = 0; i < num_ops; i++) {
+ 
+-		if (k->put == NULL && ops[i].id == hdr->ops.put)
++		if (k->put == NULL && ops[i].id == le32_to_cpu(hdr->ops.put))
+ 			k->put = ops[i].put;
+-		if (k->get == NULL && ops[i].id == hdr->ops.get)
++		if (k->get == NULL && ops[i].id == le32_to_cpu(hdr->ops.get))
+ 			k->get = ops[i].get;
+-		if (k->info == NULL && ops[i].id == hdr->ops.info)
++		if (k->info == NULL && ops[i].id == le32_to_cpu(hdr->ops.info))
+ 			k->info = ops[i].info;
+ 	}
+ 
+@@ -931,7 +933,7 @@ static int soc_tplg_denum_create_texts(struct soc_enum *se,
+ 	if (se->dobj.control.dtexts == NULL)
+ 		return -ENOMEM;
+ 
+-	for (i = 0; i < ec->items; i++) {
++	for (i = 0; i < le32_to_cpu(ec->items); i++) {
+ 
+ 		if (strnlen(ec->texts[i], SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+ 			SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
+@@ -1325,7 +1327,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
+ 		if (kc[i].name == NULL)
+ 			goto err_sm;
+ 		kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+-		kc[i].access = mc->hdr.access;
++		kc[i].access = le32_to_cpu(mc->hdr.access);
+ 
+ 		/* we only support FL/FR channel mapping atm */
+ 		sm->reg = tplc_chan_get_reg(tplg, mc->channel,
+@@ -1337,10 +1339,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
+ 		sm->rshift = tplc_chan_get_shift(tplg, mc->channel,
+ 			SNDRV_CHMAP_FR);
+ 
+-		sm->max = mc->max;
+-		sm->min = mc->min;
+-		sm->invert = mc->invert;
+-		sm->platform_max = mc->platform_max;
++		sm->max = le32_to_cpu(mc->max);
++		sm->min = le32_to_cpu(mc->min);
++		sm->invert = le32_to_cpu(mc->invert);
++		sm->platform_max = le32_to_cpu(mc->platform_max);
+ 		sm->dobj.index = tplg->index;
+ 		INIT_LIST_HEAD(&sm->dobj.list);
+ 
+@@ -1401,7 +1403,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
+ 			goto err_se;
+ 
+ 		tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) +
+-				ec->priv.size);
++			      le32_to_cpu(ec->priv.size));
+ 
+ 		dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
+ 			ec->hdr.name);
+@@ -1411,7 +1413,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
+ 		if (kc[i].name == NULL)
+ 			goto err_se;
+ 		kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+-		kc[i].access = ec->hdr.access;
++		kc[i].access = le32_to_cpu(ec->hdr.access);
+ 
+ 		/* we only support FL/FR channel mapping atm */
+ 		se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
+@@ -1420,8 +1422,8 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
+ 		se->shift_r = tplc_chan_get_shift(tplg, ec->channel,
+ 						  SNDRV_CHMAP_FR);
+ 
+-		se->items = ec->items;
+-		se->mask = ec->mask;
++		se->items = le32_to_cpu(ec->items);
++		se->mask = le32_to_cpu(ec->mask);
+ 		se->dobj.index = tplg->index;
+ 
+ 		switch (le32_to_cpu(ec->hdr.ops.info)) {
+@@ -1523,9 +1525,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
+ 		if (kc[i].name == NULL)
+ 			goto err_sbe;
+ 		kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+-		kc[i].access = be->hdr.access;
++		kc[i].access = le32_to_cpu(be->hdr.access);
+ 
+-		sbe->max = be->max;
++		sbe->max = le32_to_cpu(be->max);
+ 		INIT_LIST_HEAD(&sbe->dobj.list);
+ 
+ 		/* map standard io handlers and check for external handlers */
+diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
+index 7daa8eb456c8..6f45e14f2b2e 100644
+--- a/sound/soc/sof/intel/apl.c
++++ b/sound/soc/sof/intel/apl.c
+@@ -41,7 +41,6 @@ const struct snd_sof_dsp_ops sof_apl_ops = {
+ 	.block_write	= sof_block_write,
+ 
+ 	/* doorbell */
+-	.irq_handler	= hda_dsp_ipc_irq_handler,
+ 	.irq_thread	= hda_dsp_ipc_irq_thread,
+ 
+ 	/* ipc */
+diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
+index 0e1e265f3f3b..9bd169e2691e 100644
+--- a/sound/soc/sof/intel/cnl.c
++++ b/sound/soc/sof/intel/cnl.c
+@@ -106,10 +106,6 @@ static irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
+ 				    "nothing to do in IPC IRQ thread\n");
+ 	}
+ 
+-	/* re-enable IPC interrupt */
+-	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+-				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
+-
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -231,7 +227,6 @@ const struct snd_sof_dsp_ops sof_cnl_ops = {
+ 	.block_write	= sof_block_write,
+ 
+ 	/* doorbell */
+-	.irq_handler	= hda_dsp_ipc_irq_handler,
+ 	.irq_thread	= cnl_ipc_irq_thread,
+ 
+ 	/* ipc */
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index 896d21984b73..1923b0c36bce 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -261,14 +261,11 @@ static int hda_link_pcm_prepare(struct snd_pcm_substream *substream,
+ {
+ 	struct hdac_ext_stream *link_dev =
+ 				snd_soc_dai_get_dma_data(dai, substream);
+-	struct sof_intel_hda_stream *hda_stream;
+ 	struct snd_sof_dev *sdev =
+ 				snd_soc_component_get_drvdata(dai->component);
+ 	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+ 	int stream = substream->stream;
+ 
+-	hda_stream = hstream_to_sof_hda_stream(link_dev);
+-
+ 	if (link_dev->link_prepared)
+ 		return 0;
+ 
+diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
+index 0fd2153c1769..1837f66e361f 100644
+--- a/sound/soc/sof/intel/hda-ipc.c
++++ b/sound/soc/sof/intel/hda-ipc.c
+@@ -230,22 +230,15 @@ irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
+ 				    "nothing to do in IPC IRQ thread\n");
+ 	}
+ 
+-	/* re-enable IPC interrupt */
+-	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+-				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
+-
+ 	return IRQ_HANDLED;
+ }
+ 
+-/* is this IRQ for ADSP ? - we only care about IPC here */
+-irqreturn_t hda_dsp_ipc_irq_handler(int irq, void *context)
++/* Check if an IPC IRQ occurred */
++bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev)
+ {
+-	struct snd_sof_dev *sdev = context;
+-	int ret = IRQ_NONE;
++	bool ret = false;
+ 	u32 irq_status;
+ 
+-	spin_lock(&sdev->hw_lock);
+-
+ 	/* store status */
+ 	irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
+ 	dev_vdbg(sdev->dev, "irq handler: irq_status:0x%x\n", irq_status);
+@@ -255,16 +248,10 @@ irqreturn_t hda_dsp_ipc_irq_handler(int irq, void *context)
+ 		goto out;
+ 
+ 	/* IPC message ? */
+-	if (irq_status & HDA_DSP_ADSPIS_IPC) {
+-		/* disable IPC interrupt */
+-		snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+-						 HDA_DSP_REG_ADSPIC,
+-						 HDA_DSP_ADSPIC_IPC, 0);
+-		ret = IRQ_WAKE_THREAD;
+-	}
++	if (irq_status & HDA_DSP_ADSPIS_IPC)
++		ret = true;
+ 
+ out:
+-	spin_unlock(&sdev->hw_lock);
+ 	return ret;
+ }
+ 
+diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
+index 29ab43281670..927a36f92c24 100644
+--- a/sound/soc/sof/intel/hda-stream.c
++++ b/sound/soc/sof/intel/hda-stream.c
+@@ -549,22 +549,23 @@ int hda_dsp_stream_hw_free(struct snd_sof_dev *sdev,
+ 	return 0;
+ }
+ 
+-irqreturn_t hda_dsp_stream_interrupt(int irq, void *context)
++bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev)
+ {
+-	struct hdac_bus *bus = context;
+-	int ret = IRQ_WAKE_THREAD;
++	struct hdac_bus *bus = sof_to_bus(sdev);
++	bool ret = false;
+ 	u32 status;
+ 
+-	spin_lock(&bus->reg_lock);
++	/* The function can be called at irq thread, so use spin_lock_irq */
++	spin_lock_irq(&bus->reg_lock);
+ 
+ 	status = snd_hdac_chip_readl(bus, INTSTS);
+ 	dev_vdbg(bus->dev, "stream irq, INTSTS status: 0x%x\n", status);
+ 
+-	/* Register inaccessible, ignore it.*/
+-	if (status == 0xffffffff)
+-		ret = IRQ_NONE;
++	/* if Register inaccessible, ignore it.*/
++	if (status != 0xffffffff)
++		ret = true;
+ 
+-	spin_unlock(&bus->reg_lock);
++	spin_unlock_irq(&bus->reg_lock);
+ 
+ 	return ret;
+ }
+@@ -602,7 +603,8 @@ static bool hda_dsp_stream_check(struct hdac_bus *bus, u32 status)
+ 
+ irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context)
+ {
+-	struct hdac_bus *bus = context;
++	struct snd_sof_dev *sdev = context;
++	struct hdac_bus *bus = sof_to_bus(sdev);
+ #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ 	u32 rirb_status;
+ #endif
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index fb17b87b684b..82ecadda886c 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -499,6 +499,49 @@ static const struct sof_intel_dsp_desc
+ 	return chip_info;
+ }
+ 
++static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context)
++{
++	struct snd_sof_dev *sdev = context;
++
++	/*
++	 * Get global interrupt status. It includes all hardware interrupt
++	 * sources in the Intel HD Audio controller.
++	 */
++	if (snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS) &
++	    SOF_HDA_INTSTS_GIS) {
++
++		/* disable GIE interrupt */
++		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
++					SOF_HDA_INTCTL,
++					SOF_HDA_INT_GLOBAL_EN,
++					0);
++
++		return IRQ_WAKE_THREAD;
++	}
++
++	return IRQ_NONE;
++}
++
++static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
++{
++	struct snd_sof_dev *sdev = context;
++
++	/* deal with streams and controller first */
++	if (hda_dsp_check_stream_irq(sdev))
++		hda_dsp_stream_threaded_handler(irq, sdev);
++
++	if (hda_dsp_check_ipc_irq(sdev))
++		sof_ops(sdev)->irq_thread(irq, sdev);
++
++	/* enable GIE interrupt */
++	snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
++				SOF_HDA_INTCTL,
++				SOF_HDA_INT_GLOBAL_EN,
++				SOF_HDA_INT_GLOBAL_EN);
++
++	return IRQ_HANDLED;
++}
++
+ int hda_dsp_probe(struct snd_sof_dev *sdev)
+ {
+ 	struct pci_dev *pci = to_pci_dev(sdev->dev);
+@@ -603,9 +646,7 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
+ 	 */
+ 	if (hda_use_msi && pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI) > 0) {
+ 		dev_info(sdev->dev, "use msi interrupt mode\n");
+-		hdev->irq = pci_irq_vector(pci, 0);
+-		/* ipc irq number is the same of hda irq */
+-		sdev->ipc_irq = hdev->irq;
++		sdev->ipc_irq = pci_irq_vector(pci, 0);
+ 		/* initialised to "false" by kzalloc() */
+ 		sdev->msi_enabled = true;
+ 	}
+@@ -616,28 +657,17 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
+ 		 * in IO-APIC mode, hda->irq and ipc_irq are using the same
+ 		 * irq number of pci->irq
+ 		 */
+-		hdev->irq = pci->irq;
+ 		sdev->ipc_irq = pci->irq;
+ 	}
+ 
+-	dev_dbg(sdev->dev, "using HDA IRQ %d\n", hdev->irq);
+-	ret = request_threaded_irq(hdev->irq, hda_dsp_stream_interrupt,
+-				   hda_dsp_stream_threaded_handler,
+-				   IRQF_SHARED, "AudioHDA", bus);
+-	if (ret < 0) {
+-		dev_err(sdev->dev, "error: failed to register HDA IRQ %d\n",
+-			hdev->irq);
+-		goto free_irq_vector;
+-	}
+-
+ 	dev_dbg(sdev->dev, "using IPC IRQ %d\n", sdev->ipc_irq);
+-	ret = request_threaded_irq(sdev->ipc_irq, hda_dsp_ipc_irq_handler,
+-				   sof_ops(sdev)->irq_thread, IRQF_SHARED,
+-				   "AudioDSP", sdev);
++	ret = request_threaded_irq(sdev->ipc_irq, hda_dsp_interrupt_handler,
++				   hda_dsp_interrupt_thread,
++				   IRQF_SHARED, "AudioDSP", sdev);
+ 	if (ret < 0) {
+ 		dev_err(sdev->dev, "error: failed to register IPC IRQ %d\n",
+ 			sdev->ipc_irq);
+-		goto free_hda_irq;
++		goto free_irq_vector;
+ 	}
+ 
+ 	pci_set_master(pci);
+@@ -668,8 +698,6 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
+ 
+ free_ipc_irq:
+ 	free_irq(sdev->ipc_irq, sdev);
+-free_hda_irq:
+-	free_irq(hdev->irq, bus);
+ free_irq_vector:
+ 	if (sdev->msi_enabled)
+ 		pci_free_irq_vectors(pci);
+@@ -715,7 +743,6 @@ int hda_dsp_remove(struct snd_sof_dev *sdev)
+ 				SOF_HDA_PPCTL_GPROCEN, 0);
+ 
+ 	free_irq(sdev->ipc_irq, sdev);
+-	free_irq(hda->irq, bus);
+ 	if (sdev->msi_enabled)
+ 		pci_free_irq_vectors(pci);
+ 
+diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
+index 18d7e72bf9b7..de0115294c74 100644
+--- a/sound/soc/sof/intel/hda.h
++++ b/sound/soc/sof/intel/hda.h
+@@ -43,11 +43,14 @@
+ /* SOF_HDA_GCTL register bist */
+ #define SOF_HDA_GCTL_RESET		BIT(0)
+ 
+-/* SOF_HDA_INCTL and SOF_HDA_INTSTS regs */
++/* SOF_HDA_INCTL regs */
+ #define SOF_HDA_INT_GLOBAL_EN		BIT(31)
+ #define SOF_HDA_INT_CTRL_EN		BIT(30)
+ #define SOF_HDA_INT_ALL_STREAM		0xff
+ 
++/* SOF_HDA_INTSTS regs */
++#define SOF_HDA_INTSTS_GIS		BIT(31)
++
+ #define SOF_HDA_MAX_CAPS		10
+ #define SOF_HDA_CAP_ID_OFF		16
+ #define SOF_HDA_CAP_ID_MASK		GENMASK(SOF_HDA_CAP_ID_OFF + 11,\
+@@ -345,7 +348,7 @@
+ 
+ /* Number of DAIs */
+ #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+-#define SOF_SKL_NUM_DAIS		14
++#define SOF_SKL_NUM_DAIS		15
+ #else
+ #define SOF_SKL_NUM_DAIS		8
+ #endif
+@@ -406,8 +409,6 @@ struct sof_intel_hda_dev {
+ 	/* the maximum number of streams (playback + capture) supported */
+ 	u32 stream_max;
+ 
+-	int irq;
+-
+ 	/* PM related */
+ 	bool l1_support_changed;/* during suspend, is L1SEN changed or not */
+ 
+@@ -511,11 +512,12 @@ int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
+ 			     struct snd_pcm_hw_params *params);
+ int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
+ 			   struct hdac_ext_stream *stream, int cmd);
+-irqreturn_t hda_dsp_stream_interrupt(int irq, void *context);
+ irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context);
+ int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
+ 			     struct snd_dma_buffer *dmab,
+ 			     struct hdac_stream *stream);
++bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev);
++bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev);
+ 
+ struct hdac_ext_stream *
+ 	hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction);
+@@ -540,7 +542,6 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev);
+ int hda_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev);
+ int hda_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id);
+ 
+-irqreturn_t hda_dsp_ipc_irq_handler(int irq, void *context);
+ irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context);
+ int hda_dsp_ipc_cmd_done(struct snd_sof_dev *sdev, int dir);
+ 
+diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
+index 33c78d33e5a1..9a55926ebf07 100644
+--- a/sound/soc/txx9/txx9aclc.c
++++ b/sound/soc/txx9/txx9aclc.c
+@@ -51,7 +51,6 @@ static int txx9aclc_pcm_hw_params(struct snd_soc_component *component,
+ 				  struct snd_pcm_substream *substream,
+ 				  struct snd_pcm_hw_params *params)
+ {
+-	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct txx9aclc_dmadata *dmadata = runtime->private_data;
+ 	int ret;
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 9f743ebae615..827fb0bc8b56 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -600,6 +600,10 @@ static int usb_audio_probe(struct usb_interface *intf,
+ 		}
+ 	}
+ 	if (! chip) {
++		err = snd_usb_apply_boot_quirk_once(dev, intf, quirk, id);
++		if (err < 0)
++			goto __error;
++
+ 		/* it's a fresh one.
+ 		 * now look for an empty slot and create a new card instance
+ 		 */
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 25668ba5e68e..f4f0cf3deaf0 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -296,6 +296,9 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
+ 	case USB_ID(0x0E41, 0x4242): /* Line6 Helix Rack */
+ 	case USB_ID(0x0E41, 0x4244): /* Line6 Helix LT */
+ 	case USB_ID(0x0E41, 0x4246): /* Line6 HX-Stomp */
++	case USB_ID(0x0E41, 0x4248): /* Line6 Helix >= fw 2.82 */
++	case USB_ID(0x0E41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
++	case USB_ID(0x0E41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
+ 		/* supported rates: 48Khz */
+ 		kfree(fp->rate_table);
+ 		fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 0e4eab96e23e..c9e1609296df 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -348,6 +348,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ 		ep = 0x84;
+ 		ifnum = 0;
+ 		goto add_sync_ep_from_ifnum;
++	case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
++		ep = 0x81;
++		ifnum = 2;
++		goto add_sync_ep_from_ifnum;
+ 	case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
+ 		/* BOSS Katana amplifiers do not need quirks */
+ 		return 0;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 1ed25b1d2a6a..7448ab07bd36 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1113,6 +1113,31 @@ free_buf:
+ 	return err;
+ }
+ 
++static int snd_usb_motu_m_series_boot_quirk(struct usb_device *dev)
++{
++	int ret;
++
++	if (snd_usb_pipe_sanity_check(dev, usb_sndctrlpipe(dev, 0)))
++		return -EINVAL;
++	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
++			      1, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
++			      0x0, 0, NULL, 0, 1000);
++
++	if (ret < 0)
++		return ret;
++
++	msleep(2000);
++
++	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
++			      1, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
++			      0x20, 0, NULL, 0, 1000);
++
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
+ /*
+  * Setup quirks
+  */
+@@ -1297,6 +1322,19 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
+ 	return 0;
+ }
+ 
++int snd_usb_apply_boot_quirk_once(struct usb_device *dev,
++				  struct usb_interface *intf,
++				  const struct snd_usb_audio_quirk *quirk,
++				  unsigned int id)
++{
++	switch (id) {
++	case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
++		return snd_usb_motu_m_series_boot_quirk(dev);
++	}
++
++	return 0;
++}
++
+ /*
+  * check if the device uses big-endian samples
+  */
+diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h
+index a80e0ddd0736..df0355843a4c 100644
+--- a/sound/usb/quirks.h
++++ b/sound/usb/quirks.h
+@@ -20,6 +20,11 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
+ 			     const struct snd_usb_audio_quirk *quirk,
+ 			     unsigned int usb_id);
+ 
++int snd_usb_apply_boot_quirk_once(struct usb_device *dev,
++				  struct usb_interface *intf,
++				  const struct snd_usb_audio_quirk *quirk,
++				  unsigned int usb_id);
++
+ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+ 			      struct audioformat *fmt);
+ 
+diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
+index d1caa8ed9e68..9985fc139487 100644
+--- a/sound/usb/usx2y/usX2Yhwdep.c
++++ b/sound/usb/usx2y/usX2Yhwdep.c
+@@ -119,7 +119,7 @@ static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
+ 	info->num_dsps = 2;		// 0: Prepad Data, 1: FPGA Code
+ 	if (us428->chip_status & USX2Y_STAT_CHIP_INIT)
+ 		info->chip_ready = 1;
+- 	info->version = USX2Y_DRIVER_VERSION; 
++	info->version = USX2Y_DRIVER_VERSION;
+ 	return 0;
+ }
+ 
+diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
+index 8908c58bd6cd..53adc1762ec0 100644
+--- a/tools/arch/x86/lib/x86-opcode-map.txt
++++ b/tools/arch/x86/lib/x86-opcode-map.txt
+@@ -929,7 +929,7 @@ EndTable
+ 
+ GrpTable: Grp3_2
+ 0: TEST Ev,Iz
+-1:
++1: TEST Ev,Iz
+ 2: NOT Ev
+ 3: NEG Ev
+ 4: MUL rAX,Ev
+diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
+index 1ef45e55039e..2f017caa678d 100644
+--- a/tools/bpf/bpftool/cgroup.c
++++ b/tools/bpf/bpftool/cgroup.c
+@@ -117,6 +117,25 @@ static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+ 	return prog_cnt;
+ }
+ 
++static int cgroup_has_attached_progs(int cgroup_fd)
++{
++	enum bpf_attach_type type;
++	bool no_prog = true;
++
++	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
++		int count = count_attached_bpf_progs(cgroup_fd, type);
++
++		if (count < 0 && errno != EINVAL)
++			return -1;
++
++		if (count > 0) {
++			no_prog = false;
++			break;
++		}
++	}
++
++	return no_prog ? 0 : 1;
++}
+ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+ 				   int level)
+ {
+@@ -161,6 +180,7 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+ static int do_show(int argc, char **argv)
+ {
+ 	enum bpf_attach_type type;
++	int has_attached_progs;
+ 	const char *path;
+ 	int cgroup_fd;
+ 	int ret = -1;
+@@ -192,6 +212,16 @@ static int do_show(int argc, char **argv)
+ 		goto exit;
+ 	}
+ 
++	has_attached_progs = cgroup_has_attached_progs(cgroup_fd);
++	if (has_attached_progs < 0) {
++		p_err("can't query bpf programs attached to %s: %s",
++		      path, strerror(errno));
++		goto exit_cgroup;
++	} else if (!has_attached_progs) {
++		ret = 0;
++		goto exit_cgroup;
++	}
++
+ 	if (json_output)
+ 		jsonw_start_array(json_wtr);
+ 	else
+@@ -212,6 +242,7 @@ static int do_show(int argc, char **argv)
+ 	if (json_output)
+ 		jsonw_end_array(json_wtr);
+ 
++exit_cgroup:
+ 	close(cgroup_fd);
+ exit:
+ 	return ret;
+@@ -228,7 +259,7 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
+ 			   int typeflag, struct FTW *ftw)
+ {
+ 	enum bpf_attach_type type;
+-	bool skip = true;
++	int has_attached_progs;
+ 	int cgroup_fd;
+ 
+ 	if (typeflag != FTW_D)
+@@ -240,22 +271,13 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
+ 		return SHOW_TREE_FN_ERR;
+ 	}
+ 
+-	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+-		int count = count_attached_bpf_progs(cgroup_fd, type);
+-
+-		if (count < 0 && errno != EINVAL) {
+-			p_err("can't query bpf programs attached to %s: %s",
+-			      fpath, strerror(errno));
+-			close(cgroup_fd);
+-			return SHOW_TREE_FN_ERR;
+-		}
+-		if (count > 0) {
+-			skip = false;
+-			break;
+-		}
+-	}
+-
+-	if (skip) {
++	has_attached_progs = cgroup_has_attached_progs(cgroup_fd);
++	if (has_attached_progs < 0) {
++		p_err("can't query bpf programs attached to %s: %s",
++		      fpath, strerror(errno));
++		close(cgroup_fd);
++		return SHOW_TREE_FN_ERR;
++	} else if (!has_attached_progs) {
+ 		close(cgroup_fd);
+ 		return 0;
+ 	}
+diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
+index 11b3885e833e..027b18f7ed8c 100644
+--- a/tools/lib/api/fs/fs.c
++++ b/tools/lib/api/fs/fs.c
+@@ -210,6 +210,7 @@ static bool fs__env_override(struct fs *fs)
+ 	size_t name_len = strlen(fs->name);
+ 	/* name + "_PATH" + '\0' */
+ 	char upper_name[name_len + 5 + 1];
++
+ 	memcpy(upper_name, fs->name, name_len);
+ 	mem_toupper(upper_name, name_len);
+ 	strcpy(&upper_name[name_len], "_PATH");
+@@ -219,7 +220,8 @@ static bool fs__env_override(struct fs *fs)
+ 		return false;
+ 
+ 	fs->found = true;
+-	strncpy(fs->path, override_path, sizeof(fs->path));
++	strncpy(fs->path, override_path, sizeof(fs->path) - 1);
++	fs->path[sizeof(fs->path) - 1] = '\0';
+ 	return true;
+ }
+ 
+diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
+index d2a19b0bc05a..ee08aeff30a1 100644
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -2,10 +2,6 @@
+ include ../scripts/Makefile.include
+ include ../scripts/Makefile.arch
+ 
+-ifeq ($(ARCH),x86_64)
+-ARCH := x86
+-endif
+-
+ # always use the host compiler
+ HOSTAR	?= ar
+ HOSTCC	?= gcc
+@@ -33,7 +29,7 @@ all: $(OBJTOOL)
+ 
+ INCLUDES := -I$(srctree)/tools/include \
+ 	    -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+-	    -I$(srctree)/tools/arch/$(ARCH)/include
++	    -I$(srctree)/tools/arch/$(SRCARCH)/include
+ WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
+ CFLAGS   := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+ LDFLAGS  += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
+diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c
+index 7566c13eb51a..079d0f5a2909 100644
+--- a/tools/testing/selftests/bpf/test_select_reuseport.c
++++ b/tools/testing/selftests/bpf/test_select_reuseport.c
+@@ -30,7 +30,7 @@
+ #define REUSEPORT_ARRAY_SIZE 32
+ 
+ static int result_map, tmp_index_ovr_map, linum_map, data_check_map;
+-static enum result expected_results[NR_RESULTS];
++static __u32 expected_results[NR_RESULTS];
+ static int sk_fds[REUSEPORT_ARRAY_SIZE];
+ static int reuseport_array, outer_map;
+ static int select_by_skb_data_prog;
+@@ -662,7 +662,19 @@ static void setup_per_test(int type, unsigned short family, bool inany)
+ 
+ static void cleanup_per_test(void)
+ {
+-	int i, err;
++	int i, err, zero = 0;
++
++	memset(expected_results, 0, sizeof(expected_results));
++
++	for (i = 0; i < NR_RESULTS; i++) {
++		err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY);
++		RET_IF(err, "reset elem in result_map",
++		       "i:%u err:%d errno:%d\n", i, err, errno);
++	}
++
++	err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY);
++	RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n",
++	       err, errno);
+ 
+ 	for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++)
+ 		close(sk_fds[i]);
+diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
+index c5ca669feb2b..e19ce940cd6a 100644
+--- a/tools/testing/selftests/cgroup/test_core.c
++++ b/tools/testing/selftests/cgroup/test_core.c
+@@ -369,7 +369,7 @@ static void *dummy_thread_fn(void *arg)
+ static int test_cgcore_proc_migration(const char *root)
+ {
+ 	int ret = KSFT_FAIL;
+-	int t, c_threads, n_threads = 13;
++	int t, c_threads = 0, n_threads = 13;
+ 	char *src = NULL, *dst = NULL;
+ 	pthread_t threads[n_threads];
+ 
+diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
+index a8d20cbb711c..e84d901f8567 100644
+--- a/tools/testing/selftests/kselftest/runner.sh
++++ b/tools/testing/selftests/kselftest/runner.sh
+@@ -91,7 +91,7 @@ run_one()
+ run_many()
+ {
+ 	echo "TAP version 13"
+-	DIR=$(basename "$PWD")
++	DIR="${PWD#${BASE_DIR}/}"
+ 	test_num=0
+ 	total=$(echo "$@" | wc -w)
+ 	echo "1..$total"
+diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
+index 34df4c8882af..383bac05ac32 100644
+--- a/tools/testing/selftests/net/so_txtime.c
++++ b/tools/testing/selftests/net/so_txtime.c
+@@ -12,7 +12,11 @@
+ #include <arpa/inet.h>
+ #include <error.h>
+ #include <errno.h>
++#include <inttypes.h>
+ #include <linux/net_tstamp.h>
++#include <linux/errqueue.h>
++#include <linux/ipv6.h>
++#include <linux/tcp.h>
+ #include <stdbool.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+@@ -28,7 +32,7 @@ static int	cfg_clockid	= CLOCK_TAI;
+ static bool	cfg_do_ipv4;
+ static bool	cfg_do_ipv6;
+ static uint16_t	cfg_port	= 8000;
+-static int	cfg_variance_us	= 2000;
++static int	cfg_variance_us	= 4000;
+ 
+ static uint64_t glob_tstart;
+ 
+@@ -43,6 +47,9 @@ static struct timed_send cfg_in[MAX_NUM_PKT];
+ static struct timed_send cfg_out[MAX_NUM_PKT];
+ static int cfg_num_pkt;
+ 
++static int cfg_errq_level;
++static int cfg_errq_type;
++
+ static uint64_t gettime_ns(void)
+ {
+ 	struct timespec ts;
+@@ -90,13 +97,15 @@ static void do_send_one(int fdt, struct timed_send *ts)
+ 
+ }
+ 
+-static void do_recv_one(int fdr, struct timed_send *ts)
++static bool do_recv_one(int fdr, struct timed_send *ts)
+ {
+ 	int64_t tstop, texpect;
+ 	char rbuf[2];
+ 	int ret;
+ 
+ 	ret = recv(fdr, rbuf, sizeof(rbuf), 0);
++	if (ret == -1 && errno == EAGAIN)
++		return true;
+ 	if (ret == -1)
+ 		error(1, errno, "read");
+ 	if (ret != 1)
+@@ -113,6 +122,8 @@ static void do_recv_one(int fdr, struct timed_send *ts)
+ 
+ 	if (labs(tstop - texpect) > cfg_variance_us)
+ 		error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
++
++	return false;
+ }
+ 
+ static void do_recv_verify_empty(int fdr)
+@@ -125,12 +136,70 @@ static void do_recv_verify_empty(int fdr)
+ 		error(1, 0, "recv: not empty as expected (%d, %d)", ret, errno);
+ }
+ 
++static void do_recv_errqueue_timeout(int fdt)
++{
++	char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
++		     CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
++	char data[sizeof(struct ipv6hdr) +
++		  sizeof(struct tcphdr) + 1];
++	struct sock_extended_err *err;
++	struct msghdr msg = {0};
++	struct iovec iov = {0};
++	struct cmsghdr *cm;
++	int64_t tstamp = 0;
++	int ret;
++
++	iov.iov_base = data;
++	iov.iov_len = sizeof(data);
++
++	msg.msg_iov = &iov;
++	msg.msg_iovlen = 1;
++
++	msg.msg_control = control;
++	msg.msg_controllen = sizeof(control);
++
++	while (1) {
++		ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
++		if (ret == -1 && errno == EAGAIN)
++			break;
++		if (ret == -1)
++			error(1, errno, "errqueue");
++		if (msg.msg_flags != MSG_ERRQUEUE)
++			error(1, 0, "errqueue: flags 0x%x\n", msg.msg_flags);
++
++		cm = CMSG_FIRSTHDR(&msg);
++		if (cm->cmsg_level != cfg_errq_level ||
++		    cm->cmsg_type != cfg_errq_type)
++			error(1, 0, "errqueue: type 0x%x.0x%x\n",
++				    cm->cmsg_level, cm->cmsg_type);
++
++		err = (struct sock_extended_err *)CMSG_DATA(cm);
++		if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
++			error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);
++		if (err->ee_code != ECANCELED)
++			error(1, 0, "errqueue: code 0x%x\n", err->ee_code);
++
++		tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
++		tstamp -= (int64_t) glob_tstart;
++		tstamp /= 1000 * 1000;
++		fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n",
++				data[ret - 1], tstamp);
++
++		msg.msg_flags = 0;
++		msg.msg_controllen = sizeof(control);
++	}
++
++	error(1, 0, "recv: timeout");
++}
++
+ static void setsockopt_txtime(int fd)
+ {
+ 	struct sock_txtime so_txtime_val = { .clockid = cfg_clockid };
+ 	struct sock_txtime so_txtime_val_read = { 0 };
+ 	socklen_t vallen = sizeof(so_txtime_val);
+ 
++	so_txtime_val.flags = SOF_TXTIME_REPORT_ERRORS;
++
+ 	if (setsockopt(fd, SOL_SOCKET, SO_TXTIME,
+ 		       &so_txtime_val, sizeof(so_txtime_val)))
+ 		error(1, errno, "setsockopt txtime");
+@@ -194,7 +263,8 @@ static void do_test(struct sockaddr *addr, socklen_t alen)
+ 	for (i = 0; i < cfg_num_pkt; i++)
+ 		do_send_one(fdt, &cfg_in[i]);
+ 	for (i = 0; i < cfg_num_pkt; i++)
+-		do_recv_one(fdr, &cfg_out[i]);
++		if (do_recv_one(fdr, &cfg_out[i]))
++			do_recv_errqueue_timeout(fdt);
+ 
+ 	do_recv_verify_empty(fdr);
+ 
+@@ -280,6 +350,10 @@ int main(int argc, char **argv)
+ 		addr6.sin6_family = AF_INET6;
+ 		addr6.sin6_port = htons(cfg_port);
+ 		addr6.sin6_addr = in6addr_loopback;
++
++		cfg_errq_level = SOL_IPV6;
++		cfg_errq_type = IPV6_RECVERR;
++
+ 		do_test((void *)&addr6, sizeof(addr6));
+ 	}
+ 
+@@ -289,6 +363,10 @@ int main(int argc, char **argv)
+ 		addr4.sin_family = AF_INET;
+ 		addr4.sin_port = htons(cfg_port);
+ 		addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
++
++		cfg_errq_level = SOL_IP;
++		cfg_errq_type = IP_RECVERR;
++
+ 		do_test((void *)&addr4, sizeof(addr4));
+ 	}
+ 
+diff --git a/tools/testing/selftests/net/so_txtime.sh b/tools/testing/selftests/net/so_txtime.sh
+index 5aa519328a5b..3f7800eaecb1 100755
+--- a/tools/testing/selftests/net/so_txtime.sh
++++ b/tools/testing/selftests/net/so_txtime.sh
+@@ -5,7 +5,12 @@
+ 
+ # Run in network namespace
+ if [[ $# -eq 0 ]]; then
+-	./in_netns.sh $0 __subprocess
++	if ! ./in_netns.sh $0 __subprocess; then
++		# test is time sensitive, can be flaky
++		echo "test failed: retry once"
++		./in_netns.sh $0 __subprocess
++	fi
++
+ 	exit $?
+ fi
+ 
+@@ -18,7 +23,7 @@ tc qdisc add dev lo root fq
+ ./so_txtime -4 -6 -c mono a,10,b,20 a,10,b,20
+ ./so_txtime -4 -6 -c mono a,20,b,10 b,20,a,20
+ 
+-if tc qdisc replace dev lo root etf clockid CLOCK_TAI delta 200000; then
++if tc qdisc replace dev lo root etf clockid CLOCK_TAI delta 400000; then
+ 	! ./so_txtime -4 -6 -c tai a,-1 a,-1
+ 	! ./so_txtime -4 -6 -c tai a,0 a,0
+ 	./so_txtime -4 -6 -c tai a,10 a,10
+diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
+index 26112ab5cdf4..f52ed92b53e7 100755
+--- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
++++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
+@@ -53,9 +53,13 @@ eeh_one_dev() {
+ 	# is a no-op.
+ 	echo $dev >/sys/kernel/debug/powerpc/eeh_dev_check
+ 
+-	# Enforce a 30s timeout for recovery. Even the IPR, which is infamously
+-	# slow to reset, should recover within 30s.
+-	max_wait=30
++	# Default to a 60s timeout when waiting for a device to recover. This
++	# is an arbitrary default which can be overridden by setting the
++	# EEH_MAX_WAIT environmental variable when required.
++
++	# The current record holder for longest recovery time is:
++	#  "Adaptec Series 8 12G SAS/PCIe 3" at 39 seconds
++	max_wait=${EEH_MAX_WAIT:=60}
+ 
+ 	for i in `seq 0 ${max_wait}` ; do
+ 		if pe_ok $dev ; then
+diff --git a/tools/testing/selftests/size/get_size.c b/tools/testing/selftests/size/get_size.c
+index 2ad45b944355..2980b1a63366 100644
+--- a/tools/testing/selftests/size/get_size.c
++++ b/tools/testing/selftests/size/get_size.c
+@@ -11,23 +11,35 @@
+  * own execution.  It also attempts to have as few dependencies
+  * on kernel features as possible.
+  *
+- * It should be statically linked, with startup libs avoided.
+- * It uses no library calls, and only the following 3 syscalls:
++ * It should be statically linked, with startup libs avoided.  It uses
++ * no library calls except the syscall() function for the following 3
++ * syscalls:
+  *   sysinfo(), write(), and _exit()
+  *
+  * For output, it avoids printf (which in some C libraries
+  * has large external dependencies) by  implementing it's own
+  * number output and print routines, and using __builtin_strlen()
++ *
++ * The test may crash if any of the above syscalls fails because in some
++ * libc implementations (e.g. the GNU C Library) errno is saved in
++ * thread-local storage, which does not get initialized due to avoiding
++ * startup libs.
+  */
+ 
+ #include <sys/sysinfo.h>
+ #include <unistd.h>
++#include <sys/syscall.h>
+ 
+ #define STDOUT_FILENO 1
+ 
+ static int print(const char *s)
+ {
+-	return write(STDOUT_FILENO, s, __builtin_strlen(s));
++	size_t len = 0;
++
++	while (s[len] != '\0')
++		len++;
++
++	return syscall(SYS_write, STDOUT_FILENO, s, len);
+ }
+ 
+ static inline char *num_to_str(unsigned long num, char *buf, int len)
+@@ -79,12 +91,12 @@ void _start(void)
+ 	print("TAP version 13\n");
+ 	print("# Testing system size.\n");
+ 
+-	ccode = sysinfo(&info);
++	ccode = syscall(SYS_sysinfo, &info);
+ 	if (ccode < 0) {
+ 		print("not ok 1");
+ 		print(test_name);
+ 		print(" ---\n reason: \"could not get sysinfo\"\n ...\n");
+-		_exit(ccode);
++		syscall(SYS_exit, ccode);
+ 	}
+ 	print("ok 1");
+ 	print(test_name);
+@@ -100,5 +112,5 @@ void _start(void)
+ 	print(" ...\n");
+ 	print("1..1\n");
+ 
+-	_exit(0);
++	syscall(SYS_exit, 0);
+ }
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
+index 2e361cea63bc..98a20faf3198 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
+@@ -6,6 +6,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -25,6 +28,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -44,6 +50,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -63,6 +72,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -82,6 +94,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -101,6 +116,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -120,6 +138,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -139,6 +160,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -158,6 +182,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -177,6 +204,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -196,6 +226,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -215,6 +248,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -234,6 +270,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -253,6 +292,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -272,6 +314,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -291,6 +336,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+@@ -310,6 +358,9 @@
+             "filter",
+             "basic"
+         ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+diff --git a/tools/usb/usbip/src/usbip_network.c b/tools/usb/usbip/src/usbip_network.c
+index d595d72693fb..ed4dc8c14269 100644
+--- a/tools/usb/usbip/src/usbip_network.c
++++ b/tools/usb/usbip/src/usbip_network.c
+@@ -50,39 +50,39 @@ void usbip_setup_port_number(char *arg)
+ 	info("using port %d (\"%s\")", usbip_port, usbip_port_string);
+ }
+ 
+-void usbip_net_pack_uint32_t(int pack, uint32_t *num)
++uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num)
+ {
+ 	uint32_t i;
+ 
+ 	if (pack)
+-		i = htonl(*num);
++		i = htonl(num);
+ 	else
+-		i = ntohl(*num);
++		i = ntohl(num);
+ 
+-	*num = i;
++	return i;
+ }
+ 
+-void usbip_net_pack_uint16_t(int pack, uint16_t *num)
++uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num)
+ {
+ 	uint16_t i;
+ 
+ 	if (pack)
+-		i = htons(*num);
++		i = htons(num);
+ 	else
+-		i = ntohs(*num);
++		i = ntohs(num);
+ 
+-	*num = i;
++	return i;
+ }
+ 
+ void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev)
+ {
+-	usbip_net_pack_uint32_t(pack, &udev->busnum);
+-	usbip_net_pack_uint32_t(pack, &udev->devnum);
+-	usbip_net_pack_uint32_t(pack, &udev->speed);
++	udev->busnum = usbip_net_pack_uint32_t(pack, udev->busnum);
++	udev->devnum = usbip_net_pack_uint32_t(pack, udev->devnum);
++	udev->speed = usbip_net_pack_uint32_t(pack, udev->speed);
+ 
+-	usbip_net_pack_uint16_t(pack, &udev->idVendor);
+-	usbip_net_pack_uint16_t(pack, &udev->idProduct);
+-	usbip_net_pack_uint16_t(pack, &udev->bcdDevice);
++	udev->idVendor = usbip_net_pack_uint16_t(pack, udev->idVendor);
++	udev->idProduct = usbip_net_pack_uint16_t(pack, udev->idProduct);
++	udev->bcdDevice = usbip_net_pack_uint16_t(pack, udev->bcdDevice);
+ }
+ 
+ void usbip_net_pack_usb_interface(int pack __attribute__((unused)),
+@@ -129,6 +129,14 @@ ssize_t usbip_net_send(int sockfd, void *buff, size_t bufflen)
+ 	return usbip_net_xmit(sockfd, buff, bufflen, 1);
+ }
+ 
++static inline void usbip_net_pack_op_common(int pack,
++					    struct op_common *op_common)
++{
++	op_common->version = usbip_net_pack_uint16_t(pack, op_common->version);
++	op_common->code = usbip_net_pack_uint16_t(pack, op_common->code);
++	op_common->status = usbip_net_pack_uint32_t(pack, op_common->status);
++}
++
+ int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status)
+ {
+ 	struct op_common op_common;
+@@ -140,7 +148,7 @@ int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status)
+ 	op_common.code    = code;
+ 	op_common.status  = status;
+ 
+-	PACK_OP_COMMON(1, &op_common);
++	usbip_net_pack_op_common(1, &op_common);
+ 
+ 	rc = usbip_net_send(sockfd, &op_common, sizeof(op_common));
+ 	if (rc < 0) {
+@@ -164,7 +172,7 @@ int usbip_net_recv_op_common(int sockfd, uint16_t *code, int *status)
+ 		goto err;
+ 	}
+ 
+-	PACK_OP_COMMON(0, &op_common);
++	usbip_net_pack_op_common(0, &op_common);
+ 
+ 	if (op_common.version != USBIP_VERSION) {
+ 		err("USBIP Kernel and tool version mismatch: %d %d:",
+diff --git a/tools/usb/usbip/src/usbip_network.h b/tools/usb/usbip/src/usbip_network.h
+index 555215eae43e..83b4c5344f72 100644
+--- a/tools/usb/usbip/src/usbip_network.h
++++ b/tools/usb/usbip/src/usbip_network.h
+@@ -32,12 +32,6 @@ struct op_common {
+ 
+ } __attribute__((packed));
+ 
+-#define PACK_OP_COMMON(pack, op_common)  do {\
+-	usbip_net_pack_uint16_t(pack, &(op_common)->version);\
+-	usbip_net_pack_uint16_t(pack, &(op_common)->code);\
+-	usbip_net_pack_uint32_t(pack, &(op_common)->status);\
+-} while (0)
+-
+ /* ---------------------------------------------------------------------- */
+ /* Dummy Code */
+ #define OP_UNSPEC	0x00
+@@ -163,11 +157,11 @@ struct op_devlist_reply_extra {
+ } while (0)
+ 
+ #define PACK_OP_DEVLIST_REPLY(pack, reply)  do {\
+-	usbip_net_pack_uint32_t(pack, &(reply)->ndev);\
++	(reply)->ndev = usbip_net_pack_uint32_t(pack, (reply)->ndev);\
+ } while (0)
+ 
+-void usbip_net_pack_uint32_t(int pack, uint32_t *num);
+-void usbip_net_pack_uint16_t(int pack, uint16_t *num);
++uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num);
++uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num);
+ void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev);
+ void usbip_net_pack_usb_interface(int pack, struct usbip_usb_interface *uinf);
+ 


