public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-18 12:21 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-18 12:21 UTC (permalink / raw)
  To: gentoo-commits

commit:     b221d7caaa2b0581ee90c76956e47540959508ca
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 18 12:20:31 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 18 12:20:31 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b221d7ca

Update BMQ to -r1; add separate compilation fix

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 ...=> 5020_BMQ-and-PDS-io-scheduler-v5.12-r1.patch | 149 ++++++++-------------
 5022_BMQ-and-PDS-compilation-fix.patch             |  33 +++++
 2 files changed, 88 insertions(+), 94 deletions(-)

diff --git a/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v5.12-r1.patch
similarity index 99%
rename from 5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v5.12-r1.patch
index 7e92738..1060af5 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v5.12-r1.patch
@@ -831,10 +831,10 @@ index 5fc9c9b70862..eb6d7d87779f 100644
  obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..f69ed4d89395
+index 000000000000..c85e3ccf9302
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7149 @@
+@@ -0,0 +1,7138 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -889,7 +889,7 @@ index 000000000000..f69ed4d89395
 + */
 +EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
 +
-+#define ALT_SCHED_VERSION "v5.11-r3"
++#define ALT_SCHED_VERSION "v5.12-r1"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p)		rt_prio((p)->prio)
@@ -1934,8 +1934,6 @@ index 000000000000..f69ed4d89395
 +}
 +
 +#define SCA_CHECK		0x01
-+#define SCA_MIGRATE_DISABLE	0x02
-+#define SCA_MIGRATE_ENABLE	0x04
 +
 +#ifdef CONFIG_SMP
 +
@@ -1975,23 +1973,31 @@ index 000000000000..f69ed4d89395
 +	__set_task_cpu(p, new_cpu);
 +}
 +
-+static inline bool is_per_cpu_kthread(struct task_struct *p)
-+{
-+	return ((p->flags & PF_KTHREAD) && (1 == p->nr_cpus_allowed));
-+}
-+
 +#define MDF_FORCE_ENABLED	0x80
 +
 +static void
-+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
-+
-+static int __set_cpus_allowed_ptr(struct task_struct *p,
-+				  const struct cpumask *new_mask,
-+				  u32 flags);
++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	SCHED_WARN_ON(!p->on_cpu);
++	p->cpus_ptr = new_mask;
++}
 +
 +void migrate_disable(void)
 +{
 +	struct task_struct *p = current;
++	int cpu;
 +
 +	if (p->migration_disabled) {
 +		p->migration_disabled++;
@@ -1999,16 +2005,18 @@ index 000000000000..f69ed4d89395
 +	}
 +
 +	preempt_disable();
-+	this_rq()->nr_pinned++;
-+	p->migration_disabled = 1;
-+	p->migration_flags &= ~MDF_FORCE_ENABLED;
-+
-+	/*
-+	 * Violates locking rules! see comment in __do_set_cpus_allowed().
-+	 */
-+	if (p->cpus_ptr == &p->cpus_mask)
-+		__do_set_cpus_allowed(p, cpumask_of(smp_processor_id()), SCA_MIGRATE_DISABLE);
++	cpu = smp_processor_id();
++	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
++		cpu_rq(cpu)->nr_pinned++;
++		p->migration_disabled = 1;
++		p->migration_flags &= ~MDF_FORCE_ENABLED;
 +
++		/*
++		 * Violates locking rules! see comment in __do_set_cpus_ptr().
++		 */
++		if (p->cpus_ptr == &p->cpus_mask)
++			__do_set_cpus_ptr(p, cpumask_of(cpu));
++	}
 +	preempt_enable();
 +}
 +EXPORT_SYMBOL_GPL(migrate_disable);
@@ -2035,7 +2043,7 @@ index 000000000000..f69ed4d89395
 +	 */
 +	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
 +	if (p->cpus_ptr != &p->cpus_mask)
-+		__do_set_cpus_allowed(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
++		__do_set_cpus_ptr(p, &p->cpus_mask);
 +	/*
 +	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
 +	 * regular cpus_mask, otherwise things that race (eg.
@@ -2188,43 +2196,22 @@ index 000000000000..f69ed4d89395
 +}
 +
 +static inline void
-+set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+	if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
-+		p->cpus_ptr = new_mask;
-+		return;
-+	}
-+
 +	cpumask_copy(&p->cpus_mask, new_mask);
 +	p->nr_cpus_allowed = cpumask_weight(new_mask);
 +}
 +
 +static void
-+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+	/*
-+	 * This here violates the locking rules for affinity, since we're only
-+	 * supposed to change these variables while holding both rq->lock and
-+	 * p->pi_lock.
-+	 *
-+	 * HOWEVER, it magically works, because ttwu() is the only code that
-+	 * accesses these variables under p->pi_lock and only does so after
-+	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
-+	 * before finish_task().
-+	 *
-+	 * XXX do further audits, this smells like something putrid.
-+	 */
-+	if (flags & (SCA_MIGRATE_DISABLE | SCA_MIGRATE_ENABLE))
-+		SCHED_WARN_ON(!p->on_cpu);
-+	else
-+		lockdep_assert_held(&p->pi_lock);
-+
-+	set_cpus_allowed_common(p, new_mask, flags);
++	lockdep_assert_held(&p->pi_lock);
++	set_cpus_allowed_common(p, new_mask);
 +}
 +
 +void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+	__do_set_cpus_allowed(p, new_mask, 0);
++	__do_set_cpus_allowed(p, new_mask);
 +}
 +
 +#endif
@@ -2469,7 +2456,7 @@ index 000000000000..f69ed4d89395
 +{
 +	cpumask_t chk_mask, tmp;
 +
-+	if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_online_mask)))
++	if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_active_mask)))
 +		return select_fallback_rq(task_cpu(p), p);
 +
 +	if (
@@ -2583,15 +2570,15 @@ index 000000000000..f69ed4d89395
 +		goto out;
 +	}
 +
-+	__do_set_cpus_allowed(p, new_mask, flags);
++	__do_set_cpus_allowed(p, new_mask);
 +
 +	/* Can the task run on the task's current CPU? If so, we're done */
 +	if (cpumask_test_cpu(task_cpu(p), new_mask))
 +		goto out;
 +
 +	if (p->migration_disabled) {
-+		if (p->cpus_ptr != &p->cpus_mask)
-+			__do_set_cpus_allowed(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
++		if (likely(p->cpus_ptr != &p->cpus_mask))
++			__do_set_cpus_ptr(p, &p->cpus_mask);
 +		p->migration_disabled = 0;
 +		p->migration_flags |= MDF_FORCE_ENABLED;
 +		/* When p is migrate_disabled, rq->lock should be held */
@@ -4270,6 +4257,10 @@ index 000000000000..f69ed4d89395
 +	if (cpumask_empty(&sched_sg_idle_mask))
 +		return;
 +
++	/* exit when cpu is offline */
++	if (unlikely(!rq->online))
++		return;
++
 +	cpu = cpu_of(rq);
 +	/*
 +	 * Only cpu in sibling idle group will do the checking and then
@@ -4653,15 +4644,13 @@ index 000000000000..f69ed4d89395
 +
 +			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
 +				src_rq->nr_running -= nr_migrated;
-+#ifdef CONFIG_SMP
 +				if (src_rq->nr_running < 2)
 +					cpumask_clear_cpu(i, &sched_rq_pending_mask);
-+#endif
++
 +				rq->nr_running += nr_migrated;
-+#ifdef CONFIG_SMP
 +				if (rq->nr_running > 1)
 +					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
-+#endif
++
 +				update_sched_rq_watermark(rq);
 +				cpufreq_update_util(rq, 0);
 +
@@ -6921,7 +6910,7 @@ index 000000000000..f69ed4d89395
 +	 *
 +	 * And since this is boot we can forgo the serialisation.
 +	 */
-+	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
++	set_cpus_allowed_common(idle, cpumask_of(cpu));
 +#endif
 +
 +	/* Silence PROVE_RCU */
@@ -8943,7 +8932,7 @@ index 000000000000..7c71f1141d00
 +		boost_task(p);
 +}
 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index 50cbad89f7fa..fb703fd370fd 100644
+index 50cbad89f7fa..41946f19468b 100644
 --- a/kernel/sched/cpufreq_schedutil.c
 +++ b/kernel/sched/cpufreq_schedutil.c
 @@ -57,6 +57,13 @@ struct sugov_cpu {
@@ -9063,25 +9052,16 @@ index 50cbad89f7fa..fb703fd370fd 100644
  	if (ret) {
  		kthread_stop(thread);
  		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
-@@ -835,6 +903,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
- cpufreq_governor_init(schedutil_gov);
- 
+@@ -837,7 +905,9 @@ cpufreq_governor_init(schedutil_gov);
  #ifdef CONFIG_ENERGY_MODEL
-+#ifndef CONFIG_SCHED_ALT
  static void rebuild_sd_workfn(struct work_struct *work)
  {
++#ifndef CONFIG_SCHED_ALT
  	rebuild_sched_domains_energy();
-@@ -858,4 +927,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- 	}
- 
++#endif /* CONFIG_SCHED_ALT */
  }
-+#else /* CONFIG_SCHED_ALT */
-+void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-+				  struct cpufreq_governor *old_gov)
-+{
-+}
-+#endif
- #endif
+ static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
+ 
 diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
 index 5f611658eeab..631276f56ba0 100644
 --- a/kernel/sched/cputime.c
@@ -9802,23 +9782,4 @@ index 73ef12092250..24bf8ef1249a 100644
 +#endif
  	};
  	struct wakeup_test_data *x = data;
-diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
-index bc722a476..26a33f76b 100644
---- a/kernel/sched/pelt.h
-+++ b/kernel/sched/pelt.h
-@@ -44,6 +44,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
- 	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
- }
  
-+#ifndef CONFIG_SCHED_ALT
- static inline void cfs_se_util_change(struct sched_avg *avg)
- {
- 	unsigned int enqueued;
-@@ -61,7 +62,6 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
- 	WRITE_ONCE(avg->util_est.enqueued, enqueued);
- }
- 
--#ifndef CONFIG_SCHED_ALT
- /*
-  * The clock_pelt scales the time to reflect the effective amount of
-  * computation done during the running delta time but then sync back to

diff --git a/5022_BMQ-and-PDS-compilation-fix.patch b/5022_BMQ-and-PDS-compilation-fix.patch
new file mode 100644
index 0000000..f59ed5c
--- /dev/null
+++ b/5022_BMQ-and-PDS-compilation-fix.patch
@@ -0,0 +1,33 @@
+From b2dc217bab541a5e737b52137f1bcce0b1cc2ed5 Mon Sep 17 00:00:00 2001
+From: Piotr Gorski <lucjan.lucjanov@gmail.com>
+Date: Mon, 14 Jun 2021 15:46:03 +0200
+Subject: [PATCH] prjc: fix compilation error
+
+Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
+---
+ kernel/sched/pelt.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index bc722a476..26a33f76b 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -44,6 +44,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -61,7 +62,6 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
+ 	WRITE_ONCE(avg->util_est.enqueued, enqueued);
+ }
+ 
+-#ifndef CONFIG_SCHED_ALT
+ /*
+  * The clock_pelt scales the time to reflect the effective amount of
+  * computation done during the running delta time but then sync back to
+-- 
+2.32.0
+


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-07-20 15:49 Alice Ferrazzi
  0 siblings, 0 replies; 33+ messages in thread
From: Alice Ferrazzi @ 2021-07-20 15:49 UTC (permalink / raw)
  To: gentoo-commits

commit:     0e6a293a7cf6663592ceaee768cc4653c7bbd115
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Jul 20 15:47:55 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Jul 20 15:48:19 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0e6a293a

Linux patch 5.12.19

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              |     4 +
 1018_linux-5.12.19.patch | 15066 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 15070 insertions(+)

diff --git a/0000_README b/0000_README
index 15e30b2..78578c4 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch:  1017_linux-5.12.18.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.18
 
+Patch:  1018_linux-5.12.19.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.19
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1018_linux-5.12.19.patch b/1018_linux-5.12.19.patch
new file mode 100644
index 0000000..8be1164
--- /dev/null
+++ b/1018_linux-5.12.19.patch
@@ -0,0 +1,15066 @@
+diff --git a/Documentation/devicetree/bindings/i2c/i2c-at91.txt b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
+index 96c914e048f59..2015f50aed0f9 100644
+--- a/Documentation/devicetree/bindings/i2c/i2c-at91.txt
++++ b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
+@@ -73,7 +73,7 @@ i2c0: i2c@f8034600 {
+ 	pinctrl-0 = <&pinctrl_i2c0>;
+ 	pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ 	sda-gpios = <&pioA 30 GPIO_ACTIVE_HIGH>;
+-	scl-gpios = <&pioA 31 GPIO_ACTIVE_HIGH>;
++	scl-gpios = <&pioA 31 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ 
+ 	wm8731: wm8731@1a {
+ 		compatible = "wm8731";
+diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
+index 35ed01a5fbc9d..bb2261431fbbf 100644
+--- a/Documentation/filesystems/f2fs.rst
++++ b/Documentation/filesystems/f2fs.rst
+@@ -711,10 +711,10 @@ users.
+ ===================== ======================== ===================
+ User                  F2FS                     Block
+ ===================== ======================== ===================
+-                      META                     WRITE_LIFE_NOT_SET
+-                      HOT_NODE                 "
+-                      WARM_NODE                "
+-                      COLD_NODE                "
++N/A                   META                     WRITE_LIFE_NOT_SET
++N/A                   HOT_NODE                 "
++N/A                   WARM_NODE                "
++N/A                   COLD_NODE                "
+ ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
+ extension list        "                        "
+ 
+@@ -740,10 +740,10 @@ WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
+ ===================== ======================== ===================
+ User                  F2FS                     Block
+ ===================== ======================== ===================
+-                      META                     WRITE_LIFE_MEDIUM;
+-                      HOT_NODE                 WRITE_LIFE_NOT_SET
+-                      WARM_NODE                "
+-                      COLD_NODE                WRITE_LIFE_NONE
++N/A                   META                     WRITE_LIFE_MEDIUM;
++N/A                   HOT_NODE                 WRITE_LIFE_NOT_SET
++N/A                   WARM_NODE                "
++N/A                   COLD_NODE                WRITE_LIFE_NONE
+ ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
+ extension list        "                        "
+ 
+diff --git a/Makefile b/Makefile
+index b708d7c665e12..9658bc0ba389b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/arm/boot/dts/am335x-cm-t335.dts b/arch/arm/boot/dts/am335x-cm-t335.dts
+index 36d963db40266..ec4730c82d398 100644
+--- a/arch/arm/boot/dts/am335x-cm-t335.dts
++++ b/arch/arm/boot/dts/am335x-cm-t335.dts
+@@ -496,7 +496,7 @@ status = "okay";
+ 	status = "okay";
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&spi0_pins>;
+-	ti,pindir-d0-out-d1-in = <1>;
++	ti,pindir-d0-out-d1-in;
+ 	/* WLS1271 WiFi */
+ 	wlcore: wlcore@1 {
+ 		compatible = "ti,wl1271";
+diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
+index f517d1e843cf4..8b696107eef8c 100644
+--- a/arch/arm/boot/dts/am43x-epos-evm.dts
++++ b/arch/arm/boot/dts/am43x-epos-evm.dts
+@@ -860,7 +860,7 @@
+ 	pinctrl-names = "default", "sleep";
+ 	pinctrl-0 = <&spi0_pins_default>;
+ 	pinctrl-1 = <&spi0_pins_sleep>;
+-	ti,pindir-d0-out-d1-in = <1>;
++	ti,pindir-d0-out-d1-in;
+ };
+ 
+ &spi1 {
+@@ -868,7 +868,7 @@
+ 	pinctrl-names = "default", "sleep";
+ 	pinctrl-0 = <&spi1_pins_default>;
+ 	pinctrl-1 = <&spi1_pins_sleep>;
+-	ti,pindir-d0-out-d1-in = <1>;
++	ti,pindir-d0-out-d1-in;
+ };
+ 
+ &usb2_phy1 {
+diff --git a/arch/arm/boot/dts/am5718.dtsi b/arch/arm/boot/dts/am5718.dtsi
+index ebf4d3cc1cfbe..6d7530a48c73f 100644
+--- a/arch/arm/boot/dts/am5718.dtsi
++++ b/arch/arm/boot/dts/am5718.dtsi
+@@ -17,17 +17,13 @@
+  * VCP1, VCP2
+  * MLB
+  * ISS
+- * USB3, USB4
++ * USB3
+  */
+ 
+ &usb3_tm {
+ 	status = "disabled";
+ };
+ 
+-&usb4_tm {
+-	status = "disabled";
+-};
+-
+ &atl_tm {
+ 	status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi b/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi
+index 20322de2f8bf1..e2fd9610e1252 100644
+--- a/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi
++++ b/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ &usb {
+ 	dr_mode = "otg";
+-	g-rx-fifo-size = <558>;
++	g-rx-fifo-size = <256>;
+ 	g-np-tx-fifo-size = <32>;
+ 	/*
+ 	 * According to dwc2 the sum of all device EP
+diff --git a/arch/arm/boot/dts/bcm283x-rpi-usb-peripheral.dtsi b/arch/arm/boot/dts/bcm283x-rpi-usb-peripheral.dtsi
+index 1409d1b559c18..0ff0e9e253272 100644
+--- a/arch/arm/boot/dts/bcm283x-rpi-usb-peripheral.dtsi
++++ b/arch/arm/boot/dts/bcm283x-rpi-usb-peripheral.dtsi
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ &usb {
+ 	dr_mode = "peripheral";
+-	g-rx-fifo-size = <558>;
++	g-rx-fifo-size = <256>;
+ 	g-np-tx-fifo-size = <32>;
+ 	g-tx-fifo-size = <256 256 512 512 512 768 768>;
+ };
+diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
+index 7db72a2f1020d..86872e12c355e 100644
+--- a/arch/arm/boot/dts/bcm5301x.dtsi
++++ b/arch/arm/boot/dts/bcm5301x.dtsi
+@@ -520,27 +520,27 @@
+ 		      <0x1811b408 0x004>,
+ 		      <0x180293a0 0x01c>;
+ 		reg-names = "mspi", "bspi", "intr_regs", "intr_status_reg";
+-		interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
++		interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ 			     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ 			     <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+ 			     <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "spi_lr_fullness_reached",
++			     <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "mspi_done",
++				  "mspi_halted",
++				  "spi_lr_fullness_reached",
+ 				  "spi_lr_session_aborted",
+ 				  "spi_lr_impatient",
+ 				  "spi_lr_session_done",
+-				  "spi_lr_overhead",
+-				  "mspi_done",
+-				  "mspi_halted";
++				  "spi_lr_overread";
+ 		clocks = <&iprocmed>;
+ 		clock-names = "iprocmed";
+ 		num-cs = <2>;
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		spi_nor: spi-nor@0 {
++		spi_nor: flash@0 {
+ 			compatible = "jedec,spi-nor";
+ 			reg = <0>;
+ 			spi-max-frequency = <20000000>;
+diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
+index a294a02f2d232..1dafce92fc767 100644
+--- a/arch/arm/boot/dts/dra7-l4.dtsi
++++ b/arch/arm/boot/dts/dra7-l4.dtsi
+@@ -4095,28 +4095,6 @@
+ 			};
+ 		};
+ 
+-		usb4_tm: target-module@140000 {		/* 0x48940000, ap 75 3c.0 */
+-			compatible = "ti,sysc-omap4", "ti,sysc";
+-			reg = <0x140000 0x4>,
+-			      <0x140010 0x4>;
+-			reg-names = "rev", "sysc";
+-			ti,sysc-mask = <SYSC_OMAP4_DMADISABLE>;
+-			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+-					<SYSC_IDLE_NO>,
+-					<SYSC_IDLE_SMART>,
+-					<SYSC_IDLE_SMART_WKUP>;
+-			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+-					<SYSC_IDLE_NO>,
+-					<SYSC_IDLE_SMART>,
+-					<SYSC_IDLE_SMART_WKUP>;
+-			/* Domains (P, C): l3init_pwrdm, l3init_clkdm */
+-			clocks = <&l3init_clkctrl DRA7_L3INIT_USB_OTG_SS4_CLKCTRL 0>;
+-			clock-names = "fck";
+-			#address-cells = <1>;
+-			#size-cells = <1>;
+-			ranges = <0x0 0x140000 0x20000>;
+-		};
+-
+ 		target-module@170000 {			/* 0x48970000, ap 21 0a.0 */
+ 			compatible = "ti,sysc-omap4", "ti,sysc";
+ 			reg = <0x170010 0x4>;
+diff --git a/arch/arm/boot/dts/dra71x.dtsi b/arch/arm/boot/dts/dra71x.dtsi
+index cad0e4a2bd8df..9c270d8f75d5b 100644
+--- a/arch/arm/boot/dts/dra71x.dtsi
++++ b/arch/arm/boot/dts/dra71x.dtsi
+@@ -11,7 +11,3 @@
+ &rtctarget {
+ 	status = "disabled";
+ };
+-
+-&usb4_tm {
+-	status = "disabled";
+-};
+diff --git a/arch/arm/boot/dts/dra72x.dtsi b/arch/arm/boot/dts/dra72x.dtsi
+index d403acc754b68..f3e934ef7d3e2 100644
+--- a/arch/arm/boot/dts/dra72x.dtsi
++++ b/arch/arm/boot/dts/dra72x.dtsi
+@@ -108,7 +108,3 @@
+ &pcie2_rc {
+ 	compatible = "ti,dra726-pcie-rc", "ti,dra7-pcie";
+ };
+-
+-&usb4_tm {
+-	status = "disabled";
+-};
+diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
+index e1850d6c841a7..b4e07d99ffde1 100644
+--- a/arch/arm/boot/dts/dra74x.dtsi
++++ b/arch/arm/boot/dts/dra74x.dtsi
+@@ -49,49 +49,6 @@
+ 			reg = <0x41500000 0x100>;
+ 		};
+ 
+-		target-module@48940000 {
+-			compatible = "ti,sysc-omap4", "ti,sysc";
+-			reg = <0x48940000 0x4>,
+-			      <0x48940010 0x4>;
+-			reg-names = "rev", "sysc";
+-			ti,sysc-mask = <SYSC_OMAP4_DMADISABLE>;
+-			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+-					<SYSC_IDLE_NO>,
+-					<SYSC_IDLE_SMART>,
+-					<SYSC_IDLE_SMART_WKUP>;
+-			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+-					<SYSC_IDLE_NO>,
+-					<SYSC_IDLE_SMART>,
+-					<SYSC_IDLE_SMART_WKUP>;
+-			clocks = <&l3init_clkctrl DRA7_L3INIT_USB_OTG_SS4_CLKCTRL 0>;
+-			clock-names = "fck";
+-			#address-cells = <1>;
+-			#size-cells = <1>;
+-			ranges = <0x0 0x48940000 0x20000>;
+-
+-			omap_dwc3_4: omap_dwc3_4@0 {
+-				compatible = "ti,dwc3";
+-				reg = <0 0x10000>;
+-				interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>;
+-				#address-cells = <1>;
+-				#size-cells = <1>;
+-				utmi-mode = <2>;
+-				ranges;
+-				status = "disabled";
+-				usb4: usb@10000 {
+-					compatible = "snps,dwc3";
+-					reg = <0x10000 0x17000>;
+-					interrupts = <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+-						     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+-						     <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>;
+-					interrupt-names = "peripheral",
+-							  "host",
+-							  "otg";
+-					maximum-speed = "high-speed";
+-					dr_mode = "otg";
+-				};
+-			};
+-		};
+ 
+ 		target-module@41501000 {
+ 			compatible = "ti,sysc-omap2", "ti,sysc";
+@@ -224,3 +181,52 @@
+ &pcie2_rc {
+ 	compatible = "ti,dra746-pcie-rc", "ti,dra7-pcie";
+ };
++
++&l4_per3 {
++	segment@0 {
++		usb4_tm: target-module@140000 {         /* 0x48940000, ap 75 3c.0 */
++			compatible = "ti,sysc-omap4", "ti,sysc";
++			reg = <0x140000 0x4>,
++			      <0x140010 0x4>;
++			reg-names = "rev", "sysc";
++			ti,sysc-mask = <SYSC_OMAP4_DMADISABLE>;
++			ti,sysc-midle = <SYSC_IDLE_FORCE>,
++					<SYSC_IDLE_NO>,
++					<SYSC_IDLE_SMART>,
++					<SYSC_IDLE_SMART_WKUP>;
++			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
++					<SYSC_IDLE_NO>,
++					<SYSC_IDLE_SMART>,
++					<SYSC_IDLE_SMART_WKUP>;
++			/* Domains (P, C): l3init_pwrdm, l3init_clkdm */
++			clocks = <&l3init_clkctrl DRA7_L3INIT_USB_OTG_SS4_CLKCTRL 0>;
++			clock-names = "fck";
++			#address-cells = <1>;
++			#size-cells = <1>;
++			ranges = <0x0 0x140000 0x20000>;
++
++			omap_dwc3_4: omap_dwc3_4@0 {
++				compatible = "ti,dwc3";
++				reg = <0 0x10000>;
++				interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>;
++				#address-cells = <1>;
++				#size-cells = <1>;
++				utmi-mode = <2>;
++				ranges;
++				status = "disabled";
++				usb4: usb@10000 {
++					compatible = "snps,dwc3";
++					reg = <0x10000 0x17000>;
++					interrupts = <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
++						     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
++						     <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>;
++					interrupt-names = "peripheral",
++							  "host",
++							  "otg";
++					maximum-speed = "high-speed";
++					dr_mode = "otg";
++				};
++			};
++		};
++	};
++};
+diff --git a/arch/arm/boot/dts/exynos5422-odroidhc1.dts b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
+index 20c222b33f98e..d91f7fa2cf808 100644
+--- a/arch/arm/boot/dts/exynos5422-odroidhc1.dts
++++ b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
+@@ -22,7 +22,7 @@
+ 			label = "blue:heartbeat";
+ 			pwms = <&pwm 2 2000000 0>;
+ 			pwm-names = "pwm2";
+-			max_brightness = <255>;
++			max-brightness = <255>;
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
+index ede7822576436..1c24f9b35973f 100644
+--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
++++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
+@@ -24,7 +24,7 @@
+ 			label = "blue:heartbeat";
+ 			pwms = <&pwm 2 2000000 0>;
+ 			pwm-names = "pwm2";
+-			max_brightness = <255>;
++			max-brightness = <255>;
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi b/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi
+index 2fc3e86dc5f70..982752e1df24a 100644
+--- a/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi
++++ b/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi
+@@ -22,7 +22,7 @@
+ 			 * Green LED is much brighter than the others
+ 			 * so limit its max brightness
+ 			 */
+-			max_brightness = <127>;
++			max-brightness = <127>;
+ 			linux,default-trigger = "mmc0";
+ 		};
+ 
+@@ -30,7 +30,7 @@
+ 			label = "blue:heartbeat";
+ 			pwms = <&pwm 2 2000000 0>;
+ 			pwm-names = "pwm2";
+-			max_brightness = <255>;
++			max-brightness = <255>;
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/gemini-rut1xx.dts b/arch/arm/boot/dts/gemini-rut1xx.dts
+index 9611ddf067927..08091d2a64e15 100644
+--- a/arch/arm/boot/dts/gemini-rut1xx.dts
++++ b/arch/arm/boot/dts/gemini-rut1xx.dts
+@@ -125,18 +125,6 @@
+ 			};
+ 		};
+ 
+-		ethernet@60000000 {
+-			status = "okay";
+-
+-			ethernet-port@0 {
+-				phy-mode = "rgmii";
+-				phy-handle = <&phy0>;
+-			};
+-			ethernet-port@1 {
+-				/* Not used in this platform */
+-			};
+-		};
+-
+ 		usb@68000000 {
+ 			status = "okay";
+ 		};
+diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+index d0768ae429faa..e3de2b487cf49 100644
+--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+@@ -96,30 +96,40 @@
+ 			reg = <0>;
+ 			max-speed = <100>;
+ 			reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+-			reset-delay-us = <1000>;
+-			reset-post-delay-us = <1000>;
++			reset-assert-us = <1000>;
++			reset-deassert-us = <1000>;
++			smsc,disable-energy-detect; /* Make plugin detection reliable */
+ 		};
+ 	};
+ };
+ 
+ &i2c1 {
+ 	clock-frequency = <100000>;
+-	pinctrl-names = "default";
++	pinctrl-names = "default", "gpio";
+ 	pinctrl-0 = <&pinctrl_i2c1>;
++	pinctrl-1 = <&pinctrl_i2c1_gpio>;
++	scl-gpios = <&gpio3 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
++	sda-gpios = <&gpio3 28 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ 	status = "okay";
+ };
+ 
+ &i2c2 {
+ 	clock-frequency = <100000>;
+-	pinctrl-names = "default";
++	pinctrl-names = "default", "gpio";
+ 	pinctrl-0 = <&pinctrl_i2c2>;
++	pinctrl-1 = <&pinctrl_i2c2_gpio>;
++	scl-gpios = <&gpio4 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
++	sda-gpios = <&gpio4 13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ 	status = "okay";
+ };
+ 
+ &i2c3 {
+ 	clock-frequency = <100000>;
+-	pinctrl-names = "default";
++	pinctrl-names = "default", "gpio";
+ 	pinctrl-0 = <&pinctrl_i2c3>;
++	pinctrl-1 = <&pinctrl_i2c3_gpio>;
++	scl-gpios = <&gpio1 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
++	sda-gpios = <&gpio1 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ 	status = "okay";
+ 
+ 	ltc3676: pmic@3c {
+@@ -285,6 +295,13 @@
+ 		>;
+ 	};
+ 
++	pinctrl_i2c1_gpio: i2c1-gpio-grp {
++		fsl,pins = <
++			MX6QDL_PAD_EIM_D21__GPIO3_IO21		0x4001b8b1
++			MX6QDL_PAD_EIM_D28__GPIO3_IO28		0x4001b8b1
++		>;
++	};
++
+ 	pinctrl_i2c2: i2c2-grp {
+ 		fsl,pins = <
+ 			MX6QDL_PAD_KEY_COL3__I2C2_SCL		0x4001b8b1
+@@ -292,6 +309,13 @@
+ 		>;
+ 	};
+ 
++	pinctrl_i2c2_gpio: i2c2-gpio-grp {
++		fsl,pins = <
++			MX6QDL_PAD_KEY_COL3__GPIO4_IO12		0x4001b8b1
++			MX6QDL_PAD_KEY_ROW3__GPIO4_IO13		0x4001b8b1
++		>;
++	};
++
+ 	pinctrl_i2c3: i2c3-grp {
+ 		fsl,pins = <
+ 			MX6QDL_PAD_GPIO_3__I2C3_SCL		0x4001b8b1
+@@ -299,6 +323,13 @@
+ 		>;
+ 	};
+ 
++	pinctrl_i2c3_gpio: i2c3-gpio-grp {
++		fsl,pins = <
++			MX6QDL_PAD_GPIO_3__GPIO1_IO03		0x4001b8b1
++			MX6QDL_PAD_GPIO_6__GPIO1_IO06		0x4001b8b1
++		>;
++	};
++
+ 	pinctrl_pmic_hw300: pmic-hw300-grp {
+ 		fsl,pins = <
+ 			MX6QDL_PAD_EIM_A25__GPIO5_IO02		0x1B0B0
+diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts
+index d2240b89ee529..4658453234959 100644
+--- a/arch/arm/boot/dts/r8a7779-marzen.dts
++++ b/arch/arm/boot/dts/r8a7779-marzen.dts
+@@ -145,7 +145,7 @@
+ 	status = "okay";
+ 
+ 	clocks = <&mstp1_clks R8A7779_CLK_DU>, <&x3_clk>;
+-	clock-names = "du", "dclkin.0";
++	clock-names = "du.0", "dclkin.0";
+ 
+ 	ports {
+ 		port@0 {
+diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
+index 74d7e9084eabe..3c5fcdfe16b87 100644
+--- a/arch/arm/boot/dts/r8a7779.dtsi
++++ b/arch/arm/boot/dts/r8a7779.dtsi
+@@ -463,6 +463,7 @@
+ 		reg = <0xfff80000 0x40000>;
+ 		interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&mstp1_clks R8A7779_CLK_DU>;
++		clock-names = "du.0";
+ 		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
+ 		status = "disabled";
+ 
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+index 2617815e42a64..30e4d990c5a38 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+@@ -119,7 +119,6 @@
+ 	max-speed = <100>;
+ 	phy-handle = <&phy0>;
+ 	st,eth-ref-clk-sel;
+-	phy-reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
+ 
+ 	mdio0 {
+ 		#address-cells = <1>;
+@@ -128,6 +127,13 @@
+ 
+ 		phy0: ethernet-phy@1 {
+ 			reg = <1>;
++			/* LAN8710Ai */
++			compatible = "ethernet-phy-id0007.c0f0",
++				     "ethernet-phy-ieee802.3-c22";
++			clocks = <&rcc ETHCK_K>;
++			reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
++			reset-assert-us = <500>;
++			reset-deassert-us = <500>;
+ 			interrupt-parent = <&gpioi>;
+ 			interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ 		};
+diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
+index 97f497854e05d..d05fa679dcd30 100644
+--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
++++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
+@@ -85,7 +85,7 @@
+ 	pinctrl-0 = <&emac_rgmii_pins>;
+ 	phy-supply = <&reg_gmac_3v3>;
+ 	phy-handle = <&ext_rgmii_phy>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 
+ 	status = "okay";
+ };
+diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
+index 25b01da4771b3..8b48326be9fd5 100644
+--- a/arch/arm/mach-exynos/exynos.c
++++ b/arch/arm/mach-exynos/exynos.c
+@@ -55,6 +55,7 @@ void __init exynos_sysram_init(void)
+ 		sysram_base_addr = of_iomap(node, 0);
+ 		sysram_base_phys = of_translate_address(node,
+ 					   of_get_address(node, 0, NULL, NULL));
++		of_node_put(node);
+ 		break;
+ 	}
+ 
+@@ -62,6 +63,7 @@ void __init exynos_sysram_init(void)
+ 		if (!of_device_is_available(node))
+ 			continue;
+ 		sysram_ns_base_addr = of_iomap(node, 0);
++		of_node_put(node);
+ 		break;
+ 	}
+ }
+diff --git a/arch/arm/probes/kprobes/test-thumb.c b/arch/arm/probes/kprobes/test-thumb.c
+index 456c181a7bfe8..4e11f0b760f89 100644
+--- a/arch/arm/probes/kprobes/test-thumb.c
++++ b/arch/arm/probes/kprobes/test-thumb.c
+@@ -441,21 +441,21 @@ void kprobe_thumb32_test_cases(void)
+ 		"3:	mvn	r0, r0	\n\t"
+ 		"2:	nop		\n\t")
+ 
+-	TEST_RX("tbh	[pc, r",7, (9f-(1f+4))>>1,"]",
++	TEST_RX("tbh	[pc, r",7, (9f-(1f+4))>>1,", lsl #1]",
+ 		"9:			\n\t"
+ 		".short	(2f-1b-4)>>1	\n\t"
+ 		".short	(3f-1b-4)>>1	\n\t"
+ 		"3:	mvn	r0, r0	\n\t"
+ 		"2:	nop		\n\t")
+ 
+-	TEST_RX("tbh	[pc, r",12, ((9f-(1f+4))>>1)+1,"]",
++	TEST_RX("tbh	[pc, r",12, ((9f-(1f+4))>>1)+1,", lsl #1]",
+ 		"9:			\n\t"
+ 		".short	(2f-1b-4)>>1	\n\t"
+ 		".short	(3f-1b-4)>>1	\n\t"
+ 		"3:	mvn	r0, r0	\n\t"
+ 		"2:	nop		\n\t")
+ 
+-	TEST_RRX("tbh	[r",1,9f, ", r",14,1,"]",
++	TEST_RRX("tbh	[r",1,9f, ", r",14,1,", lsl #1]",
+ 		"9:			\n\t"
+ 		".short	(2f-1b-4)>>1	\n\t"
+ 		".short	(3f-1b-4)>>1	\n\t"
+@@ -468,10 +468,10 @@ void kprobe_thumb32_test_cases(void)
+ 
+ 	TEST_UNSUPPORTED("strexb	r0, r1, [r2]")
+ 	TEST_UNSUPPORTED("strexh	r0, r1, [r2]")
+-	TEST_UNSUPPORTED("strexd	r0, r1, [r2]")
++	TEST_UNSUPPORTED("strexd	r0, r1, r2, [r2]")
+ 	TEST_UNSUPPORTED("ldrexb	r0, [r1]")
+ 	TEST_UNSUPPORTED("ldrexh	r0, [r1]")
+-	TEST_UNSUPPORTED("ldrexd	r0, [r1]")
++	TEST_UNSUPPORTED("ldrexd	r0, r1, [r1]")
+ 
+ 	TEST_GROUP("Data-processing (shifted register) and (modified immediate)")
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+index e22b94c83647f..5e66ce1a334fb 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+@@ -79,7 +79,7 @@
+ &emac {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&rgmii_pins>;
+-	phy-mode = "rgmii-id";
++	phy-mode = "rgmii-txid";
+ 	phy-handle = <&ext_rgmii_phy>;
+ 	phy-supply = <&reg_dc1sw>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+index b8f7cf5cbdab1..96914a307ba1d 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+@@ -597,6 +597,8 @@ edp_brij_i2c: &i2c2 {
+ 		clocks = <&rpmhcc RPMH_LN_BB_CLK3>;
+ 		clock-names = "refclk";
+ 
++		no-hpd;
++
+ 		ports {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+index 8f617f7b6d34e..f712771df0c77 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+@@ -46,6 +46,14 @@
+ 	};
+ 
+ 	reserved-memory {
++		/* The rmtfs_mem needs to be guarded due to "XPU limitations"
++		 * it is otherwise possible for an allocation adjacent to the
++		 * rmtfs_mem region to trigger an XPU violation, causing a crash.
++		 */
++		rmtfs_lower_guard: memory@f5b00000 {
++			no-map;
++			reg = <0 0xf5b00000 0 0x1000>;
++		};
+ 		/*
+ 		 * The rmtfs memory region in downstream is 'dynamically allocated'
+ 		 * but given the same address every time. Hard code it as this address is
+@@ -59,6 +67,10 @@
+ 			qcom,client-id = <1>;
+ 			qcom,vmid = <15>;
+ 		};
++		rmtfs_upper_guard: memory@f5d01000 {
++			no-map;
++			reg = <0 0xf5d01000 0 0x2000>;
++		};
+ 
+ 		/*
+ 		 * It seems like reserving the old rmtfs_mem region is also needed to prevent
+diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+index 140db2d5ba317..c2a709a384e9e 100644
+--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+@@ -376,6 +376,8 @@
+ 		clocks = <&sn65dsi86_refclk>;
+ 		clock-names = "refclk";
+ 
++		no-hpd;
++
+ 		ports {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+index d64fb8b1b86c3..2e427f21130d2 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+@@ -76,6 +76,7 @@
+ 			opp-hz = /bits/ 64 <1500000000>;
+ 			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
++			opp-suspend;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+index 25d947a81b294..949a8b7873947 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+@@ -63,18 +63,19 @@
+ 
+ 		opp-500000000 {
+ 			opp-hz = /bits/ 64 <500000000>;
+-			opp-microvolt = <820000>;
++			opp-microvolt = <830000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1000000000 {
+ 			opp-hz = /bits/ 64 <1000000000>;
+-			opp-microvolt = <820000>;
++			opp-microvolt = <830000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1500000000 {
+ 			opp-hz = /bits/ 64 <1500000000>;
+-			opp-microvolt = <820000>;
++			opp-microvolt = <830000>;
+ 			clock-latency-ns = <300000>;
++			opp-suspend;
+ 		};
+ 		opp-1600000000 {
+ 			opp-hz = /bits/ 64 <1600000000>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+index e8c31ebec0973..550b10a7e18bb 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+@@ -52,18 +52,19 @@
+ 
+ 		opp-500000000 {
+ 			opp-hz = /bits/ 64 <500000000>;
+-			opp-microvolt = <820000>;
++			opp-microvolt = <830000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1000000000 {
+ 			opp-hz = /bits/ 64 <1000000000>;
+-			opp-microvolt = <820000>;
++			opp-microvolt = <830000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1500000000 {
+ 			opp-hz = /bits/ 64 <1500000000>;
+-			opp-microvolt = <820000>;
++			opp-microvolt = <830000>;
+ 			clock-latency-ns = <300000>;
++			opp-suspend;
+ 		};
+ 		opp-1600000000 {
+ 			opp-hz = /bits/ 64 <1600000000>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
+index 7417cf5fea0f0..2426e533128ce 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
++++ b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
+@@ -59,7 +59,7 @@
+ 	memory@48000000 {
+ 		device_type = "memory";
+ 		/* first 128MB is reserved for secure area. */
+-		reg = <0x0 0x48000000 0x0 0x38000000>;
++		reg = <0x0 0x48000000 0x0 0x78000000>;
+ 	};
+ 
+ 	osc5_clk: osc5-clock {
+diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+index 86ac48e2c8491..2e17fc9fd7116 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+@@ -948,7 +948,6 @@
+ 			      <0x0 0xf1060000 0 0x110000>;
+ 			interrupts = <GIC_PPI 9
+ 				      (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>;
+-			power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ 		};
+ 
+ 		prr: chipid@fff00044 {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+index 2d71ca7e429c1..a53055bb7ca4d 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+@@ -172,8 +172,6 @@
+ };
+ 
+ &gmac2phy {
+-	pinctrl-names = "default";
+-	pinctrl-0 = <&fephyled_linkm1>, <&fephyled_rxm1>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+index 20309076dbac0..35b7ab3bf10c6 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+@@ -384,6 +384,7 @@
+ 
+ 			vcc_sdio: LDO_REG4 {
+ 				regulator-name = "vcc_sdio";
++				regulator-always-on;
+ 				regulator-boot-on;
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <3000000>;
+@@ -488,6 +489,8 @@
+ 		regulator-min-microvolt = <712500>;
+ 		regulator-max-microvolt = <1500000>;
+ 		regulator-ramp-delay = <1000>;
++		regulator-always-on;
++		regulator-boot-on;
+ 		vin-supply = <&vcc3v3_sys>;
+ 
+ 		regulator-state-mem {
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+index 3398f174f09b3..bfdf63d3947f6 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+@@ -671,6 +671,7 @@
+ 					  "otg";
+ 			maximum-speed = "super-speed";
+ 			dr_mode = "otg";
++			cdns,phyrst-a-enable;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+index 60764366e22bd..86f7ab511ee86 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+@@ -635,6 +635,10 @@
+ 	status = "disabled";
+ };
+ 
++&cmn_refclk1 {
++	clock-frequency = <100000000>;
++};
++
+ &serdes0 {
+ 	serdes0_pcie_link: link@0 {
+ 		reg = <0>;
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+index f1e7da3dfa276..411d85562d518 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+@@ -8,6 +8,20 @@
+ #include <dt-bindings/mux/mux.h>
+ #include <dt-bindings/mux/ti-serdes.h>
+ 
++/ {
++	cmn_refclk: clock-cmnrefclk {
++		#clock-cells = <0>;
++		compatible = "fixed-clock";
++		clock-frequency = <0>;
++	};
++
++	cmn_refclk1: clock-cmnrefclk1 {
++		#clock-cells = <0>;
++		compatible = "fixed-clock";
++		clock-frequency = <0>;
++	};
++};
++
+ &cbass_main {
+ 	msmc_ram: sram@70000000 {
+ 		compatible = "mmio-sram";
+@@ -336,24 +350,12 @@
+ 		pinctrl-single,function-mask = <0xffffffff>;
+ 	};
+ 
+-	dummy_cmn_refclk: dummy-cmn-refclk {
+-		#clock-cells = <0>;
+-		compatible = "fixed-clock";
+-		clock-frequency = <100000000>;
+-	};
+-
+-	dummy_cmn_refclk1: dummy-cmn-refclk1 {
+-		#clock-cells = <0>;
+-		compatible = "fixed-clock";
+-		clock-frequency = <100000000>;
+-	};
+-
+ 	serdes_wiz0: wiz@5000000 {
+ 		compatible = "ti,j721e-wiz-16g";
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		power-domains = <&k3_pds 292 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 292 5>, <&k3_clks 292 11>, <&dummy_cmn_refclk>;
++		clocks = <&k3_clks 292 5>, <&k3_clks 292 11>, <&cmn_refclk>;
+ 		clock-names = "fck", "core_ref_clk", "ext_ref_clk";
+ 		assigned-clocks = <&k3_clks 292 11>, <&k3_clks 292 0>;
+ 		assigned-clock-parents = <&k3_clks 292 15>, <&k3_clks 292 4>;
+@@ -362,21 +364,21 @@
+ 		ranges = <0x5000000 0x0 0x5000000 0x10000>;
+ 
+ 		wiz0_pll0_refclk: pll0-refclk {
+-			clocks = <&k3_clks 292 11>, <&dummy_cmn_refclk>;
++			clocks = <&k3_clks 292 11>, <&cmn_refclk>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz0_pll0_refclk>;
+ 			assigned-clock-parents = <&k3_clks 292 11>;
+ 		};
+ 
+ 		wiz0_pll1_refclk: pll1-refclk {
+-			clocks = <&k3_clks 292 0>, <&dummy_cmn_refclk1>;
++			clocks = <&k3_clks 292 0>, <&cmn_refclk1>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz0_pll1_refclk>;
+ 			assigned-clock-parents = <&k3_clks 292 0>;
+ 		};
+ 
+ 		wiz0_refclk_dig: refclk-dig {
+-			clocks = <&k3_clks 292 11>, <&k3_clks 292 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
++			clocks = <&k3_clks 292 11>, <&k3_clks 292 0>, <&cmn_refclk>, <&cmn_refclk1>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz0_refclk_dig>;
+ 			assigned-clock-parents = <&k3_clks 292 11>;
+@@ -410,7 +412,7 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		power-domains = <&k3_pds 293 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 293 5>, <&k3_clks 293 13>, <&dummy_cmn_refclk>;
++		clocks = <&k3_clks 293 5>, <&k3_clks 293 13>, <&cmn_refclk>;
+ 		clock-names = "fck", "core_ref_clk", "ext_ref_clk";
+ 		assigned-clocks = <&k3_clks 293 13>, <&k3_clks 293 0>;
+ 		assigned-clock-parents = <&k3_clks 293 17>, <&k3_clks 293 4>;
+@@ -419,21 +421,21 @@
+ 		ranges = <0x5010000 0x0 0x5010000 0x10000>;
+ 
+ 		wiz1_pll0_refclk: pll0-refclk {
+-			clocks = <&k3_clks 293 13>, <&dummy_cmn_refclk>;
++			clocks = <&k3_clks 293 13>, <&cmn_refclk>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz1_pll0_refclk>;
+ 			assigned-clock-parents = <&k3_clks 293 13>;
+ 		};
+ 
+ 		wiz1_pll1_refclk: pll1-refclk {
+-			clocks = <&k3_clks 293 0>, <&dummy_cmn_refclk1>;
++			clocks = <&k3_clks 293 0>, <&cmn_refclk1>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz1_pll1_refclk>;
+ 			assigned-clock-parents = <&k3_clks 293 0>;
+ 		};
+ 
+ 		wiz1_refclk_dig: refclk-dig {
+-			clocks = <&k3_clks 293 13>, <&k3_clks 293 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
++			clocks = <&k3_clks 293 13>, <&k3_clks 293 0>, <&cmn_refclk>, <&cmn_refclk1>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz1_refclk_dig>;
+ 			assigned-clock-parents = <&k3_clks 293 13>;
+@@ -467,7 +469,7 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		power-domains = <&k3_pds 294 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 294 5>, <&k3_clks 294 11>, <&dummy_cmn_refclk>;
++		clocks = <&k3_clks 294 5>, <&k3_clks 294 11>, <&cmn_refclk>;
+ 		clock-names = "fck", "core_ref_clk", "ext_ref_clk";
+ 		assigned-clocks = <&k3_clks 294 11>, <&k3_clks 294 0>;
+ 		assigned-clock-parents = <&k3_clks 294 15>, <&k3_clks 294 4>;
+@@ -476,21 +478,21 @@
+ 		ranges = <0x5020000 0x0 0x5020000 0x10000>;
+ 
+ 		wiz2_pll0_refclk: pll0-refclk {
+-			clocks = <&k3_clks 294 11>, <&dummy_cmn_refclk>;
++			clocks = <&k3_clks 294 11>, <&cmn_refclk>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz2_pll0_refclk>;
+ 			assigned-clock-parents = <&k3_clks 294 11>;
+ 		};
+ 
+ 		wiz2_pll1_refclk: pll1-refclk {
+-			clocks = <&k3_clks 294 0>, <&dummy_cmn_refclk1>;
++			clocks = <&k3_clks 294 0>, <&cmn_refclk1>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz2_pll1_refclk>;
+ 			assigned-clock-parents = <&k3_clks 294 0>;
+ 		};
+ 
+ 		wiz2_refclk_dig: refclk-dig {
+-			clocks = <&k3_clks 294 11>, <&k3_clks 294 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
++			clocks = <&k3_clks 294 11>, <&k3_clks 294 0>, <&cmn_refclk>, <&cmn_refclk1>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz2_refclk_dig>;
+ 			assigned-clock-parents = <&k3_clks 294 11>;
+@@ -524,7 +526,7 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		power-domains = <&k3_pds 295 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 295 5>, <&k3_clks 295 9>, <&dummy_cmn_refclk>;
++		clocks = <&k3_clks 295 5>, <&k3_clks 295 9>, <&cmn_refclk>;
+ 		clock-names = "fck", "core_ref_clk", "ext_ref_clk";
+ 		assigned-clocks = <&k3_clks 295 9>, <&k3_clks 295 0>;
+ 		assigned-clock-parents = <&k3_clks 295 13>, <&k3_clks 295 4>;
+@@ -533,21 +535,21 @@
+ 		ranges = <0x5030000 0x0 0x5030000 0x10000>;
+ 
+ 		wiz3_pll0_refclk: pll0-refclk {
+-			clocks = <&k3_clks 295 9>, <&dummy_cmn_refclk>;
++			clocks = <&k3_clks 295 9>, <&cmn_refclk>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz3_pll0_refclk>;
+ 			assigned-clock-parents = <&k3_clks 295 9>;
+ 		};
+ 
+ 		wiz3_pll1_refclk: pll1-refclk {
+-			clocks = <&k3_clks 295 0>, <&dummy_cmn_refclk1>;
++			clocks = <&k3_clks 295 0>, <&cmn_refclk1>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz3_pll1_refclk>;
+ 			assigned-clock-parents = <&k3_clks 295 0>;
+ 		};
+ 
+ 		wiz3_refclk_dig: refclk-dig {
+-			clocks = <&k3_clks 295 9>, <&k3_clks 295 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
++			clocks = <&k3_clks 295 9>, <&k3_clks 295 0>, <&cmn_refclk>, <&cmn_refclk1>;
+ 			#clock-cells = <0>;
+ 			assigned-clocks = <&wiz3_refclk_dig>;
+ 			assigned-clock-parents = <&k3_clks 295 9>;
+diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
+index 95cd62d673711..2cf999e41d30e 100644
+--- a/arch/arm64/lib/copy_from_user.S
++++ b/arch/arm64/lib/copy_from_user.S
+@@ -29,7 +29,7 @@
+ 	.endm
+ 
+ 	.macro ldrh1 reg, ptr, val
+-	user_ldst 9998f, ldtrh, \reg, \ptr, \val
++	user_ldst 9997f, ldtrh, \reg, \ptr, \val
+ 	.endm
+ 
+ 	.macro strh1 reg, ptr, val
+@@ -37,7 +37,7 @@
+ 	.endm
+ 
+ 	.macro ldr1 reg, ptr, val
+-	user_ldst 9998f, ldtr, \reg, \ptr, \val
++	user_ldst 9997f, ldtr, \reg, \ptr, \val
+ 	.endm
+ 
+ 	.macro str1 reg, ptr, val
+@@ -45,7 +45,7 @@
+ 	.endm
+ 
+ 	.macro ldp1 reg1, reg2, ptr, val
+-	user_ldp 9998f, \reg1, \reg2, \ptr, \val
++	user_ldp 9997f, \reg1, \reg2, \ptr, \val
+ 	.endm
+ 
+ 	.macro stp1 reg1, reg2, ptr, val
+@@ -53,8 +53,10 @@
+ 	.endm
+ 
+ end	.req	x5
++srcin	.req	x15
+ SYM_FUNC_START(__arch_copy_from_user)
+ 	add	end, x0, x2
++	mov	srcin, x1
+ #include "copy_template.S"
+ 	mov	x0, #0				// Nothing to copy
+ 	ret
+@@ -63,6 +65,11 @@ EXPORT_SYMBOL(__arch_copy_from_user)
+ 
+ 	.section .fixup,"ax"
+ 	.align	2
++9997:	cmp	dst, dstin
++	b.ne	9998f
++	// Before being absolutely sure we couldn't copy anything, try harder
++USER(9998f, ldtrb tmp1w, [srcin])
++	strb	tmp1w, [dst], #1
+ 9998:	sub	x0, end, dst			// bytes not copied
+ 	ret
+ 	.previous
+diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
+index 1f61cd0df0627..dbea3799c3efb 100644
+--- a/arch/arm64/lib/copy_in_user.S
++++ b/arch/arm64/lib/copy_in_user.S
+@@ -30,33 +30,34 @@
+ 	.endm
+ 
+ 	.macro ldrh1 reg, ptr, val
+-	user_ldst 9998f, ldtrh, \reg, \ptr, \val
++	user_ldst 9997f, ldtrh, \reg, \ptr, \val
+ 	.endm
+ 
+ 	.macro strh1 reg, ptr, val
+-	user_ldst 9998f, sttrh, \reg, \ptr, \val
++	user_ldst 9997f, sttrh, \reg, \ptr, \val
+ 	.endm
+ 
+ 	.macro ldr1 reg, ptr, val
+-	user_ldst 9998f, ldtr, \reg, \ptr, \val
++	user_ldst 9997f, ldtr, \reg, \ptr, \val
+ 	.endm
+ 
+ 	.macro str1 reg, ptr, val
+-	user_ldst 9998f, sttr, \reg, \ptr, \val
++	user_ldst 9997f, sttr, \reg, \ptr, \val
+ 	.endm
+ 
+ 	.macro ldp1 reg1, reg2, ptr, val
+-	user_ldp 9998f, \reg1, \reg2, \ptr, \val
++	user_ldp 9997f, \reg1, \reg2, \ptr, \val
+ 	.endm
+ 
+ 	.macro stp1 reg1, reg2, ptr, val
+-	user_stp 9998f, \reg1, \reg2, \ptr, \val
++	user_stp 9997f, \reg1, \reg2, \ptr, \val
+ 	.endm
+ 
+ end	.req	x5
+-
++srcin	.req	x15
+ SYM_FUNC_START(__arch_copy_in_user)
+ 	add	end, x0, x2
++	mov	srcin, x1
+ #include "copy_template.S"
+ 	mov	x0, #0
+ 	ret
+@@ -65,6 +66,12 @@ EXPORT_SYMBOL(__arch_copy_in_user)
+ 
+ 	.section .fixup,"ax"
+ 	.align	2
++9997:	cmp	dst, dstin
++	b.ne	9998f
++	// Before being absolutely sure we couldn't copy anything, try harder
++USER(9998f, ldtrb tmp1w, [srcin])
++USER(9998f, sttrb tmp1w, [dst])
++	add	dst, dst, #1
+ 9998:	sub	x0, end, dst			// bytes not copied
+ 	ret
+ 	.previous
+diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
+index 043da90f5dd7d..9f380eecf6531 100644
+--- a/arch/arm64/lib/copy_to_user.S
++++ b/arch/arm64/lib/copy_to_user.S
+@@ -32,7 +32,7 @@
+ 	.endm
+ 
+ 	.macro strh1 reg, ptr, val
+-	user_ldst 9998f, sttrh, \reg, \ptr, \val
++	user_ldst 9997f, sttrh, \reg, \ptr, \val
+ 	.endm
+ 
+ 	.macro ldr1 reg, ptr, val
+@@ -40,7 +40,7 @@
+ 	.endm
+ 
+ 	.macro str1 reg, ptr, val
+-	user_ldst 9998f, sttr, \reg, \ptr, \val
++	user_ldst 9997f, sttr, \reg, \ptr, \val
+ 	.endm
+ 
+ 	.macro ldp1 reg1, reg2, ptr, val
+@@ -48,12 +48,14 @@
+ 	.endm
+ 
+ 	.macro stp1 reg1, reg2, ptr, val
+-	user_stp 9998f, \reg1, \reg2, \ptr, \val
++	user_stp 9997f, \reg1, \reg2, \ptr, \val
+ 	.endm
+ 
+ end	.req	x5
++srcin	.req	x15
+ SYM_FUNC_START(__arch_copy_to_user)
+ 	add	end, x0, x2
++	mov	srcin, x1
+ #include "copy_template.S"
+ 	mov	x0, #0
+ 	ret
+@@ -62,6 +64,12 @@ EXPORT_SYMBOL(__arch_copy_to_user)
+ 
+ 	.section .fixup,"ax"
+ 	.align	2
++9997:	cmp	dst, dstin
++	b.ne	9998f
++	// Before being absolutely sure we couldn't copy anything, try harder
++	ldrb	tmp1w, [srcin]
++USER(9998f, sttrb tmp1w, [dst])
++	add	dst, dst, #1
+ 9998:	sub	x0, end, dst			// bytes not copied
+ 	ret
+ 	.previous
+diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
+index 35b18e55eae80..57465bff1fe49 100644
+--- a/arch/hexagon/kernel/vmlinux.lds.S
++++ b/arch/hexagon/kernel/vmlinux.lds.S
+@@ -38,6 +38,8 @@ SECTIONS
+ 	.text : AT(ADDR(.text)) {
+ 		_text = .;
+ 		TEXT_TEXT
++		IRQENTRY_TEXT
++		SOFTIRQENTRY_TEXT
+ 		SCHED_TEXT
+ 		CPUIDLE_TEXT
+ 		LOCK_TEXT
+@@ -59,14 +61,9 @@ SECTIONS
+ 
+ 	_end = .;
+ 
+-	/DISCARD/ : {
+-		EXIT_TEXT
+-		EXIT_DATA
+-		EXIT_CALL
+-	}
+-
+ 	STABS_DEBUG
+ 	DWARF_DEBUG
+ 	ELF_DETAILS
+ 
++	DISCARDS
+ }
+diff --git a/arch/m68k/68000/dragen2.c b/arch/m68k/68000/dragen2.c
+index 584893c57c373..62f10a9e1ab7f 100644
+--- a/arch/m68k/68000/dragen2.c
++++ b/arch/m68k/68000/dragen2.c
+@@ -11,6 +11,7 @@
+ #include <linux/init.h>
+ #include <asm/machdep.h>
+ #include <asm/MC68VZ328.h>
++#include "screen.h"
+ 
+ /***************************************************************************/
+ /*                        Init Drangon Engine hardware                     */
+diff --git a/arch/m68k/68000/screen.h b/arch/m68k/68000/screen.h
+new file mode 100644
+index 0000000000000..2089bdf02688f
+--- /dev/null
++++ b/arch/m68k/68000/screen.h
+@@ -0,0 +1,804 @@
++/* Created with The GIMP */
++#define screen_width 320
++#define screen_height 240
++static unsigned char screen_bits[] = {
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x56,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x0d, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x5a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x55,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x03, 0x95, 0x6a, 0x00, 0x00, 0x00, 0x03, 0xf8, 0x70, 0xe0, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
++   0x01, 0xf8, 0x00, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x5b, 0x55, 0x00, 0x00, 0x00, 0x0f,
++   0xfc, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x38, 0x03, 0xfc, 0x00, 0x7f, 0x80, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0xb5, 0x56,
++   0x00, 0x00, 0x00, 0x1e, 0x0c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x03, 0x0e, 0x00, 0x61,
++   0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0xd7, 0x55, 0x55, 0x00, 0x03, 0x8e, 0x1c, 0x00, 0x70, 0xe1, 0x9e,
++   0x0e, 0x38, 0xe1, 0x80, 0x70, 0xc0, 0xf0, 0x73, 0x33, 0xc0, 0x78, 0x38,
++   0x00, 0x0e, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x02, 0xa9, 0xaa, 0xad, 0x00, 0x03, 0x8e, 0x38,
++   0x00, 0x70, 0xe1, 0xff, 0x0e, 0x38, 0xe3, 0x80, 0x71, 0xc3, 0xf8, 0x77,
++   0x3f, 0xe1, 0xfc, 0x38, 0x00, 0x0e, 0x00, 0xef, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x2b, 0x55, 0x6a,
++   0x00, 0x03, 0x8e, 0x38, 0x00, 0x70, 0xe1, 0xe7, 0x0e, 0x38, 0x73, 0x00,
++   0x73, 0x83, 0x9c, 0x7f, 0x3c, 0xe1, 0xce, 0x38, 0x00, 0x1c, 0x00, 0xff,
++   0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x15, 0x56, 0xab, 0x55, 0x00, 0x03, 0x8e, 0x38, 0x00, 0x70, 0xe1, 0xc7,
++   0x0e, 0x38, 0x76, 0x00, 0x77, 0x07, 0x1c, 0x78, 0x38, 0xe3, 0x8e, 0x38,
++   0x00, 0x78, 0x00, 0xf3, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x69, 0x55, 0x5a, 0xb7, 0x00, 0x03, 0x8e, 0x38,
++   0x00, 0x70, 0xe1, 0xc7, 0x0e, 0x38, 0x3c, 0x00, 0x7e, 0x07, 0xfc, 0x70,
++   0x38, 0xe3, 0xfe, 0x38, 0x00, 0xf0, 0x00, 0xe1, 0xc0, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xa5, 0x9a, 0xab, 0x6d,
++   0x00, 0x03, 0x8e, 0x3c, 0x00, 0x70, 0xe1, 0xc7, 0x0e, 0x38, 0x3e, 0x00,
++   0x7f, 0x07, 0xfc, 0x70, 0x38, 0xe3, 0xfe, 0x38, 0x01, 0xc0, 0x00, 0xe1,
++   0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
++   0x4d, 0x75, 0xb6, 0xd5, 0x00, 0x03, 0x8e, 0x1c, 0x00, 0x70, 0xe1, 0xc7,
++   0x0e, 0x38, 0x77, 0x00, 0x77, 0x87, 0x00, 0x70, 0x38, 0xe3, 0x80, 0x38,
++   0x03, 0x80, 0x00, 0xe1, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x14, 0xaa, 0xca, 0xd5, 0x5b, 0x00, 0x03, 0x9e, 0x1f,
++   0x0c, 0x70, 0xe1, 0xc7, 0x0e, 0x78, 0x67, 0x00, 0x73, 0xc7, 0x8c, 0x70,
++   0x38, 0xe3, 0xc6, 0x38, 0x03, 0xfe, 0x38, 0x71, 0xc0, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xb5, 0xbb, 0x5b, 0x6a,
++   0x00, 0x03, 0xfe, 0x0f, 0xfc, 0x70, 0xe1, 0xc7, 0x0f, 0xf8, 0xe3, 0x80,
++   0x71, 0xe3, 0xfc, 0x70, 0x38, 0xe1, 0xfe, 0x38, 0x03, 0xfe, 0x38, 0x7f,
++   0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa9,
++   0x6b, 0x56, 0xd6, 0xad, 0x00, 0x01, 0xe6, 0x03, 0xf0, 0x70, 0xe1, 0xc7,
++   0x07, 0x98, 0xc3, 0x80, 0x70, 0xe0, 0xf8, 0x70, 0x38, 0xe0, 0x7c, 0x38,
++   0x03, 0xfe, 0x38, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x03, 0x55, 0x55, 0x6a, 0xba, 0xeb, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x56, 0xd5, 0xd5, 0xd5, 0xac,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x5a,
++   0xb7, 0x3d, 0x57, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x75, 0x6d, 0xaa, 0xd3, 0xac, 0xaa, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8b, 0x6a, 0xb6, 0xde, 0x6b, 0xb6,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xba, 0xad,
++   0xeb, 0x32, 0xda, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x1e, 0xaa, 0xb5, 0xad, 0x6e, 0x96, 0xaa, 0x00, 0x00, 0x00, 0x00,
++   0x01, 0x86, 0x00, 0x00, 0x07, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0xfc, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x6b, 0x6f, 0x5a, 0xb5, 0x75, 0x69,
++   0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf5, 0xda, 0xd9,
++   0x53, 0xeb, 0x6b, 0x4b, 0x00, 0x00, 0xf0, 0xde, 0x03, 0xe6, 0xf0, 0x78,
++   0x06, 0x0c, 0x6c, 0x7c, 0x1f, 0x87, 0x86, 0xf0, 0xc0, 0xde, 0x0f, 0xcc,
++   0xde, 0x0f, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x03, 0xd6, 0xab, 0x57, 0x6e, 0x8a, 0xd6, 0xba, 0x00, 0x01, 0x98, 0xe7,
++   0x01, 0x87, 0x38, 0xcc, 0x06, 0x0c, 0x6c, 0x06, 0x31, 0x8c, 0xc7, 0x38,
++   0xc0, 0xe7, 0x18, 0xcc, 0xe7, 0x19, 0x80, 0xc3, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x0e, 0x2d, 0x5e, 0xda, 0x55, 0xbb, 0x59, 0x42,
++   0x00, 0x03, 0x0c, 0xc3, 0x01, 0x86, 0x19, 0x8c, 0x06, 0x0c, 0x70, 0x06,
++   0x61, 0x98, 0x66, 0x18, 0xf8, 0xc3, 0x30, 0xcc, 0xc3, 0x31, 0x80, 0xc3,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xf7, 0x6b, 0x6a,
++   0xab, 0x56, 0xd6, 0xbf, 0x00, 0x03, 0x0c, 0xc3, 0x01, 0x86, 0x19, 0xfc,
++   0x06, 0x0c, 0x60, 0x7e, 0x61, 0x98, 0x66, 0x18, 0xc0, 0xc3, 0x30, 0xcc,
++   0xc3, 0x3f, 0x80, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x75, 0x94, 0xdb, 0x75, 0x6e, 0xda, 0xaa, 0xa2, 0x00, 0x03, 0x0c, 0xc3,
++   0x01, 0x86, 0x19, 0x80, 0x06, 0x0c, 0x60, 0xc6, 0x61, 0x98, 0x66, 0x18,
++   0xc0, 0xc3, 0x30, 0xcc, 0xc3, 0x30, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x01, 0xdb, 0x6b, 0xad, 0x9b, 0x27, 0x55, 0x55, 0x55,
++   0x00, 0x03, 0x0c, 0xc3, 0x01, 0x86, 0x19, 0x80, 0x06, 0x0c, 0x60, 0xc6,
++   0x33, 0x98, 0x66, 0x18, 0xc0, 0xc3, 0x19, 0xcc, 0xc3, 0x30, 0x00, 0xc3,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x36, 0xde, 0xb5, 0x65,
++   0x75, 0x5a, 0xd5, 0x56, 0x00, 0x01, 0x98, 0xc3, 0x01, 0x86, 0x18, 0xc4,
++   0x06, 0x18, 0x60, 0xce, 0x1d, 0x8c, 0xc6, 0x18, 0xc0, 0xc3, 0x0e, 0xcc,
++   0xc3, 0x18, 0x80, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
++   0xed, 0xb5, 0x6d, 0x56, 0x55, 0x55, 0x55, 0xae, 0x00, 0x00, 0xf0, 0xc3,
++   0x00, 0xe6, 0x18, 0x78, 0x07, 0xf0, 0x60, 0x77, 0x01, 0x87, 0x86, 0x18,
++   0xfc, 0xc3, 0x00, 0xcc, 0xc3, 0x0f, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x7a, 0xab, 0x6d, 0xda, 0xaa, 0xca, 0xd5, 0x6d, 0x58,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x80, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0xda, 0xaa, 0xea, 0xae,
++   0x9b, 0x5a, 0xa9, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xd5,
++   0xbb, 0xfd, 0xad, 0xad, 0x69, 0xea, 0xcb, 0x55, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x05, 0xaf, 0xb6, 0x8a, 0x6a, 0xb9, 0x5a, 0x2d, 0xba, 0xb6,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, 0x7a, 0x75, 0x6e, 0xcd, 0x52,
++   0x9b, 0xdb, 0x55, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xad,
++   0xaf, 0x95, 0x55, 0xed, 0x55, 0x55, 0x6b, 0x55, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0xea, 0xb5, 0xec, 0xfd, 0x59, 0x5a, 0xb5, 0x56, 0xaa, 0xb6,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xb7, 0x6b, 0x36, 0x4a, 0xeb, 0xab,
++   0x2d, 0x6a, 0x9b, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0xad, 0x6f,
++   0x6b, 0xeb, 0xdd, 0x7b, 0x6a, 0x55, 0xb5, 0x56, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x17, 0x76, 0xda, 0xd6, 0x5d, 0x62, 0xc6, 0xd5, 0x36, 0xaa, 0xb5,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xdb, 0x56, 0xbc, 0xf2, 0xa5, 0x5d,
++   0x96, 0xaa, 0xb6, 0xd6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xee, 0xfd,
++   0xd5, 0x2c, 0x6d, 0x9a, 0x75, 0x5b, 0x6a, 0xb5, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x03, 0xee, 0xad, 0x55, 0x15, 0xef, 0x54, 0xf6, 0xc5, 0x6a, 0xa9, 0xa6,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x05, 0x5b, 0xd6, 0xad, 0xbe, 0xb0, 0xa6, 0x35,
++   0x5b, 0xd5, 0x4a, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xeb, 0x5d, 0xf5,
++   0xaa, 0xd7, 0xf4, 0x75, 0xba, 0x55, 0xaf, 0x6e, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x37, 0x5e, 0xf7, 0x55, 0x61, 0xbc, 0x08, 0x5b, 0x55, 0x5a, 0xa9, 0xb5,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x01, 0xf6, 0xba, 0xaa, 0xaa, 0x9f, 0x69, 0xec, 0xd5,
++   0x4b, 0xa9, 0xaa, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x2d, 0xd5, 0xed, 0x5a,
++   0xf2, 0xd6, 0xae, 0xdb, 0x9e, 0x27, 0x5f, 0xb5, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
++   0x6d, 0x5b, 0xaa, 0xda, 0xae, 0x95, 0x58, 0xd5, 0x34, 0x6d, 0x68, 0xad,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x15, 0xdb, 0xd7, 0x56, 0xb5, 0xd5, 0x6d, 0x29, 0x5b,
++   0x4b, 0xdb, 0x57, 0xb6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0xda, 0xac, 0xd5, 0x4b,
++   0x57, 0x6a, 0x9b, 0x76, 0x5c, 0x95, 0x54, 0x2a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa,
++   0xb7, 0xfb, 0xab, 0xb6, 0xea, 0xad, 0x62, 0x95, 0xa1, 0xf5, 0xa9, 0xad,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x03, 0x57, 0xaf, 0x6b, 0x72, 0x54, 0x99, 0xd5, 0x0e, 0xf3,
++   0x5f, 0x15, 0x2a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xea, 0xd4, 0xad, 0x5d, 0x35,
++   0xb5, 0x34, 0xb2, 0xaa, 0x54, 0xba, 0xad, 0x55, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x5e,
++   0xaf, 0xed, 0xab, 0xea, 0xb3, 0xaa, 0x6a, 0xad, 0xd5, 0xd5, 0xaa, 0xab,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x2a, 0xb9, 0xf5, 0xbb, 0xb5, 0x55, 0x64, 0x57, 0x55, 0x6b,
++   0x5d, 0xd0, 0x52, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xab, 0x5b, 0xb6, 0x6d, 0x37,
++   0x6f, 0xaa, 0x5b, 0xa2, 0xb5, 0x1f, 0xed, 0x73, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x65, 0xaa,
++   0x6e, 0xfb, 0xd3, 0x5a, 0xd4, 0x54, 0xaa, 0x5e, 0xc3, 0xb5, 0x15, 0x56,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x07, 0xaf, 0x7f, 0xea, 0xb6, 0xaa, 0xd6, 0xad, 0xf1, 0xd2, 0xd5,
++   0x56, 0x55, 0x6a, 0xd5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xbd, 0xda, 0xaf, 0x75, 0x6f, 0x5a,
++   0x45, 0x26, 0xb7, 0x5b, 0x20, 0xdd, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xeb, 0x7f,
++   0xf5, 0x4d, 0x95, 0x76, 0xd9, 0x56, 0xb5, 0x52, 0x6d, 0x12, 0xad, 0xaa,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x57, 0x3f, 0xa4, 0x8f, 0xb9, 0x6e, 0xa2, 0x6f, 0x1d, 0x6a, 0xef,
++   0xb5, 0x6d, 0xaa, 0xb5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x03, 0xfd, 0x75, 0x2f, 0x7c, 0x57, 0x88, 0xf6,
++   0x80, 0xb5, 0x57, 0x5c, 0xd7, 0x55, 0x54, 0xae, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x39, 0xff, 0xf6,
++   0xab, 0x7a, 0x57, 0x94, 0xef, 0x0d, 0xe4, 0xea, 0xa8, 0xaa, 0xab, 0xff,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x0c, 0xf7, 0xff, 0x6d, 0xb7, 0x4b, 0x39, 0x17, 0x2a, 0xfa, 0xb7, 0x56,
++   0xb7, 0xaa, 0xed, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x18, 0xbe, 0xfd, 0xda, 0x9e, 0xec, 0xe6, 0xd5,
++   0x52, 0x2a, 0x58, 0xa9, 0x54, 0x5a, 0x97, 0xe7, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0xe9, 0xf6, 0xbe,
++   0xd4, 0x5b, 0xad, 0x63, 0x61, 0xf7, 0xb7, 0xaf, 0x55, 0x52, 0x9f, 0xee,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
++   0xc9, 0xbd, 0xfb, 0x6b, 0xeb, 0xf5, 0x6b, 0x2d, 0x57, 0x52, 0x94, 0xaa,
++   0xb1, 0xab, 0x4a, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x02, 0xdb, 0xcb, 0xff, 0xf5, 0x3b, 0x55, 0xa0, 0x00,
++   0x45, 0x6e, 0xb5, 0xb5, 0x65, 0x52, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x2e, 0x7d, 0xdb, 0xa5,
++   0xca, 0xbe, 0x80, 0x00, 0x0a, 0x54, 0xaa, 0xa5, 0x45, 0x08, 0x09, 0x15,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
++   0xd8, 0xd3, 0x1b, 0xae, 0xb7, 0xea, 0x00, 0x00, 0x00, 0x95, 0xaa, 0x56,
++   0xdc, 0xe1, 0x21, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x53, 0x41, 0xff, 0x77, 0xbd, 0xaa, 0x58, 0x00, 0x00,
++   0x00, 0xdb, 0x75, 0xd4, 0xb2, 0xa4, 0x07, 0xea, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xef, 0x07, 0xbd, 0x65, 0xeb,
++   0xa2, 0xd0, 0x00, 0x00, 0x00, 0x25, 0x4b, 0x35, 0x56, 0x80, 0x77, 0x6a,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28,
++   0x01, 0x28, 0x9a, 0xb6, 0xd7, 0x60, 0x00, 0x00, 0x00, 0x0c, 0xaa, 0xaa,
++   0xaa, 0x02, 0x07, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x17, 0xe0, 0x17, 0xb1, 0xbf, 0xee, 0xb4, 0xc0, 0x00, 0x00,
++   0x00, 0x08, 0xaa, 0xad, 0x68, 0x54, 0x04, 0x5a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x74, 0x87, 0x77, 0x72, 0x5a,
++   0xab, 0x80, 0x00, 0x00, 0x00, 0x02, 0xd4, 0xb5, 0x52, 0x08, 0x5b, 0xd4,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x00,
++   0x1f, 0xd6, 0xef, 0xda, 0xd5, 0x00, 0x00, 0x00, 0x0c, 0x01, 0x52, 0xd5,
++   0x40, 0xf1, 0x55, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x9b, 0x00, 0x17, 0x4b, 0x92, 0xb7, 0xaf, 0x00, 0x00, 0x00,
++   0x0e, 0x01, 0x4e, 0xaa, 0xae, 0x95, 0x55, 0x6d, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xb8, 0x11, 0x2b, 0x13, 0x76, 0xef,
++   0x54, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x54, 0xaa, 0xaa, 0xb5, 0xad, 0x55,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x40, 0x00,
++   0x7e, 0x7c, 0x65, 0xf4, 0x78, 0x00, 0x00, 0x00, 0x1f, 0x00, 0xab, 0x56,
++   0xd5, 0x55, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x0a, 0x40, 0x42, 0x6d, 0xce, 0xe5, 0xae, 0x54, 0x00, 0x00, 0x00,
++   0x18, 0x00, 0x6d, 0x75, 0x5d, 0x55, 0x4d, 0x52, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x82, 0x03, 0xdc, 0x54, 0xbf, 0x61,
++   0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xad, 0xa2, 0xb5, 0x60, 0xad,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x16,
++   0xd9, 0xb5, 0xa4, 0xc7, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0xea,
++   0xae, 0xd5, 0x57, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x54, 0x04, 0x0d, 0x76, 0xbb, 0x4b, 0xbc, 0x58, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x36, 0x95, 0xa9, 0x55, 0x54, 0xaa, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x01, 0x50, 0x08, 0x5b, 0xc5, 0x3d, 0x97, 0x0a,
++   0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xb6, 0xab, 0x2b, 0x55, 0xab,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x24, 0x20, 0x3d,
++   0x59, 0x7b, 0x76, 0x37, 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, 0x6d,
++   0x75, 0xb5, 0x55, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x12, 0xf0, 0x01, 0xff, 0x21, 0xa8, 0xc3, 0x74, 0xa8, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x1b, 0xa9, 0x4b, 0x55, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x25, 0xc0, 0x41, 0xca, 0x9c, 0x77, 0x58, 0x9d,
++   0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x56, 0xb4, 0xad, 0xb2, 0x55,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x80, 0x0f, 0xbe,
++   0xc0, 0xf6, 0xd5, 0xb3, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x54,
++   0xa5, 0xaa, 0x55, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x17, 0x04, 0x87, 0xbc, 0x6a, 0x3b, 0xac, 0x9d, 0x58, 0x00, 0x00, 0x03,
++   0xe0, 0x00, 0x16, 0xab, 0x55, 0x4a, 0xd6, 0xa5, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x01, 0x5c, 0x00, 0x1f, 0x54, 0xc9, 0x2a, 0xb7, 0xa6,
++   0xd8, 0x0f, 0x00, 0x07, 0xf8, 0x00, 0x15, 0x6a, 0x55, 0x5a, 0xa4, 0xaa,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x08, 0x74, 0xca,
++   0x9c, 0x5a, 0xa8, 0xc5, 0x30, 0x1f, 0x80, 0x0f, 0xfc, 0x00, 0x0d, 0x55,
++   0xaa, 0xa5, 0x55, 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
++   0xf0, 0x20, 0xea, 0x5a, 0xb0, 0xe7, 0x95, 0x7d, 0x10, 0x3f, 0xc0, 0x1f,
++   0xfe, 0x00, 0x0a, 0xaa, 0xaa, 0xad, 0x4a, 0x95, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x03, 0xf4, 0x02, 0x7d, 0xb5, 0x8f, 0x9c, 0xaa, 0xe9,
++   0xa0, 0x3f, 0xc0, 0x1f, 0xfe, 0x00, 0x06, 0xb5, 0x54, 0xa9, 0x2a, 0x54,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x40, 0x47, 0xeb, 0xab,
++   0x75, 0x51, 0x55, 0x4d, 0xd8, 0x3f, 0xe0, 0x3f, 0x3f, 0x00, 0x0a, 0xaa,
++   0xa9, 0x4a, 0xaa, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23,
++   0xc0, 0x06, 0xff, 0xe5, 0xb6, 0xbb, 0xa9, 0x34, 0x48, 0x30, 0xe0, 0x3c,
++   0x0f, 0x00, 0x0a, 0xaa, 0x8a, 0xaa, 0xaa, 0xa4, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0xaf, 0x80, 0x9f, 0xa1, 0x8d, 0xb5, 0xd6, 0x93, 0xcd,
++   0x90, 0x62, 0x60, 0x3c, 0x27, 0x00, 0x06, 0xaa, 0xb5, 0xaa, 0xaa, 0xa9,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, 0x92, 0x3f, 0xb5, 0x36,
++   0x56, 0xb5, 0x9d, 0x55, 0x90, 0x62, 0x60, 0x38, 0x17, 0x80, 0x0d, 0x54,
++   0xaa, 0x54, 0xaa, 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x7a,
++   0x00, 0xba, 0xab, 0x73, 0xe7, 0xa2, 0xb6, 0x55, 0x2c, 0x61, 0x60, 0x38,
++   0x17, 0x80, 0x09, 0x6b, 0x4a, 0xd5, 0x4a, 0x92, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x0d, 0xfe, 0x03, 0x6d, 0xea, 0x08, 0x51, 0x1d, 0x2b, 0x35,
++   0x08, 0x61, 0x60, 0x38, 0x07, 0x80, 0x06, 0xda, 0xaa, 0xa9, 0x55, 0x55,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0xf8, 0x4e, 0xfb, 0x89, 0xde,
++   0x35, 0xea, 0x6b, 0x72, 0x28, 0x60, 0x70, 0x38, 0x07, 0x80, 0x05, 0x52,
++   0xaa, 0xaa, 0x95, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf8,
++   0x0d, 0x63, 0xbd, 0xe3, 0x57, 0x55, 0xb6, 0x49, 0xcc, 0x20, 0x7f, 0xf8,
++   0x07, 0x80, 0x0a, 0xaa, 0xaa, 0xaa, 0xaa, 0xad, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x5d, 0xe1, 0x34, 0xc6, 0xab, 0xa7, 0x91, 0x77, 0x37, 0xcc,
++   0x48, 0x30, 0x9f, 0x3c, 0x07, 0x80, 0x0a, 0xaa, 0xaa, 0xaa, 0xaa, 0x69,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3b, 0xc0, 0x34, 0xdb, 0x4e, 0xf9,
++   0xae, 0xd5, 0x54, 0xe1, 0xb4, 0x19, 0x3f, 0xfe, 0x0f, 0x00, 0x05, 0x55,
++   0x55, 0x55, 0x52, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x7f, 0x89,
++   0xf1, 0xf7, 0xe5, 0x57, 0xea, 0x8d, 0x4c, 0xda, 0xac, 0x13, 0xff, 0xff,
++   0x80, 0x00, 0x0a, 0xd5, 0xaa, 0xa4, 0x95, 0x54, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0xf7, 0x02, 0x53, 0x6c, 0x72, 0x10, 0xa3, 0x6a, 0x74, 0xea,
++   0x34, 0x07, 0xff, 0xff, 0xe0, 0x00, 0x05, 0x4e, 0x54, 0x95, 0x55, 0x55,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xfc, 0x97, 0xd6, 0x55, 0xb6, 0xab,
++   0xb7, 0x45, 0x46, 0xad, 0xf4, 0x1f, 0xff, 0xff, 0xfe, 0x00, 0x05, 0x2a,
++   0xd5, 0x54, 0x95, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xcf, 0x0e,
++   0xfb, 0x2d, 0x12, 0xd4, 0xb8, 0xb6, 0xeb, 0x6a, 0x10, 0x3f, 0xff, 0xff,
++   0xff, 0x00, 0x02, 0xda, 0x8a, 0xab, 0x6a, 0xaa, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x57, 0xbc, 0x3a, 0xc6, 0x0d, 0x76, 0xb1, 0x77, 0x15, 0x2a, 0x56,
++   0x34, 0x7f, 0xff, 0xff, 0xff, 0x80, 0x02, 0xa5, 0x75, 0x2a, 0x52, 0xaa,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x02, 0x0f, 0xfc, 0xdf, 0x75, 0x9d, 0x5a, 0x67,
++   0x15, 0x59, 0xb3, 0x6c, 0x4c, 0x7f, 0xff, 0xff, 0xff, 0x80, 0x05, 0x55,
++   0x8a, 0xaa, 0xaa, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf9, 0xdb,
++   0xed, 0x36, 0x5a, 0xeb, 0xad, 0x6b, 0x57, 0x5d, 0xa4, 0x7f, 0xff, 0xff,
++   0xf3, 0x80, 0x05, 0x54, 0xb2, 0xaa, 0xaa, 0xa9, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x7f, 0xf4, 0x5a, 0xaa, 0xdb, 0x40, 0x20, 0xea, 0xaa, 0xa8, 0x19,
++   0xe8, 0x1f, 0xff, 0xff, 0xe7, 0x00, 0x02, 0x55, 0x4b, 0xd5, 0x55, 0x4a,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x05, 0xff, 0xf2, 0xb5, 0x7d, 0x56, 0xaa, 0x76,
++   0xa5, 0xce, 0xb4, 0xd1, 0x2c, 0x0f, 0xff, 0xff, 0x0f, 0x00, 0x02, 0xa5,
++   0x5a, 0x2a, 0x55, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xc1, 0x31,
++   0x9a, 0x8d, 0x61, 0x2e, 0xb2, 0xb5, 0x57, 0x59, 0x88, 0x07, 0xff, 0xf8,
++   0x7e, 0x06, 0x00, 0xaa, 0x65, 0xd2, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x1f, 0x7e, 0x02, 0x55, 0x67, 0xb5, 0x5b, 0x05, 0xfa, 0xab, 0x0a, 0x8d,
++   0xe4, 0x03, 0xff, 0xc3, 0xfe, 0x07, 0x01, 0xaa, 0xaa, 0x26, 0xaa, 0xa9,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x16, 0x7d, 0x10, 0x57, 0x9d, 0x4a, 0x8a, 0xd7,
++   0x95, 0x5a, 0xd5, 0x4d, 0x58, 0x00, 0xfc, 0x1f, 0xe6, 0x03, 0xc1, 0x55,
++   0x52, 0xd5, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0xe1, 0x42, 0xa4,
++   0x7d, 0xd7, 0xba, 0xb0, 0x24, 0xd5, 0x97, 0xc5, 0xd2, 0x00, 0x00, 0x7f,
++   0x87, 0x03, 0xe0, 0x42, 0xaa, 0x95, 0x55, 0x4b, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x0d, 0x42, 0x17, 0xfb, 0x8c, 0xac, 0x46, 0xae, 0xdd, 0xb3, 0x68, 0x92,
++   0x6c, 0x03, 0x03, 0xfe, 0x0f, 0x01, 0xe0, 0x5a, 0x55, 0x62, 0xaa, 0x54,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x01, 0x3d, 0x0a, 0x32, 0xd5, 0x7b, 0xc9, 0xde, 0xad,
++   0x91, 0x4a, 0xaa, 0xc6, 0x68, 0x21, 0xff, 0xf8, 0x7f, 0x00, 0x60, 0x2a,
++   0xa9, 0x2e, 0xa9, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x64, 0xb3, 0x18,
++   0x8e, 0xae, 0xb4, 0x46, 0x9a, 0xbb, 0x54, 0xd5, 0xb4, 0x30, 0xff, 0xe0,
++   0xff, 0x80, 0x00, 0x52, 0xa5, 0x51, 0x55, 0x4a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
++   0x82, 0xc1, 0x52, 0xb3, 0xcb, 0xc5, 0xd4, 0xda, 0xd3, 0x4a, 0x68, 0x87,
++   0x68, 0x30, 0x3f, 0x03, 0xff, 0x80, 0x00, 0x2a, 0xaa, 0x55, 0x45, 0x55,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x01, 0x5b, 0xc6, 0xa9, 0x19, 0x74, 0x3d, 0xa9, 0x92,
++   0xab, 0x5b, 0x95, 0xb0, 0x28, 0x78, 0x00, 0x07, 0xff, 0xc0, 0x00, 0x15,
++   0x2a, 0x95, 0x2a, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x10, 0x0f, 0x6b, 0xa2,
++   0xb7, 0x22, 0x61, 0x2b, 0x52, 0xaa, 0x33, 0x1d, 0xa0, 0x7c, 0x00, 0x0f,
++   0xff, 0xe0, 0x00, 0x14, 0xa4, 0xaa, 0xa9, 0x52, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
++   0x00, 0x23, 0x27, 0x64, 0x8d, 0xfb, 0x64, 0xa7, 0x5a, 0xaa, 0xd3, 0x5a,
++   0xa0, 0x7c, 0x00, 0x3f, 0xff, 0xe0, 0x00, 0x05, 0x29, 0x52, 0x45, 0x55,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x02, 0x4a, 0x5f, 0x66, 0x6a, 0xab, 0x22, 0xaf, 0xec,
++   0xa9, 0x55, 0x17, 0x17, 0xa0, 0xfe, 0x00, 0x7f, 0xff, 0xf0, 0x00, 0x02,
++   0xaa, 0x4a, 0xaa, 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x8d, 0x83, 0x49, 0xad,
++   0x9a, 0x57, 0x50, 0x6b, 0x6b, 0x5a, 0xd1, 0x1a, 0x80, 0xff, 0x00, 0xff,
++   0xff, 0xf0, 0x00, 0x05, 0x52, 0x95, 0x29, 0x55, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c,
++   0x1b, 0x4b, 0xcb, 0x45, 0x7b, 0x4e, 0x4b, 0x5d, 0x2a, 0xd5, 0x16, 0x29,
++   0x41, 0xff, 0x87, 0xff, 0xff, 0xf8, 0x00, 0x01, 0x2a, 0xa5, 0x4a, 0x52,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x20, 0x52, 0x98, 0x9d, 0x0e, 0x95, 0x65, 0x38, 0x95,
++   0x55, 0x6a, 0xab, 0x35, 0x81, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x00, 0x01,
++   0xa9, 0x14, 0xa9, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x80, 0xe3, 0x2b, 0x74, 0x4d,
++   0xb5, 0x55, 0xd5, 0x95, 0x69, 0x4a, 0xac, 0x6e, 0x03, 0xff, 0xff, 0xff,
++   0xff, 0xfc, 0x00, 0x01, 0x25, 0x65, 0x2a, 0x95, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
++   0x9a, 0xb7, 0x0b, 0xb0, 0x6f, 0xfc, 0xfd, 0x6a, 0x8b, 0xd5, 0x4e, 0xa8,
++   0x03, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00, 0x01, 0x49, 0x14, 0xa5, 0x6a,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x08, 0x02, 0x94, 0xab, 0x3d, 0xc9, 0x6b, 0xb5, 0x43, 0xee,
++   0xb6, 0x95, 0x54, 0x9a, 0x07, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00, 0x00,
++   0xaa, 0x54, 0xaa, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x13, 0x9f, 0x68, 0x5b,
++   0xff, 0xfa, 0xd7, 0xfa, 0xa5, 0x6a, 0xac, 0xd4, 0x07, 0xff, 0xff, 0xff,
++   0xff, 0xfe, 0x00, 0x00, 0x55, 0x53, 0x2a, 0xaa, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x04,
++   0x65, 0x3c, 0x8b, 0x93, 0x87, 0x76, 0x75, 0xf5, 0x57, 0x55, 0x5c, 0xa8,
++   0x0f, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00, 0x0b, 0x54, 0x95, 0x54,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x2a, 0x7b, 0xfd, 0x75, 0xba, 0xae, 0xfe, 0x44, 0x5a,
++   0x8a, 0xca, 0x9c, 0xf0, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00,
++   0x14, 0x4a, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0xd8, 0xa2, 0xbd, 0x59, 0x52,
++   0x8e, 0xec, 0x48, 0xf1, 0x2d, 0x52, 0x9c, 0xa0, 0x0f, 0xff, 0xff, 0xff,
++   0xff, 0xfe, 0x02, 0x00, 0x02, 0x95, 0x54, 0x92, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x35,
++   0x5f, 0xed, 0x6a, 0xf2, 0x95, 0x19, 0x09, 0xaa, 0x6a, 0x5d, 0x48, 0xc0,
++   0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x02, 0xaa, 0x52, 0xaa,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x04, 0x22, 0xa6, 0xc3, 0xdf, 0xd0, 0x66, 0x29, 0xf0, 0x01, 0x6a,
++   0xca, 0xd5, 0x59, 0x80, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80,
++   0x05, 0x2a, 0xaa, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xa4, 0x03, 0xf2, 0x61, 0xed,
++   0x17, 0xe2, 0x02, 0xaa, 0x99, 0x2a, 0x98, 0x80, 0x07, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0x80, 0x40, 0x02, 0x55, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x09, 0x05,
++   0x88, 0xa7, 0x47, 0xdc, 0xc0, 0x00, 0x2a, 0x6b, 0x4a, 0x6a, 0xb0, 0x00,
++   0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x20, 0x01, 0x55, 0x55, 0x55,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x80, 0x2e, 0x89, 0x11, 0xfb, 0x87, 0x9c, 0x90, 0x64, 0xaa, 0x9a,
++   0x4a, 0xa9, 0x1a, 0x00, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x30,
++   0x00, 0xa8, 0x42, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x2b, 0x16, 0x2b, 0x85, 0x09, 0xf8,
++   0x83, 0x45, 0x5d, 0xad, 0x95, 0x49, 0x5c, 0x08, 0x1f, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xe0, 0x10, 0x00, 0xae, 0xea, 0x4a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x49, 0x28,
++   0x3b, 0xf5, 0xbd, 0x30, 0x14, 0x96, 0x95, 0x6a, 0xd2, 0xb5, 0xaa, 0x10,
++   0x1f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x18, 0x00, 0xa9, 0x15, 0x55,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x02, 0x19, 0x26, 0x58, 0x76, 0x4a, 0x1e, 0xe1, 0x7a, 0xba, 0xb1, 0x5b,
++   0x4a, 0xab, 0x2c, 0x10, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x08,
++   0x00, 0x2b, 0x6a, 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x11, 0x21, 0x4d, 0x78, 0x6d, 0x20,
++   0x0a, 0x84, 0xda, 0xaf, 0x55, 0x56, 0x52, 0x20, 0x3f, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xf0, 0x0c, 0x00, 0x54, 0xaa, 0xa5, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0xe8, 0x62,
++   0xcd, 0x49, 0xd8, 0x42, 0xb4, 0xb8, 0x96, 0x3a, 0xa5, 0x54, 0xd4, 0x00,
++   0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x04, 0x00, 0x14, 0xaa, 0x4a,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x05, 0x2d, 0x18, 0x4e, 0x0e, 0x61, 0xb0, 0x0a, 0xab, 0x6b, 0xaa, 0x37,
++   0x0d, 0x5a, 0xa8, 0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x06,
++   0x00, 0x2b, 0x4a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0xb1, 0xd8, 0x5f, 0xc1, 0x90, 0x55,
++   0x55, 0x4a, 0x32, 0xed, 0x79, 0x49, 0x50, 0x00, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xf8, 0x06, 0x00, 0x12, 0xaa, 0xa9, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x16, 0x00, 0xd1,
++   0xac, 0x89, 0x91, 0x4a, 0xaa, 0xaa, 0xa9, 0xb2, 0xc5, 0x65, 0x68, 0x00,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x03, 0x00, 0x15, 0x2a, 0x4a,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
++   0x64, 0x54, 0x81, 0xcf, 0x89, 0x00, 0x1f, 0xf2, 0xaa, 0xd5, 0x6b, 0x35,
++   0x5a, 0xad, 0xa8, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x03,
++   0x00, 0x05, 0x6a, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x23, 0xf6, 0x52, 0x28, 0x19, 0xd5,
++   0x45, 0x55, 0x49, 0x54, 0xa5, 0x6a, 0xa0, 0x01, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xfc, 0x03, 0x00, 0x15, 0x24, 0xaa, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x09, 0x18, 0x04, 0x6c,
++   0x64, 0x61, 0x21, 0x95, 0xad, 0x56, 0xaa, 0xd7, 0xab, 0x55, 0x50, 0x03,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x01, 0x80, 0x04, 0xaa, 0x94,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
++   0x12, 0x46, 0x20, 0xdc, 0xc4, 0x01, 0x01, 0xaa, 0xaa, 0xa9, 0x5b, 0x49,
++   0x2a, 0xad, 0xa0, 0x03, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x01,
++   0x80, 0x02, 0xa4, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x10, 0x44, 0x08, 0x00, 0xd1, 0x84, 0x20, 0x05, 0xaa,
++   0xaa, 0x95, 0x66, 0xaa, 0xaa, 0xaa, 0xa0, 0x03, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xfe, 0x01, 0x80, 0x05, 0x55, 0x29, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0c, 0xa8, 0x89, 0x99,
++   0x00, 0x22, 0x03, 0xd5, 0x4a, 0xaa, 0xa7, 0x4d, 0x5a, 0xd5, 0x40, 0x07,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x01, 0x80, 0x02, 0x49, 0x4a,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48,
++   0x18, 0x02, 0x03, 0x24, 0x10, 0xa8, 0x2f, 0x6a, 0xaa, 0xaa, 0x45, 0x51,
++   0x4a, 0xb5, 0x40, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00,
++   0x80, 0x04, 0x95, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x02, 0x01, 0x05, 0x48, 0x07, 0xae, 0x83, 0x02, 0x8c, 0x2a,
++   0x95, 0x52, 0xcd, 0x56, 0xad, 0x56, 0xc0, 0x07, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xfe, 0x00, 0xc0, 0x01, 0x52, 0x52, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x58, 0x98, 0x2f, 0x30,
++   0x02, 0x82, 0x01, 0x6a, 0xaa, 0xaa, 0x8d, 0x29, 0x35, 0xb5, 0x04, 0x07,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0xc0, 0x02, 0x94, 0x95,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
++   0x45, 0x30, 0x1f, 0xa0, 0x25, 0x1d, 0x15, 0x4a, 0xa5, 0x51, 0xad, 0x55,
++   0x6d, 0x6a, 0x84, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00,
++   0xc0, 0x01, 0x29, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x08, 0xa1, 0x2d, 0x21, 0x7f, 0x40, 0x4d, 0xb0, 0x35, 0x55,
++   0x15, 0x15, 0x79, 0x25, 0x4a, 0xca, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xff, 0x00, 0xc0, 0x02, 0x52, 0x96, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x40, 0x7f, 0x24,
++   0x8a, 0x58, 0xad, 0x55, 0x55, 0x52, 0x0a, 0x5a, 0xaa, 0xbb, 0x04, 0x0f,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xc0, 0x00, 0xa5, 0x20,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x09,
++   0x50, 0x42, 0xfd, 0xc0, 0x59, 0x53, 0x53, 0x4c, 0xaa, 0x54, 0x5a, 0xa2,
++   0xad, 0xaa, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
++   0xc0, 0x01, 0x4d, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x08, 0x02, 0x15, 0x90, 0x7d, 0x15, 0x76, 0xac, 0xad, 0x52,
++   0x95, 0x55, 0xaa, 0x4c, 0xa5, 0x54, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xff, 0x00, 0xc0, 0x00, 0xa5, 0x12, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0c, 0x83, 0x01, 0xff, 0xf9,
++   0x15, 0x29, 0x51, 0x55, 0x52, 0x2c, 0xa2, 0x54, 0xab, 0x6c, 0x04, 0x0f,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80, 0x01, 0x28, 0x54,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98,
++   0x06, 0x01, 0xff, 0xe9, 0x55, 0xd5, 0x56, 0xa5, 0x55, 0xaa, 0xad, 0x51,
++   0x35, 0x54, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
++   0x80, 0x00, 0x4a, 0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x01, 0x08, 0x31, 0x1c, 0x12, 0xf4, 0xc5, 0x44, 0x5a, 0xaa, 0x49,
++   0xaa, 0xa9, 0xa8, 0x95, 0x55, 0x50, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xff, 0x01, 0x80, 0x01, 0x52, 0x82, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x40, 0x48, 0x1c, 0x41, 0x3e, 0x2e,
++   0x8b, 0x54, 0xaa, 0xab, 0x52, 0xa5, 0x45, 0x24, 0x95, 0x50, 0x06, 0x0f,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x01, 0x24, 0x5a,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00,
++   0x38, 0x01, 0x1d, 0x64, 0x9a, 0xa9, 0x2a, 0x94, 0xaa, 0xab, 0x55, 0x55,
++   0xaa, 0xd8, 0x02, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
++   0x00, 0x01, 0x4a, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x13, 0xc8, 0x00, 0xb0, 0x02, 0x3e, 0x96, 0x35, 0x6a, 0xaa, 0x55,
++   0x4a, 0x95, 0x4a, 0x54, 0x55, 0x28, 0x03, 0x0f, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xff, 0x8f, 0xc0, 0x01, 0x29, 0x4a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x83, 0x49, 0x20, 0x8c, 0x78, 0xb5,
++   0x65, 0x4a, 0xaa, 0xa6, 0xaa, 0xa5, 0x29, 0x45, 0xaa, 0xa8, 0x03, 0x0f,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x70, 0x01, 0x45, 0x28,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x03, 0x20,
++   0x00, 0x30, 0x75, 0x4c, 0x95, 0x55, 0x54, 0xa9, 0x52, 0x95, 0x52, 0x95,
++   0x2a, 0xa8, 0x01, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80,
++   0x18, 0x00, 0x94, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x0d, 0x0c, 0x0c, 0x44, 0x18, 0xe5, 0xb9, 0xb5, 0x54, 0x92, 0x96,
++   0xaa, 0x55, 0x4a, 0x69, 0x55, 0x20, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xff, 0x00, 0x00, 0x01, 0x29, 0x29, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x01, 0x2e, 0x08, 0x1c, 0x01, 0x7b, 0xdc, 0x96,
++   0x4a, 0xaa, 0xaa, 0xa9, 0x4a, 0xaa, 0x95, 0x0a, 0x73, 0x50, 0x00, 0xff,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00, 0x01, 0x52, 0x52,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x20, 0x30,
++   0x44, 0x79, 0x88, 0xd4, 0x95, 0x49, 0x54, 0xab, 0x52, 0x94, 0xa4, 0xaa,
++   0xc6, 0x51, 0xe0, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00,
++   0x00, 0x00, 0x8a, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x38, 0x2a, 0x21, 0x01, 0xfb, 0xa0, 0x7d, 0x54, 0xb5, 0x55, 0x54,
++   0x95, 0x55, 0x29, 0x55, 0x29, 0x57, 0xf8, 0x3f, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xfe, 0x00, 0x00, 0x02, 0x50, 0x52, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x02, 0x78, 0x90, 0x00, 0x0d, 0xfb, 0xca, 0xd2,
++   0xaa, 0x92, 0x49, 0x55, 0x55, 0x2a, 0x4a, 0x4a, 0xd3, 0x57, 0xfc, 0x1f,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x00, 0x00, 0x02, 0x95, 0x54,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x39, 0x20, 0xa4,
++   0x13, 0xfd, 0xab, 0x2a, 0x52, 0xa5, 0x52, 0x55, 0x52, 0xa9, 0x55, 0x55,
++   0x2a, 0x57, 0xff, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x00,
++   0x00, 0x0d, 0x24, 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x08, 0x30, 0xc0, 0x00, 0x77, 0xb5, 0x55, 0x69, 0x4a, 0xaa, 0x96, 0xaa,
++   0x94, 0xaa, 0x91, 0x45, 0x55, 0x4f, 0xff, 0x03, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0xcf, 0x00, 0x00, 0x1e, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x01, 0x30, 0x81, 0x00, 0x67, 0xb4, 0x74, 0x52,
++   0x55, 0x55, 0x2a, 0xaa, 0xaa, 0x52, 0x55, 0x35, 0x52, 0xaf, 0xff, 0x81,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x00, 0x00, 0x1e, 0xa0, 0x48,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xe3, 0x00, 0x08,
++   0x86, 0x4a, 0xd4, 0xd5, 0x54, 0x91, 0x49, 0x55, 0x51, 0x54, 0x95, 0x49,
++   0x56, 0x4f, 0xff, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x80,
++   0x00, 0x3e, 0x0b, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x05, 0xc4, 0x02, 0x00, 0x7f, 0xbd, 0x8a, 0x25, 0x49, 0x55, 0x2a, 0xb5,
++   0x55, 0x52, 0xa4, 0x96, 0xac, 0x8f, 0xff, 0xe0, 0x7f, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0x9f, 0x80, 0x00, 0x7e, 0x54, 0xa4, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x43, 0x80, 0x20, 0x21, 0x4d, 0x3f, 0x52, 0xb4,
++   0x55, 0x2a, 0x55, 0x55, 0x4a, 0xa4, 0x95, 0x32, 0xa5, 0x4f, 0xff, 0xe0,
++   0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xc0, 0x00, 0xfe, 0x49, 0x14,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, 0x04, 0x08,
++   0x1a, 0x46, 0xd4, 0x49, 0xaa, 0xa4, 0xaa, 0xaa, 0x54, 0x95, 0x2a, 0xc4,
++   0x94, 0x8f, 0xff, 0xf0, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xe0,
++   0x01, 0xfe, 0x92, 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0x8e, 0x40, 0x80, 0x00, 0x74, 0x44, 0x51, 0x55, 0x64, 0xa9, 0x49, 0x55,
++   0x49, 0x51, 0x52, 0x5a, 0x2a, 0x1f, 0xff, 0xf8, 0x07, 0xff, 0xff, 0xff,
++   0xff, 0xff, 0x9f, 0xf0, 0x07, 0xfe, 0x24, 0x52, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x00, 0x0d, 0x90, 0x00, 0x41, 0xeb, 0x12, 0xab, 0x52,
++   0xa9, 0x4a, 0xaa, 0xa5, 0x52, 0x95, 0x2a, 0xa2, 0xa8, 0x1f, 0xff, 0xf8,
++   0x03, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xf8, 0x1f, 0xfe, 0x52, 0x8a,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x3b, 0x81, 0x08, 0x05,
++   0xe3, 0x15, 0xa5, 0x36, 0x95, 0x4a, 0x4a, 0xaa, 0x45, 0x2a, 0xa2, 0xac,
++   0x00, 0x3f, 0xff, 0xfc, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff,
++   0xff, 0xfe, 0x04, 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++   0xb2, 0x00, 0x00, 0x80, 0xe8, 0x52, 0xac, 0xa8, 0xa4, 0x92, 0xa9, 0x4a,
++   0x95, 0x51, 0x2d, 0x50, 0xe1, 0xff, 0xff, 0xfe, 0x00, 0x7f, 0xff, 0xff,
++   0xff, 0xff, 0x9f, 0xff, 0xff, 0xfe, 0x29, 0x05, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x04, 0x00, 0x35, 0x10, 0x09, 0xa9, 0x4a, 0xaa, 0xaa,
++   0xa9, 0x54, 0x95, 0x2a, 0xa4, 0x8a, 0xa9, 0x43, 0xff, 0xff, 0xff, 0xfe,
++   0x00, 0x3f, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff, 0xff, 0xfe, 0x12, 0xa9,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x01, 0x03,
++   0x34, 0xa5, 0x19, 0x25, 0x55, 0x55, 0x55, 0x55, 0x15, 0x55, 0x45, 0x53,
++   0xff, 0xff, 0xff, 0xff, 0x00, 0x1f, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff,
++   0xff, 0xfe, 0x04, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
++   0x49, 0x3c, 0x00, 0x34, 0x75, 0x4a, 0xa2, 0xaa, 0x92, 0x92, 0x92, 0x54,
++   0x52, 0x54, 0x99, 0x53, 0xff, 0xff, 0xff, 0xff, 0x80, 0x0f, 0xff, 0xff,
++   0xff, 0xff, 0x9f, 0xff, 0xff, 0xff, 0x14, 0x94, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0x02, 0x00, 0x78, 0x12, 0xfc, 0xaa, 0x92, 0x2d, 0x54,
++   0xa4, 0xa5, 0x2a, 0x92, 0x94, 0x92, 0xa2, 0xa3, 0xff, 0xff, 0xff, 0xff,
++   0x80, 0x0f, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff, 0xff, 0xff, 0x01, 0x51,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x16, 0xf0, 0x80, 0x71,
++   0xc5, 0x2a, 0x65, 0x25, 0x55, 0x4a, 0x52, 0xaa, 0x51, 0x52, 0x95, 0x21,
++   0xff, 0xff, 0xff, 0xff, 0xc0, 0x07, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff,
++   0xff, 0xff, 0x81, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
++   0x8e, 0xa0, 0x00, 0xb3, 0x94, 0xa4, 0xa9, 0x55, 0x45, 0x54, 0xa5, 0x25,
++   0x25, 0x2a, 0x65, 0x41, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x07, 0xff, 0xff,
++   0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0x80, 0x4a, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x00, 0xa0, 0x0f, 0x42, 0x13, 0x46, 0x94, 0x94, 0xa5, 0x4a,
++   0x54, 0x92, 0x88, 0xa8, 0x49, 0x55, 0x4a, 0x51, 0xff, 0xff, 0xff, 0xff,
++   0xe0, 0x07, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0xe0, 0x52,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x2d, 0x00, 0x87, 0x56,
++   0x4b, 0x2a, 0x99, 0x55, 0x4a, 0xaa, 0x55, 0x4a, 0x92, 0xa2, 0x54, 0xa8,
++   0xff, 0xff, 0xff, 0xff, 0xf0, 0x07, 0xff, 0xff, 0xff, 0xfd, 0x8f, 0xff,
++   0xff, 0xff, 0xfe, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x88,
++   0x18, 0x04, 0x1f, 0x11, 0x51, 0x51, 0x0b, 0x54, 0x94, 0xa5, 0x52, 0x92,
++   0x55, 0x54, 0x95, 0x20, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x0f, 0xff, 0xff,
++   0xff, 0xf9, 0x8f, 0xff, 0xff, 0xff, 0xff, 0x89, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x11, 0xe0, 0x38, 0x01, 0x3e, 0x4d, 0x0a, 0x55, 0x5a, 0x55,
++   0x55, 0x4a, 0x25, 0x24, 0x81, 0x25, 0x29, 0x48, 0xff, 0xff, 0xff, 0xff,
++   0xf8, 0x1f, 0xff, 0xff, 0xff, 0xf0, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xc2,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x80, 0xf0, 0x90, 0x7e, 0xd4,
++   0x68, 0x92, 0xaa, 0x54, 0x92, 0x92, 0xa9, 0x55, 0x2e, 0xaa, 0xa2, 0x58,
++   0x7f, 0xff, 0xff, 0xff, 0xfc, 0x3f, 0xff, 0xff, 0xff, 0xe0, 0x8f, 0xff,
++   0xff, 0xff, 0xff, 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x04,
++   0x40, 0x00, 0x49, 0xe2, 0x8a, 0xa9, 0x54, 0x95, 0x2a, 0x54, 0x4a, 0x48,
++   0x91, 0x51, 0x2a, 0xc8, 0x7f, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
++   0xff, 0xc0, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x2e, 0x00, 0x00, 0x03, 0x79, 0x4a, 0xa9, 0x45, 0x49, 0x54,
++   0xa4, 0x89, 0x49, 0x52, 0x8a, 0x85, 0x4a, 0x98, 0x7f, 0xff, 0xff, 0xff,
++   0xfe, 0x7f, 0xff, 0xff, 0xff, 0x80, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xc4,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x48, 0x44, 0x91, 0x65, 0x55,
++   0x4a, 0x54, 0x96, 0x85, 0x55, 0x52, 0xaa, 0x8a, 0x1a, 0xaa, 0x59, 0x30,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xfe, 0x01, 0x0f, 0xff,
++   0xff, 0xff, 0xff, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x20, 0x00,
++   0x00, 0x02, 0xc3, 0x50, 0x29, 0x49, 0x2d, 0x35, 0x24, 0x54, 0x89, 0x32,
++   0x52, 0x91, 0x49, 0x40, 0xff, 0xff, 0xff, 0xff, 0xff, 0x30, 0xff, 0xff,
++   0xfc, 0x01, 0x0f, 0xff, 0xff, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x00, 0x20, 0x08, 0x80, 0x00, 0x87, 0x4b, 0x42, 0x95, 0x2a, 0x4a,
++   0x4a, 0x89, 0x52, 0x44, 0x45, 0x25, 0x55, 0x10, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0x98, 0x7f, 0xff, 0xf0, 0x01, 0x0f, 0xff, 0xff, 0xff, 0xfc, 0x09,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x40, 0x40, 0x11, 0x15, 0x6a, 0x92,
++   0x4d, 0x29, 0x4a, 0xaa, 0xa4, 0xaa, 0x55, 0x54, 0x9a, 0x74, 0x44, 0xb1,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0x8c, 0x1f, 0xff, 0x00, 0x01, 0x0f, 0xff,
++   0xff, 0xff, 0xf0, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
++   0x80, 0x01, 0x48, 0x54, 0x94, 0xa6, 0xa9, 0x52, 0x45, 0x24, 0x84, 0x92,
++   0xa4, 0x85, 0x2a, 0x43, 0xff, 0xff, 0xff, 0xff, 0xff, 0x84, 0x00, 0x00,
++   0x00, 0x01, 0x1f, 0xff, 0xff, 0xff, 0xc0, 0x92, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x11, 0x00, 0x09, 0x02, 0x03, 0x53, 0x15, 0x2a, 0x49, 0x4a, 0xa5,
++   0x49, 0x29, 0x29, 0x24, 0x2a, 0xd5, 0x54, 0xa3, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xc6, 0x00, 0x00, 0x00, 0x03, 0x1f, 0xff, 0xff, 0xfe, 0x01, 0x24,
++   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x20, 0x26, 0xd4, 0xa2,
++   0x49, 0xb5, 0x15, 0x4a, 0x52, 0xa9, 0x4a, 0x92, 0xa5, 0x12, 0x52, 0xa3,
++   0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x00, 0x00, 0x00, 0x03, 0x1f, 0xff,
++   0xff, 0xf0, 0x14, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0x00, 0x44,
++   0x00, 0x0c, 0xc4, 0xa4, 0xa8, 0x25, 0x2a, 0xaa, 0x84, 0x45, 0x28, 0xa4,
++   0xa8, 0xa5, 0x4a, 0x43, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc2, 0x00, 0x00,
++   0x00, 0x06, 0x1f, 0xff, 0xff, 0xc0, 0x25, 0x24, 0x00, 0x00, 0x00, 0x00,
++   0x00, 0x10, 0x80, 0x12, 0x01, 0x11, 0x12, 0x8a, 0x95, 0x49, 0x55, 0x49,
++   0x2a, 0xaa, 0x45, 0x0a, 0x4a, 0x94, 0xaa, 0x90, 0xff, 0xff, 0xff, 0xff,
++   0xff, 0xc2, 0x00, 0x00, 0x00, 0x06, 0x1f, 0xff, 0xff, 0x02, 0x48, 0x92,
++   0x00, 0x00, 0x00, 0x00, 0x02, 0x01, 0x08, 0x00, 0x22, 0x01, 0x24, 0x95,
++   0x21, 0x55, 0x55, 0x2a, 0x54, 0x92, 0x94, 0xa8, 0xa9, 0x2a, 0x92, 0xa8,
++   0x1f, 0xff, 0xff, 0xff, 0xff, 0x82, 0x00, 0x00, 0x00, 0x06, 0x0f, 0xff,
++   0xfe, 0x02, 0x49, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x01, 0x40,
++   0x00, 0xc4, 0x89, 0x11, 0x54, 0x95, 0x29, 0x49, 0x49, 0x24, 0x29, 0x12,
++   0x92, 0xa5, 0x55, 0x28, 0x01, 0xff, 0xff, 0xff, 0xff, 0x82, 0x00, 0x00,
++   0x00, 0x02, 0x0f, 0xff, 0xfc, 0x14, 0x92, 0x49, 0x00, 0x00, 0x00, 0x00,
++   0x08, 0x42, 0x06, 0x44, 0x45, 0x08, 0x92, 0x24, 0x95, 0xaa, 0xa5, 0x52,
++   0x92, 0x55, 0x42, 0x52, 0xaa, 0x4a, 0x49, 0x4a, 0x80, 0x0f, 0xff, 0xff,
++   0xff, 0x84, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0xf0, 0x04, 0x92, 0x49,
++   0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x2a, 0x80, 0x01, 0x95, 0x2a, 0x8a,
++   0xa0, 0x95, 0x2a, 0x95, 0x54, 0xa4, 0x54, 0x84, 0x44, 0x95, 0x55, 0x29,
++   0x54, 0x00, 0x7f, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xff,
++   0xe0, 0x29, 0x24, 0x92, 0x00, 0x00, 0x00, 0x00, 0x22, 0x04, 0x03, 0x03,
++   0x42, 0x12, 0x52, 0x2a, 0x15, 0x55, 0x54, 0xa8, 0x92, 0xa9, 0x49, 0x29,
++   0x54, 0xaa, 0x92, 0x52, 0xa5, 0x40, 0x07, 0xff, 0xff, 0x00, 0x15, 0x50,
++   0x92, 0x44, 0x07, 0xff, 0xc0, 0x52, 0x54, 0xaa, 0x00, 0x00, 0x00, 0x00,
++   0x84, 0x02, 0x86, 0x03, 0x08, 0x41, 0x48, 0x94, 0xa5, 0x52, 0xaa, 0x8a,
++   0x49, 0x52, 0x92, 0x45, 0x49, 0x55, 0x2a, 0xaa, 0x94, 0x96, 0x00, 0x7f,
++   0xfe, 0x01, 0x55, 0x2b, 0x25, 0x50, 0x03, 0xff, 0x00, 0xa4, 0x91, 0x08,
++   0x00, 0x00, 0x00, 0x00, 0x88, 0x2c, 0x18, 0x14, 0x0a, 0xaa, 0x92, 0x55,
++   0x49, 0x55, 0x4a, 0xa9, 0x52, 0x44, 0x92, 0x94, 0x92, 0x92, 0xca, 0x4a,
++   0x55, 0x27, 0xa0, 0x0f, 0xf8, 0x05, 0x35, 0x49, 0x68, 0x91, 0x00, 0xf8,
++   0x04, 0x95, 0x25, 0x29, 0x00, 0x00, 0x00, 0x01, 0x08, 0x04, 0x18, 0x00,
++   0x08, 0x04, 0x49, 0x08, 0x55, 0x4a, 0xa9, 0x12, 0x95, 0x55, 0x49, 0x22,
++   0x65, 0x2a, 0x5a, 0x92, 0xa9, 0x49, 0x54, 0x00, 0xe0, 0x24, 0xb2, 0x55,
++   0x15, 0x4c, 0x00, 0x00, 0x02, 0x48, 0x52, 0x52, 0x00, 0x00, 0x00, 0x02,
++   0x21, 0x09, 0x50, 0x81, 0x51, 0x69, 0x48, 0x52, 0x92, 0x94, 0xaa, 0x54,
++   0x25, 0x54, 0x92, 0x4a, 0x89, 0x54, 0x82, 0xaa, 0x4a, 0x53, 0x55, 0x00,
++   0x00, 0x49, 0x65, 0x52, 0x51, 0x12, 0x80, 0x00, 0x14, 0x92, 0x92, 0x42,
++   0x00, 0x00, 0x00, 0x01, 0x00, 0x20, 0x50, 0x24, 0x2a, 0x95, 0x25, 0x29,
++   0x4a, 0xb5, 0x48, 0xa5, 0x52, 0x49, 0x24, 0x94, 0xaa, 0xab, 0x15, 0x24,
++   0x92, 0xa5, 0x54, 0x40, 0x00, 0xaa, 0xb5, 0x25, 0x4a, 0xa4, 0x80, 0x00,
++   0x21, 0x24, 0xa8, 0x94, 0x00, 0x00, 0x00, 0x0a, 0x44, 0x02, 0xb1, 0x01,
++   0x24, 0x80, 0x48, 0xa2, 0x55, 0x55, 0x52, 0x8a, 0x94, 0xaa, 0x49, 0x22,
++   0x8a, 0x49, 0x49, 0x55, 0x4a, 0x51, 0x25, 0x48, 0x02, 0xa8, 0x6a, 0x69,
++   0x12, 0x29, 0x50, 0x00, 0x55, 0x24, 0x81, 0x24, 0x00, 0x00, 0x00, 0x23,
++   0x80, 0x90, 0x70, 0x14, 0x82, 0x55, 0x22, 0x8a, 0xa9, 0x14, 0xaa, 0xa9,
++   0x25, 0x51, 0x2a, 0x55, 0x21, 0x55, 0x55, 0x12, 0x55, 0x32, 0xaa, 0x94,
++   0xa5, 0x55, 0x5a, 0x92, 0x54, 0xa5, 0x52, 0x24, 0xa2, 0x49, 0x2a, 0x92,
++   0x00, 0x00, 0x00, 0x0e, 0x08, 0x00, 0xe2, 0xa0, 0x04, 0x54, 0x84, 0x52,
++   0x4a, 0xa4, 0xaa, 0x2a, 0xa9, 0x25, 0x45, 0x42, 0xae, 0x92, 0x44, 0xa4,
++   0x91, 0x24, 0xaa, 0xaa, 0xaa, 0x91, 0x55, 0x55, 0x51, 0x55, 0x54, 0xa9,
++   0x14, 0x92, 0x49, 0x24, 0x00, 0x00, 0x00, 0x24, 0x01, 0x82, 0x70, 0x09,
++   0x2a, 0x41, 0x2a, 0x89, 0x14, 0x49, 0x29, 0x52, 0x4a, 0xaa, 0x2a, 0x15,
++   0x12, 0xaa, 0xa9, 0x2a, 0xaa, 0xba, 0xa4, 0x94, 0x95, 0x55, 0x5a, 0x49,
++   0x25, 0x20, 0x49, 0x22, 0xa5, 0x24, 0x92, 0x92, 0x00, 0x00, 0x01, 0x0c,
++   0x93, 0x10, 0xe2, 0x11, 0x00, 0x94, 0x22, 0x52, 0x69, 0x53, 0x52, 0x45,
++   0x49, 0x22, 0xa4, 0x4a, 0x55, 0x29, 0x2a, 0xa4, 0x52, 0x42, 0xaa, 0xa5,
++   0x52, 0xa8, 0xaa, 0x55, 0x4a, 0xab, 0xa9, 0x4a, 0x54, 0x49, 0x32, 0x24 };
+diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
+index f93f72bcba97e..0db5b2c38893b 100644
+--- a/arch/mips/boot/compressed/Makefile
++++ b/arch/mips/boot/compressed/Makefile
+@@ -40,7 +40,7 @@ GCOV_PROFILE := n
+ UBSAN_SANITIZE := n
+ 
+ # decompressor objects (linked with vmlinuz)
+-vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
++vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o $(obj)/bswapsi.o
+ 
+ ifdef CONFIG_DEBUG_ZBOOT
+ vmlinuzobjs-$(CONFIG_DEBUG_ZBOOT)		   += $(obj)/dbg.o
+@@ -54,7 +54,7 @@ extra-y += uart-ath79.c
+ $(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c
+ 	$(call cmd,shipped)
+ 
+-vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
++vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
+ 
+ extra-y += ashldi3.c
+ $(obj)/ashldi3.c: $(obj)/%.c: $(srctree)/lib/%.c FORCE
+diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
+index 3d70d15ada286..aae1346a509a9 100644
+--- a/arch/mips/boot/compressed/decompress.c
++++ b/arch/mips/boot/compressed/decompress.c
+@@ -7,6 +7,8 @@
+  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+  */
+ 
++#define DISABLE_BRANCH_PROFILING
++
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+diff --git a/arch/mips/include/asm/vdso/vdso.h b/arch/mips/include/asm/vdso/vdso.h
+index 737ddfc3411cb..a327ca21270ec 100644
+--- a/arch/mips/include/asm/vdso/vdso.h
++++ b/arch/mips/include/asm/vdso/vdso.h
+@@ -67,7 +67,7 @@ static inline const struct vdso_data *get_vdso_data(void)
+ 
+ static inline void __iomem *get_gic(const struct vdso_data *data)
+ {
+-	return (void __iomem *)data - PAGE_SIZE;
++	return (void __iomem *)((unsigned long)data & PAGE_MASK) - PAGE_SIZE;
+ }
+ 
+ #endif /* CONFIG_CLKSRC_MIPS_GIC */
+diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c
+index 5d91036ad626d..58fbcfcc98c9e 100644
+--- a/arch/powerpc/boot/devtree.c
++++ b/arch/powerpc/boot/devtree.c
+@@ -13,6 +13,7 @@
+ #include "string.h"
+ #include "stdio.h"
+ #include "ops.h"
++#include "of.h"
+ 
+ void dt_fixup_memory(u64 start, u64 size)
+ {
+@@ -23,21 +24,25 @@ void dt_fixup_memory(u64 start, u64 size)
+ 	root = finddevice("/");
+ 	if (getprop(root, "#address-cells", &naddr, sizeof(naddr)) < 0)
+ 		naddr = 2;
++	else
++		naddr = be32_to_cpu(naddr);
+ 	if (naddr < 1 || naddr > 2)
+ 		fatal("Can't cope with #address-cells == %d in /\n\r", naddr);
+ 
+ 	if (getprop(root, "#size-cells", &nsize, sizeof(nsize)) < 0)
+ 		nsize = 1;
++	else
++		nsize = be32_to_cpu(nsize);
+ 	if (nsize < 1 || nsize > 2)
+ 		fatal("Can't cope with #size-cells == %d in /\n\r", nsize);
+ 
+ 	i = 0;
+ 	if (naddr == 2)
+-		memreg[i++] = start >> 32;
+-	memreg[i++] = start & 0xffffffff;
++		memreg[i++] = cpu_to_be32(start >> 32);
++	memreg[i++] = cpu_to_be32(start & 0xffffffff);
+ 	if (nsize == 2)
+-		memreg[i++] = size >> 32;
+-	memreg[i++] = size & 0xffffffff;
++		memreg[i++] = cpu_to_be32(size >> 32);
++	memreg[i++] = cpu_to_be32(size & 0xffffffff);
+ 
+ 	memory = finddevice("/memory");
+ 	if (! memory) {
+@@ -45,9 +50,9 @@ void dt_fixup_memory(u64 start, u64 size)
+ 		setprop_str(memory, "device_type", "memory");
+ 	}
+ 
+-	printf("Memory <- <0x%x", memreg[0]);
++	printf("Memory <- <0x%x", be32_to_cpu(memreg[0]));
+ 	for (i = 1; i < (naddr + nsize); i++)
+-		printf(" 0x%x", memreg[i]);
++		printf(" 0x%x", be32_to_cpu(memreg[i]));
+ 	printf("> (%ldMB)\n\r", (unsigned long)(size >> 20));
+ 
+ 	setprop(memory, "reg", memreg, (naddr + nsize)*sizeof(u32));
+@@ -65,10 +70,10 @@ void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus)
+ 		printf("CPU bus-frequency <- 0x%x (%dMHz)\n\r", bus, MHZ(bus));
+ 
+ 	while ((devp = find_node_by_devtype(devp, "cpu"))) {
+-		setprop_val(devp, "clock-frequency", cpu);
+-		setprop_val(devp, "timebase-frequency", tb);
++		setprop_val(devp, "clock-frequency", cpu_to_be32(cpu));
++		setprop_val(devp, "timebase-frequency", cpu_to_be32(tb));
+ 		if (bus > 0)
+-			setprop_val(devp, "bus-frequency", bus);
++			setprop_val(devp, "bus-frequency", cpu_to_be32(bus));
+ 	}
+ 
+ 	timebase_period_ns = 1000000000 / tb;
+@@ -80,7 +85,7 @@ void dt_fixup_clock(const char *path, u32 freq)
+ 
+ 	if (devp) {
+ 		printf("%s: clock-frequency <- %x (%dMHz)\n\r", path, freq, MHZ(freq));
+-		setprop_val(devp, "clock-frequency", freq);
++		setprop_val(devp, "clock-frequency", cpu_to_be32(freq));
+ 	}
+ }
+ 
+@@ -133,8 +138,12 @@ void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize)
+ {
+ 	if (getprop(node, "#address-cells", naddr, 4) != 4)
+ 		*naddr = 2;
++	else
++		*naddr = be32_to_cpu(*naddr);
+ 	if (getprop(node, "#size-cells", nsize, 4) != 4)
+ 		*nsize = 1;
++	else
++		*nsize = be32_to_cpu(*nsize);
+ }
+ 
+ static void copy_val(u32 *dest, u32 *src, int naddr)
+@@ -163,9 +172,9 @@ static int add_reg(u32 *reg, u32 *add, int naddr)
+ 	int i, carry = 0;
+ 
+ 	for (i = MAX_ADDR_CELLS - 1; i >= MAX_ADDR_CELLS - naddr; i--) {
+-		u64 tmp = (u64)reg[i] + add[i] + carry;
++		u64 tmp = (u64)be32_to_cpu(reg[i]) + be32_to_cpu(add[i]) + carry;
+ 		carry = tmp >> 32;
+-		reg[i] = (u32)tmp;
++		reg[i] = cpu_to_be32((u32)tmp);
+ 	}
+ 
+ 	return !carry;
+@@ -180,18 +189,18 @@ static int compare_reg(u32 *reg, u32 *range, u32 *rangesize)
+ 	u32 end;
+ 
+ 	for (i = 0; i < MAX_ADDR_CELLS; i++) {
+-		if (reg[i] < range[i])
++		if (be32_to_cpu(reg[i]) < be32_to_cpu(range[i]))
+ 			return 0;
+-		if (reg[i] > range[i])
++		if (be32_to_cpu(reg[i]) > be32_to_cpu(range[i]))
+ 			break;
+ 	}
+ 
+ 	for (i = 0; i < MAX_ADDR_CELLS; i++) {
+-		end = range[i] + rangesize[i];
++		end = be32_to_cpu(range[i]) + be32_to_cpu(rangesize[i]);
+ 
+-		if (reg[i] < end)
++		if (be32_to_cpu(reg[i]) < end)
+ 			break;
+-		if (reg[i] > end)
++		if (be32_to_cpu(reg[i]) > end)
+ 			return 0;
+ 	}
+ 
+@@ -240,7 +249,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
+ 		return 0;
+ 
+ 	dt_get_reg_format(parent, &naddr, &nsize);
+-
+ 	if (nsize > 2)
+ 		return 0;
+ 
+@@ -252,10 +260,10 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
+ 
+ 	copy_val(last_addr, prop_buf + offset, naddr);
+ 
+-	ret_size = prop_buf[offset + naddr];
++	ret_size = be32_to_cpu(prop_buf[offset + naddr]);
+ 	if (nsize == 2) {
+ 		ret_size <<= 32;
+-		ret_size |= prop_buf[offset + naddr + 1];
++		ret_size |= be32_to_cpu(prop_buf[offset + naddr + 1]);
+ 	}
+ 
+ 	for (;;) {
+@@ -278,7 +286,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
+ 
+ 		offset = find_range(last_addr, prop_buf, prev_naddr,
+ 		                    naddr, prev_nsize, buflen / 4);
+-
+ 		if (offset < 0)
+ 			return 0;
+ 
+@@ -296,8 +303,7 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
+ 	if (naddr > 2)
+ 		return 0;
+ 
+-	ret_addr = ((u64)last_addr[2] << 32) | last_addr[3];
+-
++	ret_addr = ((u64)be32_to_cpu(last_addr[2]) << 32) | be32_to_cpu(last_addr[3]);
+ 	if (sizeof(void *) == 4 &&
+ 	    (ret_addr >= 0x100000000ULL || ret_size > 0x100000000ULL ||
+ 	     ret_addr + ret_size > 0x100000000ULL))
+@@ -350,11 +356,14 @@ int dt_is_compatible(void *node, const char *compat)
+ int dt_get_virtual_reg(void *node, void **addr, int nres)
+ {
+ 	unsigned long xaddr;
+-	int n;
++	int n, i;
+ 
+ 	n = getprop(node, "virtual-reg", addr, nres * 4);
+-	if (n > 0)
++	if (n > 0) {
++		for (i = 0; i < n/4; i ++)
++			((u32 *)addr)[i] = be32_to_cpu(((u32 *)addr)[i]);
+ 		return n / 4;
++	}
+ 
+ 	for (n = 0; n < nres; n++) {
+ 		if (!dt_xlate_reg(node, n, &xaddr, NULL))
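
The devtree.c hunks above all fix the same class of bug: flattened-device-tree cells are big-endian u32s, while the boot wrapper may run little-endian, so every property value has to be byte-swapped on read and write. A minimal userspace sketch of the misread being fixed; be32_read() stands in for the kernel's be32_to_cpu(), and the property bytes are an invented example:

	/*
	 * Standalone illustration, not kernel code: device-tree cells
	 * are stored big-endian, so a little-endian host must swap
	 * every value it reads out of the blob.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* read a big-endian u32 from a byte buffer, any host endianness */
	static uint32_t be32_read(const uint8_t *b)
	{
		return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
		       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
	}

	int main(void)
	{
		/* "#address-cells = <2>" exactly as stored in the FDT blob */
		const uint8_t prop[4] = { 0x00, 0x00, 0x00, 0x02 };
		uint32_t naddr;

		/* the pre-fix behaviour on a little-endian host: */
		memcpy(&naddr, prop, sizeof(naddr));
		printf("raw copy : %u\n", naddr);           /* 33554432 on LE */

		/* the fixed behaviour, getprop() followed by be32_to_cpu(): */
		printf("byte-swap: %u\n", be32_read(prop)); /* 2 everywhere */
		return 0;
	}
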
+diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c
+index b0da4466d4198..f16d2be1d0f31 100644
+--- a/arch/powerpc/boot/ns16550.c
++++ b/arch/powerpc/boot/ns16550.c
+@@ -15,6 +15,7 @@
+ #include "stdio.h"
+ #include "io.h"
+ #include "ops.h"
++#include "of.h"
+ 
+ #define UART_DLL	0	/* Out: Divisor Latch Low */
+ #define UART_DLM	1	/* Out: Divisor Latch High */
+@@ -58,16 +59,20 @@ int ns16550_console_init(void *devp, struct serial_console_data *scdp)
+ 	int n;
+ 	u32 reg_offset;
+ 
+-	if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
++	if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1) {
++		printf("virt reg parse fail...\r\n");
+ 		return -1;
++	}
+ 
+ 	n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
+ 	if (n == sizeof(reg_offset))
+-		reg_base += reg_offset;
++		reg_base += be32_to_cpu(reg_offset);
+ 
+ 	n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
+ 	if (n != sizeof(reg_shift))
+ 		reg_shift = 0;
++	else
++		reg_shift = be32_to_cpu(reg_shift);
+ 
+ 	scdp->open = ns16550_open;
+ 	scdp->putc = ns16550_putc;
+diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
+index e646c7f218bc8..12b6b76e8d0f7 100644
+--- a/arch/powerpc/include/asm/ps3.h
++++ b/arch/powerpc/include/asm/ps3.h
+@@ -71,6 +71,7 @@ struct ps3_dma_region_ops;
+  * @bus_addr: The 'translated' bus address of the region.
+  * @len: The length in bytes of the region.
+  * @offset: The offset from the start of memory of the region.
++ * @dma_mask: Device dma_mask.
+  * @ioid: The IOID of the device who owns this region
+  * @chunk_list: Opaque variable used by the ioc page manager.
+  * @region_ops: struct ps3_dma_region_ops - dma region operations
+@@ -85,6 +86,7 @@ struct ps3_dma_region {
+ 	enum ps3_dma_region_type region_type;
+ 	unsigned long len;
+ 	unsigned long offset;
++	u64 dma_mask;
+ 
+ 	/* driver variables  (set by ps3_dma_region_create) */
+ 	unsigned long bus_addr;
+diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
+index 409e612107892..817a02ef60320 100644
+--- a/arch/powerpc/mm/book3s64/radix_tlb.c
++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
+@@ -291,22 +291,30 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
+ /*
+  * We use 128 set in radix mode and 256 set in hpt mode.
+  */
+-static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
++static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+ {
+ 	int set;
+ 
+ 	asm volatile("ptesync": : :"memory");
+ 
+-	/*
+-	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
+-	 * also flush the entire Page Walk Cache.
+-	 */
+-	__tlbiel_pid(pid, 0, ric);
++	switch (ric) {
++	case RIC_FLUSH_PWC:
+ 
+-	/* For PWC, only one flush is needed */
+-	if (ric == RIC_FLUSH_PWC) {
++		/* For PWC, only one flush is needed */
++		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
+ 		ppc_after_tlbiel_barrier();
+ 		return;
++	case RIC_FLUSH_TLB:
++		__tlbiel_pid(pid, 0, RIC_FLUSH_TLB);
++		break;
++	case RIC_FLUSH_ALL:
++	default:
++		/*
++		 * Flush the first set of the TLB, and if
++		 * we're doing a RIC_FLUSH_ALL, also flush
++		 * the entire Page Walk Cache.
++		 */
++		__tlbiel_pid(pid, 0, RIC_FLUSH_ALL);
+ 	}
+ 
+ 	if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+@@ -1176,7 +1184,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
+ 	}
+ }
+ 
+-static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
++static void __radix__flush_tlb_range_psize(struct mm_struct *mm,
+ 				unsigned long start, unsigned long end,
+ 				int psize, bool also_pwc)
+ {
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index aaf1a887f653b..2657bf542985a 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -686,7 +686,7 @@ emit_clear:
+ 		 * BPF_STX ATOMIC (atomic ops)
+ 		 */
+ 		case BPF_STX | BPF_ATOMIC | BPF_W:
+-			if (insn->imm != BPF_ADD) {
++			if (imm != BPF_ADD) {
+ 				pr_err_ratelimited(
+ 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
+ 					code, i);
+@@ -708,7 +708,7 @@ emit_clear:
+ 			PPC_BCC_SHORT(COND_NE, tmp_idx);
+ 			break;
+ 		case BPF_STX | BPF_ATOMIC | BPF_DW:
+-			if (insn->imm != BPF_ADD) {
++			if (imm != BPF_ADD) {
+ 				pr_err_ratelimited(
+ 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
+ 					code, i);
+diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
+index d094321964fb0..a81eac35d9009 100644
+--- a/arch/powerpc/platforms/ps3/mm.c
++++ b/arch/powerpc/platforms/ps3/mm.c
+@@ -6,6 +6,7 @@
+  *  Copyright 2006 Sony Corp.
+  */
+ 
++#include <linux/dma-mapping.h>
+ #include <linux/kernel.h>
+ #include <linux/export.h>
+ #include <linux/memblock.h>
+@@ -1118,6 +1119,7 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
+ 	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
+ {
+ 	unsigned long lpar_addr;
++	int result;
+ 
+ 	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;
+ 
+@@ -1129,6 +1131,16 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
+ 		r->offset -= map.r1.offset;
+ 	r->len = len ? len : ALIGN(map.total, 1 << r->page_size);
+ 
++	dev->core.dma_mask = &r->dma_mask;
++
++	result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32));
++
++	if (result < 0) {
++		dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n",
++			__func__, __LINE__, result);
++		return result;
++	}
++
+ 	switch (dev->dev_type) {
+ 	case PS3_DEVICE_TYPE_SB:
+ 		r->region_ops =  (USE_DYNAMIC_DMA)
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 4fcd460f496ec..0cf31cb872e5e 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -161,7 +161,6 @@ config S390
+ 	select HAVE_GCC_PLUGINS
+ 	select HAVE_GENERIC_VDSO
+ 	select HAVE_IOREMAP_PROT if PCI
+-	select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ 	select HAVE_KERNEL_BZIP2
+ 	select HAVE_KERNEL_GZIP
+ 	select HAVE_KERNEL_LZ4
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index e443ed9947bd6..098abe3a56f37 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -28,6 +28,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
+ KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
+ KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
+ KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
++KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
+ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
+ KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
+ KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
+diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
+index d372a45fe10e7..dd92092e3eec3 100644
+--- a/arch/s390/boot/ipl_parm.c
++++ b/arch/s390/boot/ipl_parm.c
+@@ -28,22 +28,25 @@ static inline int __diag308(unsigned long subcode, void *addr)
+ 	register unsigned long _addr asm("0") = (unsigned long)addr;
+ 	register unsigned long _rc asm("1") = 0;
+ 	unsigned long reg1, reg2;
+-	psw_t old = S390_lowcore.program_new_psw;
++	psw_t old;
+ 
+ 	asm volatile(
++		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
+ 		"	epsw	%0,%1\n"
+-		"	st	%0,%[psw_pgm]\n"
+-		"	st	%1,%[psw_pgm]+4\n"
++		"	st	%0,0(%[psw_pgm])\n"
++		"	st	%1,4(%[psw_pgm])\n"
+ 		"	larl	%0,1f\n"
+-		"	stg	%0,%[psw_pgm]+8\n"
++		"	stg	%0,8(%[psw_pgm])\n"
+ 		"	diag	%[addr],%[subcode],0x308\n"
+-		"1:	nopr	%%r7\n"
++		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
+ 		: "=&d" (reg1), "=&a" (reg2),
+-		  [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
++		  "+Q" (S390_lowcore.program_new_psw),
++		  "=Q" (old),
+ 		  [addr] "+d" (_addr), "+d" (_rc)
+-		: [subcode] "d" (subcode)
++		: [subcode] "d" (subcode),
++		  [psw_old] "a" (&old),
++		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
+ 		: "cc", "memory");
+-	S390_lowcore.program_new_psw = old;
+ 	return _rc;
+ }
+ 
+diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
+index 40168e59abd38..a0e980f57c02c 100644
+--- a/arch/s390/boot/mem_detect.c
++++ b/arch/s390/boot/mem_detect.c
+@@ -69,24 +69,27 @@ static int __diag260(unsigned long rx1, unsigned long rx2)
+ 	register unsigned long _ry asm("4") = 0x10; /* storage configuration */
+ 	int rc = -1;				    /* fail */
+ 	unsigned long reg1, reg2;
+-	psw_t old = S390_lowcore.program_new_psw;
++	psw_t old;
+ 
+ 	asm volatile(
++		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
+ 		"	epsw	%0,%1\n"
+-		"	st	%0,%[psw_pgm]\n"
+-		"	st	%1,%[psw_pgm]+4\n"
++		"	st	%0,0(%[psw_pgm])\n"
++		"	st	%1,4(%[psw_pgm])\n"
+ 		"	larl	%0,1f\n"
+-		"	stg	%0,%[psw_pgm]+8\n"
++		"	stg	%0,8(%[psw_pgm])\n"
+ 		"	diag	%[rx],%[ry],0x260\n"
+ 		"	ipm	%[rc]\n"
+ 		"	srl	%[rc],28\n"
+-		"1:\n"
++		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
+ 		: "=&d" (reg1), "=&a" (reg2),
+-		  [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
++		  "+Q" (S390_lowcore.program_new_psw),
++		  "=Q" (old),
+ 		  [rc] "+&d" (rc), [ry] "+d" (_ry)
+-		: [rx] "d" (_rx1), "d" (_rx2)
++		: [rx] "d" (_rx1), "d" (_rx2),
++		  [psw_old] "a" (&old),
++		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
+ 		: "cc", "memory");
+-	S390_lowcore.program_new_psw = old;
+ 	return rc == 0 ? _ry : -1;
+ }
+ 
+@@ -111,24 +114,30 @@ static int diag260(void)
+ 
+ static int tprot(unsigned long addr)
+ {
+-	unsigned long pgm_addr;
++	unsigned long reg1, reg2;
+ 	int rc = -EFAULT;
+-	psw_t old = S390_lowcore.program_new_psw;
++	psw_t old;
+ 
+-	S390_lowcore.program_new_psw.mask = __extract_psw();
+ 	asm volatile(
+-		"	larl	%[pgm_addr],1f\n"
+-		"	stg	%[pgm_addr],%[psw_pgm_addr]\n"
++		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
++		"	epsw	%[reg1],%[reg2]\n"
++		"	st	%[reg1],0(%[psw_pgm])\n"
++		"	st	%[reg2],4(%[psw_pgm])\n"
++		"	larl	%[reg1],1f\n"
++		"	stg	%[reg1],8(%[psw_pgm])\n"
+ 		"	tprot	0(%[addr]),0\n"
+ 		"	ipm	%[rc]\n"
+ 		"	srl	%[rc],28\n"
+-		"1:\n"
+-		: [pgm_addr] "=&d"(pgm_addr),
+-		  [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
+-		  [rc] "+&d"(rc)
+-		: [addr] "a"(addr)
++		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
++		: [reg1] "=&d" (reg1),
++		  [reg2] "=&a" (reg2),
++		  [rc] "+&d" (rc),
++		  "=Q" (S390_lowcore.program_new_psw.addr),
++		  "=Q" (old)
++		: [psw_old] "a" (&old),
++		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
++		  [addr] "a" (addr)
+ 		: "cc", "memory");
+-	S390_lowcore.program_new_psw = old;
+ 	return rc;
+ }
+ 
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 023a15dc25a33..dbd380d811335 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -207,7 +207,7 @@ static __always_inline unsigned long current_stack_pointer(void)
+ 	return sp;
+ }
+ 
+-static __no_kasan_or_inline unsigned short stap(void)
++static __always_inline unsigned short stap(void)
+ {
+ 	unsigned short cpu_address;
+ 
+@@ -246,7 +246,7 @@ static inline void __load_psw(psw_t psw)
+  * Set PSW mask to specified value, while leaving the
+  * PSW addr pointing to the next instruction.
+  */
+-static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
++static __always_inline void __load_psw_mask(unsigned long mask)
+ {
+ 	unsigned long addr;
+ 	psw_t psw;
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 382d73da134cf..93538e63fa03f 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -165,7 +165,7 @@ static void __init set_preferred_console(void)
+ 	else if (CONSOLE_IS_3270)
+ 		add_preferred_console("tty3270", 0, NULL);
+ 	else if (CONSOLE_IS_VT220)
+-		add_preferred_console("ttyS", 1, NULL);
++		add_preferred_console("ttysclp", 0, NULL);
+ 	else if (CONSOLE_IS_HVC)
+ 		add_preferred_console("hvc", 0, NULL);
+ }
+diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
+index c57f8c40e9926..21c4ebe29b9a2 100644
+--- a/arch/s390/purgatory/Makefile
++++ b/arch/s390/purgatory/Makefile
+@@ -24,6 +24,7 @@ KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
+ KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
+ KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
+ KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
++KBUILD_CFLAGS += -fno-stack-protector
+ KBUILD_CFLAGS += $(CLANG_FLAGS)
+ KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+ KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
+diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
+index d8845d4aac6a7..6040817c036f3 100644
+--- a/arch/um/drivers/chan_user.c
++++ b/arch/um/drivers/chan_user.c
+@@ -256,7 +256,8 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
+ 		goto out_close;
+ 	}
+ 
+-	if (os_set_fd_block(*fd_out, 0)) {
++	err = os_set_fd_block(*fd_out, 0);
++	if (err) {
+ 		printk(UM_KERN_ERR "winch_tramp: failed to set thread_fd "
+ 		       "non-blocking.\n");
+ 		goto out_close;
+diff --git a/arch/um/drivers/slip_user.c b/arch/um/drivers/slip_user.c
+index 482a19c5105c5..7334019c9e60a 100644
+--- a/arch/um/drivers/slip_user.c
++++ b/arch/um/drivers/slip_user.c
+@@ -145,7 +145,8 @@ static int slip_open(void *data)
+ 	}
+ 	sfd = err;
+ 
+-	if (set_up_tty(sfd))
++	err = set_up_tty(sfd);
++	if (err)
+ 		goto out_close2;
+ 
+ 	pri->slave = sfd;
+diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
+index 8e0b43cf089f4..cbd4f00fe77ee 100644
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -1242,8 +1242,7 @@ static int __init ubd_driver_init(void){
+ 		 * enough. So use anyway the io thread. */
+ 	}
+ 	stack = alloc_stack(0, 0);
+-	io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
+-				 &thread_fd);
++	io_pid = start_io_thread(stack + PAGE_SIZE, &thread_fd);
+ 	if(io_pid < 0){
+ 		printk(KERN_ERR
+ 		       "ubd : Failed to start I/O thread (errno = %d) - "
+diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
+index 592cdb1384415..5afac0fef24ea 100644
+--- a/arch/um/kernel/skas/clone.c
++++ b/arch/um/kernel/skas/clone.c
+@@ -29,7 +29,7 @@ stub_clone_handler(void)
+ 	long err;
+ 
+ 	err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
+-			    (unsigned long)data + UM_KERN_PAGE_SIZE / 2 - sizeof(void *));
++			    (unsigned long)data + UM_KERN_PAGE_SIZE / 2);
+ 	if (err) {
+ 		data->parent_err = err;
+ 		goto done;
+diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
+index 9fa6e4187d4fb..32e88baf18dd4 100644
+--- a/arch/um/os-Linux/helper.c
++++ b/arch/um/os-Linux/helper.c
+@@ -64,7 +64,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
+ 		goto out_close;
+ 	}
+ 
+-	sp = stack + UM_KERN_PAGE_SIZE - sizeof(void *);
++	sp = stack + UM_KERN_PAGE_SIZE;
+ 	data.pre_exec = pre_exec;
+ 	data.pre_data = pre_data;
+ 	data.argv = argv;
+@@ -120,7 +120,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
+ 	if (stack == 0)
+ 		return -ENOMEM;
+ 
+-	sp = stack + UM_KERN_PAGE_SIZE - sizeof(void *);
++	sp = stack + UM_KERN_PAGE_SIZE;
+ 	pid = clone(proc, (void *) sp, flags, arg);
+ 	if (pid < 0) {
+ 		err = -errno;
+diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
+index 96f511d1aabe6..e283f130aadc5 100644
+--- a/arch/um/os-Linux/signal.c
++++ b/arch/um/os-Linux/signal.c
+@@ -129,7 +129,7 @@ void set_sigstack(void *sig_stack, int size)
+ 	stack_t stack = {
+ 		.ss_flags = 0,
+ 		.ss_sp = sig_stack,
+-		.ss_size = size - sizeof(void *)
++		.ss_size = size
+ 	};
+ 
+ 	if (sigaltstack(&stack, NULL) != 0)
+diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
+index fba674fac8b73..87d3129e7362e 100644
+--- a/arch/um/os-Linux/skas/process.c
++++ b/arch/um/os-Linux/skas/process.c
+@@ -327,7 +327,7 @@ int start_userspace(unsigned long stub_stack)
+ 	}
+ 
+ 	/* set stack pointer to the end of the stack page, so it can grow downwards */
+-	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
++	sp = (unsigned long)stack + UM_KERN_PAGE_SIZE;
+ 
+ 	flags = CLONE_FILES | SIGCHLD;
+ 
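
The UML hunks above all drop a "- sizeof(void *)" when computing an initial stack pointer. The rule they converge on: a downward-growing stack starts at the first address past its allocation, which is also what glibc's clone() expects. A standalone sketch of that convention, with an invented stack size and message:

	/*
	 * Illustration only, not UML code: clone() takes the *top* of
	 * the child stack because x86 stacks grow downward.
	 */
	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	#define STACK_SIZE (64 * 1024)

	static int child(void *arg)
	{
		printf("child says: %s\n", (const char *)arg);
		return 0;
	}

	int main(void)
	{
		char *stack = malloc(STACK_SIZE);
		pid_t pid;

		if (!stack)
			return 1;

		/* sp = base + size: first address past the allocation */
		pid = clone(child, stack + STACK_SIZE, CLONE_VM | SIGCHLD,
			    (void *)"hello");
		if (pid < 0) {
			perror("clone");
			return 1;
		}
		waitpid(pid, NULL, 0);
		free(stack);
		return 0;
	}
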
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 16bf4d4a8159e..4e5af2b00d89b 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -103,6 +103,7 @@ static inline void fpstate_init_fxstate(struct fxregs_state *fx)
+ }
+ extern void fpstate_sanitize_xstate(struct fpu *fpu);
+ 
++/* Returns 0 or the negated trap number, which results in -EFAULT for #PF */
+ #define user_insn(insn, output, input...)				\
+ ({									\
+ 	int err;							\
+@@ -110,14 +111,14 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
+ 	might_fault();							\
+ 									\
+ 	asm volatile(ASM_STAC "\n"					\
+-		     "1:" #insn "\n\t"					\
++		     "1: " #insn "\n"					\
+ 		     "2: " ASM_CLAC "\n"				\
+ 		     ".section .fixup,\"ax\"\n"				\
+-		     "3:  movl $-1,%[err]\n"				\
++		     "3:  negl %%eax\n"					\
+ 		     "    jmp  2b\n"					\
+ 		     ".previous\n"					\
+-		     _ASM_EXTABLE(1b, 3b)				\
+-		     : [err] "=r" (err), output				\
++		     _ASM_EXTABLE_FAULT(1b, 3b)				\
++		     : [err] "=a" (err), output				\
+ 		     : "0"(0), input);					\
+ 	err;								\
+ })
+@@ -219,16 +220,20 @@ static inline void fxsave(struct fxregs_state *fx)
+ #define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
+ #define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
+ 
++/*
++ * After this @err contains 0 on success or the negated trap number when
++ * the operation raises an exception. For faults this results in -EFAULT.
++ */
+ #define XSTATE_OP(op, st, lmask, hmask, err)				\
+ 	asm volatile("1:" op "\n\t"					\
+ 		     "xor %[err], %[err]\n"				\
+ 		     "2:\n\t"						\
+ 		     ".pushsection .fixup,\"ax\"\n\t"			\
+-		     "3: movl $-2,%[err]\n\t"				\
++		     "3: negl %%eax\n\t"				\
+ 		     "jmp 2b\n\t"					\
+ 		     ".popsection\n\t"					\
+-		     _ASM_EXTABLE(1b, 3b)				\
+-		     : [err] "=r" (err)					\
++		     _ASM_EXTABLE_FAULT(1b, 3b)				\
++		     : [err] "=a" (err)					\
+ 		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+ 		     : "memory")
+ 
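
The user_insn()/XSTATE_OP() rewrite above switches the fixup from storing a fixed constant to negating the trap number that the _ASM_EXTABLE_FAULT handler leaves in %eax, as the added comments in the hunk state. A short arithmetic check of why that yields -EFAULT for a page fault (X86_TRAP_PF is the usual vector 14):

	/*
	 * Standalone illustration: "negl %eax" turns the trap number
	 * into a negative errno-style value, and -14 is -EFAULT on
	 * Linux, so a #PF propagates as -EFAULT with no extra mapping.
	 */
	#include <errno.h>
	#include <stdio.h>

	#define X86_TRAP_PF 14	/* #PF vector number */

	int main(void)
	{
		int err = -X86_TRAP_PF;	/* what the negl fixup computes */

		printf("err=%d  -EFAULT=%d  equal=%d\n",
		       err, -EFAULT, err == -EFAULT);
		return 0;
	}
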
+diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
+index c413756ba89fa..6bb874441de8b 100644
+--- a/arch/x86/kernel/fpu/regset.c
++++ b/arch/x86/kernel/fpu/regset.c
+@@ -117,7 +117,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+ 	/*
+ 	 * A whole standard-format XSAVE buffer is needed:
+ 	 */
+-	if ((pos != 0) || (count < fpu_user_xstate_size))
++	if (pos != 0 || count != fpu_user_xstate_size)
+ 		return -EFAULT;
+ 
+ 	xsave = &fpu->state.xsave;
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 451435d7ff413..038a119114c4f 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1084,20 +1084,10 @@ static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
+ 	return true;
+ }
+ 
+-static void fill_gap(struct membuf *to, unsigned *last, unsigned offset)
++static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
++			 void *init_xstate, unsigned int size)
+ {
+-	if (*last >= offset)
+-		return;
+-	membuf_write(to, (void *)&init_fpstate.xsave + *last, offset - *last);
+-	*last = offset;
+-}
+-
+-static void copy_part(struct membuf *to, unsigned *last, unsigned offset,
+-		      unsigned size, void *from)
+-{
+-	fill_gap(to, last, offset);
+-	membuf_write(to, from, size);
+-	*last = offset + size;
++	membuf_write(to, from_xstate ? xstate : init_xstate, size);
+ }
+ 
+ /*
+@@ -1109,10 +1099,10 @@ static void copy_part(struct membuf *to, unsigned *last, unsigned offset,
+  */
+ void copy_xstate_to_kernel(struct membuf to, struct xregs_state *xsave)
+ {
++	const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
++	struct xregs_state *xinit = &init_fpstate.xsave;
+ 	struct xstate_header header;
+-	const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+-	unsigned size = to.left;
+-	unsigned last = 0;
++	unsigned int zerofrom;
+ 	int i;
+ 
+ 	/*
+@@ -1122,41 +1112,68 @@ void copy_xstate_to_kernel(struct membuf to, struct xregs_state *xsave)
+ 	header.xfeatures = xsave->header.xfeatures;
+ 	header.xfeatures &= xfeatures_mask_user();
+ 
+-	if (header.xfeatures & XFEATURE_MASK_FP)
+-		copy_part(&to, &last, 0, off_mxcsr, &xsave->i387);
+-	if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
+-		copy_part(&to, &last, off_mxcsr,
+-			  MXCSR_AND_FLAGS_SIZE, &xsave->i387.mxcsr);
+-	if (header.xfeatures & XFEATURE_MASK_FP)
+-		copy_part(&to, &last, offsetof(struct fxregs_state, st_space),
+-			  128, &xsave->i387.st_space);
+-	if (header.xfeatures & XFEATURE_MASK_SSE)
+-		copy_part(&to, &last, xstate_offsets[XFEATURE_SSE],
+-			  256, &xsave->i387.xmm_space);
+-	/*
+-	 * Fill xsave->i387.sw_reserved value for ptrace frame:
+-	 */
+-	copy_part(&to, &last, offsetof(struct fxregs_state, sw_reserved),
+-		  48, xstate_fx_sw_bytes);
+-	/*
+-	 * Copy xregs_state->header:
+-	 */
+-	copy_part(&to, &last, offsetof(struct xregs_state, header),
+-		  sizeof(header), &header);
++	/* Copy FP state up to MXCSR */
++	copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
++		     &xinit->i387, off_mxcsr);
++
++	/* Copy MXCSR when SSE or YMM are set in the feature mask */
++	copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
++		     &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
++		     MXCSR_AND_FLAGS_SIZE);
++
++	/* Copy the remaining FP state */
++	copy_feature(header.xfeatures & XFEATURE_MASK_FP,
++		     &to, &xsave->i387.st_space, &xinit->i387.st_space,
++		     sizeof(xsave->i387.st_space));
++
++	/* Copy the SSE state - shared with YMM, but independently managed */
++	copy_feature(header.xfeatures & XFEATURE_MASK_SSE,
++		     &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space,
++		     sizeof(xsave->i387.xmm_space));
++
++	/* Zero the padding area */
++	membuf_zero(&to, sizeof(xsave->i387.padding));
++
++	/* Copy xsave->i387.sw_reserved */
++	membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved));
++
++	/* Copy the user space relevant state of @xsave->header */
++	membuf_write(&to, &header, sizeof(header));
++
++	zerofrom = offsetof(struct xregs_state, extended_state_area);
+ 
+ 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
+ 		/*
+-		 * Copy only in-use xstates:
++		 * The ptrace buffer is in non-compacted XSAVE format.
++		 * In non-compacted format disabled features still occupy
++		 * state space, but there is no state to copy from in the
++		 * compacted init_fpstate. The gap tracking will zero this
++		 * later.
+ 		 */
+-		if ((header.xfeatures >> i) & 1) {
+-			void *src = __raw_xsave_addr(xsave, i);
++		if (!(xfeatures_mask_user() & BIT_ULL(i)))
++			continue;
+ 
+-			copy_part(&to, &last, xstate_offsets[i],
+-				  xstate_sizes[i], src);
+-		}
++		/*
++		 * If there was a feature or alignment gap, zero the space
++		 * in the destination buffer.
++		 */
++		if (zerofrom < xstate_offsets[i])
++			membuf_zero(&to, xstate_offsets[i] - zerofrom);
++
++		copy_feature(header.xfeatures & BIT_ULL(i), &to,
++			     __raw_xsave_addr(xsave, i),
++			     __raw_xsave_addr(xinit, i),
++			     xstate_sizes[i]);
+ 
++		/*
++		 * Keep track of the last copied state in the non-compacted
++		 * target buffer for gap zeroing.
++		 */
++		zerofrom = xstate_offsets[i] + xstate_sizes[i];
+ 	}
+-	fill_gap(&to, &last, size);
++
++	if (to.left)
++		membuf_zero(&to, to.left);
+ }
+ 
+ /*
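
A simplified model of the rewritten copy_xstate_to_kernel() may help: in the non-compacted XSAVE layout every feature sits at a fixed offset, so the copy loop must explicitly zero the holes left by disabled features instead of skipping them. The offsets, sizes and feature mask below are invented, and this sketch zeroes disabled slots where the real code copies init state for some of them:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct membuf { uint8_t *p; size_t left; };

	static void membuf_write(struct membuf *to, const void *src, size_t n)
	{
		memcpy(to->p, src, n);
		to->p += n;
		to->left -= n;
	}

	static void membuf_zero(struct membuf *to, size_t n)
	{
		memset(to->p, 0, n);
		to->p += n;
		to->left -= n;
	}

	int main(void)
	{
		const size_t  off[]  = { 0, 64, 192 };	/* fixed offsets */
		const size_t  size[] = { 64, 128, 64 };
		const uint64_t xfeatures = 0x5;		/* feature 1 disabled */
		uint8_t state[128], out[256];
		struct membuf to = { out, sizeof(out) };
		size_t zerofrom = 0;
		int i;

		memset(state, 0xaa, sizeof(state));	/* fake register state */

		for (i = 0; i < 3; i++) {
			/* zero any gap before this feature's offset */
			if (zerofrom < off[i])
				membuf_zero(&to, off[i] - zerofrom);
			if (xfeatures & (1ULL << i))
				membuf_write(&to, state, size[i]);
			else
				membuf_zero(&to, size[i]);
			zerofrom = off[i] + size[i];
		}
		if (to.left)		/* zero the tail, like the hunk */
			membuf_zero(&to, to.left);

		/* feature 1's slot is zeroed, feature 0's carries data */
		printf("out[64]=%#x out[0]=%#x\n", out[64], out[0]);
		return 0;
	}
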
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index f306e85a08a64..6f4e261071e0d 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -234,10 +234,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
+ 	     void __user **fpstate)
+ {
+ 	/* Default to using normal stack */
++	bool nested_altstack = on_sig_stack(regs->sp);
++	bool entering_altstack = false;
+ 	unsigned long math_size = 0;
+ 	unsigned long sp = regs->sp;
+ 	unsigned long buf_fx = 0;
+-	int onsigstack = on_sig_stack(sp);
+ 	int ret;
+ 
+ 	/* redzone */
+@@ -246,15 +247,23 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
+ 
+ 	/* This is the X/Open sanctioned signal stack switching.  */
+ 	if (ka->sa.sa_flags & SA_ONSTACK) {
+-		if (sas_ss_flags(sp) == 0)
++		/*
++		 * This checks nested_altstack via sas_ss_flags(). Sensible
++		 * programs use SS_AUTODISARM, which disables that check, and
++		 * programs that don't use SS_AUTODISARM get compatible.
++		 */
++		if (sas_ss_flags(sp) == 0) {
+ 			sp = current->sas_ss_sp + current->sas_ss_size;
++			entering_altstack = true;
++		}
+ 	} else if (IS_ENABLED(CONFIG_X86_32) &&
+-		   !onsigstack &&
++		   !nested_altstack &&
+ 		   regs->ss != __USER_DS &&
+ 		   !(ka->sa.sa_flags & SA_RESTORER) &&
+ 		   ka->sa.sa_restorer) {
+ 		/* This is the legacy signal stack switching. */
+ 		sp = (unsigned long) ka->sa.sa_restorer;
++		entering_altstack = true;
+ 	}
+ 
+ 	sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
+@@ -267,8 +276,15 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
+ 	 * If we are on the alternate signal stack and would overflow it, don't.
+ 	 * Return an always-bogus address instead so we will die with SIGSEGV.
+ 	 */
+-	if (onsigstack && !likely(on_sig_stack(sp)))
++	if (unlikely((nested_altstack || entering_altstack) &&
++		     !__on_sig_stack(sp))) {
++
++		if (show_unhandled_signals && printk_ratelimit())
++			pr_info("%s[%d] overflowed sigaltstack\n",
++				current->comm, task_pid_nr(current));
++
+ 		return (void __user *)-1L;
++	}
+ 
+ 	/* save i387 and extended state */
+ 	ret = copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size);
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 0ed116b8c211d..40d479a59f6c2 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -844,8 +844,14 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+ 		unsigned phys_as = entry->eax & 0xff;
+ 
+-		if (!g_phys_as)
++		/*
++		 * Use bare metal's MAXPHADDR if the CPU doesn't report guest
++		 * MAXPHYADDR separately, or if TDP (NPT) is disabled, as the
++		 * guest version "applies only to guests using nested paging".
++		 */
++		if (!g_phys_as || !tdp_enabled)
+ 			g_phys_as = phys_as;
++
+ 		entry->eax = g_phys_as | (virt_as << 8);
+ 		entry->edx = 0;
+ 		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 7ffeeb6880d93..cd7c8bae79ed1 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -52,6 +52,8 @@
+ #include <asm/kvm_page_track.h>
+ #include "trace.h"
+ 
++#include "paging.h"
++
+ extern bool itlb_multihit_kvm_mitigation;
+ 
+ static int __read_mostly nx_huge_pages = -1;
+diff --git a/arch/x86/kvm/mmu/paging.h b/arch/x86/kvm/mmu/paging.h
+new file mode 100644
+index 0000000000000..de8ab323bb707
+--- /dev/null
++++ b/arch/x86/kvm/mmu/paging.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/* Shadow paging constants/helpers that don't need to be #undef'd. */
++#ifndef __KVM_X86_PAGING_H
++#define __KVM_X86_PAGING_H
++
++#define GUEST_PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
++#define PT64_LVL_ADDR_MASK(level) \
++	(GUEST_PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
++						* PT64_LEVEL_BITS))) - 1))
++#define PT64_LVL_OFFSET_MASK(level) \
++	(GUEST_PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
++						* PT64_LEVEL_BITS))) - 1))
++#endif /* __KVM_X86_PAGING_H */
++
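
The masks moved into the new paging.h are plain bit arithmetic over a guest physical address. A standalone check of what they select at level 2, assuming the usual x86-64 values PAGE_SHIFT=12 and PT64_LEVEL_BITS=9 (a level-2 entry then maps a 2 MiB region); the sample address is invented:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096ULL
	#define PAGE_SHIFT	12
	#define PT64_LEVEL_BITS	9
	#define GUEST_PT64_BASE_ADDR_MASK \
		(((1ULL << 52) - 1) & ~(uint64_t)(PAGE_SIZE - 1))
	#define PT64_LVL_ADDR_MASK(level) \
		(GUEST_PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
							* PT64_LEVEL_BITS))) - 1))
	#define PT64_LVL_OFFSET_MASK(level) \
		(GUEST_PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
							* PT64_LEVEL_BITS))) - 1))

	int main(void)
	{
		uint64_t gpa = 0x123456789abcULL;

		/* base of the 2 MiB region vs. the page bits inside it */
		printf("lvl2 base:   %#" PRIx64 "\n", gpa & PT64_LVL_ADDR_MASK(2));
		printf("lvl2 offset: %#" PRIx64 "\n", gpa & PT64_LVL_OFFSET_MASK(2));
		return 0;
	}
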
+diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
+index de6407610b19a..db8c3188aae03 100644
+--- a/arch/x86/kvm/mmu/paging_tmpl.h
++++ b/arch/x86/kvm/mmu/paging_tmpl.h
+@@ -24,7 +24,7 @@
+ 	#define pt_element_t u64
+ 	#define guest_walker guest_walker64
+ 	#define FNAME(name) paging##64_##name
+-	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
++	#define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
+ 	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
+ 	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
+ 	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
+@@ -57,7 +57,7 @@
+ 	#define pt_element_t u64
+ 	#define guest_walker guest_walkerEPT
+ 	#define FNAME(name) ept_##name
+-	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
++	#define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
+ 	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
+ 	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
+ 	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
+diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
+index 6de3950fd704f..8a8e47bbf8040 100644
+--- a/arch/x86/kvm/mmu/spte.h
++++ b/arch/x86/kvm/mmu/spte.h
+@@ -23,12 +23,6 @@
+ #else
+ #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+ #endif
+-#define PT64_LVL_ADDR_MASK(level) \
+-	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
+-						* PT64_LEVEL_BITS))) - 1))
+-#define PT64_LVL_OFFSET_MASK(level) \
+-	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
+-						* PT64_LEVEL_BITS))) - 1))
+ 
+ #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
+ 			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 30569bbbca9ac..f262edf7551b3 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2982,7 +2982,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 			svm_disable_lbrv(vcpu);
+ 		break;
+ 	case MSR_VM_HSAVE_PA:
+-		svm->nested.hsave_msr = data;
++		/*
++		 * Old kernels did not validate the value written to
++		 * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
++		 * value to allow live migrating buggy or malicious guests
++		 * originating from those kernels.
++		 */
++		if (!msr->host_initiated && !page_address_valid(vcpu, data))
++			return 1;
++
++		svm->nested.hsave_msr = data & PAGE_MASK;
+ 		break;
+ 	case MSR_VM_CR:
+ 		return svm_set_vm_cr(vcpu, data);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 615dd236e8429..78b3a18d24e07 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9194,6 +9194,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		set_debugreg(vcpu->arch.eff_db[3], 3);
+ 		set_debugreg(vcpu->arch.dr6, 6);
+ 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
++	} else if (unlikely(hw_breakpoint_active())) {
++		set_debugreg(0, 7);
+ 	}
+ 
+ 	for (;;) {
+diff --git a/block/blk-core.c b/block/blk-core.c
+index fc60ff2084973..e34dfa13b7bcd 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1255,7 +1255,7 @@ static void update_io_ticks(struct block_device *part, unsigned long now,
+ 	unsigned long stamp;
+ again:
+ 	stamp = READ_ONCE(part->bd_stamp);
+-	if (unlikely(stamp != now)) {
++	if (unlikely(time_after(now, stamp))) {
+ 		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
+ 			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
+ 	}
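
The update_io_ticks() change swaps an equality test for the wrap-safe jiffies comparison, so io_ticks is only charged when "now" has really advanced past the stored stamp. A standalone sketch of why signed subtraction handles counter wraparound; the macro is the core of time_after() from include/linux/jiffies.h without its type checking, and the sample values are invented:

	#include <stdio.h>

	#define time_after(a, b)	((long)((b) - (a)) < 0)

	int main(void)
	{
		unsigned long stamp = 5;	/* counter just wrapped past zero */
		unsigned long now = -10UL;	/* a moment before the wrap */

		/* the old test fires and would charge a bogus interval */
		printf("stamp != now          -> %d\n", stamp != now);
		/* the fixed test sees that now is not after stamp */
		printf("time_after(now,stamp) -> %d\n", time_after(now, stamp));
		return 0;
	}
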
+diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
+index d333786b5c7eb..cc86534c80ad9 100644
+--- a/block/partitions/ldm.c
++++ b/block/partitions/ldm.c
+@@ -510,7 +510,7 @@ static bool ldm_validate_partition_table(struct parsed_partitions *state)
+ 
+ 	p = (struct msdos_partition *)(data + 0x01BE);
+ 	for (i = 0; i < 4; i++, p++)
+-		if (SYS_IND (p) == LDM_PARTITION) {
++		if (p->sys_ind == LDM_PARTITION) {
+ 			result = true;
+ 			break;
+ 		}
+diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h
+index d8d6beaa72c4d..8693704dcf5e9 100644
+--- a/block/partitions/ldm.h
++++ b/block/partitions/ldm.h
+@@ -84,9 +84,6 @@ struct parsed_partitions;
+ #define TOC_BITMAP1		"config"	/* Names of the two defined */
+ #define TOC_BITMAP2		"log"		/* bitmaps in the TOCBLOCK. */
+ 
+-/* Borrowed from msdos.c */
+-#define SYS_IND(p)		(get_unaligned(&(p)->sys_ind))
+-
+ struct frag {				/* VBLK Fragment handling */
+ 	struct list_head list;
+ 	u32		group;
+diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
+index 8f2fcc0802642..c94de377c5025 100644
+--- a/block/partitions/msdos.c
++++ b/block/partitions/msdos.c
+@@ -38,8 +38,6 @@
+  */
+ #include <asm/unaligned.h>
+ 
+-#define SYS_IND(p)	get_unaligned(&p->sys_ind)
+-
+ static inline sector_t nr_sects(struct msdos_partition *p)
+ {
+ 	return (sector_t)get_unaligned_le32(&p->nr_sects);
+@@ -52,9 +50,9 @@ static inline sector_t start_sect(struct msdos_partition *p)
+ 
+ static inline int is_extended_partition(struct msdos_partition *p)
+ {
+-	return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
+-		SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
+-		SYS_IND(p) == LINUX_EXTENDED_PARTITION);
++	return (p->sys_ind == DOS_EXTENDED_PARTITION ||
++		p->sys_ind == WIN98_EXTENDED_PARTITION ||
++		p->sys_ind == LINUX_EXTENDED_PARTITION);
+ }
+ 
+ #define MSDOS_LABEL_MAGIC1	0x55
+@@ -193,7 +191,7 @@ static void parse_extended(struct parsed_partitions *state,
+ 
+ 			put_partition(state, state->next, next, size);
+ 			set_info(state, state->next, disksig);
+-			if (SYS_IND(p) == LINUX_RAID_PARTITION)
++			if (p->sys_ind == LINUX_RAID_PARTITION)
+ 				state->parts[state->next].flags = ADDPART_FLAG_RAID;
+ 			loopct = 0;
+ 			if (++state->next == state->limit)
+@@ -546,7 +544,7 @@ static void parse_minix(struct parsed_partitions *state,
+ 	 * a secondary MBR describing its subpartitions, or
+ 	 * the normal boot sector. */
+ 	if (msdos_magic_present(data + 510) &&
+-	    SYS_IND(p) == MINIX_PARTITION) { /* subpartition table present */
++	    p->sys_ind == MINIX_PARTITION) { /* subpartition table present */
+ 		char tmp[1 + BDEVNAME_SIZE + 10 + 9 + 1];
+ 
+ 		snprintf(tmp, sizeof(tmp), " %s%d: <minix:", state->name, origin);
+@@ -555,7 +553,7 @@ static void parse_minix(struct parsed_partitions *state,
+ 			if (state->next == state->limit)
+ 				break;
+ 			/* add each partition in use */
+-			if (SYS_IND(p) == MINIX_PARTITION)
++			if (p->sys_ind == MINIX_PARTITION)
+ 				put_partition(state, state->next++,
+ 					      start_sect(p), nr_sects(p));
+ 		}
+@@ -643,7 +641,7 @@ int msdos_partition(struct parsed_partitions *state)
+ 	p = (struct msdos_partition *) (data + 0x1be);
+ 	for (slot = 1 ; slot <= 4 ; slot++, p++) {
+ 		/* If this is an EFI GPT disk, msdos should ignore it. */
+-		if (SYS_IND(p) == EFI_PMBR_OSTYPE_EFI_GPT) {
++		if (p->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT) {
+ 			put_dev_sector(sect);
+ 			return 0;
+ 		}
+@@ -685,11 +683,11 @@ int msdos_partition(struct parsed_partitions *state)
+ 		}
+ 		put_partition(state, slot, start, size);
+ 		set_info(state, slot, disksig);
+-		if (SYS_IND(p) == LINUX_RAID_PARTITION)
++		if (p->sys_ind == LINUX_RAID_PARTITION)
+ 			state->parts[slot].flags = ADDPART_FLAG_RAID;
+-		if (SYS_IND(p) == DM6_PARTITION)
++		if (p->sys_ind == DM6_PARTITION)
+ 			strlcat(state->pp_buf, "[DM]", PAGE_SIZE);
+-		if (SYS_IND(p) == EZD_PARTITION)
++		if (p->sys_ind == EZD_PARTITION)
+ 			strlcat(state->pp_buf, "[EZD]", PAGE_SIZE);
+ 	}
+ 
+@@ -698,7 +696,7 @@ int msdos_partition(struct parsed_partitions *state)
+ 	/* second pass - output for each on a separate line */
+ 	p = (struct msdos_partition *) (0x1be + data);
+ 	for (slot = 1 ; slot <= 4 ; slot++, p++) {
+-		unsigned char id = SYS_IND(p);
++		unsigned char id = p->sys_ind;
+ 		int n;
+ 
+ 		if (!nr_sects(p))
+diff --git a/certs/.gitignore b/certs/.gitignore
+index 2a24839906863..6cbd1f1a5837b 100644
+--- a/certs/.gitignore
++++ b/certs/.gitignore
+@@ -1,2 +1,3 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ x509_certificate_list
++x509_revocation_list
+diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
+index 49b781a9cd979..ab8a4e0191b19 100644
+--- a/drivers/acpi/acpi_amba.c
++++ b/drivers/acpi/acpi_amba.c
+@@ -76,6 +76,7 @@ static int amba_handler_attach(struct acpi_device *adev,
+ 		case IORESOURCE_MEM:
+ 			if (!address_found) {
+ 				dev->res = *rentry->res;
++				dev->res.name = dev_name(&dev->dev);
+ 				address_found = true;
+ 			}
+ 			break;
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 2ea1781290ccf..1b68d116d808c 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -540,6 +540,15 @@ static const struct dmi_system_id video_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+ 		},
+ 	},
++	{
++	 .callback = video_set_report_key_events,
++	 .driver_data = (void *)((uintptr_t)REPORT_BRIGHTNESS_KEY_EVENTS),
++	 .ident = "Dell Vostro 3350",
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
++		},
++	},
+ 	/*
+ 	 * Some machines change the brightness themselves when a brightness
+ 	 * hotkey gets pressed, despite us telling them not to. In this case
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index b9fa3ef5b57c0..425bae6181319 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -948,6 +948,8 @@ static int virtblk_freeze(struct virtio_device *vdev)
+ 	blk_mq_quiesce_queue(vblk->disk->queue);
+ 
+ 	vdev->config->del_vqs(vdev);
++	kfree(vblk->vqs);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 1836cc56e357b..673522874cec4 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -475,7 +475,7 @@ static struct port_buffer *get_inbuf(struct port *port)
+ 
+ 	buf = virtqueue_get_buf(port->in_vq, &len);
+ 	if (buf) {
+-		buf->len = len;
++		buf->len = min_t(size_t, len, buf->size);
+ 		buf->offset = 0;
+ 		port->stats.bytes_received += len;
+ 	}
+@@ -1712,7 +1712,7 @@ static void control_work_handler(struct work_struct *work)
+ 	while ((buf = virtqueue_get_buf(vq, &len))) {
+ 		spin_unlock(&portdev->c_ivq_lock);
+ 
+-		buf->len = len;
++		buf->len = min_t(size_t, len, buf->size);
+ 		buf->offset = 0;
+ 
+ 		handle_control_message(vq->vdev, portdev, buf);
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index 8a482c434ea6a..9ff81e2603127 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -182,6 +182,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
+ 	return 0;
+ }
+ 
++static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
++{
++	struct cppc_cpudata *cpu_data = policy->driver_data;
++
++	list_del(&cpu_data->node);
++	free_cpumask_var(cpu_data->shared_cpu_map);
++	kfree(cpu_data);
++	policy->driver_data = NULL;
++}
++
+ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+ {
+ 	struct cppc_cpudata *cpu_data = policy->driver_data;
+@@ -196,11 +206,7 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+ 		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
+ 			 caps->lowest_perf, cpu, ret);
+ 
+-	/* Remove CPU node from list and free driver data for policy */
+-	free_cpumask_var(cpu_data->shared_cpu_map);
+-	list_del(&cpu_data->node);
+-	kfree(policy->driver_data);
+-	policy->driver_data = NULL;
++	cppc_cpufreq_put_cpu_data(policy);
+ }
+ 
+ /*
+@@ -340,7 +346,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	default:
+ 		pr_debug("Unsupported CPU co-ord type: %d\n",
+ 			 policy->shared_type);
+-		return -EFAULT;
++		ret = -EFAULT;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -355,10 +362,16 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	cpu_data->perf_ctrls.desired_perf =  caps->highest_perf;
+ 
+ 	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
+-	if (ret)
++	if (ret) {
+ 		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
+ 			 caps->highest_perf, cpu, ret);
++		goto out;
++	}
+ 
++	return 0;
++
++out:
++	cppc_cpufreq_put_cpu_data(policy);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+index ed2ab46b15e7f..045ead46ec8fc 100644
+--- a/drivers/dma/fsl-qdma.c
++++ b/drivers/dma/fsl-qdma.c
+@@ -1235,7 +1235,11 @@ static int fsl_qdma_probe(struct platform_device *pdev)
+ 	fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
+ 	fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
+ 
+-	dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
++	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
++	if (ret) {
++		dev_err(&pdev->dev, "dma_set_mask failure.\n");
++		return ret;
++	}
+ 
+ 	platform_set_drvdata(pdev, fsl_qdma);
+ 
+diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
+index 9b21e45debc22..8c825242d6572 100644
+--- a/drivers/edac/Kconfig
++++ b/drivers/edac/Kconfig
+@@ -271,7 +271,7 @@ config EDAC_PND2
+ config EDAC_IGEN6
+ 	tristate "Intel client SoC Integrated MC"
+ 	depends on PCI && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG
+-	depends on X64_64 && X86_MCE_INTEL
++	depends on X86_64 && X86_MCE_INTEL
+ 	help
+ 	  Support for error detection and correction on the Intel
+ 	  client SoC Integrated Memory Controller using In-Band ECC IP.
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index cacdf1589b101..9485e0f1f05fb 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -268,6 +268,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
+ 		return;
+ 	}
+ 
++	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
++	if (msg_type == MSG_TYPE_DELAYED_RESP)
++		xfer->rx.len = info->desc->max_msg_size;
++
+ 	scmi_dump_header_dbg(dev, &xfer->hdr);
+ 
+ 	info->desc->ops->fetch_response(cinfo, xfer);
+diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
+index ae15940a078e3..c32754055c60b 100644
+--- a/drivers/firmware/tegra/bpmp-tegra210.c
++++ b/drivers/firmware/tegra/bpmp-tegra210.c
+@@ -210,7 +210,7 @@ static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
+ 	priv->tx_irq_data = irq_get_irq_data(err);
+ 	if (!priv->tx_irq_data) {
+ 		dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n");
+-		return err;
++		return -ENOENT;
+ 	}
+ 
+ 	err = platform_get_irq_byname(pdev, "rx");
+diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
+index 62f0d1a5dd324..1cf4f10874923 100644
+--- a/drivers/firmware/turris-mox-rwtm.c
++++ b/drivers/firmware/turris-mox-rwtm.c
+@@ -147,11 +147,14 @@ MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+ 
+ static int mox_get_status(enum mbox_cmd cmd, u32 retval)
+ {
+-	if (MBOX_STS_CMD(retval) != cmd ||
+-	    MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
++	if (MBOX_STS_CMD(retval) != cmd)
+ 		return -EIO;
+ 	else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL)
+ 		return -(int)MBOX_STS_VALUE(retval);
++	else if (MBOX_STS_ERROR(retval) == MBOX_STS_BADCMD)
++		return -ENOSYS;
++	else if (MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
++		return -EIO;
+ 	else
+ 		return MBOX_STS_VALUE(retval);
+ }
+@@ -201,11 +204,14 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
+ 		return ret;
+ 
+ 	ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
+-	if (ret < 0 && ret != -ENODATA) {
+-		return ret;
+-	} else if (ret == -ENODATA) {
++	if (ret == -ENODATA) {
+ 		dev_warn(rwtm->dev,
+ 			 "Board does not have manufacturing information burned!\n");
++	} else if (ret == -ENOSYS) {
++		dev_notice(rwtm->dev,
++			   "Firmware does not support the BOARD_INFO command\n");
++	} else if (ret < 0) {
++		return ret;
+ 	} else {
+ 		rwtm->serial_number = reply->status[1];
+ 		rwtm->serial_number <<= 32;
+@@ -234,10 +240,13 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
+ 		return ret;
+ 
+ 	ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
+-	if (ret < 0 && ret != -ENODATA) {
+-		return ret;
+-	} else if (ret == -ENODATA) {
++	if (ret == -ENODATA) {
+ 		dev_warn(rwtm->dev, "Board has no public key burned!\n");
++	} else if (ret == -ENOSYS) {
++		dev_notice(rwtm->dev,
++			   "Firmware does not support the ECDSA_PUB_KEY command\n");
++	} else if (ret < 0) {
++		return ret;
+ 	} else {
+ 		u32 *s = reply->status;
+ 
+@@ -251,6 +260,27 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
+ 	return 0;
+ }
+ 
++static int check_get_random_support(struct mox_rwtm *rwtm)
++{
++	struct armada_37xx_rwtm_tx_msg msg;
++	int ret;
++
++	msg.command = MBOX_CMD_GET_RANDOM;
++	msg.args[0] = 1;
++	msg.args[1] = rwtm->buf_phys;
++	msg.args[2] = 4;
++
++	ret = mbox_send_message(rwtm->mbox, &msg);
++	if (ret < 0)
++		return ret;
++
++	ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
++	if (ret < 0)
++		return ret;
++
++	return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
++}
++
+ static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ {
+ 	struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv;
+@@ -488,6 +518,13 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		dev_warn(dev, "Cannot read board information: %i\n", ret);
+ 
++	ret = check_get_random_support(rwtm);
++	if (ret < 0) {
++		dev_notice(dev,
++			   "Firmware does not support the GET_RANDOM command\n");
++		goto free_channel;
++	}
++
+ 	rwtm->hwrng.name = DRIVER_NAME "_hwrng";
+ 	rwtm->hwrng.read = mox_hwrng_read;
+ 	rwtm->hwrng.priv = (unsigned long) rwtm;
+@@ -505,6 +542,8 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
+ 		goto free_channel;
+ 	}
+ 
++	dev_info(dev, "HWRNG successfully registered\n");
++
+ 	return 0;
+ 
+ free_channel:
+diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
+index 90dbe58ca1edc..dbad73162c833 100644
+--- a/drivers/fsi/fsi-master-aspeed.c
++++ b/drivers/fsi/fsi-master-aspeed.c
+@@ -645,6 +645,7 @@ static const struct of_device_id fsi_master_aspeed_match[] = {
+ 	{ .compatible = "aspeed,ast2600-fsi-master" },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, fsi_master_aspeed_match);
+ 
+ static struct platform_driver fsi_master_aspeed_driver = {
+ 	.driver = {
+diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
+index 57a779a89b073..70c03e304d6c8 100644
+--- a/drivers/fsi/fsi-master-ast-cf.c
++++ b/drivers/fsi/fsi-master-ast-cf.c
+@@ -1427,6 +1427,7 @@ static const struct of_device_id fsi_master_acf_match[] = {
+ 	{ .compatible = "aspeed,ast2500-cf-fsi-master" },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, fsi_master_acf_match);
+ 
+ static struct platform_driver fsi_master_acf = {
+ 	.driver = {
+diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
+index aa97c4a250cb4..7d5f29b4b595d 100644
+--- a/drivers/fsi/fsi-master-gpio.c
++++ b/drivers/fsi/fsi-master-gpio.c
+@@ -882,6 +882,7 @@ static const struct of_device_id fsi_master_gpio_match[] = {
+ 	{ .compatible = "fsi-master-gpio" },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, fsi_master_gpio_match);
+ 
+ static struct platform_driver fsi_master_gpio_driver = {
+ 	.driver = {
+diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
+index cb05b6dacc9d5..dc74bffedd727 100644
+--- a/drivers/fsi/fsi-occ.c
++++ b/drivers/fsi/fsi-occ.c
+@@ -636,6 +636,7 @@ static const struct of_device_id occ_match[] = {
+ 	},
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, occ_match);
+ 
+ static struct platform_driver occ_driver = {
+ 	.driver = {
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index c91d056515966..f5cfc0698799a 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -1241,6 +1241,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
+ 
+ 	{ .compatible = "onnn,cat9554", .data = OF_953X( 8, PCA_INT), },
+ 	{ .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
++	{ .compatible = "onnn,pca9655", .data = OF_953X(16, PCA_INT), },
+ 
+ 	{ .compatible = "exar,xra1202", .data = OF_953X( 8, 0), },
+ 	{ }
+diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
+index 3521c1dc3ac00..c288a7502de25 100644
+--- a/drivers/gpio/gpio-zynq.c
++++ b/drivers/gpio/gpio-zynq.c
+@@ -736,6 +736,11 @@ static int __maybe_unused zynq_gpio_suspend(struct device *dev)
+ 	struct zynq_gpio *gpio = dev_get_drvdata(dev);
+ 	struct irq_data *data = irq_get_irq_data(gpio->irq);
+ 
++	if (!data) {
++		dev_err(dev, "irq_get_irq_data() failed\n");
++		return -EINVAL;
++	}
++
+ 	if (!device_may_wakeup(dev))
+ 		disable_irq(gpio->irq);
+ 
+@@ -753,6 +758,11 @@ static int __maybe_unused zynq_gpio_resume(struct device *dev)
+ 	struct irq_data *data = irq_get_irq_data(gpio->irq);
+ 	int ret;
+ 
++	if (!data) {
++		dev_err(dev, "irq_get_irq_data() failed\n");
++		return -EINVAL;
++	}
++
+ 	if (!device_may_wakeup(dev))
+ 		enable_irq(gpio->irq);
+ 
+@@ -1001,8 +1011,11 @@ err_pm_dis:
+ static int zynq_gpio_remove(struct platform_device *pdev)
+ {
+ 	struct zynq_gpio *gpio = platform_get_drvdata(pdev);
++	int ret;
+ 
+-	pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_get_sync(&pdev->dev);
++	if (ret < 0)
++		dev_warn(&pdev->dev, "pm_runtime_get_sync() failed\n");
+ 	gpiochip_remove(&gpio->chip);
+ 	clk_disable_unprepare(gpio->clk);
+ 	device_set_wakeup_capable(&pdev->dev, 0);
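
The zynq hunks harden suspend/resume against irq_get_irq_data() returning NULL and make the remove path check pm_runtime_get_sync(). The remove path only warns on failure because pm_runtime_get_sync() bumps the usage count even when the resume fails, and teardown must proceed regardless. A hedged fragment of that shape, assuming the usual includes and a driver-private struct (all names illustrative):

static int example_remove(struct platform_device *pdev)
{
	struct example_gpio *gpio = platform_get_drvdata(pdev);
	int ret;

	/*
	 * Resume the hardware so teardown can touch its registers. The
	 * usage count is incremented even on failure, so only warn;
	 * bailing out here would leave the device half-registered.
	 */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		dev_warn(&pdev->dev, "pm_runtime_get_sync() failed\n");

	gpiochip_remove(&gpio->chip);
	clk_disable_unprepare(gpio->clk);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
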
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 41ebc9d21e5c6..de394de9e6303 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1092,6 +1092,7 @@ static const struct pci_device_id pciidlist[] = {
+ 	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ 
+ 	/* Renoir */
++	{0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ 	{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ 	{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 2342c5d216f9b..5c40912b51d1c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -7744,6 +7744,97 @@ static void gfx_v10_0_update_fine_grain_clock_gating(struct amdgpu_device *adev,
+ 	}
+ }
+ 
++static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_device *adev)
++{
++	uint32_t reg_data = 0;
++	uint32_t reg_idx = 0;
++	uint32_t i;
++
++	const uint32_t tcp_ctrl_regs[] = {
++		mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP00_CU1_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP01_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP01_CU1_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP02_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP02_CU1_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP10_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP10_CU1_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP11_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP11_CU1_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP12_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP12_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP00_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP00_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP01_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP01_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP02_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP02_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP10_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP10_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP11_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP11_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP12_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP12_CU1_TCP_CTRL_REG
++	};
++
++	const uint32_t tcp_ctrl_regs_nv12[] = {
++		mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP00_CU1_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP01_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP01_CU1_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP02_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP02_CU1_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP10_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP10_CU1_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP11_CU0_TCP_CTRL_REG,
++		mmCGTS_SA0_WGP11_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP00_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP00_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP01_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP01_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP02_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP02_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP10_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP10_CU1_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP11_CU0_TCP_CTRL_REG,
++		mmCGTS_SA1_WGP11_CU1_TCP_CTRL_REG,
++	};
++
++	const uint32_t sm_ctlr_regs[] = {
++		mmCGTS_SA0_QUAD0_SM_CTRL_REG,
++		mmCGTS_SA0_QUAD1_SM_CTRL_REG,
++		mmCGTS_SA1_QUAD0_SM_CTRL_REG,
++		mmCGTS_SA1_QUAD1_SM_CTRL_REG
++	};
++
++	if (adev->asic_type == CHIP_NAVI12) {
++		for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs_nv12); i++) {
++			reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] +
++				  tcp_ctrl_regs_nv12[i];
++			reg_data = RREG32(reg_idx);
++			reg_data |= CGTS_SA0_WGP00_CU0_TCP_CTRL_REG__TCPI_LS_OVERRIDE_MASK;
++			WREG32(reg_idx, reg_data);
++		}
++	} else {
++		for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs); i++) {
++			reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] +
++				  tcp_ctrl_regs[i];
++			reg_data = RREG32(reg_idx);
++			reg_data |= CGTS_SA0_WGP00_CU0_TCP_CTRL_REG__TCPI_LS_OVERRIDE_MASK;
++			WREG32(reg_idx, reg_data);
++		}
++	}
++
++	for (i = 0; i < ARRAY_SIZE(sm_ctlr_regs); i++) {
++		reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_QUAD0_SM_CTRL_REG_BASE_IDX] +
++			  sm_ctlr_regs[i];
++		reg_data = RREG32(reg_idx);
++		reg_data &= ~CGTS_SA0_QUAD0_SM_CTRL_REG__SM_MODE_MASK;
++		reg_data |= 2 << CGTS_SA0_QUAD0_SM_CTRL_REG__SM_MODE__SHIFT;
++		WREG32(reg_idx, reg_data);
++	}
++}
++
+ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+ 					    bool enable)
+ {
+@@ -7760,6 +7851,10 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+ 		gfx_v10_0_update_3d_clock_gating(adev, enable);
+ 		/* ===  CGCG + CGLS === */
+ 		gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
++
++		if ((adev->asic_type >= CHIP_NAVI10) &&
++		     (adev->asic_type <= CHIP_NAVI12))
++			gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev);
+ 	} else {
+ 		/* CGCG/CGLS should be disabled before MGCG/MGLS
+ 		 * ===  CGCG + CGLS ===
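
Both loops in the new workaround are plain read-modify-write updates: the TCP_CTRL pass ORs in the TCPI_LS_OVERRIDE bit, and the SM_CTRL pass clears the SM_MODE field before inserting the value 2. The field arithmetic in isolation, with made-up mask and shift values standing in for the real register layout:

#include <inttypes.h>
#include <stdio.h>

#define SM_MODE_SHIFT	0
#define SM_MODE_MASK	(0x7u << SM_MODE_SHIFT)	/* illustrative layout */

static uint32_t set_sm_mode(uint32_t reg, uint32_t mode)
{
	reg &= ~SM_MODE_MASK;				/* clear old field */
	reg |= (mode << SM_MODE_SHIFT) & SM_MODE_MASK;	/* insert value  */
	return reg;
}

int main(void)
{
	uint32_t reg = 0xABCD0005u;

	printf("0x%08" PRIX32 " -> 0x%08" PRIX32 "\n",
	       reg, set_sm_mode(reg, 2));	/* 0xABCD0005 -> 0xABCD0002 */
	return 0;
}
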
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 65803e153a223..d243e60c6eef7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -452,13 +452,9 @@ static const struct sysfs_ops procfs_stats_ops = {
+ 	.show = kfd_procfs_stats_show,
+ };
+ 
+-static struct attribute *procfs_stats_attrs[] = {
+-	NULL
+-};
+-
+ static struct kobj_type procfs_stats_type = {
+ 	.sysfs_ops = &procfs_stats_ops,
+-	.default_attrs = procfs_stats_attrs,
++	.release = kfd_procfs_kobj_release,
+ };
+ 
+ int kfd_procfs_add_queue(struct queue *q)
+@@ -973,9 +969,11 @@ static void kfd_process_wq_release(struct work_struct *work)
+ 		list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ 			sysfs_remove_file(p->kobj, &pdd->attr_vram);
+ 			sysfs_remove_file(p->kobj, &pdd->attr_sdma);
+-			sysfs_remove_file(p->kobj, &pdd->attr_evict);
+-			if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL)
+-				sysfs_remove_file(p->kobj, &pdd->attr_cu_occupancy);
++
++			sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
++			if (pdd->dev->kfd2kgd->get_cu_occupancy)
++				sysfs_remove_file(pdd->kobj_stats,
++						  &pdd->attr_cu_occupancy);
+ 			kobject_del(pdd->kobj_stats);
+ 			kobject_put(pdd->kobj_stats);
+ 			pdd->kobj_stats = NULL;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index eb1635ac89887..43c07ac2c6fce 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -153,6 +153,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
+ 		if (pqn->q && pqn->q->gws)
+ 			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
+ 				pqn->q->gws);
++		kfd_procfs_del_queue(pqn->q);
+ 		uninit_queue(pqn->q);
+ 		list_del(&pqn->process_queue_list);
+ 		kfree(pqn);
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 9c75c88150569..dfbd904310435 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -94,6 +94,9 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
+ static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
+ static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+ 
++static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
++						 struct drm_dp_mst_branch *branch);
++
+ #define DBG_PREFIX "[dp_mst]"
+ 
+ #define DP_STR(x) [DP_ ## x] = #x
+@@ -2499,7 +2502,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
+ {
+ 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+ 	struct drm_dp_mst_port *port;
+-	int old_ddps, old_input, ret, i;
++	int old_ddps, ret;
+ 	u8 new_pdt;
+ 	bool new_mcs;
+ 	bool dowork = false, create_connector = false;
+@@ -2531,7 +2534,6 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
+ 	}
+ 
+ 	old_ddps = port->ddps;
+-	old_input = port->input;
+ 	port->input = conn_stat->input_port;
+ 	port->ldps = conn_stat->legacy_device_plug_status;
+ 	port->ddps = conn_stat->displayport_device_plug_status;
+@@ -2554,28 +2556,6 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
+ 		dowork = false;
+ 	}
+ 
+-	if (!old_input && old_ddps != port->ddps && !port->ddps) {
+-		for (i = 0; i < mgr->max_payloads; i++) {
+-			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+-			struct drm_dp_mst_port *port_validated;
+-
+-			if (!vcpi)
+-				continue;
+-
+-			port_validated =
+-				container_of(vcpi, struct drm_dp_mst_port, vcpi);
+-			port_validated =
+-				drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+-			if (!port_validated) {
+-				mutex_lock(&mgr->payload_lock);
+-				vcpi->num_slots = 0;
+-				mutex_unlock(&mgr->payload_lock);
+-			} else {
+-				drm_dp_mst_topology_put_port(port_validated);
+-			}
+-		}
+-	}
+-
+ 	if (port->connector)
+ 		drm_modeset_unlock(&mgr->base.lock);
+ 	else if (create_connector)
+@@ -3385,6 +3365,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 	struct drm_dp_mst_port *port;
+ 	int i, j;
+ 	int cur_slots = 1;
++	bool skip;
+ 
+ 	mutex_lock(&mgr->payload_lock);
+ 	for (i = 0; i < mgr->max_payloads; i++) {
+@@ -3399,6 +3380,16 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 			port = container_of(vcpi, struct drm_dp_mst_port,
+ 					    vcpi);
+ 
++			mutex_lock(&mgr->lock);
++			skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
++			mutex_unlock(&mgr->lock);
++
++			if (skip) {
++				drm_dbg_kms(mgr->dev,
++					    "Virtual channel %d is not in current topology\n",
++					    i);
++				continue;
++			}
+ 			/* Validated ports don't matter if we're releasing
+ 			 * VCPI
+ 			 */
+@@ -3406,8 +3397,16 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 				port = drm_dp_mst_topology_get_port_validated(
+ 				    mgr, port);
+ 				if (!port) {
+-					mutex_unlock(&mgr->payload_lock);
+-					return -EINVAL;
++					if (vcpi->num_slots == payload->num_slots) {
++						cur_slots += vcpi->num_slots;
++						payload->start_slot = req_payload.start_slot;
++						continue;
++					} else {
++						drm_dbg_kms(mgr->dev,
++							    "Fail:set payload to invalid sink");
++						mutex_unlock(&mgr->payload_lock);
++						return -EINVAL;
++					}
+ 				}
+ 				put_port = true;
+ 			}
+@@ -3491,6 +3490,7 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
+ 	struct drm_dp_mst_port *port;
+ 	int i;
+ 	int ret = 0;
++	bool skip;
+ 
+ 	mutex_lock(&mgr->payload_lock);
+ 	for (i = 0; i < mgr->max_payloads; i++) {
+@@ -3500,6 +3500,13 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
+ 
+ 		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ 
++		mutex_lock(&mgr->lock);
++		skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
++		mutex_unlock(&mgr->lock);
++
++		if (skip)
++			continue;
++
+ 		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
+ 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
+ 			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
+@@ -4581,9 +4588,18 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
+ void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ 				struct drm_dp_mst_port *port)
+ {
++	bool skip;
++
+ 	if (!port->vcpi.vcpi)
+ 		return;
+ 
++	mutex_lock(&mgr->lock);
++	skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
++	mutex_unlock(&mgr->lock);
++
++	if (skip)
++		return;
++
+ 	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ 	port->vcpi.num_slots = 0;
+ 	port->vcpi.pbn = 0;
+diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
+index ebe9dccf2d830..0b8648396fb24 100644
+--- a/drivers/gpu/drm/gma500/framebuffer.c
++++ b/drivers/gpu/drm/gma500/framebuffer.c
+@@ -352,6 +352,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
+ 			 const struct drm_mode_fb_cmd2 *cmd)
+ {
+ 	struct drm_gem_object *obj;
++	struct drm_framebuffer *fb;
+ 
+ 	/*
+ 	 *	Find the GEM object and thus the gtt range object that is
+@@ -362,7 +363,11 @@ static struct drm_framebuffer *psb_user_framebuffer_create
+ 		return ERR_PTR(-ENOENT);
+ 
+ 	/* Let the core code do all the work */
+-	return psb_framebuffer_create(dev, cmd, obj);
++	fb = psb_framebuffer_create(dev, cmd, obj);
++	if (IS_ERR(fb))
++		drm_gem_object_put(obj);
++
++	return fb;
+ }
+ 
+ static int psbfb_probe(struct drm_fb_helper *fb_helper,
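
The gma500 fix applies the usual lookup-then-hand-off rule: the drm_gem_object_lookup() above the hunk returns obj with a reference held for the caller, psb_framebuffer_create() consumes that reference only on success, so the failure path must drop it or the object leaks. A runnable toy model of the rule, with all names invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static struct obj *obj_lookup(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refs = 1;			/* caller now owns one reference */
	return o;
}

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		free(o);
}

/* Consumes the caller's reference on success only. */
static struct obj *fb_create(struct obj *o, int ok)
{
	return ok ? o : NULL;
}

int main(void)
{
	struct obj *o = obj_lookup();
	struct obj *fb = fb_create(o, 0);	/* simulate failure */

	if (!fb)
		obj_put(o);	/* the fix: drop the unconsumed reference */
	printf("fb=%p\n", (void *)fb);
	return 0;
}
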
+diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+index 3ae16945bd436..c11e6b468fc49 100644
+--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
++++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+@@ -298,10 +298,7 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+ 			__i915_gem_object_pin_pages(pt->base);
+ 			i915_gem_object_make_unshrinkable(pt->base);
+ 
+-			if (lvl ||
+-			    gen8_pt_count(*start, end) < I915_PDES ||
+-			    intel_vgpu_active(vm->i915))
+-				fill_px(pt, vm->scratch[lvl]->encode);
++			fill_px(pt, vm->scratch[lvl]->encode);
+ 
+ 			spin_lock(&pd->lock);
+ 			if (likely(!pd->entry[idx])) {
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+index 4b09490c20c0a..e144aef0fd2fe 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+@@ -366,7 +366,7 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
+ 	if (intel_has_pending_fb_unpin(ggtt->vm.i915))
+ 		return ERR_PTR(-EAGAIN);
+ 
+-	return ERR_PTR(-EDEADLK);
++	return ERR_PTR(-ENOBUFS);
+ }
+ 
+ int __i915_vma_pin_fence(struct i915_vma *vma)
+diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
+index c9ac3dc651135..9cb8c7d13d46b 100644
+--- a/drivers/hwtracing/intel_th/core.c
++++ b/drivers/hwtracing/intel_th/core.c
+@@ -215,6 +215,22 @@ static ssize_t port_show(struct device *dev, struct device_attribute *attr,
+ 
+ static DEVICE_ATTR_RO(port);
+ 
++static void intel_th_trace_prepare(struct intel_th_device *thdev)
++{
++	struct intel_th_device *hub = to_intel_th_hub(thdev);
++	struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
++
++	if (hub->type != INTEL_TH_SWITCH)
++		return;
++
++	if (thdev->type != INTEL_TH_OUTPUT)
++		return;
++
++	pm_runtime_get_sync(&thdev->dev);
++	hubdrv->prepare(hub, &thdev->output);
++	pm_runtime_put(&thdev->dev);
++}
++
+ static int intel_th_output_activate(struct intel_th_device *thdev)
+ {
+ 	struct intel_th_driver *thdrv =
+@@ -235,6 +251,7 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
+ 	if (ret)
+ 		goto fail_put;
+ 
++	intel_th_trace_prepare(thdev);
+ 	if (thdrv->activate)
+ 		ret = thdrv->activate(thdev);
+ 	else
+diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
+index 28509b02a0b56..b3308934a687d 100644
+--- a/drivers/hwtracing/intel_th/gth.c
++++ b/drivers/hwtracing/intel_th/gth.c
+@@ -564,6 +564,21 @@ static void gth_tscu_resync(struct gth_device *gth)
+ 	iowrite32(reg, gth->base + REG_TSCU_TSUCTRL);
+ }
+ 
++static void intel_th_gth_prepare(struct intel_th_device *thdev,
++				 struct intel_th_output *output)
++{
++	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
++	int count;
++
++	/*
++	 * Wait until the output port is in reset before we start
++	 * programming it.
++	 */
++	for (count = GTH_PLE_WAITLOOP_DEPTH;
++	     count && !(gth_output_get(gth, output->port) & BIT(5)); count--)
++		cpu_relax();
++}
++
+ /**
+  * intel_th_gth_enable() - enable tracing to an output device
+  * @thdev:	GTH device
+@@ -815,6 +830,7 @@ static struct intel_th_driver intel_th_gth_driver = {
+ 	.assign		= intel_th_gth_assign,
+ 	.unassign	= intel_th_gth_unassign,
+ 	.set_output	= intel_th_gth_set_output,
++	.prepare	= intel_th_gth_prepare,
+ 	.enable		= intel_th_gth_enable,
+ 	.trig_switch	= intel_th_gth_switch,
+ 	.disable	= intel_th_gth_disable,
+diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
+index 5fe694708b7a3..595615b791086 100644
+--- a/drivers/hwtracing/intel_th/intel_th.h
++++ b/drivers/hwtracing/intel_th/intel_th.h
+@@ -143,6 +143,7 @@ intel_th_output_assigned(struct intel_th_device *thdev)
+  * @remove:	remove method
+  * @assign:	match a given output type device against available outputs
+  * @unassign:	deassociate an output type device from an output port
++ * @prepare:	prepare output port for tracing
+  * @enable:	enable tracing for a given output device
+  * @disable:	disable tracing for a given output device
+  * @irq:	interrupt callback
+@@ -164,6 +165,8 @@ struct intel_th_driver {
+ 					  struct intel_th_device *othdev);
+ 	void			(*unassign)(struct intel_th_device *thdev,
+ 					    struct intel_th_device *othdev);
++	void			(*prepare)(struct intel_th_device *thdev,
++					   struct intel_th_output *output);
+ 	void			(*enable)(struct intel_th_device *thdev,
+ 					  struct intel_th_output *output);
+ 	void			(*trig_switch)(struct intel_th_device *thdev,
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index f21362355973e..8e4be0d4ce34e 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -24,6 +24,7 @@
+ #include <linux/i2c-smbus.h>
+ #include <linux/idr.h>
+ #include <linux/init.h>
++#include <linux/interrupt.h>
+ #include <linux/irqflags.h>
+ #include <linux/jump_label.h>
+ #include <linux/kernel.h>
+@@ -587,6 +588,8 @@ static void i2c_device_shutdown(struct device *dev)
+ 	driver = to_i2c_driver(dev->driver);
+ 	if (driver->shutdown)
+ 		driver->shutdown(client);
++	else if (client->irq > 0)
++		disable_irq(client->irq);
+ }
+ 
+ static void i2c_client_dev_release(struct device *dev)
+diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
+index b7523357d8eba..ec6bd15bd2d4c 100644
+--- a/drivers/iio/gyro/fxas21002c_core.c
++++ b/drivers/iio/gyro/fxas21002c_core.c
+@@ -366,14 +366,7 @@ out_unlock:
+ 
+ static int  fxas21002c_pm_get(struct fxas21002c_data *data)
+ {
+-	struct device *dev = regmap_get_device(data->regmap);
+-	int ret;
+-
+-	ret = pm_runtime_get_sync(dev);
+-	if (ret < 0)
+-		pm_runtime_put_noidle(dev);
+-
+-	return ret;
++	return pm_runtime_resume_and_get(regmap_get_device(data->regmap));
+ }
+ 
+ static int  fxas21002c_pm_put(struct fxas21002c_data *data)
+@@ -1005,7 +998,6 @@ int fxas21002c_core_probe(struct device *dev, struct regmap *regmap, int irq,
+ pm_disable:
+ 	pm_runtime_disable(dev);
+ 	pm_runtime_set_suspended(dev);
+-	pm_runtime_put_noidle(dev);
+ 
+ 	return ret;
+ }
+@@ -1019,7 +1011,6 @@ void fxas21002c_core_remove(struct device *dev)
+ 
+ 	pm_runtime_disable(dev);
+ 	pm_runtime_set_suspended(dev);
+-	pm_runtime_put_noidle(dev);
+ }
+ EXPORT_SYMBOL_GPL(fxas21002c_core_remove);
+ 
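
This conversion, like the bmc150_magn one further down, uses pm_runtime_resume_and_get() (added in v5.10), which folds the put_noidle-on-failure step into the get; the put_noidle calls dropped from the probe error path and from remove had no get left to balance. Roughly what the helper does internally, simplified from include/linux/pm_runtime.h:

static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the usage-count bump */
		return ret;
	}

	return 0;
}
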
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+index 7cedaab096a7f..e8d242ee6743b 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+@@ -15,19 +15,19 @@
+  *
+  * Supported sensors:
+  * - LSM6DS3:
+- *   - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
++ *   - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416
+  *   - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
+  *   - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
+  *   - FIFO size: 8KB
+  *
+  * - LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC/LSM6DS3TR-C:
+- *   - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
++ *   - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416
+  *   - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
+  *   - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
+  *   - FIFO size: 4KB
+  *
+  * - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP:
+- *   - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416,
++ *   - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416,
+  *     833
+  *   - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
+  *   - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
+diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
+index 20a0842f0e3a6..26c6400cb08ca 100644
+--- a/drivers/iio/magnetometer/bmc150_magn.c
++++ b/drivers/iio/magnetometer/bmc150_magn.c
+@@ -265,7 +265,7 @@ static int bmc150_magn_set_power_state(struct bmc150_magn_data *data, bool on)
+ 	int ret;
+ 
+ 	if (on) {
+-		ret = pm_runtime_get_sync(data->dev);
++		ret = pm_runtime_resume_and_get(data->dev);
+ 	} else {
+ 		pm_runtime_mark_last_busy(data->dev);
+ 		ret = pm_runtime_put_autosuspend(data->dev);
+@@ -274,9 +274,6 @@ static int bmc150_magn_set_power_state(struct bmc150_magn_data *data, bool on)
+ 	if (ret < 0) {
+ 		dev_err(data->dev,
+ 			"failed to change power state to %d\n", on);
+-		if (on)
+-			pm_runtime_put_noidle(data->dev);
+-
+ 		return ret;
+ 	}
+ #endif
+@@ -967,12 +964,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
+ 	ret = iio_device_register(indio_dev);
+ 	if (ret < 0) {
+ 		dev_err(dev, "unable to register iio device\n");
+-		goto err_buffer_cleanup;
++		goto err_disable_runtime_pm;
+ 	}
+ 
+ 	dev_dbg(dev, "Registered device %s\n", name);
+ 	return 0;
+ 
++err_disable_runtime_pm:
++	pm_runtime_disable(dev);
+ err_buffer_cleanup:
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ err_free_irq:
+@@ -996,7 +995,6 @@ int bmc150_magn_remove(struct device *dev)
+ 
+ 	pm_runtime_disable(dev);
+ 	pm_runtime_set_suspended(dev);
+-	pm_runtime_put_noidle(dev);
+ 
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ 
+diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
+index ddad4a82a5e55..e9547ee297564 100644
+--- a/drivers/input/touchscreen/hideep.c
++++ b/drivers/input/touchscreen/hideep.c
+@@ -361,13 +361,16 @@ static int hideep_enter_pgm(struct hideep_ts *ts)
+ 	return -EIO;
+ }
+ 
+-static void hideep_nvm_unlock(struct hideep_ts *ts)
++static int hideep_nvm_unlock(struct hideep_ts *ts)
+ {
+ 	u32 unmask_code;
++	int error;
+ 
+ 	hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_SFR_RPAGE);
+-	hideep_pgm_r_reg(ts, 0x0000000C, &unmask_code);
++	error = hideep_pgm_r_reg(ts, 0x0000000C, &unmask_code);
+ 	hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_DEFAULT_PAGE);
++	if (error)
++		return error;
+ 
+ 	/* make it unprotected code */
+ 	unmask_code &= ~HIDEEP_PROT_MODE;
+@@ -384,6 +387,8 @@ static void hideep_nvm_unlock(struct hideep_ts *ts)
+ 	NVM_W_SFR(HIDEEP_NVM_MASK_OFS, ts->nvm_mask);
+ 	SET_FLASH_HWCONTROL();
+ 	hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_DEFAULT_PAGE);
++
++	return 0;
+ }
+ 
+ static int hideep_check_status(struct hideep_ts *ts)
+@@ -462,7 +467,9 @@ static int hideep_program_nvm(struct hideep_ts *ts,
+ 	u32 addr = 0;
+ 	int error;
+ 
+-	hideep_nvm_unlock(ts);
++	error = hideep_nvm_unlock(ts);
++	if (error)
++		return error;
+ 
+ 	while (ucode_len > 0) {
+ 		xfer_len = min_t(size_t, ucode_len, HIDEEP_NVM_PAGE_SIZE);
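
The hideep change exists because unmask_code is only written by hideep_pgm_r_reg() on success; consuming it after a failed read is an uninitialized-use bug, so the helper now returns the read status and the caller propagates it. The shape of the guard as a standalone sketch (register values and error codes are made up):

#include <stdio.h>

static int read_reg(unsigned int addr, unsigned int *val, int fail)
{
	if (fail)
		return -5;		/* stand-in for -EIO; *val untouched */
	*val = 0xdead0000u | addr;
	return 0;
}

static int nvm_unlock(int fail)
{
	unsigned int unmask_code;
	int error = read_reg(0x0c, &unmask_code, fail);

	if (error)
		return error;		/* never consume an unwritten value */

	printf("unmask_code = 0x%08x\n", unmask_code);
	return 0;
}

int main(void)
{
	int ok = nvm_unlock(0);
	int bad = nvm_unlock(1);

	printf("ok=%d failing=%d\n", ok, bad);
	return 0;
}
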
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index 98b3a1c2a1813..44a427833385e 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -130,6 +130,16 @@ static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_doma
+ 	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
+ }
+ 
++static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
++{
++	const struct device_node *np = smmu->dev->of_node;
++
++	if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
++		return false;
++
++	return true;
++}
++
+ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ 		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
+ {
+@@ -144,7 +154,8 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ 	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
+ 	 * that is the case when the TTBR1 quirk is enabled
+ 	 */
+-	if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
++	if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
++	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
+ 	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
+ 		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;
+ 
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
+index d8c6bfde6a615..c6ff32797a238 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
+@@ -74,7 +74,7 @@ static bool using_legacy_binding, using_generic_binding;
+ static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
+ {
+ 	if (pm_runtime_enabled(smmu->dev))
+-		return pm_runtime_get_sync(smmu->dev);
++		return pm_runtime_resume_and_get(smmu->dev);
+ 
+ 	return 0;
+ }
+@@ -1268,6 +1268,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+ 	u64 phys;
+ 	unsigned long va, flags;
+ 	int ret, idx = cfg->cbndx;
++	phys_addr_t addr = 0;
+ 
+ 	ret = arm_smmu_rpm_get(smmu);
+ 	if (ret < 0)
+@@ -1287,6 +1288,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+ 		dev_err(dev,
+ 			"iova to phys timed out on %pad. Falling back to software table walk.\n",
+ 			&iova);
++		arm_smmu_rpm_put(smmu);
+ 		return ops->iova_to_phys(ops, iova);
+ 	}
+ 
+@@ -1295,12 +1297,14 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+ 	if (phys & ARM_SMMU_CB_PAR_F) {
+ 		dev_err(dev, "translation fault!\n");
+ 		dev_err(dev, "PAR = 0x%llx\n", phys);
+-		return 0;
++		goto out;
+ 	}
+ 
++	addr = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
++out:
+ 	arm_smmu_rpm_put(smmu);
+ 
+-	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
++	return addr;
+ }
+ 
+ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
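
Two leaks are closed in the arm-smmu hunk: the timeout path now calls arm_smmu_rpm_put() before falling back to the software walk, and the translation-fault path becomes a goto so the put runs there as well. The single-exit discipline, modeled as a runnable toy with a usage counter:

#include <stdio.h>

static int usage;	/* stands in for the runtime-PM usage count */

static int rpm_get(void) { usage++; return 0; }
static void rpm_put(void) { usage--; }

static long translate(unsigned long iova, int fault)
{
	long addr = 0;

	if (rpm_get() < 0)
		return 0;

	if (fault)
		goto out;	/* an early return here used to skip the put */

	addr = (long)iova;	/* pretend the hardware walk succeeded */
out:
	rpm_put();		/* balanced on every exit path */
	return addr;
}

int main(void)
{
	translate(0x2345, 1);
	translate(0x2345, 0);
	printf("usage count after both calls: %d (should be 0)\n", usage);
	return 0;
}
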
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 56930e0b8f590..8cdee1a0d25a8 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -2434,10 +2434,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 	return 0;
+ }
+ 
+-static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
++static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
+ {
+-	unsigned long flags;
++	struct intel_iommu *iommu = info->iommu;
+ 	struct context_entry *context;
++	unsigned long flags;
+ 	u16 did_old;
+ 
+ 	if (!iommu)
+@@ -2449,7 +2450,16 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
+ 		spin_unlock_irqrestore(&iommu->lock, flags);
+ 		return;
+ 	}
+-	did_old = context_domain_id(context);
++
++	if (sm_supported(iommu)) {
++		if (hw_pass_through && domain_type_is_si(info->domain))
++			did_old = FLPT_DEFAULT_DID;
++		else
++			did_old = info->domain->iommu_did[iommu->seq_id];
++	} else {
++		did_old = context_domain_id(context);
++	}
++
+ 	context_clear_entry(context);
+ 	__iommu_flush_cache(iommu, context, sizeof(*context));
+ 	spin_unlock_irqrestore(&iommu->lock, flags);
+@@ -2467,6 +2477,8 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
+ 				 0,
+ 				 0,
+ 				 DMA_TLB_DSI_FLUSH);
++
++	__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
+ }
+ 
+ static inline void unlink_domain_info(struct device_domain_info *info)
+@@ -4456,9 +4468,9 @@ out_free_dmar:
+ 
+ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+ {
+-	struct intel_iommu *iommu = opaque;
++	struct device_domain_info *info = opaque;
+ 
+-	domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
++	domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff);
+ 	return 0;
+ }
+ 
+@@ -4468,12 +4480,13 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
+  * devices, unbinding the driver from any one of them will possibly leave
+  * the others unable to operate.
+  */
+-static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
++static void domain_context_clear(struct device_domain_info *info)
+ {
+-	if (!iommu || !dev || !dev_is_pci(dev))
++	if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
+ 		return;
+ 
+-	pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
++	pci_for_each_dma_alias(to_pci_dev(info->dev),
++			       &domain_context_clear_one_cb, info);
+ }
+ 
+ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
+@@ -4490,14 +4503,13 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
+ 	iommu = info->iommu;
+ 	domain = info->domain;
+ 
+-	if (info->dev) {
++	if (info->dev && !dev_is_real_dma_subdevice(info->dev)) {
+ 		if (dev_is_pci(info->dev) && sm_supported(iommu))
+ 			intel_pasid_tear_down_entry(iommu, info->dev,
+ 					PASID_RID2PASID, false);
+ 
+ 		iommu_disable_dev_iotlb(info);
+-		if (!dev_is_real_dma_subdevice(info->dev))
+-			domain_context_clear(iommu, info->dev);
++		domain_context_clear(info);
+ 		intel_pasid_free_table(info->dev);
+ 	}
+ 
+diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
+index 5b9dfdf743ecd..cb7bd1353f9f0 100644
+--- a/drivers/leds/leds-tlc591xx.c
++++ b/drivers/leds/leds-tlc591xx.c
+@@ -148,16 +148,20 @@ static int
+ tlc591xx_probe(struct i2c_client *client,
+ 	       const struct i2c_device_id *id)
+ {
+-	struct device_node *np = dev_of_node(&client->dev), *child;
++	struct device_node *np, *child;
+ 	struct device *dev = &client->dev;
+ 	const struct tlc591xx *tlc591xx;
+ 	struct tlc591xx_priv *priv;
+ 	int err, count, reg;
+ 
+-	tlc591xx = device_get_match_data(dev);
++	np = dev_of_node(dev);
+ 	if (!np)
+ 		return -ENODEV;
+ 
++	tlc591xx = device_get_match_data(dev);
++	if (!tlc591xx)
++		return -ENODEV;
++
+ 	count = of_get_available_child_count(np);
+ 	if (!count || count > tlc591xx->max_leds)
+ 		return -EINVAL;
+diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
+index 2f9a289ab2456..1adfed1c0619b 100644
+--- a/drivers/leds/leds-turris-omnia.c
++++ b/drivers/leds/leds-turris-omnia.c
+@@ -274,6 +274,7 @@ static const struct i2c_device_id omnia_id[] = {
+ 	{ "omnia", 0 },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(i2c, omnia_id);
+ 
+ static struct i2c_driver omnia_leds_driver = {
+ 	.probe		= omnia_leds_probe,
+diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
+index 14386d0b5f578..c267283b01fda 100644
+--- a/drivers/memory/atmel-ebi.c
++++ b/drivers/memory/atmel-ebi.c
+@@ -600,8 +600,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
+ 				child);
+ 
+ 			ret = atmel_ebi_dev_disable(ebi, child);
+-			if (ret)
++			if (ret) {
++				of_node_put(child);
+ 				return ret;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
+index 89f99b5b64504..d062c2f8250f4 100644
+--- a/drivers/memory/fsl_ifc.c
++++ b/drivers/memory/fsl_ifc.c
+@@ -97,7 +97,6 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev)
+ 	iounmap(ctrl->gregs);
+ 
+ 	dev_set_drvdata(&dev->dev, NULL);
+-	kfree(ctrl);
+ 
+ 	return 0;
+ }
+@@ -209,7 +208,8 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
+ 
+ 	dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
+ 
+-	fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL);
++	fsl_ifc_ctrl_dev = devm_kzalloc(&dev->dev, sizeof(*fsl_ifc_ctrl_dev),
++					GFP_KERNEL);
+ 	if (!fsl_ifc_ctrl_dev)
+ 		return -ENOMEM;
+ 
+@@ -219,8 +219,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
+ 	fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0);
+ 	if (!fsl_ifc_ctrl_dev->gregs) {
+ 		dev_err(&dev->dev, "failed to get memory region\n");
+-		ret = -ENODEV;
+-		goto err;
++		return -ENODEV;
+ 	}
+ 
+ 	if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
+@@ -295,6 +294,7 @@ err_irq:
+ 	free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
+ 	irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
+ err:
++	iounmap(fsl_ifc_ctrl_dev->gregs);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
+index 9c0a284167773..b0b251bb207f3 100644
+--- a/drivers/memory/pl353-smc.c
++++ b/drivers/memory/pl353-smc.c
+@@ -407,6 +407,7 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
+ 		break;
+ 	}
+ 	if (!match) {
++		err = -ENODEV;
+ 		dev_err(&adev->dev, "no matching children\n");
+ 		goto out_clk_disable;
+ 	}
+diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c
+index 4d5758c419c55..ffec26a99313b 100644
+--- a/drivers/memory/stm32-fmc2-ebi.c
++++ b/drivers/memory/stm32-fmc2-ebi.c
+@@ -1048,16 +1048,19 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
+ 		if (ret) {
+ 			dev_err(dev, "could not retrieve reg property: %d\n",
+ 				ret);
++			of_node_put(child);
+ 			return ret;
+ 		}
+ 
+ 		if (bank >= FMC2_MAX_BANKS) {
+ 			dev_err(dev, "invalid reg value: %d\n", bank);
++			of_node_put(child);
+ 			return -EINVAL;
+ 		}
+ 
+ 		if (ebi->bank_assigned & BIT(bank)) {
+ 			dev_err(dev, "bank already assigned: %d\n", bank);
++			of_node_put(child);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -1066,6 +1069,7 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
+ 			if (ret) {
+ 				dev_err(dev, "setup chip select %d failed: %d\n",
+ 					bank, ret);
++				of_node_put(child);
+ 				return ret;
+ 			}
+ 		}
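
The three stm32-fmc2-ebi error paths, like the atmel-ebi one above, plug the same leak: for_each_available_child_of_node() holds a reference on the current child and drops it when advancing, so any return from inside the loop body must drop that reference itself. A schematic kernel-style fragment (the helper name is illustrative):

struct device_node *child;
int ret;

for_each_available_child_of_node(np, child) {
	ret = setup_cs(ebi, child);	/* illustrative helper */
	if (ret) {
		of_node_put(child);	/* the iterator will not run again
					 * to drop this reference */
		return ret;
	}
}
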
+diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
+index 47556d2d9abe2..8ebfc7bbe4e01 100644
+--- a/drivers/mfd/da9052-i2c.c
++++ b/drivers/mfd/da9052-i2c.c
+@@ -113,6 +113,7 @@ static const struct i2c_device_id da9052_i2c_id[] = {
+ 	{"da9053-bc", DA9053_BC},
+ 	{}
+ };
++MODULE_DEVICE_TABLE(i2c, da9052_i2c_id);
+ 
+ #ifdef CONFIG_OF
+ static const struct of_device_id dialog_dt_ids[] = {
+diff --git a/drivers/mfd/motorola-cpcap.c b/drivers/mfd/motorola-cpcap.c
+index 30d82bfe5b02f..6fb206da27298 100644
+--- a/drivers/mfd/motorola-cpcap.c
++++ b/drivers/mfd/motorola-cpcap.c
+@@ -327,6 +327,10 @@ static int cpcap_probe(struct spi_device *spi)
+ 	if (ret)
+ 		return ret;
+ 
++	/* Parent SPI controller uses DMA, CPCAP and child devices do not */
++	spi->dev.coherent_dma_mask = 0;
++	spi->dev.dma_mask = &spi->dev.coherent_dma_mask;
++
+ 	return devm_mfd_add_devices(&spi->dev, 0, cpcap_mfd_devices,
+ 				    ARRAY_SIZE(cpcap_mfd_devices), NULL, 0, NULL);
+ }
+diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
+index 61aa020199f57..cd2f45257dc16 100644
+--- a/drivers/mfd/stmpe-i2c.c
++++ b/drivers/mfd/stmpe-i2c.c
+@@ -109,7 +109,7 @@ static const struct i2c_device_id stmpe_i2c_id[] = {
+ 	{ "stmpe2403", STMPE2403 },
+ 	{ }
+ };
+-MODULE_DEVICE_TABLE(i2c, stmpe_id);
++MODULE_DEVICE_TABLE(i2c, stmpe_i2c_id);
+ 
+ static struct i2c_driver stmpe_i2c_driver = {
+ 	.driver = {
+diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
+index cd402c89189ea..de6d44a158bba 100644
+--- a/drivers/misc/cardreader/alcor_pci.c
++++ b/drivers/misc/cardreader/alcor_pci.c
+@@ -139,7 +139,13 @@ static void alcor_pci_init_check_aspm(struct alcor_pci_priv *priv)
+ 	u32 val32;
+ 
+ 	priv->pdev_cap_off    = alcor_pci_find_cap_offset(priv, priv->pdev);
+-	priv->parent_cap_off = alcor_pci_find_cap_offset(priv,
++	/*
++	 * A device might be attached to root complex directly and
++	 * priv->parent_pdev will be NULL. In this case we don't check its
++	 * capability and disable ASPM completely.
++	 */
++	if (priv->parent_pdev)
++		priv->parent_cap_off = alcor_pci_find_cap_offset(priv,
+ 							 priv->parent_pdev);
+ 
+ 	if ((priv->pdev_cap_off == 0) || (priv->parent_cap_off == 0)) {
+diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
+index 334009e838236..b11e9830422ed 100644
+--- a/drivers/misc/habanalabs/common/device.c
++++ b/drivers/misc/habanalabs/common/device.c
+@@ -1370,8 +1370,9 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
+ 	}
+ 
+ 	/*
+-	 * From this point, in case of an error, add char devices and create
+-	 * sysfs nodes as part of the error flow, to allow debugging.
++	 * From this point, override rc (=0) in case of an error to allow
++	 * debugging (by adding char devices and creating sysfs nodes as part of
++	 * the error flow).
+ 	 */
+ 	add_cdev_sysfs_on_err = true;
+ 
+diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
+index 09706c571e950..7a96c9753dbfb 100644
+--- a/drivers/misc/habanalabs/common/firmware_if.c
++++ b/drivers/misc/habanalabs/common/firmware_if.c
+@@ -803,11 +803,14 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
+ 
+ 	if (!(hdev->fw_loading & FW_TYPE_LINUX)) {
+ 		dev_info(hdev->dev, "Skip loading Linux F/W\n");
++		rc = 0;
+ 		goto out;
+ 	}
+ 
+-	if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
++	if (status == CPU_BOOT_STATUS_SRAM_AVAIL) {
++		rc = 0;
+ 		goto out;
++	}
+ 
+ 	dev_info(hdev->dev,
+ 		"Loading firmware to device, may take some time...\n");
+diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
+index 93c9e5f587e1a..86dfa7c41ee71 100644
+--- a/drivers/misc/habanalabs/common/mmu/mmu.c
++++ b/drivers/misc/habanalabs/common/mmu/mmu.c
+@@ -501,12 +501,20 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
+ 
+ 	if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
+ 			!is_power_of_2(prop->dram_page_size)) {
+-		u32 bit;
++		unsigned long dram_page_size = prop->dram_page_size;
+ 		u64 page_offset_mask;
+ 		u64 phys_addr_mask;
++		u32 bit;
+ 
+-		bit = __ffs64((u64)prop->dram_page_size);
+-		page_offset_mask = ((1ull << bit) - 1);
++		/*
++		 * find last set bit in page_size to cover all bits of page
++		 * offset. note that 1 has to be added to bit index.
++		 * note that the internal ulong variable is used to avoid
++		 * alignment issue.
++		 */
++		bit = find_last_bit(&dram_page_size,
++					sizeof(dram_page_size) * BITS_PER_BYTE) + 1;
++		page_offset_mask = (BIT_ULL(bit) - 1);
+ 		phys_addr_mask = ~page_offset_mask;
+ 		*phys_addr = (tmp_phys_addr & phys_addr_mask) |
+ 				(virt_addr & page_offset_mask);
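
The habanalabs change only matters for non-power-of-two DRAM page sizes: a mask built from __ffs64(page_size) keeps too few bits (20 for a 3 MiB page, so offsets past 1 MiB are truncated), while find_last_bit + 1 keeps enough bits for any offset inside the page. The arithmetic, checked in a small standalone program:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t page_size = 3ULL << 20;	/* 3 MiB: not a power of two */

	/* Old mask: lowest set bit (__ffs64) -> only 20 bits, too short. */
	unsigned int low = (unsigned int)__builtin_ctzll(page_size);
	/* New mask: highest set bit + 1 (find_last_bit + 1) -> 22 bits. */
	unsigned int high = 64u - (unsigned int)__builtin_clzll(page_size);

	printf("ffs-based mask: 0x%" PRIx64 "\n", (UINT64_C(1) << low) - 1);
	printf("fls-based mask: 0x%" PRIx64 "\n", (UINT64_C(1) << high) - 1);
	return 0;
}
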
+diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
+index ecdedd87f8ccf..894056da22cfb 100644
+--- a/drivers/misc/habanalabs/gaudi/gaudi.c
++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
+@@ -2801,7 +2801,7 @@ static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
+ 
+ 		/* Configure RAZWI IRQ */
+ 		mme_id = mme_offset /
+-				(mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0);
++				(mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0) / 2;
+ 
+ 		mme_qm_err_cfg = MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
+ 		if (hdev->stop_on_err) {
+@@ -4901,6 +4901,7 @@ already_pinned:
+ 	return 0;
+ 
+ unpin_memory:
++	list_del(&userptr->job_node);
+ 	hl_unpin_host_memory(hdev, userptr);
+ free_userptr:
+ 	kfree(userptr);
+@@ -8069,8 +8070,10 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
+ 			HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ,
+ 			HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
+ 
+-	if (!hdev->internal_cb_va_base)
++	if (!hdev->internal_cb_va_base) {
++		rc = -ENOMEM;
+ 		goto destroy_internal_cb_pool;
++	}
+ 
+ 	mutex_lock(&ctx->mmu_lock);
+ 	rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base,
+diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
+index ed566c52ccaa0..45c9065c4b922 100644
+--- a/drivers/misc/habanalabs/goya/goya.c
++++ b/drivers/misc/habanalabs/goya/goya.c
+@@ -3249,6 +3249,7 @@ already_pinned:
+ 	return 0;
+ 
+ unpin_memory:
++	list_del(&userptr->job_node);
+ 	hl_unpin_host_memory(hdev, userptr);
+ free_userptr:
+ 	kfree(userptr);
+diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
+index 4edad6c445d37..dc8a06c06c637 100644
+--- a/drivers/misc/ibmasm/module.c
++++ b/drivers/misc/ibmasm/module.c
+@@ -111,7 +111,7 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	result = ibmasm_init_remote_input_dev(sp);
+ 	if (result) {
+ 		dev_err(sp->dev, "Failed to initialize remote queue\n");
+-		goto error_send_message;
++		goto error_init_remote;
+ 	}
+ 
+ 	result = ibmasm_send_driver_vpd(sp);
+@@ -131,8 +131,9 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	return 0;
+ 
+ error_send_message:
+-	disable_sp_interrupts(sp->base_address);
+ 	ibmasm_free_remote_input_dev(sp);
++error_init_remote:
++	disable_sp_interrupts(sp->base_address);
+ 	free_irq(sp->irq, (void *)sp);
+ error_request_irq:
+ 	iounmap(sp->base_address);
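
The ibmasm fix is about goto-unwind ordering: when ibmasm_init_remote_input_dev() fails, jumping to error_send_message would free a remote input device that was never set up, so a dedicated label is added and the unwind steps are reordered to mirror the reverse of setup order. The discipline as a runnable toy:

#include <stdio.h>

static int step(const char *name, int ok)
{
	printf("setup %s: %s\n", name, ok ? "ok" : "failed");
	return ok ? 0 : -1;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

static int init(int fail_at)
{
	if (step("irq", fail_at != 1))
		goto err;
	if (step("remote input", fail_at != 2))
		goto err_undo_irq;
	if (step("vpd send", fail_at != 3))
		goto err_undo_remote;
	return 0;

err_undo_remote:	/* unwind in exact reverse of setup order */
	undo("remote input");
err_undo_irq:
	undo("irq");
err:
	return -1;
}

int main(void)
{
	init(2);	/* remote input fails: only the irq step is undone */
	return 0;
}
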
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index f7ce341bb328f..beb0860230932 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1514,6 +1514,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+ 	struct virtnet_info *vi = sq->vq->vdev->priv;
+ 	unsigned int index = vq2txq(sq->vq);
+ 	struct netdev_queue *txq;
++	int opaque;
++	bool done;
+ 
+ 	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
+ 		/* We don't need to enable cb for XDP */
+@@ -1523,10 +1525,28 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+ 
+ 	txq = netdev_get_tx_queue(vi->dev, index);
+ 	__netif_tx_lock(txq, raw_smp_processor_id());
++	virtqueue_disable_cb(sq->vq);
+ 	free_old_xmit_skbs(sq, true);
++
++	opaque = virtqueue_enable_cb_prepare(sq->vq);
++
++	done = napi_complete_done(napi, 0);
++
++	if (!done)
++		virtqueue_disable_cb(sq->vq);
++
+ 	__netif_tx_unlock(txq);
+ 
+-	virtqueue_napi_complete(napi, sq->vq, 0);
++	if (done) {
++		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
++			if (napi_schedule_prep(napi)) {
++				__netif_tx_lock(txq, raw_smp_processor_id());
++				virtqueue_disable_cb(sq->vq);
++				__netif_tx_unlock(txq);
++				__napi_schedule(napi);
++			}
++		}
++	}
+ 
+ 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ 		netif_tx_wake_queue(txq);
+@@ -3229,8 +3249,11 @@ static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
+ 	virtnet_set_queues(vi, vi->curr_queue_pairs);
+ 
+ 	err = virtnet_cpu_notif_add(vi);
+-	if (err)
++	if (err) {
++		virtnet_freeze_down(vdev);
++		remove_vq_common(vi);
+ 		return err;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 4df4f37e6b895..dedcb7aaf0d82 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1467,7 +1467,6 @@ static void nvmet_tcp_state_change(struct sock *sk)
+ 	case TCP_CLOSE_WAIT:
+ 	case TCP_CLOSE:
+ 		/* FALLTHRU */
+-		sk->sk_user_data = NULL;
+ 		nvmet_tcp_schedule_release_queue(queue);
+ 		break;
+ 	default:
+diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
+index 0cedd1f95f372..ae96bfbb6c832 100644
+--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
++++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
+@@ -39,6 +39,10 @@
+ #define PCIE_APP_IRN_PM_TO_ACK		BIT(9)
+ #define PCIE_APP_IRN_LINK_AUTO_BW_STAT	BIT(11)
+ #define PCIE_APP_IRN_BW_MGT		BIT(12)
++#define PCIE_APP_IRN_INTA		BIT(13)
++#define PCIE_APP_IRN_INTB		BIT(14)
++#define PCIE_APP_IRN_INTC		BIT(15)
++#define PCIE_APP_IRN_INTD		BIT(16)
+ #define PCIE_APP_IRN_MSG_LTR		BIT(18)
+ #define PCIE_APP_IRN_SYS_ERR_RC		BIT(29)
+ #define PCIE_APP_INTX_OFST		12
+@@ -48,10 +52,8 @@
+ 	PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
+ 	PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
+ 	PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
+-	(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTA) | \
+-	(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTB) | \
+-	(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTC) | \
+-	(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTD))
++	PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
++	PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
+ 
+ #define BUS_IATU_OFFSET			SZ_256M
+ #define RESET_INTERVAL_MS		100
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 8dee6d3f33a70..fb1df066a2364 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -1826,7 +1826,7 @@ static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+ 	if (unlikely(irq > 31))
+ 		return -EINVAL;
+ 
+-	appl_writel(pcie, (1 << irq), APPL_MSI_CTRL_1);
++	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
+index da3cd216da007..aefef1986201a 100644
+--- a/drivers/pci/controller/pci-ftpci100.c
++++ b/drivers/pci/controller/pci-ftpci100.c
+@@ -34,12 +34,12 @@
+  * Special configuration registers directly in the first few words
+  * in I/O space.
+  */
+-#define PCI_IOSIZE	0x00
+-#define PCI_PROT	0x04 /* AHB protection */
+-#define PCI_CTRL	0x08 /* PCI control signal */
+-#define PCI_SOFTRST	0x10 /* Soft reset counter and response error enable */
+-#define PCI_CONFIG	0x28 /* PCI configuration command register */
+-#define PCI_DATA	0x2C
++#define FTPCI_IOSIZE	0x00
++#define FTPCI_PROT	0x04 /* AHB protection */
++#define FTPCI_CTRL	0x08 /* PCI control signal */
++#define FTPCI_SOFTRST	0x10 /* Soft reset counter and response error enable */
++#define FTPCI_CONFIG	0x28 /* PCI configuration command register */
++#define FTPCI_DATA	0x2C
+ 
+ #define FARADAY_PCI_STATUS_CMD		0x04 /* Status and command */
+ #define FARADAY_PCI_PMC			0x40 /* Power management control */
+@@ -195,9 +195,9 @@ static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
+ 			PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
+ 			PCI_CONF_WHERE(config) |
+ 			PCI_CONF_ENABLE,
+-			p->base + PCI_CONFIG);
++			p->base + FTPCI_CONFIG);
+ 
+-	*value = readl(p->base + PCI_DATA);
++	*value = readl(p->base + FTPCI_DATA);
+ 
+ 	if (size == 1)
+ 		*value = (*value >> (8 * (config & 3))) & 0xFF;
+@@ -230,17 +230,17 @@ static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
+ 			PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
+ 			PCI_CONF_WHERE(config) |
+ 			PCI_CONF_ENABLE,
+-			p->base + PCI_CONFIG);
++			p->base + FTPCI_CONFIG);
+ 
+ 	switch (size) {
+ 	case 4:
+-		writel(value, p->base + PCI_DATA);
++		writel(value, p->base + FTPCI_DATA);
+ 		break;
+ 	case 2:
+-		writew(value, p->base + PCI_DATA + (config & 3));
++		writew(value, p->base + FTPCI_DATA + (config & 3));
+ 		break;
+ 	case 1:
+-		writeb(value, p->base + PCI_DATA + (config & 3));
++		writeb(value, p->base + FTPCI_DATA + (config & 3));
+ 		break;
+ 	default:
+ 		ret = PCIBIOS_BAD_REGISTER_NUMBER;
+@@ -469,7 +469,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
+ 		if (!faraday_res_to_memcfg(io->start - win->offset,
+ 					   resource_size(io), &val)) {
+ 			/* setup I/O space size */
+-			writel(val, p->base + PCI_IOSIZE);
++			writel(val, p->base + FTPCI_IOSIZE);
+ 		} else {
+ 			dev_err(dev, "illegal IO mem size\n");
+ 			return -EINVAL;
+@@ -477,11 +477,11 @@ static int faraday_pci_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Setup hostbridge */
+-	val = readl(p->base + PCI_CTRL);
++	val = readl(p->base + FTPCI_CTRL);
+ 	val |= PCI_COMMAND_IO;
+ 	val |= PCI_COMMAND_MEMORY;
+ 	val |= PCI_COMMAND_MASTER;
+-	writel(val, p->base + PCI_CTRL);
++	writel(val, p->base + FTPCI_CTRL);
+ 	/* Mask and clear all interrupts */
+ 	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
+ 	if (variant->cascaded_irq) {
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index 7479edf3676c1..fd30f70c7560e 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -444,7 +444,6 @@ enum hv_pcibus_state {
+ 	hv_pcibus_probed,
+ 	hv_pcibus_installed,
+ 	hv_pcibus_removing,
+-	hv_pcibus_removed,
+ 	hv_pcibus_maximum
+ };
+ 
+@@ -3247,8 +3246,9 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
+ 		struct pci_packet teardown_packet;
+ 		u8 buffer[sizeof(struct pci_message)];
+ 	} pkt;
+-	struct hv_dr_state *dr;
+ 	struct hv_pci_compl comp_pkt;
++	struct hv_pci_dev *hpdev, *tmp;
++	unsigned long flags;
+ 	int ret;
+ 
+ 	/*
+@@ -3260,9 +3260,16 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
+ 
+ 	if (!keep_devs) {
+ 		/* Delete any children which might still exist. */
+-		dr = kzalloc(sizeof(*dr), GFP_KERNEL);
+-		if (dr && hv_pci_start_relations_work(hbus, dr))
+-			kfree(dr);
++		spin_lock_irqsave(&hbus->device_list_lock, flags);
++		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) {
++			list_del(&hpdev->list_entry);
++			if (hpdev->pci_slot)
++				pci_destroy_slot(hpdev->pci_slot);
++			/* For the two refs got in new_pcichild_device() */
++			put_pcichild(hpdev);
++			put_pcichild(hpdev);
++		}
++		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+ 	}
+ 
+ 	ret = hv_send_resources_released(hdev);
+@@ -3305,13 +3312,23 @@ static int hv_pci_remove(struct hv_device *hdev)
+ 
+ 	hbus = hv_get_drvdata(hdev);
+ 	if (hbus->state == hv_pcibus_installed) {
++		tasklet_disable(&hdev->channel->callback_event);
++		hbus->state = hv_pcibus_removing;
++		tasklet_enable(&hdev->channel->callback_event);
++		destroy_workqueue(hbus->wq);
++		hbus->wq = NULL;
++		/*
++		 * At this point, no work is running or can be scheduled
++		 * on hbus->wq. We can't race with hv_pci_devices_present()
++		 * or hv_pci_eject_device(), it's safe to proceed.
++		 */
++
+ 		/* Remove the bus from PCI's point of view. */
+ 		pci_lock_rescan_remove();
+ 		pci_stop_root_bus(hbus->pci_bus);
+ 		hv_pci_remove_slots(hbus);
+ 		pci_remove_root_bus(hbus->pci_bus);
+ 		pci_unlock_rescan_remove();
+-		hbus->state = hv_pcibus_removed;
+ 	}
+ 
+ 	ret = hv_pci_bus_exit(hdev, false);
+@@ -3326,7 +3343,6 @@ static int hv_pci_remove(struct hv_device *hdev)
+ 	irq_domain_free_fwnode(hbus->sysdata.fwnode);
+ 	put_hvpcibus(hbus);
+ 	wait_for_completion(&hbus->remove_event);
+-	destroy_workqueue(hbus->wq);
+ 
+ 	hv_put_dom_num(hbus->sysdata.domain);
+ 
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index 8fcabed7c6a67..1a2af963599ca 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -2506,6 +2506,7 @@ static const struct of_device_id tegra_pcie_of_match[] = {
+ 	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
+ 
+ static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
+ {
+diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
+index eede4e8f3f75a..81b4effeb1309 100644
+--- a/drivers/pci/controller/pcie-iproc-msi.c
++++ b/drivers/pci/controller/pcie-iproc-msi.c
+@@ -171,7 +171,7 @@ static struct irq_chip iproc_msi_irq_chip = {
+ 
+ static struct msi_domain_info iproc_msi_domain_info = {
+ 	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+-		MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
++		MSI_FLAG_PCI_MSIX,
+ 	.chip = &iproc_msi_irq_chip,
+ };
+ 
+@@ -250,20 +250,23 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
+ 	struct iproc_msi *msi = domain->host_data;
+ 	int hwirq, i;
+ 
++	if (msi->nr_cpus > 1 && nr_irqs > 1)
++		return -EINVAL;
++
+ 	mutex_lock(&msi->bitmap_lock);
+ 
+-	/* Allocate 'nr_cpus' number of MSI vectors each time */
+-	hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
+-					   msi->nr_cpus, 0);
+-	if (hwirq < msi->nr_msi_vecs) {
+-		bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
+-	} else {
+-		mutex_unlock(&msi->bitmap_lock);
+-		return -ENOSPC;
+-	}
++	/*
++	 * Allocate 'nr_irqs' multiplied by 'nr_cpus' number of MSI vectors
++	 * each time
++	 */
++	hwirq = bitmap_find_free_region(msi->bitmap, msi->nr_msi_vecs,
++					order_base_2(msi->nr_cpus * nr_irqs));
+ 
+ 	mutex_unlock(&msi->bitmap_lock);
+ 
++	if (hwirq < 0)
++		return -ENOSPC;
++
+ 	for (i = 0; i < nr_irqs; i++) {
+ 		irq_domain_set_info(domain, virq + i, hwirq + i,
+ 				    &iproc_msi_bottom_irq_chip,
+@@ -284,7 +287,8 @@ static void iproc_msi_irq_domain_free(struct irq_domain *domain,
+ 	mutex_lock(&msi->bitmap_lock);
+ 
+ 	hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
+-	bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);
++	bitmap_release_region(msi->bitmap, hwirq,
++			      order_base_2(msi->nr_cpus * nr_irqs));
+ 
+ 	mutex_unlock(&msi->bitmap_lock);
+ 
+@@ -539,6 +543,9 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
+ 	mutex_init(&msi->bitmap_lock);
+ 	msi->nr_cpus = num_possible_cpus();
+ 
++	if (msi->nr_cpus == 1)
++		iproc_msi_domain_info.flags |=  MSI_FLAG_MULTI_PCI_MSI;
++
+ 	msi->nr_irqs = of_irq_count(node);
+ 	if (!msi->nr_irqs) {
+ 		dev_err(pcie->dev, "found no MSI GIC interrupt\n");
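
The iproc-msi rework exists because multi-MSI blocks must be naturally aligned powers of two: bitmap_find_next_zero_area() with a fixed nr_cpus length honors neither the alignment nor sizes above nr_cpus, so allocation moves to bitmap_find_free_region() with order_base_2(nr_cpus * nr_irqs), and multi-MSI is only advertised on single-CPU systems. How the order is computed, as a standalone check; order_base_2() is reimplemented here for illustration:

#include <stdio.h>

/* Smallest k such that (1 << k) >= n; mirrors the kernel's order_base_2(). */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int k = 0;

	while ((1u << k) < n)
		k++;
	return k;
}

int main(void)
{
	unsigned int nr_cpus = 4, nr_irqs = 3;
	unsigned int order = order_base_2(nr_cpus * nr_irqs);

	/* 12 vectors round up to a 16-slot, 16-aligned bitmap region. */
	printf("order_base_2(%u) = %u -> %u slots\n",
	       nr_cpus * nr_irqs, order, 1u << order);
	return 0;
}
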
+diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
+index f1d08a1b1591f..78d04ac29cd56 100644
+--- a/drivers/pci/controller/pcie-rockchip-host.c
++++ b/drivers/pci/controller/pcie-rockchip-host.c
+@@ -592,10 +592,6 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
+ 	if (err)
+ 		return err;
+ 
+-	err = rockchip_pcie_setup_irq(rockchip);
+-	if (err)
+-		return err;
+-
+ 	rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+ 	if (IS_ERR(rockchip->vpcie12v)) {
+ 		if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
+@@ -973,8 +969,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
+ 	if (err)
+ 		goto err_vpcie;
+ 
+-	rockchip_pcie_enable_interrupts(rockchip);
+-
+ 	err = rockchip_pcie_init_irq_domain(rockchip);
+ 	if (err < 0)
+ 		goto err_deinit_port;
+@@ -992,6 +986,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
+ 	bridge->sysdata = rockchip;
+ 	bridge->ops = &rockchip_pcie_ops;
+ 
++	err = rockchip_pcie_setup_irq(rockchip);
++	if (err)
++		goto err_remove_irq_domain;
++
++	rockchip_pcie_enable_interrupts(rockchip);
++
+ 	err = pci_host_probe(bridge);
+ 	if (err < 0)
+ 		goto err_remove_irq_domain;
+diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
+index d2a1920bb0557..1c40d2506aef3 100644
+--- a/drivers/pci/ecam.c
++++ b/drivers/pci/ecam.c
+@@ -32,7 +32,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
+ 	struct pci_config_window *cfg;
+ 	unsigned int bus_range, bus_range_max, bsz;
+ 	struct resource *conflict;
+-	int i, err;
++	int err;
+ 
+ 	if (busr->start > busr->end)
+ 		return ERR_PTR(-EINVAL);
+@@ -50,6 +50,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
+ 	cfg->busr.start = busr->start;
+ 	cfg->busr.end = busr->end;
+ 	cfg->busr.flags = IORESOURCE_BUS;
++	cfg->bus_shift = bus_shift;
+ 	bus_range = resource_size(&cfg->busr);
+ 	bus_range_max = resource_size(cfgres) >> bus_shift;
+ 	if (bus_range > bus_range_max) {
+@@ -77,13 +78,6 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
+ 		cfg->winp = kcalloc(bus_range, sizeof(*cfg->winp), GFP_KERNEL);
+ 		if (!cfg->winp)
+ 			goto err_exit_malloc;
+-		for (i = 0; i < bus_range; i++) {
+-			cfg->winp[i] =
+-				pci_remap_cfgspace(cfgres->start + i * bsz,
+-						   bsz);
+-			if (!cfg->winp[i])
+-				goto err_exit_iomap;
+-		}
+ 	} else {
+ 		cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz);
+ 		if (!cfg->win)
+@@ -129,6 +123,44 @@ void pci_ecam_free(struct pci_config_window *cfg)
+ }
+ EXPORT_SYMBOL_GPL(pci_ecam_free);
+ 
++static int pci_ecam_add_bus(struct pci_bus *bus)
++{
++	struct pci_config_window *cfg = bus->sysdata;
++	unsigned int bsz = 1 << cfg->bus_shift;
++	unsigned int busn = bus->number;
++	phys_addr_t start;
++
++	if (!per_bus_mapping)
++		return 0;
++
++	if (busn < cfg->busr.start || busn > cfg->busr.end)
++		return -EINVAL;
++
++	busn -= cfg->busr.start;
++	start = cfg->res.start + busn * bsz;
++
++	cfg->winp[busn] = pci_remap_cfgspace(start, bsz);
++	if (!cfg->winp[busn])
++		return -ENOMEM;
++
++	return 0;
++}
++
++static void pci_ecam_remove_bus(struct pci_bus *bus)
++{
++	struct pci_config_window *cfg = bus->sysdata;
++	unsigned int busn = bus->number;
++
++	if (!per_bus_mapping || busn < cfg->busr.start || busn > cfg->busr.end)
++		return;
++
++	busn -= cfg->busr.start;
++	if (cfg->winp[busn]) {
++		iounmap(cfg->winp[busn]);
++		cfg->winp[busn] = NULL;
++	}
++}
++
+ /*
+  * Function to implement the pci_ops ->map_bus method
+  */
+@@ -167,6 +199,8 @@ EXPORT_SYMBOL_GPL(pci_ecam_map_bus);
+ /* ECAM ops */
+ const struct pci_ecam_ops pci_generic_ecam_ops = {
+ 	.pci_ops	= {
++		.add_bus	= pci_ecam_add_bus,
++		.remove_bus	= pci_ecam_remove_bus,
+ 		.map_bus	= pci_ecam_map_bus,
+ 		.read		= pci_generic_config_read,
+ 		.write		= pci_generic_config_write,
+@@ -178,6 +212,8 @@ EXPORT_SYMBOL_GPL(pci_generic_ecam_ops);
+ /* ECAM ops for 32-bit access only (non-compliant) */
+ const struct pci_ecam_ops pci_32b_ops = {
+ 	.pci_ops	= {
++		.add_bus	= pci_ecam_add_bus,
++		.remove_bus	= pci_ecam_remove_bus,
+ 		.map_bus	= pci_ecam_map_bus,
+ 		.read		= pci_generic_config_read32,
+ 		.write		= pci_generic_config_write32,
+@@ -187,6 +223,8 @@ const struct pci_ecam_ops pci_32b_ops = {
+ /* ECAM ops for 32-bit read only (non-compliant) */
+ const struct pci_ecam_ops pci_32b_read_ops = {
+ 	.pci_ops	= {
++		.add_bus	= pci_ecam_add_bus,
++		.remove_bus	= pci_ecam_remove_bus,
+ 		.map_bus	= pci_ecam_map_bus,
+ 		.read		= pci_generic_config_read32,
+ 		.write		= pci_generic_config_write,
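
Taken together, the ecam.c hunks turn per-bus config-space mapping from eager to lazy: pci_ecam_create() no longer ioremaps a window for every possible bus up front; instead the new .add_bus/.remove_bus callbacks map a bus window when the bus is first enumerated and unmap it on removal, saving virtual address space on systems that need per-bus mappings. The same lazy-slot pattern as a standalone sketch (malloc standing in for pci_remap_cfgspace, names hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 8

static void *slots[NSLOTS];	/* NULL until first use, like cfg->winp[] */

/* map a slot on demand -- mirrors pci_ecam_add_bus() */
static void *slot_get(unsigned int i)
{
	if (i >= NSLOTS)
		return NULL;	/* out of range, like the busn check */
	if (!slots[i])
		slots[i] = calloc(1, 4096);
	return slots[i];
}

/* release a slot -- mirrors pci_ecam_remove_bus() */
static void slot_put(unsigned int i)
{
	if (i < NSLOTS && slots[i]) {
		free(slots[i]);
		slots[i] = NULL;
	}
}

int main(void)
{
	printf("slot 3 mapped at %p\n", slot_get(3));
	slot_put(3);
	return 0;
}
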
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index fb3840e222add..9d06939736c0f 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -563,6 +563,32 @@ void pciehp_power_off_slot(struct controller *ctrl)
+ 		 PCI_EXP_SLTCTL_PWR_OFF);
+ }
+ 
++static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
++					  struct pci_dev *pdev, int irq)
++{
++	/*
++	 * Ignore link changes which occurred while waiting for DPC recovery.
++	 * Could be several if DPC triggered multiple times consecutively.
++	 */
++	synchronize_hardirq(irq);
++	atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
++	if (pciehp_poll_mode)
++		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
++					   PCI_EXP_SLTSTA_DLLSC);
++	ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
++		  slot_name(ctrl));
++
++	/*
++	 * If the link is unexpectedly down after successful recovery,
++	 * the corresponding link change may have been ignored above.
++	 * Synthesize it to ensure that it is acted on.
++	 */
++	down_read(&ctrl->reset_lock);
++	if (!pciehp_check_link_active(ctrl))
++		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
++	up_read(&ctrl->reset_lock);
++}
++
+ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ {
+ 	struct controller *ctrl = (struct controller *)dev_id;
+@@ -706,6 +732,16 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
+ 				      PCI_EXP_SLTCTL_ATTN_IND_ON);
+ 	}
+ 
++	/*
++	 * Ignore Link Down/Up events caused by Downstream Port Containment
++	 * if recovery from the error succeeded.
++	 */
++	if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
++	    ctrl->state == ON_STATE) {
++		events &= ~PCI_EXP_SLTSTA_DLLSC;
++		pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
++	}
++
+ 	/*
+ 	 * Disable requests have higher priority than Presence Detect Changed
+ 	 * or Data Link Layer State Changed events.
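
The pciehp side of the DPC work is a flush-clear-recheck sequence: synchronize_hardirq() waits out any in-flight ISR so no new DLLSC bit can be recorded behind our back, atomic_and() discards the stale events, and the link is then re-read under the reset lock, synthesizing a fresh event if it is genuinely still down. A compact model of that sequence with C11 atomics (hypothetical names; the hardirq flush is only indicated by a comment):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define EVT_DLLSC 0x1

static atomic_uint pending_events;

static bool link_active(void)
{
	return false;		/* pretend the link really stayed down */
}

static void ignore_stale_link_events(void)
{
	/* 1. in the kernel: synchronize_hardirq(irq) flushes the ISR */
	/* 2. drop events recorded while DPC recovery was in progress */
	atomic_fetch_and(&pending_events, ~EVT_DLLSC);
	/* 3. re-check; if the link is genuinely down, synthesize the event */
	if (!link_active())
		atomic_fetch_or(&pending_events, EVT_DLLSC);
}

int main(void)
{
	atomic_store(&pending_events, EVT_DLLSC);
	ignore_stale_link_events();
	printf("pending = 0x%x\n", atomic_load(&pending_events));
	return 0;
}
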
+diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
+index 1963826303631..c49c13a5fedc7 100644
+--- a/drivers/pci/p2pdma.c
++++ b/drivers/pci/p2pdma.c
+@@ -308,10 +308,41 @@ static const struct pci_p2pdma_whitelist_entry {
+ 	{}
+ };
+ 
++/*
++ * This lookup function tries to find the PCI device corresponding to a given
++ * host bridge.
++ *
++ * It assumes the host bridge device is the first PCI device in the
++ * bus->devices list and that the devfn is 00.0. These assumptions should hold
++ * for all the devices in the whitelist above.
++ *
++ * This function is equivalent to pci_get_slot(host->bus, 0), but it does
++ * not take the pci_bus_sem lock, because __host_bridge_whitelist() must not
++ * sleep.
++ *
++ * For this to be safe, the caller should hold a reference to a device on the
++ * bridge, which should ensure the host_bridge device will not be freed
++ * or removed from the head of the devices list.
++ */
++static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
++{
++	struct pci_dev *root;
++
++	root = list_first_entry_or_null(&host->bus->devices,
++					struct pci_dev, bus_list);
++
++	if (!root)
++		return NULL;
++	if (root->devfn != PCI_DEVFN(0, 0))
++		return NULL;
++
++	return root;
++}
++
+ static bool __host_bridge_whitelist(struct pci_host_bridge *host,
+ 				    bool same_host_bridge)
+ {
+-	struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
++	struct pci_dev *root = pci_host_bridge_dev(host);
+ 	const struct pci_p2pdma_whitelist_entry *entry;
+ 	unsigned short vendor, device;
+ 
+@@ -320,7 +351,6 @@ static bool __host_bridge_whitelist(struct pci_host_bridge *host,
+ 
+ 	vendor = root->vendor;
+ 	device = root->device;
+-	pci_dev_put(root);
+ 
+ 	for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
+ 		if (vendor != entry->vendor || device != entry->device)
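
pci_get_slot() takes pci_bus_sem and may sleep, which __host_bridge_whitelist() cannot; the replacement peeks at the first entry of bus->devices without the lock, relying on the caller pinning a device on that bridge so the head entry cannot be freed. The underlying helper is list_first_entry_or_null(); the first-or-null idiom on a minimal intrusive list (reduced hypothetical types):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct pci_dev_stub {
	unsigned int devfn;
	struct list_head bus_list;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* first entry, or NULL if the list is empty -- list_first_entry_or_null() */
static struct pci_dev_stub *first_dev_or_null(struct list_head *head)
{
	if (head->next == head)
		return NULL;
	return container_of(head->next, struct pci_dev_stub, bus_list);
}

int main(void)
{
	struct list_head bus = { &bus, &bus };		/* empty list */
	struct pci_dev_stub d = { .devfn = 0 };

	printf("empty bus: %p\n", (void *)first_dev_or_null(&bus));

	bus.next = bus.prev = &d.bus_list;		/* add one device */
	d.bus_list.next = d.bus_list.prev = &bus;
	printf("first devfn: %u\n", first_dev_or_null(&bus)->devfn);
	return 0;
}
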
+diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
+index 781e45cf60d1c..cd84cf52a92e1 100644
+--- a/drivers/pci/pci-label.c
++++ b/drivers/pci/pci-label.c
+@@ -162,7 +162,7 @@ static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
+ 	len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
+ 			      obj->buffer.length,
+ 			      UTF16_LITTLE_ENDIAN,
+-			      buf, PAGE_SIZE);
++			      buf, PAGE_SIZE - 1);
+ 	buf[len] = '\n';
+ }
+ 
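
The pci-label change is a one-byte bound fix: utf16s_to_utf8s() may legitimately fill every byte it is offered, and the caller then appends '\n' at buf[len], so the conversion must be capped at PAGE_SIZE - 1 or the newline lands one byte past the page. The invariant in miniature (fill() is a hypothetical stand-in for the converter):

#include <stdio.h>
#include <string.h>

#define BUF_SZ 16

/* copy at most 'limit' bytes of src; returns the number written */
static size_t fill(char *dst, size_t limit, const char *src)
{
	size_t n = strnlen(src, limit);

	memcpy(dst, src, n);
	return n;
}

int main(void)
{
	char buf[BUF_SZ];
	/* pass BUF_SZ - 1, not BUF_SZ: buf[len] below must stay in bounds */
	size_t len = fill(buf, sizeof(buf) - 1, "0123456789abcdefXYZ");

	buf[len] = '\n';
	fwrite(buf, 1, len + 1, stdout);
	return 0;
}
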
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 9684b468267f2..e5ae5e8604310 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -392,6 +392,8 @@ static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
+ 
+ /* pci_dev priv_flags */
+ #define PCI_DEV_ADDED 0
++#define PCI_DPC_RECOVERED 1
++#define PCI_DPC_RECOVERING 2
+ 
+ static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
+ {
+@@ -446,10 +448,12 @@ void pci_restore_dpc_state(struct pci_dev *dev);
+ void pci_dpc_init(struct pci_dev *pdev);
+ void dpc_process_error(struct pci_dev *pdev);
+ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
++bool pci_dpc_recovered(struct pci_dev *pdev);
+ #else
+ static inline void pci_save_dpc_state(struct pci_dev *dev) {}
+ static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
+ static inline void pci_dpc_init(struct pci_dev *pdev) {}
++static inline bool pci_dpc_recovered(struct pci_dev *pdev) { return false; }
+ #endif
+ 
+ #ifdef CONFIG_PCIEPORTBUS
+diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
+index e05aba86a3179..c556e7beafe38 100644
+--- a/drivers/pci/pcie/dpc.c
++++ b/drivers/pci/pcie/dpc.c
+@@ -71,6 +71,58 @@ void pci_restore_dpc_state(struct pci_dev *dev)
+ 	pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap);
+ }
+ 
++static DECLARE_WAIT_QUEUE_HEAD(dpc_completed_waitqueue);
++
++#ifdef CONFIG_HOTPLUG_PCI_PCIE
++static bool dpc_completed(struct pci_dev *pdev)
++{
++	u16 status;
++
++	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
++	if ((status != 0xffff) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
++		return false;
++
++	if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags))
++		return false;
++
++	return true;
++}
++
++/**
++ * pci_dpc_recovered - whether DPC triggered and has recovered successfully
++ * @pdev: PCI device
++ *
++ * Return true if DPC was triggered for @pdev and has recovered successfully.
++ * Wait for recovery if it hasn't completed yet.  Called from the PCIe hotplug
++ * driver to recognize and ignore Link Down/Up events caused by DPC.
++ */
++bool pci_dpc_recovered(struct pci_dev *pdev)
++{
++	struct pci_host_bridge *host;
++
++	if (!pdev->dpc_cap)
++		return false;
++
++	/*
++	 * Synchronization between hotplug and DPC is not supported
++	 * if DPC is owned by firmware and EDR is not enabled.
++	 */
++	host = pci_find_host_bridge(pdev->bus);
++	if (!host->native_dpc && !IS_ENABLED(CONFIG_PCIE_EDR))
++		return false;
++
++	/*
++	 * Need a timeout in case DPC never completes due to failure of
++	 * dpc_wait_rp_inactive().  The spec doesn't mandate a time limit,
++	 * but reports indicate that DPC completes within 4 seconds.
++	 */
++	wait_event_timeout(dpc_completed_waitqueue, dpc_completed(pdev),
++			   msecs_to_jiffies(4000));
++
++	return test_and_clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
++}
++#endif /* CONFIG_HOTPLUG_PCI_PCIE */
++
+ static int dpc_wait_rp_inactive(struct pci_dev *pdev)
+ {
+ 	unsigned long timeout = jiffies + HZ;
+@@ -91,8 +143,11 @@ static int dpc_wait_rp_inactive(struct pci_dev *pdev)
+ 
+ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
+ {
++	pci_ers_result_t ret;
+ 	u16 cap;
+ 
++	set_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
++
+ 	/*
+ 	 * DPC disables the Link automatically in hardware, so it has
+ 	 * already been reset by the time we get here.
+@@ -106,18 +161,27 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
+ 	if (!pcie_wait_for_link(pdev, false))
+ 		pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");
+ 
+-	if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
+-		return PCI_ERS_RESULT_DISCONNECT;
++	if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) {
++		clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
++		ret = PCI_ERS_RESULT_DISCONNECT;
++		goto out;
++	}
+ 
+ 	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
+ 			      PCI_EXP_DPC_STATUS_TRIGGER);
+ 
+ 	if (!pcie_wait_for_link(pdev, true)) {
+ 		pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n");
+-		return PCI_ERS_RESULT_DISCONNECT;
++		clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
++		ret = PCI_ERS_RESULT_DISCONNECT;
++	} else {
++		set_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
++		ret = PCI_ERS_RESULT_RECOVERED;
+ 	}
+-
+-	return PCI_ERS_RESULT_RECOVERED;
++out:
++	clear_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
++	wake_up_all(&dpc_completed_waitqueue);
++	return ret;
+ }
+ 
+ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
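
dpc.c now publishes a recovery handshake: dpc_reset_link() marks PCI_DPC_RECOVERING for the duration of the reset, records the outcome in PCI_DPC_RECOVERED, and wakes dpc_completed_waitqueue; pci_dpc_recovered() sleeps until the handshake finishes (bounded by the 4-second timeout above) and consumes the result with test_and_clear_bit(). The same handshake as a minimal pthread sketch (timeout omitted for brevity):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static bool recovering = true;	/* set before the reset is kicked off */
static bool recovered;

/* DPC side: do the reset, record the outcome, wake all waiters */
static void *reset_link(void *arg)
{
	(void)arg;
	/* ... perform the actual link reset here ... */
	pthread_mutex_lock(&lock);
	recovered  = true;		/* false on failure */
	recovering = false;
	pthread_cond_broadcast(&done);	/* wake_up_all() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* hotplug side: wait for completion, then consume the result exactly once */
static bool dpc_recovered(void)
{
	bool ret;

	pthread_mutex_lock(&lock);
	while (recovering)		/* wait_event(); no timeout here */
		pthread_cond_wait(&done, &lock);
	ret = recovered;
	recovered = false;		/* test_and_clear_bit() */
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reset_link, NULL);
	printf("recovered: %d\n", dpc_recovered());
	pthread_join(t, NULL);
	return 0;
}
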
+diff --git a/drivers/phy/intel/phy-intel-keembay-emmc.c b/drivers/phy/intel/phy-intel-keembay-emmc.c
+index eb7c635ed89ae..0eb11ac7c2e2e 100644
+--- a/drivers/phy/intel/phy-intel-keembay-emmc.c
++++ b/drivers/phy/intel/phy-intel-keembay-emmc.c
+@@ -95,7 +95,8 @@ static int keembay_emmc_phy_power(struct phy *phy, bool on_off)
+ 	else
+ 		freqsel = 0x0;
+ 
+-	if (mhz < 50 || mhz > 200)
++	/* Check for EMMC clock rate */
++	if (mhz > 175)
+ 		dev_warn(&phy->dev, "Unsupported rate: %d MHz\n", mhz);
+ 
+ 	/*
+diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
+index c5067eb753706..1c5af2fef1423 100644
+--- a/drivers/power/reset/gpio-poweroff.c
++++ b/drivers/power/reset/gpio-poweroff.c
+@@ -90,6 +90,7 @@ static const struct of_device_id of_gpio_poweroff_match[] = {
+ 	{ .compatible = "gpio-poweroff", },
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, of_gpio_poweroff_match);
+ 
+ static struct platform_driver gpio_poweroff_driver = {
+ 	.probe = gpio_poweroff_probe,
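
This is the first of many identical one-liners in this patch (regulator-poweroff, the ab8500 drivers, charger-manager, sc2731, sc27xx, brcmstb-reset and rt5033 below): without MODULE_DEVICE_TABLE(of, ...) the OF match table is invisible to modpost, no modalias is generated, and udev cannot autoload the module when a matching device-tree node turns up, so the driver only works built-in or hand-loaded. Roughly what the macro does, as a simplified sketch (not the exact definition from include/linux/module.h; the compatible string here is hypothetical):

#include <stdio.h>

struct of_device_id { const char *compatible; };	/* reduced stub */

/* simplified: alias the table into a symbol modpost scans for aliases */
#define MODULE_DEVICE_TABLE(type, name)					\
	extern typeof(name) __mod_##type##__##name##_device_table	\
	__attribute__((unused, alias(#name)))

/* the pattern each hunk adds: */
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foo" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

int main(void)
{
	printf("aliased table entry: %s\n", foo_of_match[0].compatible);
	return 0;
}
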
+diff --git a/drivers/power/reset/regulator-poweroff.c b/drivers/power/reset/regulator-poweroff.c
+index f697088e0ad1a..20701203935f0 100644
+--- a/drivers/power/reset/regulator-poweroff.c
++++ b/drivers/power/reset/regulator-poweroff.c
+@@ -64,6 +64,7 @@ static const struct of_device_id of_regulator_poweroff_match[] = {
+ 	{ .compatible = "regulator-poweroff", },
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, of_regulator_poweroff_match);
+ 
+ static struct platform_driver regulator_poweroff_driver = {
+ 	.probe = regulator_poweroff_probe,
+diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
+index 006b95eca6739..5207beea9d232 100644
+--- a/drivers/power/supply/Kconfig
++++ b/drivers/power/supply/Kconfig
+@@ -712,7 +712,8 @@ config BATTERY_GOLDFISH
+ 
+ config BATTERY_RT5033
+ 	tristate "RT5033 fuel gauge support"
+-	depends on MFD_RT5033
++	depends on I2C
++	select REGMAP_I2C
+ 	help
+ 	  This adds support for battery fuel gauge in Richtek RT5033 PMIC.
+ 	  The fuelgauge calculates and determines the battery state of charge
+diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
+index d20345386b1e6..69f67edf83c55 100644
+--- a/drivers/power/supply/ab8500_btemp.c
++++ b/drivers/power/supply/ab8500_btemp.c
+@@ -1135,6 +1135,7 @@ static const struct of_device_id ab8500_btemp_match[] = {
+ 	{ .compatible = "stericsson,ab8500-btemp", },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, ab8500_btemp_match);
+ 
+ static struct platform_driver ab8500_btemp_driver = {
+ 	.probe = ab8500_btemp_probe,
+diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
+index ac77c8882d17a..163d6098e8e0f 100644
+--- a/drivers/power/supply/ab8500_charger.c
++++ b/drivers/power/supply/ab8500_charger.c
+@@ -413,6 +413,14 @@ disable_otp:
+ static void ab8500_power_supply_changed(struct ab8500_charger *di,
+ 					struct power_supply *psy)
+ {
++	/*
++	 * This happens if we get notifications or interrupts and
++	 * the platform has been configured not to support one or the
++	 * other type of charging.
++	 */
++	if (!psy)
++		return;
++
+ 	if (di->autopower_cfg) {
+ 		if (!di->usb.charger_connected &&
+ 		    !di->ac.charger_connected &&
+@@ -439,7 +447,15 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
+ 		if (!connected)
+ 			di->flags.vbus_drop_end = false;
+ 
+-		sysfs_notify(&di->usb_chg.psy->dev.kobj, NULL, "present");
++		/*
++		 * Sometimes the platform is configured not to support
++		 * USB charging and no psy has been created, but we still
++		 * will get these notifications.
++		 */
++		if (di->usb_chg.psy) {
++			sysfs_notify(&di->usb_chg.psy->dev.kobj, NULL,
++				     "present");
++		}
+ 
+ 		if (connected) {
+ 			mutex_lock(&di->charger_attached_mutex);
+@@ -3651,6 +3667,7 @@ static const struct of_device_id ab8500_charger_match[] = {
+ 	{ .compatible = "stericsson,ab8500-charger", },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, ab8500_charger_match);
+ 
+ static struct platform_driver ab8500_charger_driver = {
+ 	.probe = ab8500_charger_probe,
+diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
+index 06ff42c71f241..2fe41091fa03f 100644
+--- a/drivers/power/supply/ab8500_fg.c
++++ b/drivers/power/supply/ab8500_fg.c
+@@ -3218,6 +3218,7 @@ static const struct of_device_id ab8500_fg_match[] = {
+ 	{ .compatible = "stericsson,ab8500-fg", },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, ab8500_fg_match);
+ 
+ static struct platform_driver ab8500_fg_driver = {
+ 	.probe = ab8500_fg_probe,
+diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
+index 39e16ecb76386..37af0e216bc3a 100644
+--- a/drivers/power/supply/axp288_fuel_gauge.c
++++ b/drivers/power/supply/axp288_fuel_gauge.c
+@@ -723,15 +723,6 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "MEEGOPAD T02"),
+ 		},
+ 	},
+-	{
+-		/* Meegopad T08 */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+-			DMI_MATCH(DMI_BOARD_VENDOR, "To be filled by OEM."),
+-			DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+-			DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
+-		},
+-	},
+ 	{	/* Mele PCG03 Mini PC */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"),
+@@ -745,6 +736,15 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+ 		}
+ 	},
++	{
++		/* Various Ace PC/Meegopad/MinisForum/Wintel Mini-PCs/HDMI-sticks */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
++			DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_BIOS_VERSION, "5.11"),
++		},
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
+index 4dea8ecd70bca..331803c221da8 100644
+--- a/drivers/power/supply/charger-manager.c
++++ b/drivers/power/supply/charger-manager.c
+@@ -1279,6 +1279,7 @@ static const struct of_device_id charger_manager_match[] = {
+ 	},
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, charger_manager_match);
+ 
+ static struct charger_desc *of_cm_parse_desc(struct device *dev)
+ {
+diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
+index 79d4b5988360a..8117ecabe31cb 100644
+--- a/drivers/power/supply/max17042_battery.c
++++ b/drivers/power/supply/max17042_battery.c
+@@ -1104,7 +1104,7 @@ static int max17042_probe(struct i2c_client *client,
+ 	}
+ 
+ 	if (client->irq) {
+-		unsigned int flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
++		unsigned int flags = IRQF_ONESHOT;
+ 
+ 		/*
+ 		 * On ACPI systems the IRQ may be handled by ACPI-event code,
+diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c
+index f330452341f02..9ad0afe83d1b7 100644
+--- a/drivers/power/supply/rt5033_battery.c
++++ b/drivers/power/supply/rt5033_battery.c
+@@ -164,9 +164,16 @@ static const struct i2c_device_id rt5033_battery_id[] = {
+ };
+ MODULE_DEVICE_TABLE(i2c, rt5033_battery_id);
+ 
++static const struct of_device_id rt5033_battery_of_match[] = {
++	{ .compatible = "richtek,rt5033-battery", },
++	{ }
++};
++MODULE_DEVICE_TABLE(of, rt5033_battery_of_match);
++
+ static struct i2c_driver rt5033_battery_driver = {
+ 	.driver = {
+ 		.name = "rt5033-battery",
++		.of_match_table = rt5033_battery_of_match,
+ 	},
+ 	.probe = rt5033_battery_probe,
+ 	.remove = rt5033_battery_remove,
+diff --git a/drivers/power/supply/sc2731_charger.c b/drivers/power/supply/sc2731_charger.c
+index 335cb857ef307..288b79836c139 100644
+--- a/drivers/power/supply/sc2731_charger.c
++++ b/drivers/power/supply/sc2731_charger.c
+@@ -524,6 +524,7 @@ static const struct of_device_id sc2731_charger_of_match[] = {
+ 	{ .compatible = "sprd,sc2731-charger", },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, sc2731_charger_of_match);
+ 
+ static struct platform_driver sc2731_charger_driver = {
+ 	.driver = {
+diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
+index 9c627618c2249..1ae8374e1cebe 100644
+--- a/drivers/power/supply/sc27xx_fuel_gauge.c
++++ b/drivers/power/supply/sc27xx_fuel_gauge.c
+@@ -1342,6 +1342,7 @@ static const struct of_device_id sc27xx_fgu_of_match[] = {
+ 	{ .compatible = "sprd,sc2731-fgu", },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, sc27xx_fgu_of_match);
+ 
+ static struct platform_driver sc27xx_fgu_driver = {
+ 	.probe = sc27xx_fgu_probe,
+diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
+index 6faf5b5a55842..37200850cdc6d 100644
+--- a/drivers/pwm/pwm-img.c
++++ b/drivers/pwm/pwm-img.c
+@@ -156,7 +156,7 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(chip->dev);
++	ret = pm_runtime_resume_and_get(chip->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
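
The pwm-img change swaps pm_runtime_get_sync() for pm_runtime_resume_and_get(): get_sync() increments the usage count even when the resume fails, so the old "if (ret < 0) return ret;" leaked a reference on every failure, while resume_and_get() drops the count itself before returning the error. A toy model of the difference (names mirror the two helpers; the refcount is just an int):

#include <stdio.h>

static int usage_count;

static int resume_hw(void)
{
	return -1;			/* pretend the resume fails */
}

/* get_sync(): the count is bumped even on failure -> caller must put() */
static int get_sync(void)
{
	usage_count++;
	return resume_hw();
}

/* resume_and_get(): balances the count itself when the resume fails */
static int resume_and_get(void)
{
	int ret = get_sync();

	if (ret < 0)
		usage_count--;
	return ret;
}

int main(void)
{
	if (resume_and_get() < 0)
		printf("resume failed, count balanced at %d\n", usage_count);
	return 0;
}
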
+diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
+index 727e0d3e249ea..cd48136e5a53b 100644
+--- a/drivers/pwm/pwm-imx1.c
++++ b/drivers/pwm/pwm-imx1.c
+@@ -169,8 +169,6 @@ static int pwm_imx1_remove(struct platform_device *pdev)
+ {
+ 	struct pwm_imx1_chip *imx = platform_get_drvdata(pdev);
+ 
+-	pwm_imx1_clk_disable_unprepare(&imx->chip);
+-
+ 	return pwmchip_remove(&imx->chip);
+ }
+ 
+diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
+index f63b54aae1b48..7467e03d2fb53 100644
+--- a/drivers/pwm/pwm-spear.c
++++ b/drivers/pwm/pwm-spear.c
+@@ -229,10 +229,6 @@ static int spear_pwm_probe(struct platform_device *pdev)
+ static int spear_pwm_remove(struct platform_device *pdev)
+ {
+ 	struct spear_pwm_chip *pc = platform_get_drvdata(pdev);
+-	int i;
+-
+-	for (i = 0; i < NUM_PWM; i++)
+-		pwm_disable(&pc->chip.pwms[i]);
+ 
+ 	/* clk was prepared in probe, hence unprepare it here */
+ 	clk_unprepare(pc->clk);
+diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
+index 55bc63d5a0ae1..6d8e324864fa7 100644
+--- a/drivers/pwm/pwm-tegra.c
++++ b/drivers/pwm/pwm-tegra.c
+@@ -301,7 +301,6 @@ static int tegra_pwm_probe(struct platform_device *pdev)
+ static int tegra_pwm_remove(struct platform_device *pdev)
+ {
+ 	struct tegra_pwm_chip *pc = platform_get_drvdata(pdev);
+-	unsigned int i;
+ 	int err;
+ 
+ 	if (WARN_ON(!pc))
+@@ -311,18 +310,6 @@ static int tegra_pwm_remove(struct platform_device *pdev)
+ 	if (err < 0)
+ 		return err;
+ 
+-	for (i = 0; i < pc->chip.npwm; i++) {
+-		struct pwm_device *pwm = &pc->chip.pwms[i];
+-
+-		if (!pwm_is_enabled(pwm))
+-			if (clk_prepare_enable(pc->clk) < 0)
+-				continue;
+-
+-		pwm_writel(pc, i, 0);
+-
+-		clk_disable_unprepare(pc->clk);
+-	}
+-
+ 	reset_control_assert(pc->rst);
+ 	clk_disable_unprepare(pc->clk);
+ 
+diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
+index b19ea3057bde4..ff92ed25d8b0a 100644
+--- a/drivers/remoteproc/remoteproc_cdev.c
++++ b/drivers/remoteproc/remoteproc_cdev.c
+@@ -111,7 +111,7 @@ int rproc_char_device_add(struct rproc *rproc)
+ 
+ void rproc_char_device_remove(struct rproc *rproc)
+ {
+-	__unregister_chrdev(MAJOR(rproc->dev.devt), rproc->index, 1, "remoteproc");
++	cdev_del(&rproc->cdev);
+ }
+ 
+ void __init rproc_init_cdev(void)
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index ab150765d1243..d9d2a240dd589 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -2357,7 +2357,6 @@ int rproc_del(struct rproc *rproc)
+ 	mutex_unlock(&rproc->lock);
+ 
+ 	rproc_delete_debug_dir(rproc);
+-	rproc_char_device_remove(rproc);
+ 
+ 	/* the rproc is downref'ed as soon as it's removed from the klist */
+ 	mutex_lock(&rproc_list_mutex);
+@@ -2368,6 +2367,7 @@ int rproc_del(struct rproc *rproc)
+ 	synchronize_rcu();
+ 
+ 	device_del(&rproc->dev);
++	rproc_char_device_remove(rproc);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+index 62b5a4c294562..914db1d1f6381 100644
+--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+@@ -1272,9 +1272,9 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
+ 
+ 	core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
+ 	if (IS_ERR(core->tsp)) {
++		ret = PTR_ERR(core->tsp);
+ 		dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+ 			ret);
+-		ret = PTR_ERR(core->tsp);
+ 		goto err;
+ 	}
+ 
+diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
+index 4171c6f763858..c5dc1503de790 100644
+--- a/drivers/reset/Kconfig
++++ b/drivers/reset/Kconfig
+@@ -59,7 +59,8 @@ config RESET_BRCMSTB
+ config RESET_BRCMSTB_RESCAL
+ 	bool "Broadcom STB RESCAL reset controller"
+ 	depends on HAS_IOMEM
+-	default ARCH_BRCMSTB || COMPILE_TEST
++	depends on ARCH_BRCMSTB || COMPILE_TEST
++	default ARCH_BRCMSTB
+ 	help
+ 	  This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
+ 	  BCM7216.
+@@ -82,6 +83,7 @@ config RESET_IMX7
+ 
+ config RESET_INTEL_GW
+ 	bool "Intel Reset Controller Driver"
++	depends on X86 || COMPILE_TEST
+ 	depends on OF && HAS_IOMEM
+ 	select REGMAP_MMIO
+ 	help
+diff --git a/drivers/reset/core.c b/drivers/reset/core.c
+index dbf881b586d9d..fd7e1a3525f45 100644
+--- a/drivers/reset/core.c
++++ b/drivers/reset/core.c
+@@ -640,7 +640,10 @@ static struct reset_control *__reset_control_get_internal(
+ 	if (!rstc)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	try_module_get(rcdev->owner);
++	if (!try_module_get(rcdev->owner)) {
++		kfree(rstc);
++		return ERR_PTR(-ENODEV);
++	}
+ 
+ 	rstc->rcdev = rcdev;
+ 	list_add(&rstc->list, &rcdev->reset_control_head);
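
reset/core.c previously ignored the return value of try_module_get(), which legitimately fails while the providing module is mid-unload; the new code checks it and frees the half-built reset_control instead of handing out a reference to a vanishing owner. The shape of the fix, standalone (try_get() is a stand-in for try_module_get()):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct module;

static bool try_get(struct module *m)
{
	return m != NULL;		/* stand-in: NULL means "unloading" */
}

struct ctl {
	struct module *owner;
};

/* pin the owner first; unwind the allocation if that fails */
static struct ctl *ctl_create(struct module *owner)
{
	struct ctl *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	if (!try_get(owner)) {		/* the check the patch adds */
		free(c);		/* ... and the unwind */
		return NULL;
	}
	c->owner = owner;
	return c;
}

int main(void)
{
	printf("create with dying owner: %p\n", (void *)ctl_create(NULL));
	return 0;
}
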
+diff --git a/drivers/reset/reset-a10sr.c b/drivers/reset/reset-a10sr.c
+index 7eacc89382f8a..99b3bc8382f35 100644
+--- a/drivers/reset/reset-a10sr.c
++++ b/drivers/reset/reset-a10sr.c
+@@ -118,6 +118,7 @@ static struct platform_driver a10sr_reset_driver = {
+ 	.probe	= a10sr_reset_probe,
+ 	.driver = {
+ 		.name		= "altr_a10sr_reset",
++		.of_match_table	= a10sr_reset_of_match,
+ 	},
+ };
+ module_platform_driver(a10sr_reset_driver);
+diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c
+index f213264c8567b..42c9d5241c530 100644
+--- a/drivers/reset/reset-brcmstb.c
++++ b/drivers/reset/reset-brcmstb.c
+@@ -111,6 +111,7 @@ static const struct of_device_id brcmstb_reset_of_match[] = {
+ 	{ .compatible = "brcm,brcmstb-reset" },
+ 	{ /* sentinel */ }
+ };
++MODULE_DEVICE_TABLE(of, brcmstb_reset_of_match);
+ 
+ static struct platform_driver brcmstb_reset_driver = {
+ 	.probe	= brcmstb_reset_probe,
+diff --git a/drivers/rtc/proc.c b/drivers/rtc/proc.c
+index 73344598fc1be..cbcdbb19d848e 100644
+--- a/drivers/rtc/proc.c
++++ b/drivers/rtc/proc.c
+@@ -23,8 +23,8 @@ static bool is_rtc_hctosys(struct rtc_device *rtc)
+ 	int size;
+ 	char name[NAME_SIZE];
+ 
+-	size = scnprintf(name, NAME_SIZE, "rtc%d", rtc->id);
+-	if (size > NAME_SIZE)
++	size = snprintf(name, NAME_SIZE, "rtc%d", rtc->id);
++	if (size >= NAME_SIZE)
+ 		return false;
+ 
+ 	return !strncmp(name, CONFIG_RTC_HCTOSYS_DEVICE, NAME_SIZE);
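
The rtc fix turns on the return-value semantics of the two formatters: scnprintf() returns the bytes actually written, which is always strictly less than NAME_SIZE, so the old "size > NAME_SIZE" test was dead code and truncated names slipped through; snprintf() returns the length the output would have needed, making "size >= NAME_SIZE" a real truncation check. Demonstrated in plain C:

#include <stdio.h>

#define NAME_SIZE 8

int main(void)
{
	char name[NAME_SIZE];
	/* snprintf() reports the length the string *would* have had,
	 * so size >= NAME_SIZE reliably signals truncation */
	int size = snprintf(name, NAME_SIZE, "rtc%d", 123456);

	if (size >= NAME_SIZE)
		printf("truncated: wanted %d bytes, have %d\n",
		       size + 1, NAME_SIZE);
	return 0;
}
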
+diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
+index 047f812d1a1c8..71ed1bf155984 100644
+--- a/drivers/s390/char/sclp_vt220.c
++++ b/drivers/s390/char/sclp_vt220.c
+@@ -35,8 +35,8 @@
+ #define SCLP_VT220_MINOR		65
+ #define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
+ #define SCLP_VT220_DEVICE_NAME		"ttysclp"
+-#define SCLP_VT220_CONSOLE_NAME		"ttyS"
+-#define SCLP_VT220_CONSOLE_INDEX	1	/* console=ttyS1 */
++#define SCLP_VT220_CONSOLE_NAME		"ttysclp"
++#define SCLP_VT220_CONSOLE_INDEX	0	/* console=ttysclp0 */
+ 
+ /* Representation of a single write request */
+ struct sclp_vt220_request {
+diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
+index 8d9662e8b7179..3c7f5ecf5511d 100644
+--- a/drivers/s390/scsi/zfcp_sysfs.c
++++ b/drivers/s390/scsi/zfcp_sysfs.c
+@@ -487,6 +487,7 @@ static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev,
+ 	if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
+ 	    0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
+ 	    0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) ||
++	    0 != (status & ZFCP_STATUS_PORT_LINK_TEST) ||
+ 	    0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
+ 	    0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
+ 		i = sprintf(buf, "unknown\n");
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 4b79661275c90..42e494a7106cd 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -1923,8 +1923,12 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
+ 
+ 		if (ccb->arc_cdb_size <= 0x300)
+ 			arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
+-		else
+-			arc_cdb_size = (((ccb->arc_cdb_size + 0xff) >> 8) + 2) << 1 | 1;
++		else {
++			arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
++			if (arc_cdb_size > 0xF)
++				arc_cdb_size = 0xF;
++			arc_cdb_size = (arc_cdb_size << 1) | 1;
++		}
+ 		ccb_post_stamp = (ccb->smid | arc_cdb_size);
+ 		writel(0, &pmu->inbound_queueport_high);
+ 		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
+@@ -2415,10 +2419,17 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
+ 
+ static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
+ {
+-	uint32_t outbound_doorbell, in_doorbell, tmp;
++	uint32_t outbound_doorbell, in_doorbell, tmp, i;
+ 	struct MessageUnit_E __iomem *reg = pACB->pmuE;
+ 
+-	in_doorbell = readl(&reg->iobound_doorbell);
++	if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
++		for (i = 0; i < 5; i++) {
++			in_doorbell = readl(&reg->iobound_doorbell);
++			if (in_doorbell != 0)
++				break;
++		}
++	} else
++		in_doorbell = readl(&reg->iobound_doorbell);
+ 	outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
+ 	do {
+ 		writel(0, &reg->host_int_status); /* clear interrupt */
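
The first arcmsr hunk hardens a bit-packing: the ARC CDB size is folded, in 256-byte units plus a bias, into a small field of ccb_post_stamp, and an oversized CDB previously shifted bits straight into the neighbouring SMID. Clamping the encoded value to 0xF keeps it inside its field. The encoding, extracted into a sketch:

#include <stdio.h>

/* pack a byte count into the doorbell field: 256-byte units, +2 bias,
 * shifted left one with the low "large CDB" flag bit set */
static unsigned int encode_cdb_size(unsigned int bytes)
{
	unsigned int sz = ((bytes + 0xff) >> 8) + 2;

	if (sz > 0xF)			/* clamp: the field is only 4 bits */
		sz = 0xF;
	return (sz << 1) | 1;
}

int main(void)
{
	printf("0x400 -> 0x%x, 0x4000 -> 0x%x\n",
	       encode_cdb_size(0x400), encode_cdb_size(0x4000));
	return 0;
}
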
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index e9658a67d9da0..207e3f53c638d 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -416,7 +416,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
+ 			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
+ 		return NULL;
+ 	}
+-	shost->max_id = BE2_MAX_SESSIONS;
++	shost->max_id = BE2_MAX_SESSIONS - 1;
+ 	shost->max_channel = 0;
+ 	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
+ 	shost->max_lun = BEISCSI_NUM_MAX_LUN;
+@@ -5318,7 +5318,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
+ 	/* Re-enable UER. If different TPE occurs then it is recoverable. */
+ 	beiscsi_set_uer_feature(phba);
+ 
+-	phba->shost->max_id = phba->params.cxns_per_ctrl;
++	phba->shost->max_id = phba->params.cxns_per_ctrl - 1;
+ 	phba->shost->can_queue = phba->params.ios_per_ctrl;
+ 	ret = beiscsi_init_port(phba);
+ 	if (ret < 0) {
+@@ -5745,6 +5745,7 @@ free_hba:
+ 	pci_disable_msix(phba->pcidev);
+ 	pci_dev_put(phba->pcidev);
+ 	iscsi_host_free(phba->shost);
++	pci_disable_pcie_error_reporting(pcidev);
+ 	pci_set_drvdata(pcidev, NULL);
+ disable_pci:
+ 	pci_release_regions(pcidev);
+diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
+index 2ad85c6b99fd2..8cf2f9a7cfdc2 100644
+--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
++++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
+@@ -791,7 +791,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+ 		return NULL;
+ 	shost->dma_boundary = cnic->pcidev->dma_mask;
+ 	shost->transportt = bnx2i_scsi_xport_template;
+-	shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
++	shost->max_id = ISCSI_MAX_CONNS_PER_HBA - 1;
+ 	shost->max_channel = 0;
+ 	shost->max_lun = 512;
+ 	shost->max_cmd_len = 16;
+diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
+index f6bcae829c29b..506b561670af0 100644
+--- a/drivers/scsi/cxgbi/libcxgbi.c
++++ b/drivers/scsi/cxgbi/libcxgbi.c
+@@ -337,7 +337,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
+ EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
+ 
+ int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
+-		unsigned int max_id, struct scsi_host_template *sht,
++		unsigned int max_conns, struct scsi_host_template *sht,
+ 		struct scsi_transport_template *stt)
+ {
+ 	struct cxgbi_hba *chba;
+@@ -357,7 +357,7 @@ int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
+ 
+ 		shost->transportt = stt;
+ 		shost->max_lun = max_lun;
+-		shost->max_id = max_id;
++		shost->max_id = max_conns - 1;
+ 		shost->max_channel = 0;
+ 		shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+ 
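
be2iscsi, bnx2i and cxgbi all set shost->max_id from a session/connection count, but the SCSI core treats max_id as the highest addressable target ID and scans 0..max_id inclusive, so a raw count yields one phantom target; hence the "- 1" in each hunk. The inclusive-bound arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int max_sessions = 64;
	unsigned int max_id = max_sessions - 1;	/* highest ID, not a count */
	unsigned int id, n = 0;

	for (id = 0; id <= max_id; id++)	/* the core scans inclusively */
		n++;
	printf("targets scanned: %u\n", n);	/* exactly max_sessions */
	return 0;
}
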
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 5eff3368143d3..19a3b3c9909c6 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -516,7 +516,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+ 	struct alua_port_group *tmp_pg;
+ 	int len, k, off, bufflen = ALUA_RTPG_SIZE;
+ 	unsigned char *desc, *buff;
+-	unsigned err, retval;
++	unsigned err;
++	int retval;
+ 	unsigned int tpg_desc_tbl_off;
+ 	unsigned char orig_transition_tmo;
+ 	unsigned long flags;
+@@ -556,12 +557,12 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+ 			kfree(buff);
+ 			return SCSI_DH_OK;
+ 		}
+-		if (!scsi_sense_valid(&sense_hdr)) {
++		if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
+ 			sdev_printk(KERN_INFO, sdev,
+ 				    "%s: rtpg failed, result %d\n",
+ 				    ALUA_DH_NAME, retval);
+ 			kfree(buff);
+-			if (driver_byte(retval) == DRIVER_ERROR)
++			if (retval < 0)
+ 				return SCSI_DH_DEV_TEMP_BUSY;
+ 			return SCSI_DH_IO;
+ 		}
+@@ -783,11 +784,11 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
+ 	retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
+ 
+ 	if (retval) {
+-		if (!scsi_sense_valid(&sense_hdr)) {
++		if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
+ 			sdev_printk(KERN_INFO, sdev,
+ 				    "%s: stpg failed, result %d",
+ 				    ALUA_DH_NAME, retval);
+-			if (driver_byte(retval) == DRIVER_ERROR)
++			if (retval < 0)
+ 				return SCSI_DH_DEV_TEMP_BUSY;
+ 		} else {
+ 			sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+index 3e359ac752fd2..3cba7bfba2965 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+@@ -1649,7 +1649,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 			if (irq < 0) {
+ 				dev_err(dev, "irq init: fail map phy interrupt %d\n",
+ 					idx);
+-				return -ENOENT;
++				return irq;
+ 			}
+ 
+ 			rc = devm_request_irq(dev, irq, phy_interrupts[j], 0,
+@@ -1657,7 +1657,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 			if (rc) {
+ 				dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
+ 					irq, rc);
+-				return -ENOENT;
++				return rc;
+ 			}
+ 		}
+ 	}
+@@ -1668,7 +1668,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 		if (irq < 0) {
+ 			dev_err(dev, "irq init: could not map cq interrupt %d\n",
+ 				idx);
+-			return -ENOENT;
++			return irq;
+ 		}
+ 
+ 		rc = devm_request_irq(dev, irq, cq_interrupt_v1_hw, 0,
+@@ -1676,7 +1676,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 		if (rc) {
+ 			dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
+ 				irq, rc);
+-			return -ENOENT;
++			return rc;
+ 		}
+ 	}
+ 
+@@ -1686,7 +1686,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 		if (irq < 0) {
+ 			dev_err(dev, "irq init: could not map fatal interrupt %d\n",
+ 				idx);
+-			return -ENOENT;
++			return irq;
+ 		}
+ 
+ 		rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
+@@ -1694,7 +1694,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 		if (rc) {
+ 			dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
+ 				irq, rc);
+-			return -ENOENT;
++			return rc;
+ 		}
+ 	}
+ 
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index b93dd8ef4ac82..da3920a19d53d 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -220,6 +220,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
+ 		goto fail;
+ 	}
+ 
++	shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
++				   shost->can_queue);
++
+ 	error = scsi_init_sense_cache(shost);
+ 	if (error)
+ 		goto fail;
+@@ -490,6 +493,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+ 		shost_printk(KERN_WARNING, shost,
+ 			"error handler thread failed to spawn, error = %ld\n",
+ 			PTR_ERR(shost->ehandler));
++		shost->ehandler = NULL;
+ 		goto fail;
+ 	}
+ 
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 2aaf836786548..dfe906307e55d 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -230,11 +230,11 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+  */
+ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
+ {
+-	struct iscsi_conn *conn = task->conn;
+-	struct iscsi_tm *tmf = &conn->tmhdr;
++	struct iscsi_session *session = task->conn->session;
++	struct iscsi_tm *tmf = &session->tmhdr;
+ 	u64 hdr_lun;
+ 
+-	if (conn->tmf_state == TMF_INITIAL)
++	if (session->tmf_state == TMF_INITIAL)
+ 		return 0;
+ 
+ 	if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
+@@ -254,24 +254,19 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
+ 		 * Fail all SCSI cmd PDUs
+ 		 */
+ 		if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
+-			iscsi_conn_printk(KERN_INFO, conn,
+-					  "task [op %x itt "
+-					  "0x%x/0x%x] "
+-					  "rejected.\n",
+-					  opcode, task->itt,
+-					  task->hdr_itt);
++			iscsi_session_printk(KERN_INFO, session,
++					     "task [op %x itt 0x%x/0x%x] rejected.\n",
++					     opcode, task->itt, task->hdr_itt);
+ 			return -EACCES;
+ 		}
+ 		/*
+ 		 * And also all data-out PDUs in response to R2T
+ 		 * if fast_abort is set.
+ 		 */
+-		if (conn->session->fast_abort) {
+-			iscsi_conn_printk(KERN_INFO, conn,
+-					  "task [op %x itt "
+-					  "0x%x/0x%x] fast abort.\n",
+-					  opcode, task->itt,
+-					  task->hdr_itt);
++		if (session->fast_abort) {
++			iscsi_session_printk(KERN_INFO, session,
++					     "task [op %x itt 0x%x/0x%x] fast abort.\n",
++					     opcode, task->itt, task->hdr_itt);
+ 			return -EACCES;
+ 		}
+ 		break;
+@@ -284,7 +279,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
+ 		 */
+ 		if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
+ 		    task->hdr_itt == tmf->rtt) {
+-			ISCSI_DBG_SESSION(conn->session,
++			ISCSI_DBG_SESSION(session,
+ 					  "Preventing task %x/%x from sending "
+ 					  "data-out due to abort task in "
+ 					  "progress\n", task->itt,
+@@ -936,20 +931,21 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ {
+ 	struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
++	struct iscsi_session *session = conn->session;
+ 
+ 	conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ 	conn->tmfrsp_pdus_cnt++;
+ 
+-	if (conn->tmf_state != TMF_QUEUED)
++	if (session->tmf_state != TMF_QUEUED)
+ 		return;
+ 
+ 	if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
+-		conn->tmf_state = TMF_SUCCESS;
++		session->tmf_state = TMF_SUCCESS;
+ 	else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
+-		conn->tmf_state = TMF_NOT_FOUND;
++		session->tmf_state = TMF_NOT_FOUND;
+ 	else
+-		conn->tmf_state = TMF_FAILED;
+-	wake_up(&conn->ehwait);
++		session->tmf_state = TMF_FAILED;
++	wake_up(&session->ehwait);
+ }
+ 
+ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+@@ -1361,7 +1357,6 @@ void iscsi_session_failure(struct iscsi_session *session,
+ 			   enum iscsi_err err)
+ {
+ 	struct iscsi_conn *conn;
+-	struct device *dev;
+ 
+ 	spin_lock_bh(&session->frwd_lock);
+ 	conn = session->leadconn;
+@@ -1370,10 +1365,8 @@ void iscsi_session_failure(struct iscsi_session *session,
+ 		return;
+ 	}
+ 
+-	dev = get_device(&conn->cls_conn->dev);
++	iscsi_get_conn(conn->cls_conn);
+ 	spin_unlock_bh(&session->frwd_lock);
+-	if (!dev)
+-	        return;
+ 	/*
+ 	 * if the host is being removed bypass the connection
+ 	 * recovery initialization because we are going to kill
+@@ -1383,7 +1376,7 @@ void iscsi_session_failure(struct iscsi_session *session,
+ 		iscsi_conn_error_event(conn->cls_conn, err);
+ 	else
+ 		iscsi_conn_failure(conn, err);
+-	put_device(dev);
++	iscsi_put_conn(conn->cls_conn);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_failure);
+ 
+@@ -1829,15 +1822,14 @@ EXPORT_SYMBOL_GPL(iscsi_target_alloc);
+ 
+ static void iscsi_tmf_timedout(struct timer_list *t)
+ {
+-	struct iscsi_conn *conn = from_timer(conn, t, tmf_timer);
+-	struct iscsi_session *session = conn->session;
++	struct iscsi_session *session = from_timer(session, t, tmf_timer);
+ 
+ 	spin_lock(&session->frwd_lock);
+-	if (conn->tmf_state == TMF_QUEUED) {
+-		conn->tmf_state = TMF_TIMEDOUT;
++	if (session->tmf_state == TMF_QUEUED) {
++		session->tmf_state = TMF_TIMEDOUT;
+ 		ISCSI_DBG_EH(session, "tmf timedout\n");
+ 		/* unblock eh_abort() */
+-		wake_up(&conn->ehwait);
++		wake_up(&session->ehwait);
+ 	}
+ 	spin_unlock(&session->frwd_lock);
+ }
+@@ -1860,8 +1852,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ 		return -EPERM;
+ 	}
+ 	conn->tmfcmd_pdus_cnt++;
+-	conn->tmf_timer.expires = timeout * HZ + jiffies;
+-	add_timer(&conn->tmf_timer);
++	session->tmf_timer.expires = timeout * HZ + jiffies;
++	add_timer(&session->tmf_timer);
+ 	ISCSI_DBG_EH(session, "tmf set timeout\n");
+ 
+ 	spin_unlock_bh(&session->frwd_lock);
+@@ -1875,12 +1867,12 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ 	 * 3) session is terminated or restarted or userspace has
+ 	 * given up on recovery
+ 	 */
+-	wait_event_interruptible(conn->ehwait, age != session->age ||
++	wait_event_interruptible(session->ehwait, age != session->age ||
+ 				 session->state != ISCSI_STATE_LOGGED_IN ||
+-				 conn->tmf_state != TMF_QUEUED);
++				 session->tmf_state != TMF_QUEUED);
+ 	if (signal_pending(current))
+ 		flush_signals(current);
+-	del_timer_sync(&conn->tmf_timer);
++	del_timer_sync(&session->tmf_timer);
+ 
+ 	mutex_lock(&session->eh_mutex);
+ 	spin_lock_bh(&session->frwd_lock);
+@@ -2312,17 +2304,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ 	}
+ 
+ 	/* only have one tmf outstanding at a time */
+-	if (conn->tmf_state != TMF_INITIAL)
++	if (session->tmf_state != TMF_INITIAL)
+ 		goto failed;
+-	conn->tmf_state = TMF_QUEUED;
++	session->tmf_state = TMF_QUEUED;
+ 
+-	hdr = &conn->tmhdr;
++	hdr = &session->tmhdr;
+ 	iscsi_prep_abort_task_pdu(task, hdr);
+ 
+ 	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout))
+ 		goto failed;
+ 
+-	switch (conn->tmf_state) {
++	switch (session->tmf_state) {
+ 	case TMF_SUCCESS:
+ 		spin_unlock_bh(&session->frwd_lock);
+ 		/*
+@@ -2337,7 +2329,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ 		 */
+ 		spin_lock_bh(&session->frwd_lock);
+ 		fail_scsi_task(task, DID_ABORT);
+-		conn->tmf_state = TMF_INITIAL;
++		session->tmf_state = TMF_INITIAL;
+ 		memset(hdr, 0, sizeof(*hdr));
+ 		spin_unlock_bh(&session->frwd_lock);
+ 		iscsi_start_tx(conn);
+@@ -2348,7 +2340,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ 		goto failed_unlocked;
+ 	case TMF_NOT_FOUND:
+ 		if (!sc->SCp.ptr) {
+-			conn->tmf_state = TMF_INITIAL;
++			session->tmf_state = TMF_INITIAL;
+ 			memset(hdr, 0, sizeof(*hdr));
+ 			/* task completed before tmf abort response */
+ 			ISCSI_DBG_EH(session, "sc completed while abort	in "
+@@ -2357,7 +2349,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ 		}
+ 		fallthrough;
+ 	default:
+-		conn->tmf_state = TMF_INITIAL;
++		session->tmf_state = TMF_INITIAL;
+ 		goto failed;
+ 	}
+ 
+@@ -2416,11 +2408,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ 	conn = session->leadconn;
+ 
+ 	/* only have one tmf outstanding at a time */
+-	if (conn->tmf_state != TMF_INITIAL)
++	if (session->tmf_state != TMF_INITIAL)
+ 		goto unlock;
+-	conn->tmf_state = TMF_QUEUED;
++	session->tmf_state = TMF_QUEUED;
+ 
+-	hdr = &conn->tmhdr;
++	hdr = &session->tmhdr;
+ 	iscsi_prep_lun_reset_pdu(sc, hdr);
+ 
+ 	if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+@@ -2429,7 +2421,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ 		goto unlock;
+ 	}
+ 
+-	switch (conn->tmf_state) {
++	switch (session->tmf_state) {
+ 	case TMF_SUCCESS:
+ 		break;
+ 	case TMF_TIMEDOUT:
+@@ -2437,7 +2429,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
+ 		goto done;
+ 	default:
+-		conn->tmf_state = TMF_INITIAL;
++		session->tmf_state = TMF_INITIAL;
+ 		goto unlock;
+ 	}
+ 
+@@ -2449,7 +2441,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ 	spin_lock_bh(&session->frwd_lock);
+ 	memset(hdr, 0, sizeof(*hdr));
+ 	fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
+-	conn->tmf_state = TMF_INITIAL;
++	session->tmf_state = TMF_INITIAL;
+ 	spin_unlock_bh(&session->frwd_lock);
+ 
+ 	iscsi_start_tx(conn);
+@@ -2472,8 +2464,7 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ 	spin_lock_bh(&session->frwd_lock);
+ 	if (session->state != ISCSI_STATE_LOGGED_IN) {
+ 		session->state = ISCSI_STATE_RECOVERY_FAILED;
+-		if (session->leadconn)
+-			wake_up(&session->leadconn->ehwait);
++		wake_up(&session->ehwait);
+ 	}
+ 	spin_unlock_bh(&session->frwd_lock);
+ }
+@@ -2518,7 +2509,7 @@ failed:
+ 	iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
+ 
+ 	ISCSI_DBG_EH(session, "wait for relogin\n");
+-	wait_event_interruptible(conn->ehwait,
++	wait_event_interruptible(session->ehwait,
+ 				 session->state == ISCSI_STATE_TERMINATE ||
+ 				 session->state == ISCSI_STATE_LOGGED_IN ||
+ 				 session->state == ISCSI_STATE_RECOVERY_FAILED);
+@@ -2579,11 +2570,11 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
+ 	conn = session->leadconn;
+ 
+ 	/* only have one tmf outstanding at a time */
+-	if (conn->tmf_state != TMF_INITIAL)
++	if (session->tmf_state != TMF_INITIAL)
+ 		goto unlock;
+-	conn->tmf_state = TMF_QUEUED;
++	session->tmf_state = TMF_QUEUED;
+ 
+-	hdr = &conn->tmhdr;
++	hdr = &session->tmhdr;
+ 	iscsi_prep_tgt_reset_pdu(sc, hdr);
+ 
+ 	if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+@@ -2592,7 +2583,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
+ 		goto unlock;
+ 	}
+ 
+-	switch (conn->tmf_state) {
++	switch (session->tmf_state) {
+ 	case TMF_SUCCESS:
+ 		break;
+ 	case TMF_TIMEDOUT:
+@@ -2600,7 +2591,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
+ 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
+ 		goto done;
+ 	default:
+-		conn->tmf_state = TMF_INITIAL;
++		session->tmf_state = TMF_INITIAL;
+ 		goto unlock;
+ 	}
+ 
+@@ -2612,7 +2603,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
+ 	spin_lock_bh(&session->frwd_lock);
+ 	memset(hdr, 0, sizeof(*hdr));
+ 	fail_scsi_tasks(conn, -1, DID_ERROR);
+-	conn->tmf_state = TMF_INITIAL;
++	session->tmf_state = TMF_INITIAL;
+ 	spin_unlock_bh(&session->frwd_lock);
+ 
+ 	iscsi_start_tx(conn);
+@@ -2942,7 +2933,10 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+ 	session->tt = iscsit;
+ 	session->dd_data = cls_session->dd_data + sizeof(*session);
+ 
++	session->tmf_state = TMF_INITIAL;
++	timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
+ 	mutex_init(&session->eh_mutex);
++
+ 	spin_lock_init(&session->frwd_lock);
+ 	spin_lock_init(&session->back_lock);
+ 
+@@ -3046,7 +3040,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ 	conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+ 	conn->id = conn_idx;
+ 	conn->exp_statsn = 0;
+-	conn->tmf_state = TMF_INITIAL;
+ 
+ 	timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0);
+ 
+@@ -3071,8 +3064,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ 		goto login_task_data_alloc_fail;
+ 	conn->login_task->data = conn->data = data;
+ 
+-	timer_setup(&conn->tmf_timer, iscsi_tmf_timedout, 0);
+-	init_waitqueue_head(&conn->ehwait);
++	init_waitqueue_head(&session->ehwait);
+ 
+ 	return cls_conn;
+ 
+@@ -3107,7 +3099,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ 		 * leading connection? then give up on recovery.
+ 		 */
+ 		session->state = ISCSI_STATE_TERMINATE;
+-		wake_up(&conn->ehwait);
++		wake_up(&session->ehwait);
+ 	}
+ 	spin_unlock_bh(&session->frwd_lock);
+ 
+@@ -3182,7 +3174,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ 		 * commands after successful recovery
+ 		 */
+ 		conn->stop_stage = 0;
+-		conn->tmf_state = TMF_INITIAL;
++		session->tmf_state = TMF_INITIAL;
+ 		session->age++;
+ 		if (session->age == 16)
+ 			session->age = 0;
+@@ -3196,7 +3188,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ 	spin_unlock_bh(&session->frwd_lock);
+ 
+ 	iscsi_unblock_session(session->cls_session);
+-	wake_up(&conn->ehwait);
++	wake_up(&session->ehwait);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+@@ -3290,7 +3282,7 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ 	spin_lock_bh(&session->frwd_lock);
+ 	fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
+ 	fail_mgmt_tasks(session, conn);
+-	memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
++	memset(&session->tmhdr, 0, sizeof(session->tmhdr));
+ 	spin_unlock_bh(&session->frwd_lock);
+ 	mutex_unlock(&session->eh_mutex);
+ }
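
The libiscsi rework above is one logical move: tmf_state, tmhdr, tmf_timer and ehwait migrate from struct iscsi_conn to struct iscsi_session, because at most one task-management function may be outstanding per session and the error-handling wait must survive the leading connection being dropped and replaced during recovery. The ownership change in miniature (reduced hypothetical types):

#include <stdio.h>

enum tmf_state { TMF_INITIAL, TMF_QUEUED };

/* the TMF gate lives in the session: replacing a connection must not
 * lose track of the one TMF the session already has in flight */
struct session {
	enum tmf_state tmf_state;
};

struct conn {
	struct session *session;
};

static int send_tmf(struct conn *c)
{
	struct session *s = c->session;

	if (s->tmf_state != TMF_INITIAL)
		return -1;		/* only one TMF at a time */
	s->tmf_state = TMF_QUEUED;
	/* ... issue the PDU, sleep on s->ehwait, handle the response ... */
	s->tmf_state = TMF_INITIAL;
	return 0;
}

int main(void)
{
	struct session s = { TMF_INITIAL };
	struct conn c = { &s };

	printf("send_tmf: %d, state: %d\n", send_tmf(&c), s.tmf_state);
	return 0;
}
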
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 5c4172e8c81b2..3b72aea9d15d5 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -1175,6 +1175,15 @@ stop_rr_fcf_flogi:
+ 			phba->fcf.fcf_redisc_attempted = 0; /* reset */
+ 			goto out;
+ 		}
++	} else if (vport->port_state > LPFC_FLOGI &&
++		   vport->fc_flag & FC_PT2PT) {
++		/*
++		 * In a p2p topology, it is possible that discovery has
++		 * already progressed, and this completion can be ignored.
++		 * Recheck the indicated topology.
++		 */
++		if (!sp->cmn.fPort)
++			goto out;
+ 	}
+ 
+ flogifail:
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 7551743835fc0..c063a6d2b690f 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -7962,7 +7962,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+ 				"0393 Error %d during rpi post operation\n",
+ 				rc);
+ 		rc = -ENODEV;
+-		goto out_destroy_queue;
++		goto out_free_iocblist;
+ 	}
+ 	lpfc_sli4_node_prep(phba);
+ 
+@@ -8128,8 +8128,9 @@ out_io_buff_free:
+ out_unset_queue:
+ 	/* Unset all the queues set up in this routine when error out */
+ 	lpfc_sli4_queue_unset(phba);
+-out_destroy_queue:
++out_free_iocblist:
+ 	lpfc_free_iocb_list(phba);
++out_destroy_queue:
+ 	lpfc_sli4_queue_destroy(phba);
+ out_stop_timers:
+ 	lpfc_stop_hba_timers(phba);
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index 0f808d63580ee..69fed93184680 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -2259,6 +2259,15 @@ enum MR_PERF_MODE {
+ 		 (mode) == MR_LATENCY_PERF_MODE ? "Latency" : \
+ 		 "Unknown")
+ 
++enum MEGASAS_LD_TARGET_ID_STATUS {
++	LD_TARGET_ID_INITIAL,
++	LD_TARGET_ID_ACTIVE,
++	LD_TARGET_ID_DELETED,
++};
++
++#define MEGASAS_TARGET_ID(sdev)						\
++	(((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id)
++
+ struct megasas_instance {
+ 
+ 	unsigned int *reply_map;
+@@ -2323,6 +2332,9 @@ struct megasas_instance {
+ 	struct megasas_pd_list          pd_list[MEGASAS_MAX_PD];
+ 	struct megasas_pd_list          local_pd_list[MEGASAS_MAX_PD];
+ 	u8 ld_ids[MEGASAS_MAX_LD_IDS];
++	u8 ld_tgtid_status[MEGASAS_MAX_LD_IDS];
++	u8 ld_ids_prev[MEGASAS_MAX_LD_IDS];
++	u8 ld_ids_from_raidmap[MEGASAS_MAX_LD_IDS];
+ 	s8 init_id;
+ 
+ 	u16 max_num_sge;
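
MEGASAS_TARGET_ID() flattens an sdev's (channel, id) pair into the 0-based LD target-ID space that indexes the new ld_tgtid_status[] array; the "% 2" folds the two VD channels (which sit above the PD channels) onto one contiguous range. Worked numerically, with MEGASAS_MAX_DEV_PER_CHANNEL assumed to be 128 as in the driver headers:

#include <stdio.h>

#define MEGASAS_MAX_DEV_PER_CHANNEL 128	/* assumed from megaraid_sas.h */

static unsigned int megasas_target_id(unsigned int channel, unsigned int id)
{
	return (channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL + id;
}

int main(void)
{
	/* first and second VD channel map to 0..127 and 128..255 */
	printf("ch2 id5 -> %u\n", megasas_target_id(2, 5));	/* 5   */
	printf("ch3 id5 -> %u\n", megasas_target_id(3, 5));	/* 133 */
	return 0;
}
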
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 63a4f48bdc755..c9c8a5e4925cd 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -132,6 +132,8 @@ static int megasas_register_aen(struct megasas_instance *instance,
+ 				u32 seq_num, u32 class_locale_word);
+ static void megasas_get_pd_info(struct megasas_instance *instance,
+ 				struct scsi_device *sdev);
++static void
++megasas_set_ld_removed_by_fw(struct megasas_instance *instance);
+ 
+ /*
+  * PCI ID table for all supported controllers
+@@ -426,6 +428,12 @@ megasas_decode_evt(struct megasas_instance *instance)
+ 			(class_locale.members.locale),
+ 			format_class(class_locale.members.class),
+ 			evt_detail->description);
++
++	if (megasas_dbg_lvl & LD_PD_DEBUG)
++		dev_info(&instance->pdev->dev,
++			 "evt_detail.args.ld.target_id/index %d/%d\n",
++			 evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);
++
+ }
+ 
+ /*
+@@ -1769,6 +1777,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ {
+ 	struct megasas_instance *instance;
+ 	struct MR_PRIV_DEVICE *mr_device_priv_data;
++	u32 ld_tgt_id;
+ 
+ 	instance = (struct megasas_instance *)
+ 	    scmd->device->host->hostdata;
+@@ -1795,17 +1804,21 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ 		}
+ 	}
+ 
+-	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
++	mr_device_priv_data = scmd->device->hostdata;
++	if (!mr_device_priv_data ||
++	    (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
+ 		scmd->result = DID_NO_CONNECT << 16;
+ 		scmd->scsi_done(scmd);
+ 		return 0;
+ 	}
+ 
+-	mr_device_priv_data = scmd->device->hostdata;
+-	if (!mr_device_priv_data) {
+-		scmd->result = DID_NO_CONNECT << 16;
+-		scmd->scsi_done(scmd);
+-		return 0;
++	if (MEGASAS_IS_LOGICAL(scmd->device)) {
++		ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
++		if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
++			scmd->result = DID_NO_CONNECT << 16;
++			scmd->scsi_done(scmd);
++			return 0;
++		}
+ 	}
+ 
+ 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
+@@ -2085,7 +2098,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
+ 
+ static int megasas_slave_alloc(struct scsi_device *sdev)
+ {
+-	u16 pd_index = 0;
++	u16 pd_index = 0, ld_tgt_id;
+ 	struct megasas_instance *instance ;
+ 	struct MR_PRIV_DEVICE *mr_device_priv_data;
+ 
+@@ -2110,6 +2123,14 @@ scan_target:
+ 					GFP_KERNEL);
+ 	if (!mr_device_priv_data)
+ 		return -ENOMEM;
++
++	if (MEGASAS_IS_LOGICAL(sdev)) {
++		ld_tgt_id = MEGASAS_TARGET_ID(sdev);
++		instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE;
++		if (megasas_dbg_lvl & LD_PD_DEBUG)
++			sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id);
++	}
++
+ 	sdev->hostdata = mr_device_priv_data;
+ 
+ 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
+@@ -2119,6 +2140,19 @@ scan_target:
+ 
+ static void megasas_slave_destroy(struct scsi_device *sdev)
+ {
++	u16 ld_tgt_id;
++	struct megasas_instance *instance;
++
++	instance = megasas_lookup_instance(sdev->host->host_no);
++
++	if (MEGASAS_IS_LOGICAL(sdev)) {
++		ld_tgt_id = MEGASAS_TARGET_ID(sdev);
++		instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
++		if (megasas_dbg_lvl & LD_PD_DEBUG)
++			sdev_printk(KERN_INFO, sdev,
++				    "LD target ID %d removed from OS stack\n", ld_tgt_id);
++	}
++
+ 	kfree(sdev->hostdata);
+ 	sdev->hostdata = NULL;
+ }
+@@ -3491,6 +3525,22 @@ megasas_complete_abort(struct megasas_instance *instance,
+ 	}
+ }
+ 
++static void
++megasas_set_ld_removed_by_fw(struct megasas_instance *instance)
++{
++	uint i;
++
++	for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) {
++		if (instance->ld_ids_prev[i] != 0xff &&
++		    instance->ld_ids_from_raidmap[i] == 0xff) {
++			if (megasas_dbg_lvl & LD_PD_DEBUG)
++				dev_info(&instance->pdev->dev,
++					 "LD target ID %d removed from RAID map\n", i);
++			instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED;
++		}
++	}
++}
++
+ /**
+  * megasas_complete_cmd -	Completes a command
+  * @instance:			Adapter soft state
+@@ -3653,9 +3703,13 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ 				fusion->fast_path_io = 0;
+ 			}
+ 
++			if (instance->adapter_type >= INVADER_SERIES)
++				megasas_set_ld_removed_by_fw(instance);
++
+ 			megasas_sync_map_info(instance);
+ 			spin_unlock_irqrestore(instance->host->host_lock,
+ 					       flags);
++
+ 			break;
+ 		}
+ 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+@@ -7478,11 +7532,16 @@ static int megasas_probe_one(struct pci_dev *pdev,
+ 	return 0;
+ 
+ fail_start_aen:
++	instance->unload = 1;
++	scsi_remove_host(instance->host);
+ fail_io_attach:
+ 	megasas_mgmt_info.count--;
+ 	megasas_mgmt_info.max_index--;
+ 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
+ 
++	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
++		del_timer_sync(&instance->sriov_heartbeat_timer);
++
+ 	instance->instancet->disable_intr(instance);
+ 	megasas_destroy_irqs(instance);
+ 
+@@ -7490,8 +7549,16 @@ fail_io_attach:
+ 		megasas_release_fusion(instance);
+ 	else
+ 		megasas_release_mfi(instance);
++
+ 	if (instance->msix_vectors)
+ 		pci_free_irq_vectors(instance->pdev);
++	instance->msix_vectors = 0;
++
++	if (instance->fw_crash_state != UNAVAILABLE)
++		megasas_free_host_crash_buffer(instance);
++
++	if (instance->adapter_type != MFI_SERIES)
++		megasas_fusion_stop_watchdog(instance);
+ fail_init_mfi:
+ 	scsi_host_put(host);
+ fail_alloc_instance:
+@@ -8751,8 +8818,10 @@ megasas_aen_polling(struct work_struct *work)
+ 	union megasas_evt_class_locale class_locale;
+ 	int event_type = 0;
+ 	u32 seq_num;
++	u16 ld_target_id;
+ 	int error;
+ 	u8  dcmd_ret = DCMD_SUCCESS;
++	struct scsi_device *sdev1;
+ 
+ 	if (!instance) {
+ 		printk(KERN_ERR "invalid instance!\n");
+@@ -8775,12 +8844,23 @@ megasas_aen_polling(struct work_struct *work)
+ 			break;
+ 
+ 		case MR_EVT_LD_OFFLINE:
+-		case MR_EVT_CFG_CLEARED:
+ 		case MR_EVT_LD_DELETED:
++			ld_target_id = instance->evt_detail->args.ld.target_id;
++			sdev1 = scsi_device_lookup(instance->host,
++						   MEGASAS_MAX_PD_CHANNELS +
++						   (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
++						   (ld_target_id - MEGASAS_MAX_DEV_PER_CHANNEL),
++						   0);
++			if (sdev1)
++				megasas_remove_scsi_device(sdev1);
++
++			event_type = SCAN_VD_CHANNEL;
++			break;
+ 		case MR_EVT_LD_CREATED:
+ 			event_type = SCAN_VD_CHANNEL;
+ 			break;
+ 
++		case MR_EVT_CFG_CLEARED:
+ 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+ 		case MR_EVT_FOREIGN_CFG_IMPORTED:
+ 		case MR_EVT_LD_STATE_CHANGE:
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
+index b6c08d6200335..83f69c33b01a9 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
+@@ -349,6 +349,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
+ 
+ 	num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
+ 
++	memcpy(instance->ld_ids_prev,
++	       instance->ld_ids_from_raidmap,
++	       sizeof(instance->ld_ids_from_raidmap));
++	memset(instance->ld_ids_from_raidmap, 0xff, MEGASAS_MAX_LD_IDS);
+ 	/*Convert Raid capability values to CPU arch */
+ 	for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
+ 		ld = MR_TargetIdToLdGet(i, drv_map);
+@@ -359,7 +363,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
+ 
+ 		raid = MR_LdRaidGet(ld, drv_map);
+ 		le32_to_cpus((u32 *)&raid->capability);
+-
++		instance->ld_ids_from_raidmap[i] = i;
+ 		num_lds--;
+ 	}
+ 
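
The hunk above snapshots the previous RAID-map ID table before rebuilding it, so megasas_set_ld_removed_by_fw() can flag targets that were present in the last map but missing from the new one. Below is a minimal userspace sketch of that diffing pattern; the 0xff "unused" sentinel mirrors the driver, everything else (names, sizes, the test data) is a stand-in.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_IDS   240		/* MEGASAS_MAX_LD_IDS in the real driver */
#define ID_UNUSED 0xff

static uint8_t ids_prev[MAX_IDS];
static uint8_t ids_now[MAX_IDS];

static void refresh_map(const uint8_t *fw_ids, size_t n)
{
	/* Keep the last snapshot, then rebuild the current table. */
	memcpy(ids_prev, ids_now, sizeof(ids_now));
	memset(ids_now, ID_UNUSED, sizeof(ids_now));
	for (size_t i = 0; i < n; i++)
		ids_now[fw_ids[i]] = fw_ids[i];
}

static void report_removed(void)
{
	for (unsigned int i = 0; i < MAX_IDS; i++)
		if (ids_prev[i] != ID_UNUSED && ids_now[i] == ID_UNUSED)
			printf("target %u removed from RAID map\n", i);
}

int main(void)
{
	const uint8_t first[]  = { 0, 1, 2 };
	const uint8_t second[] = { 0, 2 };	/* target 1 disappears */

	memset(ids_now, ID_UNUSED, sizeof(ids_now));
	refresh_map(first, 3);
	refresh_map(second, 2);
	report_removed();	/* prints: target 1 removed from RAID map */
	return 0;
}
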
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 73295cf74cbe3..5abb84ebc0ed3 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -3676,6 +3676,7 @@ static void megasas_sync_irqs(unsigned long instance_addr)
+ 		if (irq_ctx->irq_poll_scheduled) {
+ 			irq_ctx->irq_poll_scheduled = false;
+ 			enable_irq(irq_ctx->os_irq);
++			complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
+ 		}
+ 	}
+ }
+@@ -3707,6 +3708,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
+ 		irq_poll_complete(irqpoll);
+ 		irq_ctx->irq_poll_scheduled = false;
+ 		enable_irq(irq_ctx->os_irq);
++		complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
+ 	}
+ 
+ 	return num_entries;
+@@ -3723,6 +3725,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
+ {
+ 	struct megasas_instance *instance =
+ 		(struct megasas_instance *)instance_addr;
++	struct megasas_irq_context *irq_ctx = NULL;
+ 	u32 count, MSIxIndex;
+ 
+ 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+@@ -3731,8 +3734,10 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
+ 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+ 		return;
+ 
+-	for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
+-		complete_cmd_fusion(instance, MSIxIndex, NULL);
++	for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) {
++		irq_ctx = &instance->irq_context[MSIxIndex];
++		complete_cmd_fusion(instance, MSIxIndex, irq_ctx);
++	}
+ }
+ 
+ /**
+@@ -5201,6 +5206,7 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
+ 		if (!fusion->log_to_span) {
+ 			dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ 				__func__, __LINE__);
++			kfree(instance->ctrl_context);
+ 			return -ENOMEM;
+ 		}
+ 	}
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 7824e77bc6e26..15d1f1fbeee75 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -3696,6 +3696,28 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
+ 	ioc->fw_events_cleanup = 1;
+ 	while ((fw_event = dequeue_next_fw_event(ioc)) ||
+ 	     (fw_event = ioc->current_event)) {
++
++		/*
++		 * Don't call cancel_work_sync() for the current_event
++		 * unless it is MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
++		 * otherwise we may deadlock if a hard reset is issued
++		 * as part of processing the current_event.
++		 *
++		 * The original logic of cleaning the current_event was
++		 * added to handle back-to-back host resets issued by the
++		 * user: during back-to-back host resets the driver used
++		 * to process two instances of the
++		 * MPT3SAS_REMOVE_UNRESPONDING_DEVICES event in a row,
++		 * which made the driver unregister the devices from SML.
++		 */
++
++		if (fw_event == ioc->current_event &&
++		    ioc->current_event->event !=
++		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
++			ioc->current_event = NULL;
++			continue;
++		}
++
+ 		/*
+ 		 * Wait on the fw_event to complete. If this returns 1, then
+ 		 * the event was never executed, and we need a put for the
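
The comment in the hunk above explains why this cleanup loop must not synchronously wait on the very event it may be running from. A small sketch of that guard follows; the names are hypothetical and only the control flow mirrors the mpt3sas change.

#include <stdio.h>
#include <stddef.h>

enum { EVT_NORMAL, EVT_REMOVE_UNRESPONDING };

struct event { int type; };

static void cleanup_queue(struct event **queue, size_t n,
			  struct event *current_event)
{
	for (size_t i = 0; i < n; i++) {
		struct event *ev = queue[i];

		/* Running from this event: waiting on it would deadlock. */
		if (ev == current_event && ev->type != EVT_REMOVE_UNRESPONDING) {
			printf("skipping in-flight event %zu\n", i);
			continue;
		}
		printf("cancelling event %zu synchronously\n", i);
	}
}

int main(void)
{
	struct event a = { EVT_NORMAL }, b = { EVT_NORMAL };
	struct event *queue[] = { &a, &b };

	cleanup_queue(queue, 2, &b);	/* pretend we run from event b */
	return 0;
}
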
+diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
+index c342defc3f522..ce199a7a16b80 100644
+--- a/drivers/scsi/qedi/qedi.h
++++ b/drivers/scsi/qedi/qedi.h
+@@ -284,6 +284,7 @@ struct qedi_ctx {
+ #define QEDI_IN_RECOVERY	5
+ #define QEDI_IN_OFFLINE		6
+ #define QEDI_IN_SHUTDOWN	7
++#define QEDI_BLOCK_IO		8
+ 
+ 	u8 mac[ETH_ALEN];
+ 	u32 src_ip[4];
+diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
+index 440ddd2309f1d..4c87640e6a911 100644
+--- a/drivers/scsi/qedi/qedi_fw.c
++++ b/drivers/scsi/qedi/qedi_fw.c
+@@ -73,7 +73,6 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+ 	spin_unlock(&qedi_conn->list_lock);
+ 
+ 	cmd->state = RESPONSE_RECEIVED;
+-	qedi_clear_task_idx(qedi, cmd->task_id);
+ 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+ 
+ 	spin_unlock(&session->back_lock);
+@@ -138,7 +137,6 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
+ 	spin_unlock(&qedi_conn->list_lock);
+ 
+ 	cmd->state = RESPONSE_RECEIVED;
+-	qedi_clear_task_idx(qedi, cmd->task_id);
+ 
+ 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+ 			     qedi_conn->gen_pdu.resp_buf,
+@@ -161,16 +159,9 @@ static void qedi_tmf_resp_work(struct work_struct *work)
+ 	set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ 	resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+ 
+-	iscsi_block_session(session->cls_session);
+ 	rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
+-	if (rval) {
+-		qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+-		iscsi_unblock_session(session->cls_session);
++	if (rval)
+ 		goto exit_tmf_resp;
+-	}
+-
+-	iscsi_unblock_session(session->cls_session);
+-	qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+ 
+ 	spin_lock(&session->back_lock);
+ 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+@@ -245,8 +236,6 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
+ 		goto unblock_sess;
+ 	}
+ 
+-	qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+-
+ 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+ 	kfree(resp_hdr_ptr);
+ 
+@@ -314,7 +303,6 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
+ 		  "Freeing tid=0x%x for cid=0x%x\n",
+ 		  cmd->task_id, qedi_conn->iscsi_conn_id);
+ 	cmd->state = RESPONSE_RECEIVED;
+-	qedi_clear_task_idx(qedi, cmd->task_id);
+ }
+ 
+ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
+@@ -468,7 +456,6 @@ static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
+ 		}
+ 
+ 		spin_unlock(&qedi_conn->list_lock);
+-		qedi_clear_task_idx(qedi, cmd->task_id);
+ 	}
+ 
+ done:
+@@ -673,7 +660,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
+ 	if (qedi_io_tracing)
+ 		qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
+ 
+-	qedi_clear_task_idx(qedi, cmd->task_id);
+ 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ 			     conn->data, datalen);
+ error:
+@@ -730,7 +716,6 @@ static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
+ 		  cqe->itid, cmd->task_id);
+ 
+ 	cmd->state = RESPONSE_RECEIVED;
+-	qedi_clear_task_idx(qedi, cmd->task_id);
+ 
+ 	spin_lock_bh(&session->back_lock);
+ 	__iscsi_put_task(task);
+@@ -748,7 +733,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+ 	itt_t protoitt = 0;
+ 	int found = 0;
+ 	struct qedi_cmd *qedi_cmd = NULL;
+-	u32 rtid = 0;
+ 	u32 iscsi_cid;
+ 	struct qedi_conn *qedi_conn;
+ 	struct qedi_cmd *dbg_cmd;
+@@ -779,7 +763,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+ 			found = 1;
+ 			mtask = qedi_cmd->task;
+ 			tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+-			rtid = work->rtid;
+ 
+ 			list_del_init(&work->list);
+ 			kfree(work);
+@@ -821,8 +804,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+ 			if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
+ 				qedi_cmd->state = CLEANUP_RECV;
+ 
+-			qedi_clear_task_idx(qedi_conn->qedi, rtid);
+-
+ 			spin_lock(&qedi_conn->list_lock);
+ 			if (likely(dbg_cmd->io_cmd_in_list)) {
+ 				dbg_cmd->io_cmd_in_list = false;
+@@ -856,7 +837,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+ 		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ 			  "Freeing tid=0x%x for cid=0x%x\n",
+ 			  cqe->itid, qedi_conn->iscsi_conn_id);
+-		qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
+ 
+ 	} else {
+ 		qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+@@ -1453,7 +1433,7 @@ abort_ret:
+ 
+ ldel_exit:
+ 	spin_lock_bh(&qedi_conn->tmf_work_lock);
+-	if (!qedi_cmd->list_tmf_work) {
++	if (qedi_cmd->list_tmf_work) {
+ 		list_del_init(&list_work->list);
+ 		qedi_cmd->list_tmf_work = NULL;
+ 		kfree(list_work);
+diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
+index 087c7ff28cd52..5f7e62f19d83a 100644
+--- a/drivers/scsi/qedi/qedi_iscsi.c
++++ b/drivers/scsi/qedi/qedi_iscsi.c
+@@ -330,12 +330,22 @@ free_conn:
+ 
+ void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
+ {
+-	iscsi_block_session(cls_session);
++	struct iscsi_session *session = cls_session->dd_data;
++	struct qedi_conn *qedi_conn = session->leadconn->dd_data;
++
++	spin_lock_bh(&session->frwd_lock);
++	set_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags);
++	spin_unlock_bh(&session->frwd_lock);
+ }
+ 
+ void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
+ {
+-	iscsi_unblock_session(cls_session);
++	struct iscsi_session *session = cls_session->dd_data;
++	struct qedi_conn *qedi_conn = session->leadconn->dd_data;
++
++	spin_lock_bh(&session->frwd_lock);
++	clear_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags);
++	spin_unlock_bh(&session->frwd_lock);
+ }
+ 
+ static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
+@@ -783,7 +793,6 @@ static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ 	}
+ 
+ 	cmd->conn = conn->dd_data;
+-	cmd->scsi_cmd = NULL;
+ 	return qedi_iscsi_send_generic_request(task);
+ }
+ 
+@@ -794,9 +803,16 @@ static int qedi_task_xmit(struct iscsi_task *task)
+ 	struct qedi_cmd *cmd = task->dd_data;
+ 	struct scsi_cmnd *sc = task->sc;
+ 
++	/* Clear now so in cleanup_task we know it didn't make it */
++	cmd->scsi_cmd = NULL;
++	cmd->task_id = U16_MAX;
++
+ 	if (test_bit(QEDI_IN_SHUTDOWN, &qedi_conn->qedi->flags))
+ 		return -ENODEV;
+ 
++	if (test_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags))
++		return -EACCES;
++
+ 	cmd->state = 0;
+ 	cmd->task = NULL;
+ 	cmd->use_slowpath = false;
+@@ -1394,13 +1410,24 @@ static umode_t qedi_attr_is_visible(int param_type, int param)
+ 
+ static void qedi_cleanup_task(struct iscsi_task *task)
+ {
+-	if (!task->sc || task->state == ISCSI_TASK_PENDING) {
++	struct qedi_cmd *cmd;
++
++	if (task->state == ISCSI_TASK_PENDING) {
+ 		QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
+ 			  refcount_read(&task->refcount));
+ 		return;
+ 	}
+ 
+-	qedi_iscsi_unmap_sg_list(task->dd_data);
++	if (task->sc)
++		qedi_iscsi_unmap_sg_list(task->dd_data);
++
++	cmd = task->dd_data;
++	if (cmd->task_id != U16_MAX)
++		qedi_clear_task_idx(iscsi_host_priv(task->conn->session->host),
++				    cmd->task_id);
++
++	cmd->task_id = U16_MAX;
++	cmd->scsi_cmd = NULL;
+ }
+ 
+ struct iscsi_transport qedi_iscsi_transport = {
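
The qedi hunks above preset cmd->task_id to U16_MAX before submission so that qedi_cleanup_task() can distinguish an allocated task index from one that never made it, and release it exactly once. A minimal sketch of that sentinel pattern, where alloc_idx()/free_idx() are hypothetical stand-ins for the driver's task index pool:

#include <stdio.h>
#include <stdint.h>

#define IDX_INVALID UINT16_MAX	/* plays the role of U16_MAX */

struct cmd { uint16_t task_id; };

static uint16_t alloc_idx(void) { return 7; }	/* pretend pool */
static void free_idx(uint16_t id) { printf("freed index %u\n", id); }

static void cleanup(struct cmd *c)
{
	if (c->task_id != IDX_INVALID)
		free_idx(c->task_id);
	c->task_id = IDX_INVALID;	/* idempotent on repeat calls */
}

int main(void)
{
	struct cmd c = { .task_id = IDX_INVALID };

	cleanup(&c);		/* never submitted: nothing to free */
	c.task_id = alloc_idx();
	cleanup(&c);		/* frees index 7 exactly once */
	cleanup(&c);		/* safe to call again */
	return 0;
}
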
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index 69c5b5ee2169b..b33eff9ea80ba 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -642,7 +642,7 @@ static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
+ 		goto exit_setup_shost;
+ 	}
+ 
+-	shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
++	shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA - 1;
+ 	shost->max_channel = 0;
+ 	shost->max_lun = ~0;
+ 	shost->max_cmd_len = 16;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index e172c660dcd53..1b02556f9ec00 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -2081,9 +2081,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
+  *	@sshdr: place to put sense data (or NULL if no sense to be collected).
+  *		must be SCSI_SENSE_BUFFERSIZE big.
+  *
+- *	Returns zero if unsuccessful, or the header offset (either 4
+- *	or 8 depending on whether a six or ten byte command was
+- *	issued) if successful.
++ *	Returns zero if successful, or a negative error number on failure
+  */
+ int
+ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
+@@ -2130,6 +2128,8 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
+ 
+ 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
+ 				  sshdr, timeout, retries, NULL);
++	if (result < 0)
++		return result;
+ 
+ 	/* This code looks awful: what it's doing is making sure an
+ 	 * ILLEGAL REQUEST sense return identifies the actual command
+@@ -2174,13 +2174,15 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
+ 			data->block_descriptor_length = buffer[3];
+ 		}
+ 		data->header_length = header_length;
++		result = 0;
+ 	} else if ((status_byte(result) == CHECK_CONDITION) &&
+ 		   scsi_sense_valid(sshdr) &&
+ 		   sshdr->sense_key == UNIT_ATTENTION && retry_count) {
+ 		retry_count--;
+ 		goto retry;
+ 	}
+-
++	if (result > 0)
++		result = -EIO;
+ 	return result;
+ }
+ EXPORT_SYMBOL(scsi_mode_sense);
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 6ce1cc992d1d0..b07105ae7c917 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -2459,6 +2459,18 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+ 
++void iscsi_put_conn(struct iscsi_cls_conn *conn)
++{
++	put_device(&conn->dev);
++}
++EXPORT_SYMBOL_GPL(iscsi_put_conn);
++
++void iscsi_get_conn(struct iscsi_cls_conn *conn)
++{
++	get_device(&conn->dev);
++}
++EXPORT_SYMBOL_GPL(iscsi_get_conn);
++
+ /*
+  * iscsi interface functions
+  */
+diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
+index c9abed8429c9a..4a96fb05731d2 100644
+--- a/drivers/scsi/scsi_transport_sas.c
++++ b/drivers/scsi/scsi_transport_sas.c
+@@ -1229,16 +1229,15 @@ int sas_read_port_mode_page(struct scsi_device *sdev)
+ 	char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata;
+ 	struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+ 	struct scsi_mode_data mode_data;
+-	int res, error;
++	int error;
+ 
+ 	if (!buffer)
+ 		return -ENOMEM;
+ 
+-	res = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
+-			      &mode_data, NULL);
++	error = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
++				&mode_data, NULL);
+ 
+-	error = -EINVAL;
+-	if (!scsi_status_is_good(res))
++	if (error)
+ 		goto out;
+ 
+ 	msdata = buffer +  mode_data.header_length +
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index a0356f3707b86..3431ac12b7308 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2683,18 +2683,18 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
+ 		 * 5: Illegal Request, Sense Code 24: Invalid field in
+ 		 * CDB.
+ 		 */
+-		if (!scsi_status_is_good(res))
++		if (res < 0)
+ 			res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
+ 
+ 		/*
+ 		 * Third attempt: ask 255 bytes, as we did earlier.
+ 		 */
+-		if (!scsi_status_is_good(res))
++		if (res < 0)
+ 			res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
+ 					       &data, NULL);
+ 	}
+ 
+-	if (!scsi_status_is_good(res)) {
++	if (res < 0) {
+ 		sd_first_printk(KERN_WARNING, sdkp,
+ 			  "Test WP failed, assume Write Enabled\n");
+ 	} else {
+@@ -2755,7 +2755,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
+ 	res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
+ 			&data, &sshdr);
+ 
+-	if (!scsi_status_is_good(res))
++	if (res < 0)
+ 		goto bad_sense;
+ 
+ 	if (!data.header_length) {
+@@ -2787,7 +2787,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
+ 		res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
+ 				&data, &sshdr);
+ 
+-	if (scsi_status_is_good(res)) {
++	if (!res) {
+ 		int offset = data.header_length + data.block_descriptor_length;
+ 
+ 		while (offset < len) {
+@@ -2905,7 +2905,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
+ 	res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
+ 			      sdkp->max_retries, &data, &sshdr);
+ 
+-	if (!scsi_status_is_good(res) || !data.header_length ||
++	if (res < 0 || !data.header_length ||
+ 	    data.length < 6) {
+ 		sd_first_printk(KERN_WARNING, sdkp,
+ 			  "getting Control mode page failed, assume no ATO\n");
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 7815ed642d434..1a94c7b1de2df 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -913,7 +913,7 @@ static void get_capabilities(struct scsi_cd *cd)
+ 	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
+ 			     SR_TIMEOUT, 3, &data, NULL);
+ 
+-	if (!scsi_status_is_good(rc) || data.length > ms_len ||
++	if (rc < 0 || data.length > ms_len ||
+ 	    data.header_length + data.block_descriptor_length > data.length) {
+ 		/* failed, drive doesn't have capabilities mode page */
+ 		cd->cdi.speed = 1;
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 6bc5453cea8a7..181d7b372fd0d 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1005,17 +1005,40 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ 	struct storvsc_scan_work *wrk;
+ 	void (*process_err_fn)(struct work_struct *work);
+ 	struct hv_host_device *host_dev = shost_priv(host);
+-	bool do_work = false;
+ 
+-	switch (SRB_STATUS(vm_srb->srb_status)) {
+-	case SRB_STATUS_ERROR:
++	/*
++	 * In some situations, Hyper-V sets multiple bits in the
++	 * srb_status, such as ABORTED and ERROR. So process them
++	 * individually, with the most specific bits first.
++	 */
++
++	if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) {
++		set_host_byte(scmnd, DID_NO_CONNECT);
++		process_err_fn = storvsc_remove_lun;
++		goto do_work;
++	}
++
++	if (vm_srb->srb_status & SRB_STATUS_ABORTED) {
++		if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
++		    /* Capacity data has changed */
++		    (asc == 0x2a) && (ascq == 0x9)) {
++			process_err_fn = storvsc_device_scan;
++			/*
++			 * Retry the I/O that triggered this.
++			 */
++			set_host_byte(scmnd, DID_REQUEUE);
++			goto do_work;
++		}
++	}
++
++	if (vm_srb->srb_status & SRB_STATUS_ERROR) {
+ 		/*
+ 		 * Let upper layer deal with error when
+ 		 * sense message is present.
+ 		 */
+-
+ 		if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
+-			break;
++			return;
++
+ 		/*
+ 		 * If there is an error; offline the device since all
+ 		 * error recovery strategies would have already been
+@@ -1028,37 +1051,19 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ 			set_host_byte(scmnd, DID_PASSTHROUGH);
+ 			break;
+ 		/*
+-		 * On Some Windows hosts TEST_UNIT_READY command can return
+-		 * SRB_STATUS_ERROR, let the upper level code deal with it
+-		 * based on the sense information.
++		 * On some Hyper-V hosts TEST_UNIT_READY command can
++		 * return SRB_STATUS_ERROR. Let the upper level code
++		 * deal with it based on the sense information.
+ 		 */
+ 		case TEST_UNIT_READY:
+ 			break;
+ 		default:
+ 			set_host_byte(scmnd, DID_ERROR);
+ 		}
+-		break;
+-	case SRB_STATUS_INVALID_LUN:
+-		set_host_byte(scmnd, DID_NO_CONNECT);
+-		do_work = true;
+-		process_err_fn = storvsc_remove_lun;
+-		break;
+-	case SRB_STATUS_ABORTED:
+-		if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
+-		    (asc == 0x2a) && (ascq == 0x9)) {
+-			do_work = true;
+-			process_err_fn = storvsc_device_scan;
+-			/*
+-			 * Retry the I/O that triggered this.
+-			 */
+-			set_host_byte(scmnd, DID_REQUEUE);
+-		}
+-		break;
+ 	}
++	return;
+ 
+-	if (!do_work)
+-		return;
+-
++do_work:
+ 	/*
+ 	 * We need to schedule work to process this error; schedule it.
+ 	 */
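
The storvsc rework above stops switching on a single decoded SRB status and instead tests each status bit individually, most specific first, because the host can set several bits at once. A compact sketch of that dispatch style; the bit values and actions below are hypothetical, not the driver's.

#include <stdio.h>

#define ST_ERROR	0x04
#define ST_ABORTED	0x20
#define ST_INVALID_LUN	0x80

static void handle(unsigned int status)
{
	if (status & ST_INVALID_LUN) {	/* most specific condition first */
		printf("remove LUN\n");
		return;
	}
	if (status & ST_ABORTED) {
		printf("requeue and rescan\n");
		return;
	}
	if (status & ST_ERROR)
		printf("offline device\n");
}

int main(void)
{
	handle(ST_ABORTED | ST_ERROR);	/* ABORTED wins: requeue */
	return 0;
}
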
+diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
+index 0af00efa0ef83..b762bc40f56bd 100644
+--- a/drivers/soc/mediatek/mtk-pm-domains.c
++++ b/drivers/soc/mediatek/mtk-pm-domains.c
+@@ -211,7 +211,7 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = clk_bulk_enable(pd->num_clks, pd->clks);
++	ret = clk_bulk_prepare_enable(pd->num_clks, pd->clks);
+ 	if (ret)
+ 		goto err_reg;
+ 
+@@ -229,7 +229,7 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
+ 	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+ 	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+ 
+-	ret = clk_bulk_enable(pd->num_subsys_clks, pd->subsys_clks);
++	ret = clk_bulk_prepare_enable(pd->num_subsys_clks, pd->subsys_clks);
+ 	if (ret)
+ 		goto err_pwr_ack;
+ 
+@@ -246,9 +246,9 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
+ err_disable_sram:
+ 	scpsys_sram_disable(pd);
+ err_disable_subsys_clks:
+-	clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
++	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+ err_pwr_ack:
+-	clk_bulk_disable(pd->num_clks, pd->clks);
++	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
+ err_reg:
+ 	scpsys_regulator_disable(pd->supply);
+ 	return ret;
+@@ -269,7 +269,7 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
++	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+ 
+ 	/* subsys power off */
+ 	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+@@ -284,7 +284,7 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	clk_bulk_disable(pd->num_clks, pd->clks);
++	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
+ 
+ 	scpsys_regulator_disable(pd->supply);
+ 
+@@ -297,6 +297,7 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
+ 	const struct scpsys_domain_data *domain_data;
+ 	struct scpsys_domain *pd;
+ 	struct device_node *root_node = scpsys->dev->of_node;
++	struct device_node *smi_node;
+ 	struct property *prop;
+ 	const char *clk_name;
+ 	int i, ret, num_clks;
+@@ -352,9 +353,13 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
+ 	if (IS_ERR(pd->infracfg))
+ 		return ERR_CAST(pd->infracfg);
+ 
+-	pd->smi = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,smi");
+-	if (IS_ERR(pd->smi))
+-		return ERR_CAST(pd->smi);
++	smi_node = of_parse_phandle(node, "mediatek,smi", 0);
++	if (smi_node) {
++		pd->smi = device_node_to_regmap(smi_node);
++		of_node_put(smi_node);
++		if (IS_ERR(pd->smi))
++			return ERR_CAST(pd->smi);
++	}
+ 
+ 	num_clks = of_clk_get_parent_count(node);
+ 	if (num_clks > 0) {
+@@ -405,14 +410,6 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
+ 		pd->subsys_clks[i].clk = clk;
+ 	}
+ 
+-	ret = clk_bulk_prepare(pd->num_clks, pd->clks);
+-	if (ret)
+-		goto err_put_subsys_clocks;
+-
+-	ret = clk_bulk_prepare(pd->num_subsys_clks, pd->subsys_clks);
+-	if (ret)
+-		goto err_unprepare_clocks;
+-
+ 	/*
+ 	 * Initially turn on all domains to make the domains usable
+ 	 * with !CONFIG_PM and to get the hardware in sync with the
+@@ -427,7 +424,7 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
+ 		ret = scpsys_power_on(&pd->genpd);
+ 		if (ret < 0) {
+ 			dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
+-			goto err_unprepare_clocks;
++			goto err_put_subsys_clocks;
+ 		}
+ 	}
+ 
+@@ -435,7 +432,7 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
+ 		ret = -EINVAL;
+ 		dev_err(scpsys->dev,
+ 			"power domain with id %d already exists, check your device-tree\n", id);
+-		goto err_unprepare_subsys_clocks;
++		goto err_put_subsys_clocks;
+ 	}
+ 
+ 	if (!pd->data->name)
+@@ -455,10 +452,6 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
+ 
+ 	return scpsys->pd_data.domains[id];
+ 
+-err_unprepare_subsys_clocks:
+-	clk_bulk_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+-err_unprepare_clocks:
+-	clk_bulk_unprepare(pd->num_clks, pd->clks);
+ err_put_subsys_clocks:
+ 	clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+ err_put_clocks:
+@@ -537,10 +530,7 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
+ 			"failed to remove domain '%s' : %d - state may be inconsistent\n",
+ 			pd->genpd.name, ret);
+ 
+-	clk_bulk_unprepare(pd->num_clks, pd->clks);
+ 	clk_bulk_put(pd->num_clks, pd->clks);
+-
+-	clk_bulk_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+ 	clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+ }
+ 
+diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
+index 16e8f66a31714..a8d232245227b 100644
+--- a/drivers/staging/rtl8723bs/hal/odm.h
++++ b/drivers/staging/rtl8723bs/hal/odm.h
+@@ -197,10 +197,7 @@ typedef struct _ODM_RATE_ADAPTIVE {
+ 
+ #define AVG_THERMAL_NUM		8
+ #define IQK_Matrix_REG_NUM	8
+-#define IQK_Matrix_Settings_NUM	(14 + 24 + 21) /*   Channels_2_4G_NUM
+-						* + Channels_5G_20M_NUM
+-						* + Channels_5G
+-						*/
++#define IQK_Matrix_Settings_NUM	14 /* Channels_2_4G_NUM */
+ 
+ #define		DM_Type_ByFW			0
+ #define		DM_Type_ByDriver		1
+diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
+index 75c69fe6e9553..c02922d2d1d5d 100644
+--- a/drivers/thermal/rcar_gen3_thermal.c
++++ b/drivers/thermal/rcar_gen3_thermal.c
+@@ -142,7 +142,7 @@ static void rcar_gen3_thermal_calc_coefs(struct rcar_gen3_thermal_tsc *tsc,
+ 	 * Division is not scaled in BSP and if scaled it might overflow
+ 	 * the dividend (4095 * 4095 << 14 > INT_MAX) so keep it unscaled
+ 	 */
+-	tsc->tj_t = (FIXPT_INT((ptat[1] - ptat[2]) * 157)
++	tsc->tj_t = (FIXPT_INT((ptat[1] - ptat[2]) * (ths_tj_1 - TJ_3))
+ 		     / (ptat[0] - ptat[2])) + FIXPT_INT(TJ_3);
+ 
+ 	tsc->coef.a1 = FIXPT_DIV(FIXPT_INT(thcode[1] - thcode[2]),
+diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c
+index 3682edb2f4669..fe06cccf14b38 100644
+--- a/drivers/thermal/sprd_thermal.c
++++ b/drivers/thermal/sprd_thermal.c
+@@ -532,6 +532,7 @@ static const struct of_device_id sprd_thermal_of_match[] = {
+ 	{ .compatible = "sprd,ums512-thermal", .data = &ums512_data },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, sprd_thermal_of_match);
+ 
+ static const struct dev_pm_ops sprd_thermal_pm_ops = {
+ 	SET_SYSTEM_SLEEP_PM_OPS(sprd_thm_suspend, sprd_thm_resume)
+diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
+index 1cc749903d127..ff6a95a352260 100644
+--- a/drivers/tty/serial/8250/serial_cs.c
++++ b/drivers/tty/serial/8250/serial_cs.c
+@@ -306,6 +306,7 @@ static int serial_resume(struct pcmcia_device *link)
+ static int serial_probe(struct pcmcia_device *link)
+ {
+ 	struct serial_info *info;
++	int ret;
+ 
+ 	dev_dbg(&link->dev, "serial_attach()\n");
+ 
+@@ -320,7 +321,15 @@ static int serial_probe(struct pcmcia_device *link)
+ 	if (do_sound)
+ 		link->config_flags |= CONF_ENABLE_SPKR;
+ 
+-	return serial_config(link);
++	ret = serial_config(link);
++	if (ret)
++		goto free_info;
++
++	return 0;
++
++free_info:
++	kfree(info);
++	return ret;
+ }
+ 
+ static void serial_detach(struct pcmcia_device *link)
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 9c78e43e669d7..0d7ea144a4a6d 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1571,6 +1571,9 @@ static void lpuart_tx_dma_startup(struct lpuart_port *sport)
+ 	u32 uartbaud;
+ 	int ret;
+ 
++	if (uart_console(&sport->port))
++		goto err;
++
+ 	if (!sport->dma_tx_chan)
+ 		goto err;
+ 
+@@ -1600,6 +1603,9 @@ static void lpuart_rx_dma_startup(struct lpuart_port *sport)
+ 	int ret;
+ 	unsigned char cr3;
+ 
++	if (uart_console(&sport->port))
++		goto err;
++
+ 	if (!sport->dma_rx_chan)
+ 		goto err;
+ 
+@@ -2404,6 +2410,9 @@ lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
+ 
+ 	bd = lpuart32_read(&sport->port, UARTBAUD);
+ 	bd &= UARTBAUD_SBR_MASK;
++	if (!bd)
++		return;
++
+ 	sbr = bd;
+ 	uartclk = lpuart_get_baud_clk_rate(sport);
+ 	/*
+diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
+index f42ccc40ffa64..a5f15f22d9efe 100644
+--- a/drivers/tty/serial/uartlite.c
++++ b/drivers/tty/serial/uartlite.c
+@@ -505,21 +505,23 @@ static void ulite_console_write(struct console *co, const char *s,
+ 
+ static int ulite_console_setup(struct console *co, char *options)
+ {
+-	struct uart_port *port;
++	struct uart_port *port = NULL;
+ 	int baud = 9600;
+ 	int bits = 8;
+ 	int parity = 'n';
+ 	int flow = 'n';
+ 
+-
+-	port = console_port;
++	if (co->index >= 0 && co->index < ULITE_NR_UARTS)
++		port = ulite_ports + co->index;
+ 
+ 	/* Has the device been initialized yet? */
+-	if (!port->mapbase) {
++	if (!port || !port->mapbase) {
+ 		pr_debug("console on ttyUL%i not present\n", co->index);
+ 		return -ENODEV;
+ 	}
+ 
++	console_port = port;
++
+ 	/* not initialized yet? */
+ 	if (!port->membase) {
+ 		if (ulite_request_port(port))
+@@ -655,17 +657,6 @@ static int ulite_assign(struct device *dev, int id, u32 base, int irq,
+ 
+ 	dev_set_drvdata(dev, port);
+ 
+-#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+-	/*
+-	 * If console hasn't been found yet try to assign this port
+-	 * because it is required to be assigned for console setup function.
+-	 * If register_console() don't assign value, then console_port pointer
+-	 * is cleanup.
+-	 */
+-	if (ulite_uart_driver.cons->index == -1)
+-		console_port = port;
+-#endif
+-
+ 	/* Register the port */
+ 	rc = uart_add_one_port(&ulite_uart_driver, port);
+ 	if (rc) {
+@@ -675,12 +666,6 @@ static int ulite_assign(struct device *dev, int id, u32 base, int irq,
+ 		return rc;
+ 	}
+ 
+-#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+-	/* This is not port which is used for console that's why clean it up */
+-	if (ulite_uart_driver.cons->index == -1)
+-		console_port = NULL;
+-#endif
+-
+ 	return 0;
+ }
+ 
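
The uartlite change above makes ulite_console_setup() derive the console port from the static port array via co->index and validate it, instead of depending on a console_port pointer pre-assigned at probe time. A minimal sketch of that lookup-and-validate pattern, with the array size and struct fields simplified:

#include <stdio.h>

#define NR_UARTS 16	/* ULITE_NR_UARTS in the driver */

struct port { unsigned long mapbase; };

static struct port ports[NR_UARTS];

static int console_setup(int index)
{
	struct port *port = NULL;

	if (index >= 0 && index < NR_UARTS)
		port = &ports[index];

	/* Has the device been initialized yet? */
	if (!port || !port->mapbase) {
		printf("console on ttyUL%d not present\n", index);
		return -1;	/* -ENODEV in the driver */
	}
	return 0;
}

int main(void)
{
	ports[0].mapbase = 0xfffff000;	/* pretend port 0 was probed */
	console_setup(0);		/* ok */
	console_setup(3);		/* not present */
	return 0;
}
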
+diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
+index 6c4e3a19f42cb..c9545a4eff664 100644
+--- a/drivers/usb/common/usb-conn-gpio.c
++++ b/drivers/usb/common/usb-conn-gpio.c
+@@ -149,14 +149,32 @@ static int usb_charger_get_property(struct power_supply *psy,
+ 	return 0;
+ }
+ 
+-static int usb_conn_probe(struct platform_device *pdev)
++static int usb_conn_psy_register(struct usb_conn_info *info)
+ {
+-	struct device *dev = &pdev->dev;
+-	struct power_supply_desc *desc;
+-	struct usb_conn_info *info;
++	struct device *dev = info->dev;
++	struct power_supply_desc *desc = &info->desc;
+ 	struct power_supply_config cfg = {
+ 		.of_node = dev->of_node,
+ 	};
++
++	desc->name = "usb-charger";
++	desc->properties = usb_charger_properties;
++	desc->num_properties = ARRAY_SIZE(usb_charger_properties);
++	desc->get_property = usb_charger_get_property;
++	desc->type = POWER_SUPPLY_TYPE_USB;
++	cfg.drv_data = info;
++
++	info->charger = devm_power_supply_register(dev, desc, &cfg);
++	if (IS_ERR(info->charger))
++		dev_err(dev, "Unable to register charger\n");
++
++	return PTR_ERR_OR_ZERO(info->charger);
++}
++
++static int usb_conn_probe(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct usb_conn_info *info;
+ 	bool need_vbus = true;
+ 	int ret = 0;
+ 
+@@ -218,6 +236,10 @@ static int usb_conn_probe(struct platform_device *pdev)
+ 		return PTR_ERR(info->role_sw);
+ 	}
+ 
++	ret = usb_conn_psy_register(info);
++	if (ret)
++		goto put_role_sw;
++
+ 	if (info->id_gpiod) {
+ 		info->id_irq = gpiod_to_irq(info->id_gpiod);
+ 		if (info->id_irq < 0) {
+@@ -252,20 +274,6 @@ static int usb_conn_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	desc = &info->desc;
+-	desc->name = "usb-charger";
+-	desc->properties = usb_charger_properties;
+-	desc->num_properties = ARRAY_SIZE(usb_charger_properties);
+-	desc->get_property = usb_charger_get_property;
+-	desc->type = POWER_SUPPLY_TYPE_USB;
+-	cfg.drv_data = info;
+-
+-	info->charger = devm_power_supply_register(dev, desc, &cfg);
+-	if (IS_ERR(info->charger)) {
+-		dev_err(dev, "Unable to register charger\n");
+-		return PTR_ERR(info->charger);
+-	}
+-
+ 	platform_set_drvdata(pdev, info);
+ 
+ 	/* Perform initial detection */
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 19789e94bbd00..45ec5ac9876e4 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -36,7 +36,7 @@
+ #define PCI_DEVICE_ID_INTEL_CNPH		0xa36e
+ #define PCI_DEVICE_ID_INTEL_CNPV		0xa3b0
+ #define PCI_DEVICE_ID_INTEL_ICLLP		0x34ee
+-#define PCI_DEVICE_ID_INTEL_EHLLP		0x4b7e
++#define PCI_DEVICE_ID_INTEL_EHL			0x4b7e
+ #define PCI_DEVICE_ID_INTEL_TGPLP		0xa0ee
+ #define PCI_DEVICE_ID_INTEL_TGPH		0x43ee
+ #define PCI_DEVICE_ID_INTEL_JSP			0x4dee
+@@ -167,7 +167,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ 		if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
+ 		    pdev->device == PCI_DEVICE_ID_INTEL_BXT_M ||
+-		    pdev->device == PCI_DEVICE_ID_INTEL_EHLLP) {
++		    pdev->device == PCI_DEVICE_ID_INTEL_EHL) {
+ 			guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
+ 			dwc->has_dsm_for_pm = true;
+ 		}
+@@ -375,8 +375,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
+-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_EHLLP),
+-	  (kernel_ulong_t) &dwc3_pci_intel_swnode },
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_EHL),
++	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index e556993081170..a82b3de1a54be 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -88,7 +88,7 @@ static struct usb_interface_descriptor hidg_interface_desc = {
+ static struct hid_descriptor hidg_desc = {
+ 	.bLength			= sizeof hidg_desc,
+ 	.bDescriptorType		= HID_DT_HID,
+-	.bcdHID				= 0x0101,
++	.bcdHID				= cpu_to_le16(0x0101),
+ 	.bCountryCode			= 0x00,
+ 	.bNumDescriptors		= 0x1,
+ 	/*.desc[0].bDescriptorType	= DYNAMIC */
+diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
+index c4eda7fe7ab45..5b27d289443fe 100644
+--- a/drivers/usb/gadget/legacy/hid.c
++++ b/drivers/usb/gadget/legacy/hid.c
+@@ -171,8 +171,10 @@ static int hid_bind(struct usb_composite_dev *cdev)
+ 		struct usb_descriptor_header *usb_desc;
+ 
+ 		usb_desc = usb_otg_descriptor_alloc(gadget);
+-		if (!usb_desc)
++		if (!usb_desc) {
++			status = -ENOMEM;
+ 			goto put;
++		}
+ 		usb_otg_descriptor_init(gadget, usb_desc);
+ 		otg_desc[0] = usb_desc;
+ 		otg_desc[1] = NULL;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 0d2f1c37ab745..73cdaa3f30675 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1362,12 +1362,17 @@ static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
+ 				 urb->transfer_buffer_length,
+ 				 dir);
+ 
+-	if (usb_urb_dir_in(urb))
++	if (usb_urb_dir_in(urb)) {
+ 		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
+ 					   urb->transfer_buffer,
+ 					   buf_len,
+ 					   0);
+-
++		if (len != buf_len) {
++			xhci_dbg(hcd_to_xhci(hcd),
++				 "Copy from tmp buf to urb sg list failed\n");
++			urb->actual_length = len;
++		}
++	}
+ 	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
+ 	kfree(urb->transfer_buffer);
+ 	urb->transfer_buffer = NULL;
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index 800cfd1967ada..cfa56a58b271b 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -219,11 +219,6 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m
+ 	mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
+ }
+ 
+-static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
+-{
+-	return &mvdev->mdev->pdev->dev;
+-}
+-
+ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
+ 			 struct vhost_iotlb *iotlb)
+ {
+@@ -239,7 +234,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
+ 	u64 pa;
+ 	u64 paend;
+ 	struct scatterlist *sg;
+-	struct device *dma = get_dma_device(mvdev);
++	struct device *dma = mvdev->vdev.dma_dev;
+ 
+ 	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+ 	     map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
+@@ -298,7 +293,7 @@ err_map:
+ 
+ static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
+ {
+-	struct device *dma = get_dma_device(mvdev);
++	struct device *dma = mvdev->vdev.dma_dev;
+ 
+ 	destroy_direct_mr(mvdev, mr);
+ 	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index a0e86c5d7cd7a..68fd117fd7a50 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -611,8 +611,8 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
+ 	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
+ }
+ 
+-static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
+-		     struct mlx5_vdpa_umem **umemp)
++static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
++			  struct mlx5_vdpa_umem **umemp)
+ {
+ 	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ 	int p_a;
+@@ -635,7 +635,7 @@ static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
+ 		*umemp = &mvq->umem3;
+ 		break;
+ 	}
+-	return p_a * mvq->num_ent + p_b;
++	(*umemp)->size = p_a * mvq->num_ent + p_b;
+ }
+ 
+ static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
+@@ -651,15 +651,10 @@ static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
+ 	void *in;
+ 	int err;
+ 	__be64 *pas;
+-	int size;
+ 	struct mlx5_vdpa_umem *umem;
+ 
+-	size = umem_size(ndev, mvq, num, &umem);
+-	if (size < 0)
+-		return size;
+-
+-	umem->size = size;
+-	err = umem_frag_buf_alloc(ndev, umem, size);
++	set_umem_size(ndev, mvq, num, &umem);
++	err = umem_frag_buf_alloc(ndev, umem, umem->size);
+ 	if (err)
+ 		return err;
+ 
+@@ -829,9 +824,9 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
+ 	MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
+ 	MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
+ 	MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
+-	MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem1.size);
++	MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
+ 	MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
+-	MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem1.size);
++	MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
+ 	MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
+ 	if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
+ 		MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
+@@ -1772,6 +1767,14 @@ out:
+ 	mutex_unlock(&ndev->reslock);
+ }
+ 
++static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
++{
++	int i;
++
++	for (i = 0; i < ndev->mvdev.max_vqs; i++)
++		ndev->vqs[i].ready = false;
++}
++
+ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
+ {
+ 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+@@ -1782,6 +1785,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
+ 	if (!status) {
+ 		mlx5_vdpa_info(mvdev, "performing device reset\n");
+ 		teardown_driver(ndev);
++		clear_vqs_ready(ndev);
+ 		mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ 		ndev->mvdev.status = 0;
+ 		ndev->mvdev.mlx_features = 0;
+@@ -2022,7 +2026,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
+ 			goto err_mtu;
+ 	}
+ 
+-	mvdev->vdev.dma_dev = mdev->device;
++	mvdev->vdev.dma_dev = &mdev->pdev->dev;
+ 	err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
+ 	if (err)
+ 		goto err_mpfs;
+diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
+index 662029d6a3dc9..419b0334cf087 100644
+--- a/drivers/video/backlight/lm3630a_bl.c
++++ b/drivers/video/backlight/lm3630a_bl.c
+@@ -190,7 +190,7 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
+ 	if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0) {
+ 		lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+ 				 bl->props.max_brightness);
+-		return bl->props.brightness;
++		return 0;
+ 	}
+ 
+ 	/* disable sleep */
+@@ -210,8 +210,8 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
+ 	return 0;
+ 
+ out_i2c_err:
+-	dev_err(pchip->dev, "i2c failed to access\n");
+-	return bl->props.brightness;
++	dev_err(pchip->dev, "i2c failed to access (%pe)\n", ERR_PTR(ret));
++	return ret;
+ }
+ 
+ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
+@@ -267,7 +267,7 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
+ 	if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0) {
+ 		lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+ 				 bl->props.max_brightness);
+-		return bl->props.brightness;
++		return 0;
+ 	}
+ 
+ 	/* disable sleep */
+@@ -287,8 +287,8 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
+ 	return 0;
+ 
+ out_i2c_err:
+-	dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
+-	return bl->props.brightness;
++	dev_err(pchip->dev, "i2c failed to access (%pe)\n", ERR_PTR(ret));
++	return ret;
+ }
+ 
+ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 06f5805de2dee..199c5e85f56e3 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -970,13 +970,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
+ 		fb_var_to_videomode(&mode2, &info->var);
+ 		/* make sure we don't delete the videomode of current var */
+ 		ret = fb_mode_is_equal(&mode1, &mode2);
+-
+-		if (!ret)
+-			fbcon_mode_deleted(info, &mode1);
+-
+-		if (!ret)
+-			fb_delete_videomode(&mode1, &info->modelist);
+-
++		if (!ret) {
++			ret = fbcon_mode_deleted(info, &mode1);
++			if (!ret)
++				fb_delete_videomode(&mode1, &info->modelist);
++		}
+ 
+ 		return ret ? -EINVAL : 0;
+ 	}
+diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
+index 10ec60d81e847..3bf08b5bb3595 100644
+--- a/drivers/virtio/virtio_mem.c
++++ b/drivers/virtio/virtio_mem.c
+@@ -2420,6 +2420,10 @@ static int virtio_mem_init(struct virtio_mem *vm)
+ 		dev_warn(&vm->vdev->dev,
+ 			 "Some device memory is not addressable/pluggable. This can make some memory unusable.\n");
+ 
++	/* Prepare the offline threshold - make sure we can add two blocks. */
++	vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
++				      VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
++
+ 	/*
+ 	 * We want subblocks to span at least MAX_ORDER_NR_PAGES and
+ 	 * pageblock_nr_pages pages. This:
+@@ -2466,14 +2470,11 @@ static int virtio_mem_init(struct virtio_mem *vm)
+ 		       vm->bbm.bb_size - 1;
+ 		vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
+ 		vm->bbm.next_bb_id = vm->bbm.first_bb_id;
+-	}
+ 
+-	/* Prepare the offline threshold - make sure we can add two blocks. */
+-	vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
+-				      VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
+-	/* In BBM, we also want at least two big blocks. */
+-	vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
+-				      vm->offline_threshold);
++		/* Make sure we can add two big blocks. */
++		vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
++					      vm->offline_threshold);
++	}
+ 
+ 	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
+ 	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
+diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
+index 5cfb0ae23e916..5698566b0ee01 100644
+--- a/drivers/w1/slaves/w1_ds2438.c
++++ b/drivers/w1/slaves/w1_ds2438.c
+@@ -62,13 +62,13 @@ static int w1_ds2438_get_page(struct w1_slave *sl, int pageno, u8 *buf)
+ 		if (w1_reset_select_slave(sl))
+ 			continue;
+ 		w1_buf[0] = W1_DS2438_RECALL_MEMORY;
+-		w1_buf[1] = 0x00;
++		w1_buf[1] = (u8)pageno;
+ 		w1_write_block(sl->master, w1_buf, 2);
+ 
+ 		if (w1_reset_select_slave(sl))
+ 			continue;
+ 		w1_buf[0] = W1_DS2438_READ_SCRATCH;
+-		w1_buf[1] = 0x00;
++		w1_buf[1] = (u8)pageno;
+ 		w1_write_block(sl->master, w1_buf, 2);
+ 
+ 		count = w1_read_block(sl->master, buf, DS2438_PAGE_SIZE + 1);
+diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
+index 7e00960651fa2..507fd815d7679 100644
+--- a/drivers/watchdog/aspeed_wdt.c
++++ b/drivers/watchdog/aspeed_wdt.c
+@@ -147,7 +147,7 @@ static int aspeed_wdt_set_timeout(struct watchdog_device *wdd,
+ 
+ 	wdd->timeout = timeout;
+ 
+-	actual = min(timeout, wdd->max_hw_heartbeat_ms * 1000);
++	actual = min(timeout, wdd->max_hw_heartbeat_ms / 1000);
+ 
+ 	writel(actual * WDT_RATE_1MHZ, wdt->base + WDT_RELOAD_VALUE);
+ 	writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
+diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
+index bf31d7b67a697..3f1324871cfd6 100644
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -71,6 +71,8 @@
+ #define TCOBASE(p)	((p)->tco_res->start)
+ /* SMI Control and Enable Register */
+ #define SMI_EN(p)	((p)->smi_res->start)
++#define TCO_EN		(1 << 13)
++#define GBL_SMI_EN	(1 << 0)
+ 
+ #define TCO_RLD(p)	(TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
+ #define TCOv1_TMR(p)	(TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
+@@ -355,8 +357,12 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
+ 
+ 	tmrval = seconds_to_ticks(p, t);
+ 
+-	/* For TCO v1 the timer counts down twice before rebooting */
+-	if (p->iTCO_version == 1)
++	/*
++	 * If TCO SMIs are off, the timer counts down twice before rebooting.
++	 * Otherwise, the BIOS generally reboots when the SMI triggers.
++	 */
++	if (p->smi_res &&
++	    (SMI_EN(p) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
+ 		tmrval /= 2;
+ 
+ 	/* from the specs: */
+@@ -521,7 +527,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
+ 		 * Disables TCO logic generating an SMI#
+ 		 */
+ 		val32 = inl(SMI_EN(p));
+-		val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
++		val32 &= ~TCO_EN;	/* Turn off SMI clearing watchdog */
+ 		outl(val32, SMI_EN(p));
+ 	}
+ 
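
The iTCO hunk above halves the programmed tick value unless both TCO_EN and GBL_SMI_EN are set, because without the SMI path the hardware only reboots on its second countdown. A small sketch of that compensation, with the SMI_EN register read stubbed out as a variable:

#include <stdio.h>

#define TCO_EN		(1 << 13)
#define GBL_SMI_EN	(1 << 0)

static unsigned int smi_en = TCO_EN;	/* pretend: TCO SMI on, global SMI off */

static unsigned int ticks_for(unsigned int seconds, unsigned int ticks_per_sec)
{
	unsigned int tmrval = seconds * ticks_per_sec;

	/* No reboot-on-SMI: the timer counts down twice, so halve it. */
	if ((smi_en & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
		tmrval /= 2;
	return tmrval;
}

int main(void)
{
	printf("%u ticks\n", ticks_for(30, 10));	/* prints 150: halved */
	return 0;
}
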
+diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
+index e9ee22a7cb456..8ac021748d160 100644
+--- a/drivers/watchdog/imx_sc_wdt.c
++++ b/drivers/watchdog/imx_sc_wdt.c
+@@ -183,16 +183,12 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
+ 	watchdog_stop_on_reboot(wdog);
+ 	watchdog_stop_on_unregister(wdog);
+ 
+-	ret = devm_watchdog_register_device(dev, wdog);
+-	if (ret)
+-		return ret;
+-
+ 	ret = imx_scu_irq_group_enable(SC_IRQ_GROUP_WDOG,
+ 				       SC_IRQ_WDOG,
+ 				       true);
+ 	if (ret) {
+ 		dev_warn(dev, "Enable irq failed, pretimeout NOT supported\n");
+-		return 0;
++		goto register_device;
+ 	}
+ 
+ 	imx_sc_wdd->wdt_notifier.notifier_call = imx_sc_wdt_notify;
+@@ -203,7 +199,7 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
+ 					 false);
+ 		dev_warn(dev,
+ 			 "Register irq notifier failed, pretimeout NOT supported\n");
+-		return 0;
++		goto register_device;
+ 	}
+ 
+ 	ret = devm_add_action_or_reset(dev, imx_sc_wdt_action,
+@@ -213,7 +209,8 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
+ 	else
+ 		dev_warn(dev, "Add action failed, pretimeout NOT supported\n");
+ 
+-	return 0;
++register_device:
++	return devm_watchdog_register_device(dev, wdog);
+ }
+ 
+ static int __maybe_unused imx_sc_wdt_suspend(struct device *dev)
+diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
+index bdf9564efa29e..395bde79e2920 100644
+--- a/drivers/watchdog/jz4740_wdt.c
++++ b/drivers/watchdog/jz4740_wdt.c
+@@ -176,9 +176,9 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
+ 	watchdog_set_drvdata(jz4740_wdt, drvdata);
+ 
+ 	drvdata->map = device_node_to_regmap(dev->parent->of_node);
+-	if (!drvdata->map) {
++	if (IS_ERR(drvdata->map)) {
+ 		dev_err(dev, "regmap not found\n");
+-		return -EINVAL;
++		return PTR_ERR(drvdata->map);
+ 	}
+ 
+ 	return devm_watchdog_register_device(dev, &drvdata->wdt);
+diff --git a/drivers/watchdog/keembay_wdt.c b/drivers/watchdog/keembay_wdt.c
+index 547d3fea33ff6..dd192b8dff551 100644
+--- a/drivers/watchdog/keembay_wdt.c
++++ b/drivers/watchdog/keembay_wdt.c
+@@ -23,12 +23,14 @@
+ #define TIM_WDOG_EN		0x8
+ #define TIM_SAFE		0xc
+ 
+-#define WDT_ISR_MASK		GENMASK(9, 8)
++#define WDT_TH_INT_MASK		BIT(8)
++#define WDT_TO_INT_MASK		BIT(9)
+ #define WDT_ISR_CLEAR		0x8200ff18
+ #define WDT_UNLOCK		0xf1d0dead
+ #define WDT_LOAD_MAX		U32_MAX
+ #define WDT_LOAD_MIN		1
+ #define WDT_TIMEOUT		5
++#define WDT_PRETIMEOUT		4
+ 
+ static unsigned int timeout = WDT_TIMEOUT;
+ module_param(timeout, int, 0);
+@@ -82,7 +84,6 @@ static int keembay_wdt_start(struct watchdog_device *wdog)
+ {
+ 	struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
+ 
+-	keembay_wdt_set_timeout_reg(wdog);
+ 	keembay_wdt_writel(wdt, TIM_WDOG_EN, 1);
+ 
+ 	return 0;
+@@ -108,6 +109,7 @@ static int keembay_wdt_set_timeout(struct watchdog_device *wdog, u32 t)
+ {
+ 	wdog->timeout = t;
+ 	keembay_wdt_set_timeout_reg(wdog);
++	keembay_wdt_set_pretimeout_reg(wdog);
+ 
+ 	return 0;
+ }
+@@ -139,8 +141,7 @@ static irqreturn_t keembay_wdt_to_isr(int irq, void *dev_id)
+ 	struct keembay_wdt *wdt = dev_id;
+ 	struct arm_smccc_res res;
+ 
+-	keembay_wdt_writel(wdt, TIM_WATCHDOG, 1);
+-	arm_smccc_smc(WDT_ISR_CLEAR, WDT_ISR_MASK, 0, 0, 0, 0, 0, 0, &res);
++	arm_smccc_smc(WDT_ISR_CLEAR, WDT_TO_INT_MASK, 0, 0, 0, 0, 0, 0, &res);
+ 	dev_crit(wdt->wdd.parent, "Intel Keem Bay non-sec wdt timeout.\n");
+ 	emergency_restart();
+ 
+@@ -152,7 +153,9 @@ static irqreturn_t keembay_wdt_th_isr(int irq, void *dev_id)
+ 	struct keembay_wdt *wdt = dev_id;
+ 	struct arm_smccc_res res;
+ 
+-	arm_smccc_smc(WDT_ISR_CLEAR, WDT_ISR_MASK, 0, 0, 0, 0, 0, 0, &res);
++	keembay_wdt_set_pretimeout(&wdt->wdd, 0x0);
++
++	arm_smccc_smc(WDT_ISR_CLEAR, WDT_TH_INT_MASK, 0, 0, 0, 0, 0, 0, &res);
+ 	dev_crit(wdt->wdd.parent, "Intel Keem Bay non-sec wdt pre-timeout.\n");
+ 	watchdog_notify_pretimeout(&wdt->wdd);
+ 
+@@ -224,11 +227,13 @@ static int keembay_wdt_probe(struct platform_device *pdev)
+ 	wdt->wdd.min_timeout	= WDT_LOAD_MIN;
+ 	wdt->wdd.max_timeout	= WDT_LOAD_MAX / wdt->rate;
+ 	wdt->wdd.timeout	= WDT_TIMEOUT;
++	wdt->wdd.pretimeout	= WDT_PRETIMEOUT;
+ 
+ 	watchdog_set_drvdata(&wdt->wdd, wdt);
+ 	watchdog_set_nowayout(&wdt->wdd, nowayout);
+ 	watchdog_init_timeout(&wdt->wdd, timeout, dev);
+ 	keembay_wdt_set_timeout(&wdt->wdd, wdt->wdd.timeout);
++	keembay_wdt_set_pretimeout(&wdt->wdd, wdt->wdd.pretimeout);
+ 
+ 	ret = devm_watchdog_register_device(dev, &wdt->wdd);
+ 	if (ret)
+diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
+index 78cf11c949416..60b6d74f267dd 100644
+--- a/drivers/watchdog/lpc18xx_wdt.c
++++ b/drivers/watchdog/lpc18xx_wdt.c
+@@ -292,7 +292,7 @@ static int lpc18xx_wdt_remove(struct platform_device *pdev)
+ 	struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
+ 
+ 	dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n");
+-	del_timer(&lpc18xx_wdt->timer);
++	del_timer_sync(&lpc18xx_wdt->timer);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
+index a947a63fb44ae..7b974802dfc7c 100644
+--- a/drivers/watchdog/sbc60xxwdt.c
++++ b/drivers/watchdog/sbc60xxwdt.c
+@@ -146,7 +146,7 @@ static void wdt_startup(void)
+ static void wdt_turnoff(void)
+ {
+ 	/* Stop the timer */
+-	del_timer(&timer);
++	del_timer_sync(&timer);
+ 	inb_p(wdt_stop);
+ 	pr_info("Watchdog timer is now disabled...\n");
+ }
+diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
+index e66e6b905964b..ca65468f4b9ce 100644
+--- a/drivers/watchdog/sc520_wdt.c
++++ b/drivers/watchdog/sc520_wdt.c
+@@ -186,7 +186,7 @@ static int wdt_startup(void)
+ static int wdt_turnoff(void)
+ {
+ 	/* Stop the timer */
+-	del_timer(&timer);
++	del_timer_sync(&timer);
+ 
+ 	/* Stop the watchdog */
+ 	wdt_config(0);
+diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
+index 5772cc5d37804..f2650863fd027 100644
+--- a/drivers/watchdog/w83877f_wdt.c
++++ b/drivers/watchdog/w83877f_wdt.c
+@@ -166,7 +166,7 @@ static void wdt_startup(void)
+ static void wdt_turnoff(void)
+ {
+ 	/* Stop the timer */
+-	del_timer(&timer);
++	del_timer_sync(&timer);
+ 
+ 	wdt_change(WDT_DISABLE);
+ 
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 595fd083c4ad1..1947fa34577bc 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2101,6 +2101,13 @@ error:
+ 	return ret;
+ }
+ 
++/*
++ * This function, insert_block_group_item(), belongs to phase 2 of chunk
++ * allocation.
++ *
++ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
++ * phases.
++ */
+ static int insert_block_group_item(struct btrfs_trans_handle *trans,
+ 				   struct btrfs_block_group *block_group)
+ {
+@@ -2123,15 +2130,19 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans,
+ 	return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
+ }
+ 
++/*
++ * This function, btrfs_create_pending_block_groups(), belongs to phase 2 of
++ * chunk allocation.
++ *
++ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
++ * phases.
++ */
+ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct btrfs_block_group *block_group;
+ 	int ret = 0;
+ 
+-	if (!trans->can_flush_pending_bgs)
+-		return;
+-
+ 	while (!list_empty(&trans->new_bgs)) {
+ 		int index;
+ 
+@@ -2146,6 +2157,13 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
+ 		ret = insert_block_group_item(trans, block_group);
+ 		if (ret)
+ 			btrfs_abort_transaction(trans, ret);
++		if (!block_group->chunk_item_inserted) {
++			mutex_lock(&fs_info->chunk_mutex);
++			ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
++			mutex_unlock(&fs_info->chunk_mutex);
++			if (ret)
++				btrfs_abort_transaction(trans, ret);
++		}
+ 		ret = btrfs_finish_chunk_alloc(trans, block_group->start,
+ 					block_group->length);
+ 		if (ret)
+@@ -2169,8 +2187,9 @@ next:
+ 	btrfs_trans_release_chunk_metadata(trans);
+ }
+ 
+-int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
+-			   u64 type, u64 chunk_offset, u64 size)
++struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
++						 u64 bytes_used, u64 type,
++						 u64 chunk_offset, u64 size)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct btrfs_block_group *cache;
+@@ -2180,7 +2199,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
+ 
+ 	cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
+ 	if (!cache)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	cache->length = size;
+ 	set_free_space_tree_thresholds(cache);
+@@ -2194,7 +2213,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
+ 	ret = btrfs_load_block_group_zone_info(cache, true);
+ 	if (ret) {
+ 		btrfs_put_block_group(cache);
+-		return ret;
++		return ERR_PTR(ret);
+ 	}
+ 
+ 	ret = exclude_super_stripes(cache);
+@@ -2202,7 +2221,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
+ 		/* We may have excluded something, so call this just in case */
+ 		btrfs_free_excluded_extents(cache);
+ 		btrfs_put_block_group(cache);
+-		return ret;
++		return ERR_PTR(ret);
+ 	}
+ 
+ 	add_new_free_space(cache, chunk_offset, chunk_offset + size);
+@@ -2229,7 +2248,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
+ 	if (ret) {
+ 		btrfs_remove_free_space_cache(cache);
+ 		btrfs_put_block_group(cache);
+-		return ret;
++		return ERR_PTR(ret);
+ 	}
+ 
+ 	/*
+@@ -2248,7 +2267,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
+ 	btrfs_update_delayed_refs_rsv(trans);
+ 
+ 	set_avail_alloc_bits(fs_info, type);
+-	return 0;
++	return cache;
+ }
+ 
+ /*
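
With this hunk btrfs_make_block_group() returns the new block group, or an errno encoded via ERR_PTR(), rather than a plain int, so callers such as do_chunk_alloc() in the following hunk can use the group directly from the return value. The userspace sketch below re-implements just enough of the kernel's ERR_PTR/IS_ERR helpers (which live in <linux/err.h>) to show the pattern; the block group struct is a stand-in.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* Errnos map into the last page of the address space. */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct block_group { unsigned long start; };

static struct block_group *make_block_group(int fail)
{
	static struct block_group bg = { .start = 1024 };

	if (fail)
		return ERR_PTR(-ENOMEM);
	return &bg;
}

int main(void)
{
	struct block_group *bg = make_block_group(0);

	if (IS_ERR(bg))
		return (int)-PTR_ERR(bg);
	printf("block group at %lu\n", bg->start);
	return 0;
}
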
+@@ -3124,11 +3143,203 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
+ 	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
+ }
+ 
++static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
++{
++	struct btrfs_block_group *bg;
++	int ret;
++
++	/*
++	 * Check if we have enough space in the system space info because we
++	 * will need to update device items in the chunk btree and insert a new
++	 * chunk item in the chunk btree as well. This will allocate a new
++	 * system block group if needed.
++	 */
++	check_system_chunk(trans, flags);
++
++	bg = btrfs_alloc_chunk(trans, flags);
++	if (IS_ERR(bg)) {
++		ret = PTR_ERR(bg);
++		goto out;
++	}
++
++	/*
++	 * If this is a system chunk allocation then stop right here and do not
++	 * add the chunk item to the chunk btree. This is to prevent a deadlock
++	 * because this system chunk allocation can be triggered while COWing
++	 * some extent buffer of the chunk btree and while holding a lock on a
++	 * parent extent buffer, in which case attempting to insert the chunk
++	 * item (or update the device item) would result in a deadlock on that
++	 * parent extent buffer. In this case defer the chunk btree updates to
++	 * the second phase of chunk allocation and keep our reservation until
++	 * the second phase completes.
++	 *
++	 * This is a rare case and can only be triggered by the very few cases
++	 * we have where we need to touch the chunk btree outside chunk allocation
++	 * and chunk removal. These cases are basically adding a device, removing
++	 * a device or resizing a device.
++	 */
++	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
++		return 0;
++
++	ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
++	/*
++	 * Normally we are not expected to fail with -ENOSPC here, since we have
++	 * previously reserved space in the system space_info and allocated one
++	 * new system chunk if necessary. However there are two exceptions:
++	 *
++	 * 1) We may have enough free space in the system space_info but all the
++	 *    existing system block groups have a profile which can not be used
++	 *    for extent allocation.
++	 *
++	 *    This happens when mounting in degraded mode. For example we have a
++	 *    RAID1 filesystem with 2 devices, lose one device and mount the fs
++	 *    using the other device in degraded mode. If we then allocate a chunk,
++	 *    we may have enough free space in the existing system space_info, but
++	 *    none of the block groups can be used for extent allocation since they
++	 *    have a RAID1 profile, and because we are in degraded mode with a
++	 *    single device, we are forced to allocate a new system chunk with a
++	 *    SINGLE profile. Making check_system_chunk() iterate over all system
++	 *    block groups and check if they have a usable profile and enough space
++	 *    can be slow on very large filesystems, so we tolerate the -ENOSPC and
++	 *    try again after forcing allocation of a new system chunk. Like this
++	 *    we avoid paying the cost of that search in normal circumstances, when
++	 *    we were not mounted in degraded mode;
++	 *
++	 * 2) We had enough free space in the system space_info, and one suitable
++	 *    block group to allocate from when we called check_system_chunk()
++	 *    above. However right after we called it, the only system block group
++	 *    with enough free space got turned into RO mode by a running scrub,
++	 *    and in this case we have to allocate a new one and retry. We only
++	 *    need to do this allocation and retry once, since we have a transaction
++	 *    handle and scrub uses the commit root to search for block groups.
++	 */
++	if (ret == -ENOSPC) {
++		const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
++		struct btrfs_block_group *sys_bg;
++
++		sys_bg = btrfs_alloc_chunk(trans, sys_flags);
++		if (IS_ERR(sys_bg)) {
++			ret = PTR_ERR(sys_bg);
++			btrfs_abort_transaction(trans, ret);
++			goto out;
++		}
++
++		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
++		if (ret) {
++			btrfs_abort_transaction(trans, ret);
++			goto out;
++		}
++
++		ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
++		if (ret) {
++			btrfs_abort_transaction(trans, ret);
++			goto out;
++		}
++	} else if (ret) {
++		btrfs_abort_transaction(trans, ret);
++		goto out;
++	}
++out:
++	btrfs_trans_release_chunk_metadata(trans);
++
++	return ret;
++}
++
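
The -ENOSPC handling above boils down to a retry-once pattern: allocate one
extra system chunk, then attempt the chunk item insertion a second time. A
standalone C sketch of just that control flow (userspace only; the fake_*
helpers are hypothetical stand-ins for the btrfs calls, not real functions):

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Stand-in for btrfs_chunk_alloc_add_chunk_item(): fail once with -ENOSPC. */
static int fake_add_chunk_item(void)
{
	return attempts++ == 0 ? -ENOSPC : 0;
}

/* Stand-in for allocating a new SYSTEM chunk so the retry can succeed. */
static int fake_alloc_system_chunk(void)
{
	return 0;
}

static int do_alloc_with_retry(void)
{
	int ret = fake_add_chunk_item();

	if (ret == -ENOSPC) {
		ret = fake_alloc_system_chunk();
		if (ret)
			return ret;
		/*
		 * Retry exactly once: scrub searches block groups through the
		 * commit root, so the new system block group stays usable.
		 */
		ret = fake_add_chunk_item();
	}
	return ret;
}

int main(void)
{
	printf("result: %d after %d attempts\n", do_alloc_with_retry(), attempts);
	return 0;
}
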
+ /*
+- * If force is CHUNK_ALLOC_FORCE:
++ * Chunk allocation is done in 2 phases:
++ *
++ * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
++ *    the chunk, the chunk mapping, create its block group and add the items
++ *    that belong in the chunk btree to it - more specifically, we need to
++ *    update device items in the chunk btree and add a new chunk item to it.
++ *
++ * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
++ *    group item to the extent btree and the device extent items to the devices
++ *    btree.
++ *
++ * This is done to prevent deadlocks. For example when COWing a node from the
++ * extent btree we are holding a write lock on the node's parent and if we
++ * trigger chunk allocation and attempt to insert the new block group item
++ * in the extent btree right away, we could deadlock because the path for the
++ * insertion can include that parent node. At first glance it seems impossible
++ * to trigger chunk allocation after starting a transaction since tasks should
++ * reserve enough transaction units (metadata space); however, while that is true
++ * most of the time, chunk allocation may still be triggered for several reasons:
++ *
++ * 1) When reserving metadata, we check if there is enough free space in the
++ *    metadata space_info and therefore don't trigger allocation of a new chunk.
++ *    However later when the task actually tries to COW an extent buffer from
++ *    the extent btree or from the device btree for example, it is forced to
++ *    allocate a new block group (chunk), because the only block group that had
++ *    enough free space was just turned RO by a running scrub (or by device
++ *    replace, the block group reclaim thread, etc), so it cannot be used for
++ *    allocating an extent and a new one must be allocated;
++ *
++ * 2) Because we only check that the metadata space_info has enough free bytes,
++ *    we end up not allocating a new metadata chunk in that case. However if
++ *    the filesystem was mounted in degraded mode, none of the existing block
++ *    groups might be suitable for extent allocation due to their incompatible
++ *    profile (e.g. mounting a filesystem with 2 devices, where all block groups
++ *    use a RAID1 profile, in degraded mode with a single device). In this case
++ *    when the task attempts to COW some extent buffer of the extent btree for
++ *    example, it will trigger allocation of a new metadata block group with a
++ *    suitable profile (SINGLE profile in the example of the degraded mount of
++ *    the RAID1 filesystem);
++ *
++ * 3) The task has reserved enough transaction units / metadata space, but when
++ *    it attempts to COW an extent buffer from the extent or device btree for
++ *    example, it does not find any free extent in any metadata block group,
++ *    and is therefore forced to try to allocate a new metadata block group.
++ *    This is because some other task allocated all available extents in the
++ *    meanwhile - this typically happens with tasks that don't reserve space
++ *    properly, either intentionally or as a bug. One example where this is
++ *    done intentionally is fsync, as it does not reserve any transaction units
++ *    and ends up allocating a variable number of metadata extents for log
++ *    tree extent buffers.
++ *
++ * We also need this 2-phase setup when adding a device to a filesystem with
++ * a seed device - we must create new metadata and system chunks without adding
++ * any of the block group items to the chunk, extent and device btrees. If we
++ * did not do it this way, we would get ENOSPC when attempting to update those
++ * btrees, since all the chunks from the seed device are read-only.
++ *
++ * Phase 1 does the updates and insertions to the chunk btree because if we had
++ * done it in phase 2 and had a thundering herd of tasks allocating chunks in
++ * parallel, we could end up with too many system chunks allocated when many
++ * tasks reach phase 1 without the previous ones completing phase 2. In the
++ * extreme case this leads to exhaustion of the system chunk array in the
++ * superblock. This is easier to trigger if using a btree node/leaf size of 64K
++ * and with RAID filesystems (so we have more device items in the chunk btree).
++ * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
++ * the system chunk array due to concurrent allocations") provides more details.
++ *
++ * For allocation of system chunks, we defer the updates and insertions into the
++ * chunk btree to phase 2. This is to prevent deadlocks on extent buffers because
++ * if the chunk allocation is triggered while COWing an extent buffer of the
++ * chunk btree, we are holding a lock on the parent of that extent buffer and
++ * doing the chunk btree updates and insertions can require locking that parent.
++ * This is for the very few and rare cases where we update the chunk btree that
++ * are not chunk allocation or chunk removal: adding a device, removing a device
++ * or resizing a device.
++ *
++ * The reservation of system space, done through check_system_chunk(), as well
++ * as all the updates and insertions into the chunk btree must be done while
++ * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
++ * an extent buffer from the chunk btree we never trigger allocation of a new
++ * system chunk, which would result in a deadlock (trying to lock an extent
++ * buffer of the chunk btree twice: the first time before triggering the chunk
++ * allocation and the second time during chunk allocation while attempting to
++ * update the chunk btree). The system chunk array is also updated while holding
++ * that mutex. The same logic applies to removing chunks - we must reserve system
++ * space, update the chunk btree and the system chunk array in the superblock
++ * while holding fs_info->chunk_mutex.
++ *
++ * This function, btrfs_chunk_alloc(), belongs to phase 1.
++ *
++ * If @force is CHUNK_ALLOC_FORCE:
+  *    - return 1 if it successfully allocates a chunk,
+  *    - return errors including -ENOSPC otherwise.
+- * If force is NOT CHUNK_ALLOC_FORCE:
++ * If @force is NOT CHUNK_ALLOC_FORCE:
+  *    - return 0 if it doesn't need to allocate a new chunk,
+  *    - return 1 if it successfully allocates a chunk,
+  *    - return errors including -ENOSPC otherwise.
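
The tri-state return contract documented above (0 means no chunk was needed,
1 means a chunk was allocated, negative errno on failure) is easiest to see
from the caller's side. A hedged userspace sketch, with chunk_alloc() as a
made-up stand-in for the real btrfs_chunk_alloc():

#include <stdio.h>

/* Made-up stand-in for btrfs_chunk_alloc(); not the real function. */
static int chunk_alloc(int force)
{
	return force ? 1 : 0;	/* pretend: forced calls always allocate */
}

int main(void)
{
	int ret = chunk_alloc(1);

	if (ret < 0)
		fprintf(stderr, "allocation failed: %d\n", ret);
	else if (ret == 0)
		printf("no new chunk needed\n");
	else
		printf("new chunk allocated\n");
	return ret < 0 ? 1 : 0;
}
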
+@@ -3145,6 +3356,13 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+ 	/* Don't re-enter if we're already allocating a chunk */
+ 	if (trans->allocating_chunk)
+ 		return -ENOSPC;
++	/*
++	 * If we are removing a chunk, don't re-enter or we would deadlock.
++	 * System space reservation and system chunk allocation are done by the
++	 * chunk remove operation (btrfs_remove_chunk()).
++	 */
++	if (trans->removing_chunk)
++		return -ENOSPC;
+ 
+ 	space_info = btrfs_find_space_info(fs_info, flags);
+ 	ASSERT(space_info);
+@@ -3208,13 +3426,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+ 			force_metadata_allocation(fs_info);
+ 	}
+ 
+-	/*
+-	 * Check if we have enough space in SYSTEM chunk because we may need
+-	 * to update devices.
+-	 */
+-	check_system_chunk(trans, flags);
+-
+-	ret = btrfs_alloc_chunk(trans, flags);
++	ret = do_chunk_alloc(trans, flags);
+ 	trans->allocating_chunk = false;
+ 
+ 	spin_lock(&space_info->lock);
+@@ -3233,22 +3445,6 @@ out:
+ 	space_info->chunk_alloc = 0;
+ 	spin_unlock(&space_info->lock);
+ 	mutex_unlock(&fs_info->chunk_mutex);
+-	/*
+-	 * When we allocate a new chunk we reserve space in the chunk block
+-	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
+-	 * add new nodes/leafs to it if we end up needing to do it when
+-	 * inserting the chunk item and updating device items as part of the
+-	 * second phase of chunk allocation, performed by
+-	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
+-	 * large number of new block groups to create in our transaction
+-	 * handle's new_bgs list to avoid exhausting the chunk block reserve
+-	 * in extreme cases - like having a single transaction create many new
+-	 * block groups when starting to write out the free space caches of all
+-	 * the block groups that were made dirty during the lifetime of the
+-	 * transaction.
+-	 */
+-	if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
+-		btrfs_create_pending_block_groups(trans);
+ 
+ 	return ret;
+ }
+@@ -3269,7 +3465,6 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
+  */
+ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
+ {
+-	struct btrfs_transaction *cur_trans = trans->transaction;
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct btrfs_space_info *info;
+ 	u64 left;
+@@ -3284,7 +3479,6 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
+ 	lockdep_assert_held(&fs_info->chunk_mutex);
+ 
+ 	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
+-again:
+ 	spin_lock(&info->lock);
+ 	left = info->total_bytes - btrfs_space_info_used(info, true);
+ 	spin_unlock(&info->lock);
+@@ -3303,76 +3497,39 @@ again:
+ 
+ 	if (left < thresh) {
+ 		u64 flags = btrfs_system_alloc_profile(fs_info);
+-		u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved);
+-
+-		/*
+-		 * If there's not available space for the chunk tree (system
+-		 * space) and there are other tasks that reserved space for
+-		 * creating a new system block group, wait for them to complete
+-		 * the creation of their system block group and release excess
+-		 * reserved space. We do this because:
+-		 *
+-		 * *) We can end up allocating more system chunks than necessary
+-		 *    when there are multiple tasks that are concurrently
+-		 *    allocating block groups, which can lead to exhaustion of
+-		 *    the system array in the superblock;
+-		 *
+-		 * *) If we allocate extra and unnecessary system block groups,
+-		 *    despite being empty for a long time, and possibly forever,
+-		 *    they end not being added to the list of unused block groups
+-		 *    because that typically happens only when deallocating the
+-		 *    last extent from a block group - which never happens since
+-		 *    we never allocate from them in the first place. The few
+-		 *    exceptions are when mounting a filesystem or running scrub,
+-		 *    which add unused block groups to the list of unused block
+-		 *    groups, to be deleted by the cleaner kthread.
+-		 *    And even when they are added to the list of unused block
+-		 *    groups, it can take a long time until they get deleted,
+-		 *    since the cleaner kthread might be sleeping or busy with
+-		 *    other work (deleting subvolumes, running delayed iputs,
+-		 *    defrag scheduling, etc);
+-		 *
+-		 * This is rare in practice, but can happen when too many tasks
+-		 * are allocating blocks groups in parallel (via fallocate())
+-		 * and before the one that reserved space for a new system block
+-		 * group finishes the block group creation and releases the space
+-		 * reserved in excess (at btrfs_create_pending_block_groups()),
+-		 * other tasks end up here and see free system space temporarily
+-		 * not enough for updating the chunk tree.
+-		 *
+-		 * We unlock the chunk mutex before waiting for such tasks and
+-		 * lock it again after the wait, otherwise we would deadlock.
+-		 * It is safe to do so because allocating a system chunk is the
+-		 * first thing done while allocating a new block group.
+-		 */
+-		if (reserved > trans->chunk_bytes_reserved) {
+-			const u64 min_needed = reserved - thresh;
+-
+-			mutex_unlock(&fs_info->chunk_mutex);
+-			wait_event(cur_trans->chunk_reserve_wait,
+-			   atomic64_read(&cur_trans->chunk_bytes_reserved) <=
+-			   min_needed);
+-			mutex_lock(&fs_info->chunk_mutex);
+-			goto again;
+-		}
++		struct btrfs_block_group *bg;
+ 
+ 		/*
+ 		 * Ignore failure to create system chunk. We might end up not
+ 		 * needing it, as we might not need to COW all nodes/leafs from
+ 		 * the paths we visit in the chunk tree (they were already COWed
+ 		 * or created in the current transaction for example).
++		 *
++		 * Also, if our caller is allocating a system chunk, do not
++		 * attempt to insert the chunk item in the chunk btree, as we
++		 * could deadlock on an extent buffer since our caller may be
++		 * COWing an extent buffer from the chunk btree.
+ 		 */
+-		ret = btrfs_alloc_chunk(trans, flags);
++		bg = btrfs_alloc_chunk(trans, flags);
++		if (IS_ERR(bg)) {
++			ret = PTR_ERR(bg);
++		} else if (!(type & BTRFS_BLOCK_GROUP_SYSTEM)) {
++			/*
++			 * If we fail to add the chunk item here, we end up
++			 * trying again at phase 2 of chunk allocation, at
++			 * btrfs_create_pending_block_groups(). So ignore
++			 * any error here.
++			 */
++			btrfs_chunk_alloc_add_chunk_item(trans, bg);
++		}
+ 	}
+ 
+ 	if (!ret) {
+ 		ret = btrfs_block_rsv_add(fs_info->chunk_root,
+ 					  &fs_info->chunk_block_rsv,
+ 					  thresh, BTRFS_RESERVE_NO_FLUSH);
+-		if (!ret) {
+-			atomic64_add(thresh, &cur_trans->chunk_bytes_reserved);
++		if (!ret)
+ 			trans->chunk_bytes_reserved += thresh;
+-		}
+ 	}
+ }
+ 
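
In rough terms, check_system_chunk() computes how much system space is left
and, when that falls below the worst-case threshold, allocates a new system
chunk before reserving. A simplified userspace model of that logic (all sizes
invented; the real code also tolerates allocation failure, which this sketch
does not model):

#include <stdint.h>
#include <stdio.h>

struct space_info { uint64_t total_bytes, used_bytes; };

/* Pretend a new SYSTEM chunk adds 8 MiB of usable space. */
static uint64_t alloc_system_chunk(void) { return 8ULL * 1024 * 1024; }

static uint64_t ensure_system_space(struct space_info *info, uint64_t thresh)
{
	uint64_t left = info->total_bytes - info->used_bytes;

	/* Below the worst-case need: grow the space before reserving. */
	if (left < thresh)
		info->total_bytes += alloc_system_chunk();

	/* Reserve the threshold; callers release the excess later. */
	info->used_bytes += thresh;
	return thresh;
}

int main(void)
{
	struct space_info sys = {
		.total_bytes = 4ULL * 1024 * 1024,
		.used_bytes  = 3ULL * 1024 * 1024,
	};
	uint64_t reserved = ensure_system_space(&sys, 2ULL * 1024 * 1024);

	printf("reserved %llu bytes, system space now %llu bytes\n",
	       (unsigned long long)reserved,
	       (unsigned long long)sys.total_bytes);
	return 0;
}
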
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index 3ecc3372a5cea..3359c2ad18320 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -97,6 +97,7 @@ struct btrfs_block_group {
+ 	unsigned int removed:1;
+ 	unsigned int to_copy:1;
+ 	unsigned int relocating_repair:1;
++	unsigned int chunk_item_inserted:1;
+ 
+ 	int disk_cache_state;
+ 
+@@ -265,8 +266,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
+ void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
+ int btrfs_read_block_groups(struct btrfs_fs_info *info);
+-int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
+-			   u64 type, u64 chunk_offset, u64 size);
++struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
++						 u64 bytes_used, u64 type,
++						 u64 chunk_offset, u64 size);
+ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
+ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
+ 			     bool do_chunk_alloc);
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 8f48553d861ec..4e3c71253a5eb 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -954,49 +954,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
+ 	return 0;
+ }
+ 
+-static struct extent_buffer *alloc_tree_block_no_bg_flush(
+-					  struct btrfs_trans_handle *trans,
+-					  struct btrfs_root *root,
+-					  u64 parent_start,
+-					  const struct btrfs_disk_key *disk_key,
+-					  int level,
+-					  u64 hint,
+-					  u64 empty_size,
+-					  enum btrfs_lock_nesting nest)
+-{
+-	struct btrfs_fs_info *fs_info = root->fs_info;
+-	struct extent_buffer *ret;
+-
+-	/*
+-	 * If we are COWing a node/leaf from the extent, chunk, device or free
+-	 * space trees, make sure that we do not finish block group creation of
+-	 * pending block groups. We do this to avoid a deadlock.
+-	 * COWing can result in allocation of a new chunk, and flushing pending
+-	 * block groups (btrfs_create_pending_block_groups()) can be triggered
+-	 * when finishing allocation of a new chunk. Creation of a pending block
+-	 * group modifies the extent, chunk, device and free space trees,
+-	 * therefore we could deadlock with ourselves since we are holding a
+-	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
+-	 * try to COW later.
+-	 * For similar reasons, we also need to delay flushing pending block
+-	 * groups when splitting a leaf or node, from one of those trees, since
+-	 * we are holding a write lock on it and its parent or when inserting a
+-	 * new root node for one of those trees.
+-	 */
+-	if (root == fs_info->extent_root ||
+-	    root == fs_info->chunk_root ||
+-	    root == fs_info->dev_root ||
+-	    root == fs_info->free_space_root)
+-		trans->can_flush_pending_bgs = false;
+-
+-	ret = btrfs_alloc_tree_block(trans, root, parent_start,
+-				     root->root_key.objectid, disk_key, level,
+-				     hint, empty_size, nest);
+-	trans->can_flush_pending_bgs = true;
+-
+-	return ret;
+-}
+-
+ /*
+  * does the dirty work in cow of a single block.  The parent block (if
+  * supplied) is updated to point to the new cow copy.  The new buffer is marked
+@@ -1045,8 +1002,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
+ 		parent_start = parent->start;
+ 
+-	cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
+-					   level, search_start, empty_size, nest);
++	cow = btrfs_alloc_tree_block(trans, root, parent_start,
++				     root->root_key.objectid, &disk_key, level,
++				     search_start, empty_size, nest);
+ 	if (IS_ERR(cow))
+ 		return PTR_ERR(cow);
+ 
+@@ -3340,9 +3298,9 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
+ 	else
+ 		btrfs_node_key(lower, &lower_key, 0);
+ 
+-	c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
+-					 root->node->start, 0,
+-					 BTRFS_NESTING_NEW_ROOT);
++	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
++				   &lower_key, level, root->node->start, 0,
++				   BTRFS_NESTING_NEW_ROOT);
+ 	if (IS_ERR(c))
+ 		return PTR_ERR(c);
+ 
+@@ -3471,8 +3429,9 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
+ 	mid = (c_nritems + 1) / 2;
+ 	btrfs_node_key(c, &disk_key, mid);
+ 
+-	split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
+-					     c->start, 0, BTRFS_NESTING_SPLIT);
++	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
++				       &disk_key, level, c->start, 0,
++				       BTRFS_NESTING_SPLIT);
+ 	if (IS_ERR(split))
+ 		return PTR_ERR(split);
+ 
+@@ -4263,10 +4222,10 @@ again:
+ 	 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
+ 	 * use BTRFS_NESTING_NEW_ROOT.
+ 	 */
+-	right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
+-					     l->start, 0, num_doubles ?
+-					     BTRFS_NESTING_NEW_ROOT :
+-					     BTRFS_NESTING_SPLIT);
++	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
++				       &disk_key, 0, l->start, 0,
++				       num_doubles ? BTRFS_NESTING_NEW_ROOT :
++				       BTRFS_NESTING_SPLIT);
+ 	if (IS_ERR(right))
+ 		return PTR_ERR(right);
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 1b22ded1a7994..b30a776a0b25c 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2271,13 +2271,127 @@ bool btrfs_bio_fits_in_ordered_extent(struct page *page, struct bio *bio,
+ 	return ret;
+ }
+ 
++/*
++ * Split an extent_map at [start, start + len]
++ *
++ * This function is intended to be used only for extract_ordered_extent().
++ */
++static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
++			  u64 pre, u64 post)
++{
++	struct extent_map_tree *em_tree = &inode->extent_tree;
++	struct extent_map *em;
++	struct extent_map *split_pre = NULL;
++	struct extent_map *split_mid = NULL;
++	struct extent_map *split_post = NULL;
++	int ret = 0;
++	int modified;
++	unsigned long flags;
++
++	/* Sanity check */
++	if (pre == 0 && post == 0)
++		return 0;
++
++	split_pre = alloc_extent_map();
++	if (pre)
++		split_mid = alloc_extent_map();
++	if (post)
++		split_post = alloc_extent_map();
++	if (!split_pre || (pre && !split_mid) || (post && !split_post)) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	ASSERT(pre + post < len);
++
++	lock_extent(&inode->io_tree, start, start + len - 1);
++	write_lock(&em_tree->lock);
++	em = lookup_extent_mapping(em_tree, start, len);
++	if (!em) {
++		ret = -EIO;
++		goto out_unlock;
++	}
++
++	ASSERT(em->len == len);
++	ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
++	ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
++
++	flags = em->flags;
++	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
++	clear_bit(EXTENT_FLAG_LOGGING, &flags);
++	modified = !list_empty(&em->list);
++
++	/* First, replace the em with a new extent_map starting from em->start */
++	split_pre->start = em->start;
++	split_pre->len = (pre ? pre : em->len - post);
++	split_pre->orig_start = split_pre->start;
++	split_pre->block_start = em->block_start;
++	split_pre->block_len = split_pre->len;
++	split_pre->orig_block_len = split_pre->block_len;
++	split_pre->ram_bytes = split_pre->len;
++	split_pre->flags = flags;
++	split_pre->compress_type = em->compress_type;
++	split_pre->generation = em->generation;
++
++	replace_extent_mapping(em_tree, em, split_pre, modified);
++
++	/*
++	 * Now we only have an extent_map at:
++	 *     [em->start, em->start + pre] if pre != 0
++	 *     [em->start, em->start + em->len - post] if pre == 0
++	 */
++
++	if (pre) {
++		/* Insert the middle extent_map */
++		split_mid->start = em->start + pre;
++		split_mid->len = em->len - pre - post;
++		split_mid->orig_start = split_mid->start;
++		split_mid->block_start = em->block_start + pre;
++		split_mid->block_len = split_mid->len;
++		split_mid->orig_block_len = split_mid->block_len;
++		split_mid->ram_bytes = split_mid->len;
++		split_mid->flags = flags;
++		split_mid->compress_type = em->compress_type;
++		split_mid->generation = em->generation;
++		add_extent_mapping(em_tree, split_mid, modified);
++	}
++
++	if (post) {
++		split_post->start = em->start + em->len - post;
++		split_post->len = post;
++		split_post->orig_start = split_post->start;
++		split_post->block_start = em->block_start + em->len - post;
++		split_post->block_len = split_post->len;
++		split_post->orig_block_len = split_post->block_len;
++		split_post->ram_bytes = split_post->len;
++		split_post->flags = flags;
++		split_post->compress_type = em->compress_type;
++		split_post->generation = em->generation;
++		add_extent_mapping(em_tree, split_post, modified);
++	}
++
++	/* Once for us */
++	free_extent_map(em);
++	/* Once for the tree */
++	free_extent_map(em);
++
++out_unlock:
++	write_unlock(&em_tree->lock);
++	unlock_extent(&inode->io_tree, start, start + len - 1);
++out:
++	free_extent_map(split_pre);
++	free_extent_map(split_mid);
++	free_extent_map(split_post);
++
++	return ret;
++}
++
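
The pointer bookkeeping in split_zoned_em() above hides fairly simple
arithmetic: an extent of length len is cut into a pre piece, a middle piece
and a post piece, where the middle merges into the first when pre is zero. A
self-contained sketch of just that arithmetic (plain integers standing in for
extent_map structs):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, len; };

static void split_ranges(uint64_t start, uint64_t len,
			 uint64_t pre, uint64_t post, struct range out[3])
{
	assert(pre + post < len);

	/* pre == 0 merges the first two pieces, as in split_zoned_em(). */
	out[0] = (struct range){ start, pre ? pre : len - post };
	out[1] = (struct range){ start + pre, pre ? len - pre - post : 0 };
	out[2] = (struct range){ start + len - post, post };
}

int main(void)
{
	struct range r[3];

	split_ranges(0, 4096, 512, 1024, r);
	for (int i = 0; i < 3; i++)
		printf("piece %d: start=%llu len=%llu\n", i,
		       (unsigned long long)r[i].start,
		       (unsigned long long)r[i].len);
	return 0;
}
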
+ static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
+ 					   struct bio *bio, loff_t file_offset)
+ {
+ 	struct btrfs_ordered_extent *ordered;
+-	struct extent_map *em = NULL, *em_new = NULL;
+-	struct extent_map_tree *em_tree = &inode->extent_tree;
+ 	u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
++	u64 file_len;
+ 	u64 len = bio->bi_iter.bi_size;
+ 	u64 end = start + len;
+ 	u64 ordered_end;
+@@ -2317,41 +2431,16 @@ static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
+ 		goto out;
+ 	}
+ 
++	file_len = ordered->num_bytes;
+ 	pre = start - ordered->disk_bytenr;
+ 	post = ordered_end - end;
+ 
+ 	ret = btrfs_split_ordered_extent(ordered, pre, post);
+ 	if (ret)
+ 		goto out;
+-
+-	read_lock(&em_tree->lock);
+-	em = lookup_extent_mapping(em_tree, ordered->file_offset, len);
+-	if (!em) {
+-		read_unlock(&em_tree->lock);
+-		ret = -EIO;
+-		goto out;
+-	}
+-	read_unlock(&em_tree->lock);
+-
+-	ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
+-	/*
+-	 * We cannot reuse em_new here but have to create a new one, as
+-	 * unpin_extent_cache() expects the start of the extent map to be the
+-	 * logical offset of the file, which does not hold true anymore after
+-	 * splitting.
+-	 */
+-	em_new = create_io_em(inode, em->start + pre, len,
+-			      em->start + pre, em->block_start + pre, len,
+-			      len, len, BTRFS_COMPRESS_NONE,
+-			      BTRFS_ORDERED_REGULAR);
+-	if (IS_ERR(em_new)) {
+-		ret = PTR_ERR(em_new);
+-		goto out;
+-	}
+-	free_extent_map(em_new);
++	ret = split_zoned_em(inode, file_offset, file_len, pre, post);
+ 
+ out:
+-	free_extent_map(em);
+ 	btrfs_put_ordered_extent(ordered);
+ 
+ 	return errno_to_blk_status(ret);
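
The pre/post values fed into btrfs_split_ordered_extent() and split_zoned_em()
come from straightforward interval arithmetic: the bio occupies [start, end)
inside an ordered extent spanning [disk_bytenr, ordered_end). A small
standalone check of that relationship (all numbers invented):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Ordered extent spans [disk_bytenr, ordered_end). */
	uint64_t disk_bytenr = 4096, ordered_end = 4096 + 65536;
	/* The bio sits somewhere inside it: [start, end). */
	uint64_t start = 8192, len = 16384, end = start + len;

	uint64_t pre = start - disk_bytenr;	/* bytes before the bio */
	uint64_t post = ordered_end - end;	/* bytes after the bio */

	/* pre + bio + post must cover the whole ordered extent. */
	assert(pre + len + post == ordered_end - disk_bytenr);
	printf("pre=%llu post=%llu\n",
	       (unsigned long long)pre, (unsigned long long)post);
	return 0;
}
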
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 81c8567dffee9..f16a482092e77 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -254,23 +254,21 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
+ }
+ 
+ /*
+- * To be called after all the new block groups attached to the transaction
+- * handle have been created (btrfs_create_pending_block_groups()).
++ * To be called after the chunk btree updates that follow allocating a new
++ * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), after all chunk
++ * btree updates when removing a chunk, and after finishing the second phase of
++ * chunk allocation (btrfs_create_pending_block_groups()) in case some block
++ * group had its chunk item insertion delayed to the second phase.
+  */
+ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+-	struct btrfs_transaction *cur_trans = trans->transaction;
+ 
+ 	if (!trans->chunk_bytes_reserved)
+ 		return;
+ 
+-	WARN_ON_ONCE(!list_empty(&trans->new_bgs));
+-
+ 	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
+ 				trans->chunk_bytes_reserved, NULL);
+-	atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved);
+-	cond_wake_up(&cur_trans->chunk_reserve_wait);
+ 	trans->chunk_bytes_reserved = 0;
+ }
+ 
+@@ -386,8 +384,6 @@ loop:
+ 	spin_lock_init(&cur_trans->dropped_roots_lock);
+ 	INIT_LIST_HEAD(&cur_trans->releasing_ebs);
+ 	spin_lock_init(&cur_trans->releasing_ebs_lock);
+-	atomic64_set(&cur_trans->chunk_bytes_reserved, 0);
+-	init_waitqueue_head(&cur_trans->chunk_reserve_wait);
+ 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
+ 	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
+ 			IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
+@@ -702,7 +698,6 @@ again:
+ 	h->fs_info = root->fs_info;
+ 
+ 	h->type = type;
+-	h->can_flush_pending_bgs = true;
+ 	INIT_LIST_HEAD(&h->new_bgs);
+ 
+ 	smp_mb();
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index c49e2266b28ba..21b36f6e9322b 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -96,13 +96,6 @@ struct btrfs_transaction {
+ 
+ 	spinlock_t releasing_ebs_lock;
+ 	struct list_head releasing_ebs;
+-
+-	/*
+-	 * The number of bytes currently reserved, by all transaction handles
+-	 * attached to this transaction, for metadata extents of the chunk tree.
+-	 */
+-	atomic64_t chunk_bytes_reserved;
+-	wait_queue_head_t chunk_reserve_wait;
+ };
+ 
+ #define __TRANS_FREEZABLE	(1U << 0)
+@@ -141,7 +134,7 @@ struct btrfs_trans_handle {
+ 	short aborted;
+ 	bool adding_csums;
+ 	bool allocating_chunk;
+-	bool can_flush_pending_bgs;
++	bool removing_chunk;
+ 	bool reloc_reserved;
+ 	bool in_fsync;
+ 	struct btrfs_root *root;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index f4b0aecdaac72..fae59c8f86405 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3173,7 +3173,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 		if (!log_root_tree->node) {
+ 			ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
+ 			if (ret) {
+-				mutex_unlock(&fs_info->tree_log_mutex);
++				mutex_unlock(&fs_info->tree_root->log_mutex);
+ 				goto out;
+ 			}
+ 		}
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 3912eda7905f6..58152ac000290 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1744,19 +1744,14 @@ again:
+ 		extent = btrfs_item_ptr(leaf, path->slots[0],
+ 					struct btrfs_dev_extent);
+ 	} else {
+-		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
+ 		goto out;
+ 	}
+ 
+ 	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
+ 
+ 	ret = btrfs_del_item(trans, root, path);
+-	if (ret) {
+-		btrfs_handle_fs_error(fs_info, ret,
+-				      "Failed to remove dev extent item");
+-	} else {
++	if (ret == 0)
+ 		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
+-	}
+ out:
+ 	btrfs_free_path(path);
+ 	return ret;
+@@ -2941,7 +2936,7 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
+ 	u32 cur;
+ 	struct btrfs_key key;
+ 
+-	mutex_lock(&fs_info->chunk_mutex);
++	lockdep_assert_held(&fs_info->chunk_mutex);
+ 	array_size = btrfs_super_sys_array_size(super_copy);
+ 
+ 	ptr = super_copy->sys_chunk_array;
+@@ -2971,7 +2966,6 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
+ 			cur += len;
+ 		}
+ 	}
+-	mutex_unlock(&fs_info->chunk_mutex);
+ 	return ret;
+ }
+ 
+@@ -3011,6 +3005,29 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+ 	return em;
+ }
+ 
++static int remove_chunk_item(struct btrfs_trans_handle *trans,
++			     struct map_lookup *map, u64 chunk_offset)
++{
++	int i;
++
++	/*
++	 * Removing chunk items and updating the device items in the chunk btree
++	 * requires holding the chunk_mutex.
++	 * See the comment at btrfs_chunk_alloc() for the details.
++	 */
++	lockdep_assert_held(&trans->fs_info->chunk_mutex);
++
++	for (i = 0; i < map->num_stripes; i++) {
++		int ret;
++
++		ret = btrfs_update_device(trans, map->stripes[i].dev);
++		if (ret)
++			return ret;
++	}
++
++	return btrfs_free_chunk(trans, chunk_offset);
++}
++
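
remove_chunk_item() is a first-error-wins loop: each stripe's device item is
updated in turn, and only if all succeed is the chunk item deleted. A trivial
userspace rendition with stubbed btree operations (the -EIO injection is
purely illustrative):

#include <errno.h>
#include <stdio.h>

#define NUM_STRIPES 3

static int update_device(int idx)
{
	/* Pretend the second device item update fails. */
	return idx == 1 ? -EIO : 0;
}

static int free_chunk(void) { return 0; }

static int remove_chunk_item(void)
{
	for (int i = 0; i < NUM_STRIPES; i++) {
		int ret = update_device(i);

		if (ret)
			return ret;	/* first error wins */
	}
	return free_chunk();
}

int main(void)
{
	printf("remove_chunk_item: %d\n", remove_chunk_item());
	return 0;
}
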
+ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+@@ -3031,14 +3048,16 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
+ 		return PTR_ERR(em);
+ 	}
+ 	map = em->map_lookup;
+-	mutex_lock(&fs_info->chunk_mutex);
+-	check_system_chunk(trans, map->type);
+-	mutex_unlock(&fs_info->chunk_mutex);
+ 
+ 	/*
+-	 * Take the device list mutex to prevent races with the final phase of
+-	 * a device replace operation that replaces the device object associated
+-	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
++	 * First delete the device extent items from the devices btree.
++	 * We take the device_list_mutex to avoid racing with the finishing phase
++	 * of a device replace operation. See the comment below before acquiring
++	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
++	 * because that can result in a deadlock when deleting the device extent
++	 * items from the devices btree - COWing an extent buffer from the btree
++	 * may result in allocating a new metadata chunk, which would attempt to
++	 * lock fs_info->chunk_mutex again.
+ 	 */
+ 	mutex_lock(&fs_devices->device_list_mutex);
+ 	for (i = 0; i < map->num_stripes; i++) {
+@@ -3060,18 +3079,73 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
+ 			btrfs_clear_space_info_full(fs_info);
+ 			mutex_unlock(&fs_info->chunk_mutex);
+ 		}
++	}
++	mutex_unlock(&fs_devices->device_list_mutex);
+ 
+-		ret = btrfs_update_device(trans, device);
++	/*
++	 * We acquire fs_info->chunk_mutex for 2 reasons:
++	 *
++	 * 1) Just like with the first phase of the chunk allocation, we must
++	 *    reserve system space, do all chunk btree updates and deletions, and
++	 *    update the system chunk array in the superblock while holding this
++	 *    mutex. This is for similar reasons as explained on the comment at
++	 *    the top of btrfs_chunk_alloc();
++	 *
++	 * 2) Prevent races with the final phase of a device replace operation
++	 *    that replaces the device object associated with the map's stripes,
++	 *    because the device object's id can change at any time during that
++	 *    final phase of the device replace operation
++	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
++	 *    replaced device and then see it with an ID of
++	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
++	 *    the device item, which does not exist in the chunk btree.
++	 *    The finishing phase of device replace acquires both the
++	 *    device_list_mutex and the chunk_mutex, in that order, so we are
++	 *    safe by just acquiring the chunk_mutex.
++	 */
++	trans->removing_chunk = true;
++	mutex_lock(&fs_info->chunk_mutex);
++
++	check_system_chunk(trans, map->type);
++
++	ret = remove_chunk_item(trans, map, chunk_offset);
++	/*
++	 * Normally we should not get -ENOSPC, since we reserved space beforehand
++	 * through the call to check_system_chunk().
++	 *
++	 * Despite our system space_info having enough free space, we may not
++	 * be able to allocate extents from its block groups, because all have
++	 * an incompatible profile, which will force us to allocate a new system
++	 * block group with the right profile, or right after we called
++	 * check_system_chunk() above, a scrub turned the only system block group
++	 * with enough free space into RO mode.
++	 * This is explained with more detail at do_chunk_alloc().
++	 *
++	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
++	 */
++	if (ret == -ENOSPC) {
++		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
++		struct btrfs_block_group *sys_bg;
++
++		sys_bg = btrfs_alloc_chunk(trans, sys_flags);
++		if (IS_ERR(sys_bg)) {
++			ret = PTR_ERR(sys_bg);
++			btrfs_abort_transaction(trans, ret);
++			goto out;
++		}
++
++		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
+ 		if (ret) {
+-			mutex_unlock(&fs_devices->device_list_mutex);
+ 			btrfs_abort_transaction(trans, ret);
+ 			goto out;
+ 		}
+-	}
+-	mutex_unlock(&fs_devices->device_list_mutex);
+ 
+-	ret = btrfs_free_chunk(trans, chunk_offset);
+-	if (ret) {
++		ret = remove_chunk_item(trans, map, chunk_offset);
++		if (ret) {
++			btrfs_abort_transaction(trans, ret);
++			goto out;
++		}
++	} else if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+ 		goto out;
+ 	}
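
The mutex reasoning in the comment above is the classic fixed-lock-order rule:
the finishing phase of device replace takes device_list_mutex and then
chunk_mutex, so any path holding either one in that order cannot deadlock with
it. A minimal pthread sketch of the pattern (names illustrative only, not the
kernel's locking primitives):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t device_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Finishing phase of device replace: both mutexes, fixed order. */
static void dev_replace_finishing(void)
{
	pthread_mutex_lock(&device_list_mutex);
	pthread_mutex_lock(&chunk_mutex);
	/* ... swap device objects on the map's stripes ... */
	pthread_mutex_unlock(&chunk_mutex);
	pthread_mutex_unlock(&device_list_mutex);
}

/* Chunk removal: chunk_mutex alone is enough to exclude the above. */
static void remove_chunk(void)
{
	pthread_mutex_lock(&chunk_mutex);
	/* ... update the chunk btree and the system chunk array ... */
	pthread_mutex_unlock(&chunk_mutex);
}

int main(void)
{
	dev_replace_finishing();
	remove_chunk();
	printf("no deadlock: both paths respect the same lock order\n");
	return 0;
}
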
+@@ -3086,6 +3160,15 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
+ 		}
+ 	}
+ 
++	mutex_unlock(&fs_info->chunk_mutex);
++	trans->removing_chunk = false;
++
++	/*
++	 * We are done with chunk btree updates and deletions, so release the
++	 * system space we previously reserved (with check_system_chunk()).
++	 */
++	btrfs_trans_release_chunk_metadata(trans);
++
+ 	ret = btrfs_remove_block_group(trans, chunk_offset, em);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+@@ -3093,6 +3176,10 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
+ 	}
+ 
+ out:
++	if (trans->removing_chunk) {
++		mutex_unlock(&fs_info->chunk_mutex);
++		trans->removing_chunk = false;
++	}
+ 	/* once for us */
+ 	free_extent_map(em);
+ 	return ret;
+@@ -4851,13 +4938,12 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
+ 	u32 array_size;
+ 	u8 *ptr;
+ 
+-	mutex_lock(&fs_info->chunk_mutex);
++	lockdep_assert_held(&fs_info->chunk_mutex);
++
+ 	array_size = btrfs_super_sys_array_size(super_copy);
+ 	if (array_size + item_size + sizeof(disk_key)
+-			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
+-		mutex_unlock(&fs_info->chunk_mutex);
++			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
+ 		return -EFBIG;
+-	}
+ 
+ 	ptr = super_copy->sys_chunk_array + array_size;
+ 	btrfs_cpu_key_to_disk(&disk_key, key);
+@@ -4866,7 +4952,6 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
+ 	memcpy(ptr, chunk, item_size);
+ 	item_size += sizeof(disk_key);
+ 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
+-	mutex_unlock(&fs_info->chunk_mutex);
+ 
+ 	return 0;
+ }
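
btrfs_add_system_chunk(), shown above with its locking converted to a lockdep
assertion, is essentially a bounded append: the disk key plus the chunk item
go at the end of a fixed-size array in the superblock copy, or the call fails
with -EFBIG. A userspace sketch under that reading (array size and contents
made up):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define SYS_ARRAY_SIZE 128	/* made-up stand-in for BTRFS_SYSTEM_CHUNK_ARRAY_SIZE */

static unsigned char sys_array[SYS_ARRAY_SIZE];
static size_t sys_array_used;

static int add_system_item(const void *key, size_t key_size,
			   const void *item, size_t item_size)
{
	if (sys_array_used + key_size + item_size > SYS_ARRAY_SIZE)
		return -EFBIG;	/* would overflow the superblock array */

	memcpy(sys_array + sys_array_used, key, key_size);
	memcpy(sys_array + sys_array_used + key_size, item, item_size);
	sys_array_used += key_size + item_size;
	return 0;
}

int main(void)
{
	unsigned char key[17] = { 0 };	/* pretend disk key */
	unsigned char item[48] = { 0 };	/* pretend chunk item */
	int ret = add_system_item(key, sizeof(key), item, sizeof(item));

	printf("append: %d, used %zu of %d bytes\n",
	       ret, sys_array_used, SYS_ARRAY_SIZE);
	return 0;
}
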
+@@ -5216,13 +5301,14 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
+ 	}
+ }
+ 
+-static int create_chunk(struct btrfs_trans_handle *trans,
++static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
+ 			struct alloc_chunk_ctl *ctl,
+ 			struct btrfs_device_info *devices_info)
+ {
+ 	struct btrfs_fs_info *info = trans->fs_info;
+ 	struct map_lookup *map = NULL;
+ 	struct extent_map_tree *em_tree;
++	struct btrfs_block_group *block_group;
+ 	struct extent_map *em;
+ 	u64 start = ctl->start;
+ 	u64 type = ctl->type;
+@@ -5232,7 +5318,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
+ 
+ 	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
+ 	if (!map)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 	map->num_stripes = ctl->num_stripes;
+ 
+ 	for (i = 0; i < ctl->ndevs; ++i) {
+@@ -5254,7 +5340,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
+ 	em = alloc_extent_map();
+ 	if (!em) {
+ 		kfree(map);
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 	}
+ 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
+ 	em->map_lookup = map;
+@@ -5270,12 +5356,12 @@ static int create_chunk(struct btrfs_trans_handle *trans,
+ 	if (ret) {
+ 		write_unlock(&em_tree->lock);
+ 		free_extent_map(em);
+-		return ret;
++		return ERR_PTR(ret);
+ 	}
+ 	write_unlock(&em_tree->lock);
+ 
+-	ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
+-	if (ret)
++	block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
++	if (IS_ERR(block_group))
+ 		goto error_del_extent;
+ 
+ 	for (i = 0; i < map->num_stripes; i++) {
+@@ -5295,7 +5381,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
+ 	check_raid56_incompat_flag(info, type);
+ 	check_raid1c34_incompat_flag(info, type);
+ 
+-	return 0;
++	return block_group;
+ 
+ error_del_extent:
+ 	write_lock(&em_tree->lock);
+@@ -5307,34 +5393,36 @@ error_del_extent:
+ 	/* One for the tree reference */
+ 	free_extent_map(em);
+ 
+-	return ret;
++	return block_group;
+ }
+ 
+-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
++struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
++					    u64 type)
+ {
+ 	struct btrfs_fs_info *info = trans->fs_info;
+ 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
+ 	struct btrfs_device_info *devices_info = NULL;
+ 	struct alloc_chunk_ctl ctl;
++	struct btrfs_block_group *block_group;
+ 	int ret;
+ 
+ 	lockdep_assert_held(&info->chunk_mutex);
+ 
+ 	if (!alloc_profile_is_valid(type, 0)) {
+ 		ASSERT(0);
+-		return -EINVAL;
++		return ERR_PTR(-EINVAL);
+ 	}
+ 
+ 	if (list_empty(&fs_devices->alloc_list)) {
+ 		if (btrfs_test_opt(info, ENOSPC_DEBUG))
+ 			btrfs_debug(info, "%s: no writable device", __func__);
+-		return -ENOSPC;
++		return ERR_PTR(-ENOSPC);
+ 	}
+ 
+ 	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ 		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
+ 		ASSERT(0);
+-		return -EINVAL;
++		return ERR_PTR(-EINVAL);
+ 	}
+ 
+ 	ctl.start = find_next_chunk(info);
+@@ -5344,46 +5432,43 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
+ 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
+ 			       GFP_NOFS);
+ 	if (!devices_info)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	ret = gather_device_info(fs_devices, &ctl, devices_info);
+-	if (ret < 0)
++	if (ret < 0) {
++		block_group = ERR_PTR(ret);
+ 		goto out;
++	}
+ 
+ 	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
+-	if (ret < 0)
++	if (ret < 0) {
++		block_group = ERR_PTR(ret);
+ 		goto out;
++	}
+ 
+-	ret = create_chunk(trans, &ctl, devices_info);
++	block_group = create_chunk(trans, &ctl, devices_info);
+ 
+ out:
+ 	kfree(devices_info);
+-	return ret;
++	return block_group;
+ }
+ 
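
The conversions in this hunk lean on the kernel's ERR_PTR/IS_ERR/PTR_ERR
convention: a small negative errno is encoded in the pointer value itself, so
a single return carries either a block group or an error. A userspace
re-creation of the idea (simplified from include/linux/err.h; MAX_ERRNO and
the casts here are assumptions, not the kernel's exact definitions):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* The top MAX_ERRNO addresses are treated as encoded errors. */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct block_group { unsigned long start; };

static struct block_group *alloc_chunk(int fail)
{
	static struct block_group bg = { .start = 1024 * 1024 };

	if (fail)
		return ERR_PTR(-ENOSPC);	/* error travels in the pointer */
	return &bg;
}

int main(void)
{
	struct block_group *bg = alloc_chunk(0);

	if (IS_ERR(bg))
		printf("error: %ld\n", PTR_ERR(bg));
	else
		printf("block group at %lu\n", bg->start);
	return 0;
}
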
+ /*
+- * Chunk allocation falls into two parts. The first part does work
+- * that makes the new allocated chunk usable, but does not do any operation
+- * that modifies the chunk tree. The second part does the work that
+- * requires modifying the chunk tree. This division is important for the
+- * bootstrap process of adding storage to a seed btrfs.
++ * This function, btrfs_finish_chunk_alloc(), belongs to phase 2.
++ *
++ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
++ * phases.
+  */
+ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
+ 			     u64 chunk_offset, u64 chunk_size)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+-	struct btrfs_root *extent_root = fs_info->extent_root;
+-	struct btrfs_root *chunk_root = fs_info->chunk_root;
+-	struct btrfs_key key;
+ 	struct btrfs_device *device;
+-	struct btrfs_chunk *chunk;
+-	struct btrfs_stripe *stripe;
+ 	struct extent_map *em;
+ 	struct map_lookup *map;
+-	size_t item_size;
+ 	u64 dev_offset;
+ 	u64 stripe_size;
+-	int i = 0;
++	int i;
+ 	int ret = 0;
+ 
+ 	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
+@@ -5391,53 +5476,117 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
+ 		return PTR_ERR(em);
+ 
+ 	map = em->map_lookup;
+-	item_size = btrfs_chunk_item_size(map->num_stripes);
+ 	stripe_size = em->orig_block_len;
+ 
+-	chunk = kzalloc(item_size, GFP_NOFS);
+-	if (!chunk) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-
+ 	/*
+ 	 * Take the device list mutex to prevent races with the final phase of
+ 	 * a device replace operation that replaces the device object associated
+ 	 * with the map's stripes, because the device object's id can change
+ 	 * at any time during that final phase of the device replace operation
+-	 * (dev-replace.c:btrfs_dev_replace_finishing()).
++	 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
++	 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
++	 * resulting in persisting a device extent item with such ID.
+ 	 */
+ 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ 	for (i = 0; i < map->num_stripes; i++) {
+ 		device = map->stripes[i].dev;
+ 		dev_offset = map->stripes[i].physical;
+ 
+-		ret = btrfs_update_device(trans, device);
+-		if (ret)
+-			break;
+ 		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
+ 					     dev_offset, stripe_size);
+ 		if (ret)
+ 			break;
+ 	}
+-	if (ret) {
+-		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
++	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
++
++	free_extent_map(em);
++	return ret;
++}
++
++/*
++ * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
++ * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
++ * chunks.
++ *
++ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
++ * phases.
++ */
++int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
++				     struct btrfs_block_group *bg)
++{
++	struct btrfs_fs_info *fs_info = trans->fs_info;
++	struct btrfs_root *extent_root = fs_info->extent_root;
++	struct btrfs_root *chunk_root = fs_info->chunk_root;
++	struct btrfs_key key;
++	struct btrfs_chunk *chunk;
++	struct btrfs_stripe *stripe;
++	struct extent_map *em;
++	struct map_lookup *map;
++	size_t item_size;
++	int i;
++	int ret;
++
++	/*
++	 * We take the chunk_mutex for 2 reasons:
++	 *
++	 * 1) Updates and insertions in the chunk btree must be done while holding
++	 *    the chunk_mutex, as well as updating the system chunk array in the
++	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
++	 *    details;
++	 *
++	 * 2) To prevent races with the final phase of a device replace operation
++	 *    that replaces the device object associated with the map's stripes,
++	 *    because the device object's id can change at any time during that
++	 *    final phase of the device replace operation
++	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
++	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
++	 *    which would cause a failure when updating the device item, which does
++	 *    not exist, or when persisting a stripe of the chunk item with such an ID.
++	 *    Here we can't use the device_list_mutex because our caller already
++	 *    has locked the chunk_mutex, and the final phase of device replace
++	 *    acquires both mutexes - first the device_list_mutex and then the
++	 *    chunk_mutex. Using any of those two mutexes protects us from a
++	 *    concurrent device replace.
++	 */
++	lockdep_assert_held(&fs_info->chunk_mutex);
++
++	em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
++	if (IS_ERR(em)) {
++		ret = PTR_ERR(em);
++		btrfs_abort_transaction(trans, ret);
++		return ret;
++	}
++
++	map = em->map_lookup;
++	item_size = btrfs_chunk_item_size(map->num_stripes);
++
++	chunk = kzalloc(item_size, GFP_NOFS);
++	if (!chunk) {
++		ret = -ENOMEM;
++		btrfs_abort_transaction(trans, ret);
+ 		goto out;
+ 	}
+ 
++	for (i = 0; i < map->num_stripes; i++) {
++		struct btrfs_device *device = map->stripes[i].dev;
++
++		ret = btrfs_update_device(trans, device);
++		if (ret)
++			goto out;
++	}
++
+ 	stripe = &chunk->stripe;
+ 	for (i = 0; i < map->num_stripes; i++) {
+-		device = map->stripes[i].dev;
+-		dev_offset = map->stripes[i].physical;
++		struct btrfs_device *device = map->stripes[i].dev;
++		const u64 dev_offset = map->stripes[i].physical;
+ 
+ 		btrfs_set_stack_stripe_devid(stripe, device->devid);
+ 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
+ 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
+ 		stripe++;
+ 	}
+-	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ 
+-	btrfs_set_stack_chunk_length(chunk, chunk_size);
++	btrfs_set_stack_chunk_length(chunk, bg->length);
+ 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
+ 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
+ 	btrfs_set_stack_chunk_type(chunk, map->type);
+@@ -5449,15 +5598,18 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
+ 
+ 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
+ 	key.type = BTRFS_CHUNK_ITEM_KEY;
+-	key.offset = chunk_offset;
++	key.offset = bg->start;
+ 
+ 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
+-	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
+-		/*
+-		 * TODO: Cleanup of inserted chunk root in case of
+-		 * failure.
+-		 */
++	if (ret)
++		goto out;
++
++	bg->chunk_item_inserted = 1;
++
++	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
+ 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
++		if (ret)
++			goto out;
+ 	}
+ 
+ out:
+@@ -5470,16 +5622,41 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	u64 alloc_profile;
+-	int ret;
++	struct btrfs_block_group *meta_bg;
++	struct btrfs_block_group *sys_bg;
++
++	/*
++	 * When adding a new device for sprouting, the seed device is read-only
++	 * so we must first allocate a metadata and a system chunk. But before
++	 * adding the block group items to the extent, device and chunk btrees,
++	 * we must first:
++	 *
++	 * 1) Create both chunks without doing any changes to the btrees, as
++	 *    otherwise we would get -ENOSPC since the block groups from the
++	 *    seed device are read-only;
++	 *
++	 * 2) Add the device item for the new sprout device - finishing the setup
++	 *    of a new block group requires updating the device item in the chunk
++	 *    btree, so it must exist when we attempt to do it. The previous step
++	 *    ensures this does not fail with -ENOSPC.
++	 *
++	 * After that we can add the block group items to their btrees:
++	 * update existing device item in the chunk btree, add a new block group
++	 * item to the extent btree, add a new chunk item to the chunk btree and
++	 * finally add the new device extent items to the devices btree.
++	 */
+ 
+ 	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
+-	ret = btrfs_alloc_chunk(trans, alloc_profile);
+-	if (ret)
+-		return ret;
++	meta_bg = btrfs_alloc_chunk(trans, alloc_profile);
++	if (IS_ERR(meta_bg))
++		return PTR_ERR(meta_bg);
+ 
+ 	alloc_profile = btrfs_system_alloc_profile(fs_info);
+-	ret = btrfs_alloc_chunk(trans, alloc_profile);
+-	return ret;
++	sys_bg = btrfs_alloc_chunk(trans, alloc_profile);
++	if (IS_ERR(sys_bg))
++		return PTR_ERR(sys_bg);
++
++	return 0;
+ }
+ 
+ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
+@@ -7359,10 +7536,18 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
+ 			total_dev++;
+ 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
+ 			struct btrfs_chunk *chunk;
++
++			/*
++			 * We are only called at mount time, so no need to take
++			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
++			 * we always lock first fs_info->chunk_mutex before
++			 * acquiring any locks on the chunk tree. This is a
++			 * requirement for chunk allocation, see the comment on
++			 * top of btrfs_chunk_alloc() for details.
++			 */
++			ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
+ 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
+-			mutex_lock(&fs_info->chunk_mutex);
+ 			ret = read_one_chunk(&found_key, leaf, chunk);
+-			mutex_unlock(&fs_info->chunk_mutex);
+ 			if (ret)
+ 				goto error;
+ 		}
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index d4c3e0dd32b87..b7c34019f827f 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -447,7 +447,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
+ 			  struct btrfs_io_geometry *io_geom);
+ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
+ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
+-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
++struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
++					    u64 type);
+ void btrfs_mapping_tree_free(struct extent_map_tree *tree);
+ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+ 			   int mirror_num);
+@@ -505,6 +506,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
+ 				    u64 logical);
+ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
+ 			     u64 chunk_offset, u64 chunk_size);
++int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
++				     struct btrfs_block_group *bg);
+ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
+ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+ 				       u64 logical, u64 length);
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index c000fe338f7e0..c54317c10f58d 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -78,10 +78,6 @@ static int ceph_set_page_dirty(struct page *page)
+ 	struct inode *inode;
+ 	struct ceph_inode_info *ci;
+ 	struct ceph_snap_context *snapc;
+-	int ret;
+-
+-	if (unlikely(!mapping))
+-		return !TestSetPageDirty(page);
+ 
+ 	if (PageDirty(page)) {
+ 		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
+@@ -127,11 +123,7 @@ static int ceph_set_page_dirty(struct page *page)
+ 	page->private = (unsigned long)snapc;
+ 	SetPagePrivate(page);
+ 
+-	ret = __set_page_dirty_nobuffers(page);
+-	WARN_ON(!PageLocked(page));
+-	WARN_ON(!page->mapping);
+-
+-	return ret;
++	return __set_page_dirty_nobuffers(page);
+ }
+ 
+ /*
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index 6b1ce4efb591c..6988fe10b1a77 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -173,7 +173,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
+ 		}
+ 	}
+ 
+-	rc = dns_resolve_server_name_to_ip(name, &srvIP);
++	rc = dns_resolve_server_name_to_ip(name, &srvIP, NULL);
+ 	if (rc < 0) {
+ 		cifs_dbg(FYI, "%s: Failed to resolve server part of %s to IP: %d\n",
+ 			 __func__, name, rc);
+@@ -208,6 +208,10 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
+ 		else
+ 			noff = tkn_e - (sb_mountdata + off) + 1;
+ 
++		if (strncasecmp(sb_mountdata + off, "cruid=", 6) == 0) {
++			off += noff;
++			continue;
++		}
+ 		if (strncasecmp(sb_mountdata + off, "unc=", 4) == 0) {
+ 			off += noff;
+ 			continue;
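
The new cruid= branch mirrors the existing unc= one: while copying the
comma-separated mount string, both options are skipped so the composed
submount options do not carry stale values. A simplified userspace version of
that filtering (strtok-based, purely illustrative):

#include <stdio.h>
#include <string.h>
#include <strings.h>

static int skip_option(const char *tok)
{
	return strncasecmp(tok, "cruid=", 6) == 0 ||
	       strncasecmp(tok, "unc=", 4) == 0;
}

int main(void)
{
	char opts[] = "user=foo,cruid=1000,unc=\\\\srv\\share,vers=3.0";
	char out[128] = "";

	for (char *tok = strtok(opts, ","); tok; tok = strtok(NULL, ",")) {
		if (skip_option(tok))
			continue;	/* drop options the submount rebuilds */
		if (out[0])
			strcat(out, ",");
		strcat(out, tok);
	}
	printf("filtered options: %s\n", out);
	return 0;
}
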
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 723b97002d8dd..1192aa80cb9a8 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -84,6 +84,9 @@
+ #define SMB_ECHO_INTERVAL_MAX 600
+ #define SMB_ECHO_INTERVAL_DEFAULT 60
+ 
++/* dns resolution interval in seconds */
++#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600
++
+ /* maximum number of PDUs in one compound */
+ #define MAX_COMPOUND 5
+ 
+@@ -654,6 +657,7 @@ struct TCP_Server_Info {
+ 	/* point to the SMBD connection if RDMA is used instead of socket */
+ 	struct smbd_connection *smbd_conn;
+ 	struct delayed_work	echo; /* echo ping workqueue job */
++	struct delayed_work	resolve; /* dns resolution workqueue job */
+ 	char	*smallbuf;	/* pointer to current "small" buffer */
+ 	char	*bigbuf;	/* pointer to current "big" buffer */
+ 	/* Total size of this PDU. Only valid from cifs_demultiplex_thread */
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 09a3939f25bf6..4e42fc5e2ee94 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -92,6 +92,8 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
+ 	int rc;
+ 	int len;
+ 	char *unc, *ipaddr = NULL;
++	time64_t expiry, now;
++	unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT;
+ 
+ 	if (!server->hostname)
+ 		return -EINVAL;
+@@ -105,13 +107,13 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
+ 	}
+ 	scnprintf(unc, len, "\\\\%s", server->hostname);
+ 
+-	rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
++	rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry);
+ 	kfree(unc);
+ 
+ 	if (rc < 0) {
+ 		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
+ 			 __func__, server->hostname, rc);
+-		return rc;
++		goto requeue_resolve;
+ 	}
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+@@ -120,7 +122,45 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 	kfree(ipaddr);
+ 
+-	return !rc ? -1 : 0;
++	/* rc == 1 means success here */
++	if (rc) {
++		now = ktime_get_real_seconds();
++		if (expiry && expiry > now)
++			/*
++			 * To make sure we don't use the cached entry, retry 1s
++			 * after expiry.
++			 */
++			ttl = (expiry - now + 1);
++	}
++	rc = !rc ? -1 : 0;
++
++requeue_resolve:
++	cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n",
++		 __func__, ttl);
++	mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ));
++
++	return rc;
++}
++
++static void cifs_resolve_server(struct work_struct *work)
++{
++	int rc;
++	struct TCP_Server_Info *server = container_of(work,
++					struct TCP_Server_Info, resolve.work);
++
++	mutex_lock(&server->srv_mutex);
++
++	/*
++	 * Resolve the hostname again to make sure the IP address is up to date.
++	 */
++	rc = reconn_set_ipaddr_from_hostname(server);
++	if (rc) {
++		cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
++				__func__, rc);
++	}
++
++	mutex_unlock(&server->srv_mutex);
+ }
+ 
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+@@ -720,6 +760,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+ 	cancel_delayed_work_sync(&server->echo);
++	cancel_delayed_work_sync(&server->resolve);
+ 
+ 	spin_lock(&GlobalMid_Lock);
+ 	server->tcpStatus = CifsExiting;
+@@ -1300,6 +1341,7 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+ 	cancel_delayed_work_sync(&server->echo);
++	cancel_delayed_work_sync(&server->resolve);
+ 
+ 	if (from_reconnect)
+ 		/*
+@@ -1382,6 +1424,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
+ 	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
+ 	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
+ 	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
++	INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
+ 	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
+ 	mutex_init(&tcp_ses->reconnect_mutex);
+ 	memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
+@@ -1462,6 +1505,12 @@ smbd_connected:
+ 	/* queue echo request delayed work */
+ 	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
+ 
++	/* queue dns resolution delayed work */
++	cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n",
++		 __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT);
++
++	queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ));
++
+ 	return tcp_ses;
+ 
+ out_err_crypto_release:
+@@ -4136,7 +4185,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ 	if (!tree)
+ 		return -ENOMEM;
+ 
+-	if (!tcon->dfs_path) {
++	/* If it is not DFS or there was no cached DFS referral, then reconnect to the same share */
++	if (!tcon->dfs_path || dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl)) {
+ 		if (tcon->ipc) {
+ 			scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+ 			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
+@@ -4146,9 +4196,6 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ 		goto out;
+ 	}
+ 
+-	rc = dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl);
+-	if (rc)
+-		goto out;
+ 	isroot = ref.server_type == DFS_TYPE_ROOT;
+ 	free_dfs_info_param(&ref);
+ 
+diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
+index 534cbba72789f..8c78b48faf015 100644
+--- a/fs/cifs/dns_resolve.c
++++ b/fs/cifs/dns_resolve.c
+@@ -36,6 +36,7 @@
+  * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
+  * @unc: UNC path specifying the server (with '/' as delimiter)
+  * @ip_addr: Where to return the IP address.
++ * @expiry: Where to return the expiry time for the dns record.
+  *
+  * The IP address will be returned in string form, and the caller is
+  * responsible for freeing it.
+@@ -43,7 +44,7 @@
+  * Returns length of result on success, -ve on error.
+  */
+ int
+-dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
++dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry)
+ {
+ 	struct sockaddr_storage ss;
+ 	const char *hostname, *sep;
+@@ -78,13 +79,14 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
+ 
+ 	/* Perform the upcall */
+ 	rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len,
+-		       NULL, ip_addr, NULL, false);
++		       NULL, ip_addr, expiry, false);
+ 	if (rc < 0)
+ 		cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
+ 			 __func__, len, len, hostname);
+ 	else
+-		cifs_dbg(FYI, "%s: resolved: %*.*s to %s\n",
+-			 __func__, len, len, hostname, *ip_addr);
++		cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n",
++			 __func__, len, len, hostname, *ip_addr,
++			 expiry ? (*expiry) : 0);
+ 	return rc;
+ 
+ name_is_IP_address:
+diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
+index d3f5d27f4d06e..ff5483d5244d2 100644
+--- a/fs/cifs/dns_resolve.h
++++ b/fs/cifs/dns_resolve.h
+@@ -24,7 +24,7 @@
+ #define _DNS_RESOLVE_H
+ 
+ #ifdef __KERNEL__
+-extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
++extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry);
+ #endif /* KERNEL */
+ 
+ #endif /* _DNS_RESOLVE_H */
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 82e176720ca6a..f736da72e5e0a 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -1105,7 +1105,7 @@ int match_target_ip(struct TCP_Server_Info *server,
+ 
+ 	cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
+ 
+-	rc = dns_resolve_server_name_to_ip(target, &tip);
++	rc = dns_resolve_server_name_to_ip(target, &tip, NULL);
+ 	if (rc < 0)
+ 		goto out;
+ 
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index be799040a4154..b96ecba918990 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -327,6 +327,7 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
+ 
+ 	set_buffer_meta(bh);
+ 	set_buffer_prio(bh);
++	set_buffer_uptodate(bh);
+ 	if (ext4_handle_valid(handle)) {
+ 		err = jbd2_journal_dirty_metadata(handle, bh);
+ 		/* Errors can only happen due to aborted journal or a nasty bug */
+@@ -355,7 +356,6 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
+ 					 err);
+ 		}
+ 	} else {
+-		set_buffer_uptodate(bh);
+ 		if (inode)
+ 			mark_buffer_dirty_inode(bh, inode);
+ 		else
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 62dd98189e326..265745154945e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -705,15 +705,23 @@ static void flush_stashed_error_work(struct work_struct *work)
+ 	 * ext4 error handling code during handling of previous errors.
+ 	 */
+ 	if (!sb_rdonly(sbi->s_sb) && journal) {
++		struct buffer_head *sbh = sbi->s_sbh;
+ 		handle = jbd2_journal_start(journal, 1);
+ 		if (IS_ERR(handle))
+ 			goto write_directly;
+-		if (jbd2_journal_get_write_access(handle, sbi->s_sbh)) {
++		if (jbd2_journal_get_write_access(handle, sbh)) {
+ 			jbd2_journal_stop(handle);
+ 			goto write_directly;
+ 		}
+ 		ext4_update_super(sbi->s_sb);
+-		if (jbd2_journal_dirty_metadata(handle, sbi->s_sbh)) {
++		if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
++			ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
++				 "superblock detected");
++			clear_buffer_write_io_error(sbh);
++			set_buffer_uptodate(sbh);
++		}
++
++		if (jbd2_journal_dirty_metadata(handle, sbh)) {
+ 			jbd2_journal_stop(handle);
+ 			goto write_directly;
+ 		}
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index a8567cb476213..70d6047ffacfa 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1799,6 +1799,7 @@ static void init_atgc_management(struct f2fs_sb_info *sbi)
+ 	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
+ 	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
+ 	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
++	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
+ }
+ 
+ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 17bd072a5d393..11aafe93180c0 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -153,7 +153,8 @@ fail_drop:
+ 	return ERR_PTR(err);
+ }
+ 
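++/*
++ * @tmp_ext: when true, match @sub after any interior '.' (temp-file style
++ * names); when false, @sub must be the final extension of @s.
++ */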
+-static inline int is_extension_exist(const unsigned char *s, const char *sub)
++static inline int is_extension_exist(const unsigned char *s, const char *sub,
++						bool tmp_ext)
+ {
+ 	size_t slen = strlen(s);
+ 	size_t sublen = strlen(sub);
+@@ -169,6 +170,13 @@ static inline int is_extension_exist(const unsigned char *s, const char *sub)
+ 	if (slen < sublen + 2)
+ 		return 0;
+ 
++	if (!tmp_ext) {
++		/* file has no temp extension */
++		if (s[slen - sublen - 1] != '.')
++			return 0;
++		return !strncasecmp(s + slen - sublen, sub, sublen);
++	}
++
+ 	for (i = 1; i < slen - sublen; i++) {
+ 		if (s[i] != '.')
+ 			continue;
+@@ -194,7 +202,7 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
+ 	hot_count = sbi->raw_super->hot_ext_count;
+ 
+ 	for (i = 0; i < cold_count + hot_count; i++) {
+-		if (is_extension_exist(name, extlist[i]))
++		if (is_extension_exist(name, extlist[i], true))
+ 			break;
+ 	}
+ 
+@@ -295,7 +303,7 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
+ 	hot_count = sbi->raw_super->hot_ext_count;
+ 
+ 	for (i = cold_count; i < cold_count + hot_count; i++) {
+-		if (is_extension_exist(name, extlist[i])) {
++		if (is_extension_exist(name, extlist[i], false)) {
+ 			up_read(&sbi->sb_lock);
+ 			return;
+ 		}
+@@ -306,7 +314,7 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
+ 	ext = F2FS_OPTION(sbi).extensions;
+ 
+ 	for (i = 0; i < ext_cnt; i++) {
+-		if (!is_extension_exist(name, ext[i]))
++		if (!is_extension_exist(name, ext[i], false))
+ 			continue;
+ 
+ 		set_compress_context(inode);
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 1f7bc4b3f6aec..a4b4b8297ec39 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -4281,4 +4281,5 @@ module_exit(exit_f2fs_fs)
+ MODULE_AUTHOR("Samsung Electronics's Praesto Team");
+ MODULE_DESCRIPTION("Flash Friendly File System");
+ MODULE_LICENSE("GPL");
++MODULE_SOFTDEP("pre: crc32");
+ 
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 58ac04cca587f..044d265f74ab1 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2023,7 +2023,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
+ 
+ 	/* ctx stays valid until unlock, even if we drop all our ctx->refs */
+ 	mutex_lock(&ctx->uring_lock);
+-	if (!(current->flags & PF_EXITING) && !current->in_execve)
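++	/* current may not be the submitting task here; check the request's owner */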
++	if (!(req->task->flags & PF_EXITING) && !req->task->in_execve)
+ 		__io_queue_sqe(req);
+ 	else
+ 		__io_req_task_cancel(req, -EFAULT);
+@@ -5727,12 +5727,9 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
+ 	int ret;
+ 
+ 	ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
+-	if (ret != -ENOENT) {
+-		spin_lock_irqsave(&ctx->completion_lock, flags);
+-		goto done;
+-	}
+-
+ 	spin_lock_irqsave(&ctx->completion_lock, flags);
++	if (ret != -ENOENT)
++		goto done;
+ 	ret = io_timeout_cancel(ctx, sqe_addr);
+ 	if (ret != -ENOENT)
+ 		goto done;
+@@ -5747,7 +5744,6 @@ done:
+ 
+ 	if (ret < 0)
+ 		req_set_fail_links(req);
+-	io_put_req(req);
+ }
+ 
+ static int io_async_cancel_prep(struct io_kiocb *req,
+@@ -6310,7 +6306,6 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+ 		io_put_req_deferred(req, 1);
+ 	} else {
+ 		io_req_complete_post(req, -ETIME, 0);
+-		io_put_req_deferred(req, 1);
+ 	}
+ 	return HRTIMER_NORESTART;
+ }
+diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
+index 9330eff210e0c..78fd136ac13b9 100644
+--- a/fs/jfs/jfs_logmgr.c
++++ b/fs/jfs/jfs_logmgr.c
+@@ -1324,6 +1324,7 @@ int lmLogInit(struct jfs_log * log)
+ 		} else {
+ 			if (!uuid_equal(&logsuper->uuid, &log->uuid)) {
+ 				jfs_warn("wrong uuid on JFS log device");
++				rc = -EINVAL;
+ 				goto errout20;
+ 			}
+ 			log->size = le32_to_cpu(logsuper->size);
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 04bf8066980c1..d6ac2c4f88b6b 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -75,6 +75,13 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
+ 	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
+ }
+ 
++static void nfs_mark_return_delegation(struct nfs_server *server,
++				       struct nfs_delegation *delegation)
++{
++	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
++	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
++}
++
+ static bool
+ nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
+ 		fmode_t flags)
+@@ -293,6 +300,7 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
+ 		goto out;
+ 	spin_lock(&delegation->lock);
+ 	if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
++		clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
+ 		/* Refcount matched in nfs_end_delegation_return() */
+ 		ret = nfs_get_delegation(delegation);
+ 	}
+@@ -314,16 +322,17 @@ nfs_start_delegation_return(struct nfs_inode *nfsi)
+ 	return delegation;
+ }
+ 
+-static void
+-nfs_abort_delegation_return(struct nfs_delegation *delegation,
+-		struct nfs_client *clp)
++static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
++					struct nfs_client *clp, int err)
+ {
+ 
+ 	spin_lock(&delegation->lock);
+ 	clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
+-	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
++	if (err == -EAGAIN) {
++		set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
++		set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state);
++	}
+ 	spin_unlock(&delegation->lock);
+-	set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+ }
+ 
+ static struct nfs_delegation *
+@@ -528,7 +537,7 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
+ 	} while (err == 0);
+ 
+ 	if (err) {
+-		nfs_abort_delegation_return(delegation, clp);
++		nfs_abort_delegation_return(delegation, clp, err);
+ 		goto out;
+ 	}
+ 
+@@ -557,6 +566,7 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
+ 	if (ret)
+ 		clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+ 	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
++	    test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
+ 	    test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+ 		ret = false;
+ 
+@@ -636,6 +646,38 @@ out:
+ 	return err;
+ }
+ 
++static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
++{
++	struct nfs_delegation *d;
++	bool ret = false;
++
++	list_for_each_entry_rcu(d, &server->delegations, super_list) {
++		if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
++			continue;
++		nfs_mark_return_delegation(server, d);
++		clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
++		ret = true;
++	}
++	return ret;
++}
++
++static bool nfs_client_clear_delayed_delegations(struct nfs_client *clp)
++{
++	struct nfs_server *server;
++	bool ret = false;
++
++	if (!test_and_clear_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state))
++		goto out;
++	rcu_read_lock();
++	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
++		if (nfs_server_clear_delayed_delegations(server))
++			ret = true;
++	}
++	rcu_read_unlock();
++out:
++	return ret;
++}
++
+ /**
+  * nfs_client_return_marked_delegations - return previously marked delegations
+  * @clp: nfs_client to process
+@@ -648,8 +690,14 @@ out:
+  */
+ int nfs_client_return_marked_delegations(struct nfs_client *clp)
+ {
+-	return nfs_client_for_each_server(clp,
+-			nfs_server_return_marked_delegations, NULL);
++	int err = nfs_client_for_each_server(
++		clp, nfs_server_return_marked_delegations, NULL);
++	if (err)
++		return err;
++	/* If a return was delayed, sleep to prevent hard looping */
++	if (nfs_client_clear_delayed_delegations(clp))
++		ssleep(1);
++	return 0;
+ }
+ 
+ /**
+@@ -764,13 +812,6 @@ static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
+ 	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+ }
+ 
+-static void nfs_mark_return_delegation(struct nfs_server *server,
+-		struct nfs_delegation *delegation)
+-{
+-	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+-	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+-}
+-
+ static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
+ {
+ 	struct nfs_delegation *delegation;
+diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
+index 9b00a0b7f8321..26f57a99da84e 100644
+--- a/fs/nfs/delegation.h
++++ b/fs/nfs/delegation.h
+@@ -36,6 +36,7 @@ enum {
+ 	NFS_DELEGATION_REVOKED,
+ 	NFS_DELEGATION_TEST_EXPIRED,
+ 	NFS_DELEGATION_INODE_FREEING,
++	NFS_DELEGATION_RETURN_DELAYED,
+ };
+ 
+ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 2d30a4da49fa0..2e894fec036b0 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -700,8 +700,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ {
+ 	struct nfs_direct_req *dreq = hdr->dreq;
+ 	struct nfs_commit_info cinfo;
+-	bool request_commit = false;
+ 	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
++	int flags = NFS_ODIRECT_DONE;
+ 
+ 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ 
+@@ -713,15 +713,9 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ 
+ 	nfs_direct_count_bytes(dreq, hdr);
+ 	if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
+-		switch (dreq->flags) {
+-		case 0:
++		if (!dreq->flags)
+ 			dreq->flags = NFS_ODIRECT_DO_COMMIT;
+-			request_commit = true;
+-			break;
+-		case NFS_ODIRECT_RESCHED_WRITES:
+-		case NFS_ODIRECT_DO_COMMIT:
+-			request_commit = true;
+-		}
++		flags = dreq->flags;
+ 	}
+ 	spin_unlock(&dreq->lock);
+ 
+@@ -729,12 +723,15 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ 
+ 		req = nfs_list_entry(hdr->pages.next);
+ 		nfs_list_remove_request(req);
+-		if (request_commit) {
++		if (flags == NFS_ODIRECT_DO_COMMIT) {
+ 			kref_get(&req->wb_kref);
+ 			memcpy(&req->wb_verf, &hdr->verf.verifier,
+ 			       sizeof(req->wb_verf));
+ 			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
+ 				hdr->ds_commit_idx);
++		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
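++			/* rescheduled writes go back through the commit list to be resent */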
++			kref_get(&req->wb_kref);
++			nfs_mark_request_commit(req, NULL, &cinfo, 0);
+ 		}
+ 		nfs_unlock_and_release_request(req);
+ 	}
+diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
+index c4c021c6ebbd8..d743629e05e12 100644
+--- a/fs/nfs/fscache.c
++++ b/fs/nfs/fscache.c
+@@ -385,12 +385,15 @@ static void nfs_readpage_from_fscache_complete(struct page *page,
+ 		 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
+ 		 page, context, error);
+ 
+-	/* if the read completes with an error, we just unlock the page and let
+-	 * the VM reissue the readpage */
+-	if (!error) {
++	/*
++	 * If the read completes with an error, mark the page with PG_checked,
++	 * unlock the page, and let the VM reissue the readpage.
++	 */
++	if (!error)
+ 		SetPageUptodate(page);
+-		unlock_page(page);
+-	}
++	else
++		SetPageChecked(page);
++	unlock_page(page);
+ }
+ 
+ /*
+@@ -405,6 +408,11 @@ int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
+ 		 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
+ 		 nfs_i_fscache(inode), page, page->index, page->flags, inode);
+ 
++	if (PageChecked(page)) {
++		ClearPageChecked(page);
++		return 1;
++	}
++
+ 	ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
+ 					 page,
+ 					 nfs_readpage_from_fscache_complete,
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index ae8bc84e39fb3..6b1f2bf65bee9 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1079,6 +1079,7 @@ EXPORT_SYMBOL_GPL(nfs_inode_attach_open_context);
+ void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
+ {
+ 	filp->private_data = get_nfs_open_context(ctx);
++	set_bit(NFS_CONTEXT_FILE_OPEN, &ctx->flags);
+ 	if (list_empty(&ctx->list))
+ 		nfs_inode_attach_open_context(ctx);
+ }
+@@ -1098,6 +1099,8 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct
+ 			continue;
+ 		if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
+ 			continue;
++		if (!test_bit(NFS_CONTEXT_FILE_OPEN, &pos->flags))
++			continue;
+ 		ctx = get_nfs_open_context(pos);
+ 		if (ctx)
+ 			break;
+@@ -1113,6 +1116,7 @@ void nfs_file_clear_open_context(struct file *filp)
+ 	if (ctx) {
+ 		struct inode *inode = d_inode(ctx->dentry);
+ 
++		clear_bit(NFS_CONTEXT_FILE_OPEN, &ctx->flags);
+ 		/*
+ 		 * A fatal error occurred on a previous write. Try to write
+ 		 * back every page again.
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index 5c4e23abc3451..2299446b3b89b 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -385,7 +385,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+ 				break;
+ 
+ 			case NFS3_CREATE_UNCHECKED:
+-				goto out;
++				goto out_release_acls;
+ 		}
+ 		nfs_fattr_init(data->res.dir_attr);
+ 		nfs_fattr_init(data->res.fattr);
+@@ -751,7 +751,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+ 		break;
+ 	default:
+ 		status = -EINVAL;
+-		goto out;
++		goto out_release_acls;
+ 	}
+ 
+ 	d_alias = nfs3_do_create(dir, dentry, data);
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index 543d916f79abb..3e344bec3647b 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -45,6 +45,7 @@ enum nfs4_client_state {
+ 	NFS4CLNT_RECALL_RUNNING,
+ 	NFS4CLNT_RECALL_ANY_LAYOUT_READ,
+ 	NFS4CLNT_RECALL_ANY_LAYOUT_RW,
++	NFS4CLNT_DELEGRETURN_DELAYED,
+ };
+ 
+ #define NFS4_RENEW_TIMEOUT		0x01
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 42719384e25fe..28431acd1230b 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -197,8 +197,11 @@ void nfs40_shutdown_client(struct nfs_client *clp)
+ 
+ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+ {
+-	int err;
++	char buf[INET6_ADDRSTRLEN + 1];
++	const char *ip_addr = cl_init->ip_addr;
+ 	struct nfs_client *clp = nfs_alloc_client(cl_init);
++	int err;
++
+ 	if (IS_ERR(clp))
+ 		return clp;
+ 
+@@ -222,6 +225,44 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+ 	init_waitqueue_head(&clp->cl_lock_waitq);
+ #endif
+ 	INIT_LIST_HEAD(&clp->pending_cb_stateids);
++
++	if (cl_init->minorversion != 0)
++		__set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
++	__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
++	__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
++
++	/*
++	 * Set up the connection to the server before we add it to the
++	 * global list.
++	 */
++	err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
++	if (err == -EINVAL)
++		err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
++	if (err < 0)
++		goto error;
++
++	/* If no clientaddr= option was specified, find a usable cb address */
++	if (ip_addr == NULL) {
++		struct sockaddr_storage cb_addr;
++		struct sockaddr *sap = (struct sockaddr *)&cb_addr;
++
++		err = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
++		if (err < 0)
++			goto error;
++		err = rpc_ntop(sap, buf, sizeof(buf));
++		if (err < 0)
++			goto error;
++		ip_addr = (const char *)buf;
++	}
++	strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
++
++	err = nfs_idmap_new(clp);
++	if (err < 0) {
++		dprintk("%s: failed to create idmapper. Error = %d\n",
++			__func__, err);
++		goto error;
++	}
++	__set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
+ 	return clp;
+ 
+ error:
+@@ -372,8 +413,6 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
+ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+ 				    const struct nfs_client_initdata *cl_init)
+ {
+-	char buf[INET6_ADDRSTRLEN + 1];
+-	const char *ip_addr = cl_init->ip_addr;
+ 	struct nfs_client *old;
+ 	int error;
+ 
+@@ -381,43 +420,6 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+ 		/* the client is initialised already */
+ 		return clp;
+ 
+-	/* Check NFS protocol revision and initialize RPC op vector */
+-	clp->rpc_ops = &nfs_v4_clientops;
+-
+-	if (clp->cl_minorversion != 0)
+-		__set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
+-	__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+-	__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
+-
+-	error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
+-	if (error == -EINVAL)
+-		error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
+-	if (error < 0)
+-		goto error;
+-
+-	/* If no clientaddr= option was specified, find a usable cb address */
+-	if (ip_addr == NULL) {
+-		struct sockaddr_storage cb_addr;
+-		struct sockaddr *sap = (struct sockaddr *)&cb_addr;
+-
+-		error = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
+-		if (error < 0)
+-			goto error;
+-		error = rpc_ntop(sap, buf, sizeof(buf));
+-		if (error < 0)
+-			goto error;
+-		ip_addr = (const char *)buf;
+-	}
+-	strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
+-
+-	error = nfs_idmap_new(clp);
+-	if (error < 0) {
+-		dprintk("%s: failed to create idmapper. Error = %d\n",
+-			__func__, error);
+-		goto error;
+-	}
+-	__set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
+-
+ 	error = nfs4_init_client_minor_version(clp);
+ 	if (error < 0)
+ 		goto error;
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index a5e3628a52733..2da35a31508c7 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -966,10 +966,8 @@ void
+ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
+ 			const struct cred *cred, bool update_barrier)
+ {
+-	u32 oldseq, newseq, new_barrier = 0;
+-
+-	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
+-	newseq = be32_to_cpu(new->seqid);
++	u32 oldseq = be32_to_cpu(lo->plh_stateid.seqid);
++	u32 newseq = be32_to_cpu(new->seqid);
+ 
+ 	if (!pnfs_layout_is_valid(lo)) {
+ 		pnfs_set_layout_cred(lo, cred);
+@@ -979,19 +977,21 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
+ 		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+ 		return;
+ 	}
+-	if (pnfs_seqid_is_newer(newseq, oldseq)) {
++
++	if (pnfs_seqid_is_newer(newseq, oldseq))
+ 		nfs4_stateid_copy(&lo->plh_stateid, new);
+-		/*
+-		 * Because of wraparound, we want to keep the barrier
+-		 * "close" to the current seqids.
+-		 */
+-		new_barrier = newseq - atomic_read(&lo->plh_outstanding);
+-	}
+-	if (update_barrier)
+-		new_barrier = be32_to_cpu(new->seqid);
+-	else if (new_barrier == 0)
++
++	if (update_barrier) {
++		pnfs_barrier_update(lo, newseq);
+ 		return;
+-	pnfs_barrier_update(lo, new_barrier);
++	}
++	/*
++	 * Because of wraparound, we want to keep the barrier
++	 * "close" to the current seqids. We really only want to
++	 * get here from a layoutget call.
++	 */
++	if (atomic_read(&lo->plh_outstanding) == 1)
++		pnfs_barrier_update(lo, be32_to_cpu(lo->plh_stateid.seqid));
+ }
+ 
+ static bool
+@@ -2014,7 +2014,7 @@ lookup_again:
+ 	 * If the layout segment list is empty, but there are outstanding
+ 	 * layoutget calls, then they might be subject to a layoutrecall.
+ 	 */
+-	if (list_empty(&lo->plh_segs) &&
++	if ((list_empty(&lo->plh_segs) || !pnfs_layout_is_valid(lo)) &&
+ 	    atomic_read(&lo->plh_outstanding) != 0) {
+ 		spin_unlock(&ino->i_lock);
+ 		lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
+@@ -2390,11 +2390,13 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
+ 		goto out_forget;
+ 	}
+ 
++	if (!pnfs_layout_is_valid(lo) && !pnfs_is_first_layoutget(lo))
++		goto out_forget;
++
+ 	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
+ 		/* existing state ID, make sure the sequence number matches. */
+ 		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
+-			if (!pnfs_layout_is_valid(lo) &&
+-			    pnfs_is_first_layoutget(lo))
++			if (!pnfs_layout_is_valid(lo))
+ 				lo->plh_barrier = 0;
+ 			dprintk("%s forget reply due to sequence\n", __func__);
+ 			goto out_forget;
+@@ -2415,8 +2417,6 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
+ 		goto out_forget;
+ 	} else {
+ 		/* We have a completely new layout */
+-		if (!pnfs_is_first_layoutget(lo))
+-			goto out_forget;
+ 		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
+ 	}
+ 
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index 49d3389bd8130..1c2c0d08614ee 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -805,19 +805,16 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);
+ 
+-static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
++static int nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
+ {
+ 	might_sleep();
+-	wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
+-			TASK_KILLABLE);
++	return wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING, TASK_KILLABLE);
+ }
+ 
+ static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
+ {
+ 	smp_mb__before_atomic();
+-	clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
+-	smp_mb__after_atomic();
+-	wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
++	clear_and_wake_up_bit(NFS4DS_CONNECTING, &ds->ds_state);
+ }
+ 
+ static struct nfs_client *(*get_v3_ds_connect)(
+@@ -993,30 +990,33 @@ int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+ {
+ 	int err;
+ 
+-again:
+-	err = 0;
+-	if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
+-		if (version == 3) {
+-			err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
+-						       retrans);
+-		} else if (version == 4) {
+-			err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
+-						       retrans, minor_version);
+-		} else {
+-			dprintk("%s: unsupported DS version %d\n", __func__,
+-				version);
+-			err = -EPROTONOSUPPORT;
+-		}
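++	/* wait out any other connecting task, then claim the CONNECTING bit */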
++	do {
++		err = nfs4_wait_ds_connect(ds);
++		if (err || ds->ds_clp)
++			goto out;
++		if (nfs4_test_deviceid_unavailable(devid))
++			return -ENODEV;
++	} while (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) != 0);
+ 
+-		nfs4_clear_ds_conn_bit(ds);
+-	} else {
+-		nfs4_wait_ds_connect(ds);
++	if (ds->ds_clp)
++		goto connect_done;
+ 
+-		/* what was waited on didn't connect AND didn't mark unavail */
+-		if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid))
+-			goto again;
++	switch (version) {
++	case 3:
++		err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, retrans);
++		break;
++	case 4:
++		err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo, retrans,
++					       minor_version);
++		break;
++	default:
++		dprintk("%s: unsupported DS version %d\n", __func__, version);
++		err = -EPROTONOSUPPORT;
+ 	}
+ 
++connect_done:
++	nfs4_clear_ds_conn_bit(ds);
++out:
+ 	/*
+ 	 * At this point the ds->ds_clp should be ready, but it might have
+ 	 * hit an error.
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index d2b6dce1f99f7..d13a22fc94a7d 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -363,22 +363,23 @@ int nfs_readpage(struct file *file, struct page *page)
+ 	} else
+ 		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
+ 
++	xchg(&desc.ctx->error, 0);
+ 	if (!IS_SYNC(inode)) {
+ 		ret = nfs_readpage_from_fscache(desc.ctx, inode, page);
+ 		if (ret == 0)
+-			goto out;
++			goto out_wait;
+ 	}
+ 
+-	xchg(&desc.ctx->error, 0);
+ 	nfs_pageio_init_read(&desc.pgio, inode, false,
+ 			     &nfs_async_read_completion_ops);
+ 
+ 	ret = readpage_async_filler(&desc, page);
++	if (ret)
++		goto out;
+ 
+-	if (!ret)
+-		nfs_pageio_complete_read(&desc.pgio, inode);
+-
++	nfs_pageio_complete_read(&desc.pgio, inode);
+ 	ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
++out_wait:
+ 	if (!ret) {
+ 		ret = wait_on_page_locked_killable(page);
+ 		if (!PageUptodate(page) && !ret)
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 886e50ed07c20..f8f528ce58358 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7150,7 +7150,6 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
+ 	unsigned int strhashval;
+ 	struct nfs4_client_reclaim *crp;
+ 
+-	trace_nfsd_clid_reclaim(nn, name.len, name.data);
+ 	crp = alloc_reclaim();
+ 	if (crp) {
+ 		strhashval = clientstr_hashval(name);
+@@ -7200,8 +7199,6 @@ nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
+ 	unsigned int strhashval;
+ 	struct nfs4_client_reclaim *crp = NULL;
+ 
+-	trace_nfsd_clid_find(nn, name.len, name.data);
+-
+ 	strhashval = clientstr_hashval(name);
+ 	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
+ 		if (compare_blob(&crp->cr_name, &name) == 0) {
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 92a0973dd6713..02a7954702296 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -512,35 +512,6 @@ DEFINE_EVENT(nfsd_net_class, nfsd_##name, \
+ DEFINE_NET_EVENT(grace_start);
+ DEFINE_NET_EVENT(grace_complete);
+ 
+-DECLARE_EVENT_CLASS(nfsd_clid_class,
+-	TP_PROTO(const struct nfsd_net *nn,
+-		 unsigned int namelen,
+-		 const unsigned char *namedata),
+-	TP_ARGS(nn, namelen, namedata),
+-	TP_STRUCT__entry(
+-		__field(unsigned long long, boot_time)
+-		__field(unsigned int, namelen)
+-		__dynamic_array(unsigned char,  name, namelen)
+-	),
+-	TP_fast_assign(
+-		__entry->boot_time = nn->boot_time;
+-		__entry->namelen = namelen;
+-		memcpy(__get_dynamic_array(name), namedata, namelen);
+-	),
+-	TP_printk("boot_time=%16llx nfs4_clientid=%.*s",
+-		__entry->boot_time, __entry->namelen, __get_str(name))
+-)
+-
+-#define DEFINE_CLID_EVENT(name) \
+-DEFINE_EVENT(nfsd_clid_class, nfsd_clid_##name, \
+-	TP_PROTO(const struct nfsd_net *nn, \
+-		 unsigned int namelen, \
+-		 const unsigned char *namedata), \
+-	TP_ARGS(nn, namelen, namedata))
+-
+-DEFINE_CLID_EVENT(find);
+-DEFINE_CLID_EVENT(reclaim);
+-
+ TRACE_EVENT(nfsd_clid_inuse_err,
+ 	TP_PROTO(const struct nfs4_client *clp),
+ 	TP_ARGS(clp),
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index fd6be35a1642c..f83a1d1225050 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1123,6 +1123,19 @@ out:
+ }
+ 
+ #ifdef CONFIG_NFSD_V3
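++/*
++ * Flush the range before nf_rwsem is taken; the _keep_errors variant leaves
++ * any I/O error pending so the vfs_fsync_range() in nfsd_commit() still
++ * sees it.
++ */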
++static int
++nfsd_filemap_write_and_wait_range(struct nfsd_file *nf, loff_t offset,
++				  loff_t end)
++{
++	struct address_space *mapping = nf->nf_file->f_mapping;
++	int ret = filemap_fdatawrite_range(mapping, offset, end);
++
++	if (ret)
++		return ret;
++	filemap_fdatawait_range_keep_errors(mapping, offset, end);
++	return 0;
++}
++
+ /*
+  * Commit all pending writes to stable storage.
+  *
+@@ -1153,10 +1166,11 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	if (err)
+ 		goto out;
+ 	if (EX_ISSYNC(fhp->fh_export)) {
+-		int err2;
++		int err2 = nfsd_filemap_write_and_wait_range(nf, offset, end);
+ 
+ 		down_write(&nf->nf_rwsem);
+-		err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
++		if (!err2)
++			err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
+ 		switch (err2) {
+ 		case 0:
+ 			nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net,
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index ee5efdc35cc1e..2f2e430461b21 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -209,7 +209,7 @@ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	buf->f_bavail = (sector_t) new_op->downcall.resp.statfs.blocks_avail;
+ 	buf->f_files = (sector_t) new_op->downcall.resp.statfs.files_total;
+ 	buf->f_ffree = (sector_t) new_op->downcall.resp.statfs.files_avail;
+-	buf->f_frsize = sb->s_blocksize;
++	buf->f_frsize = 0;
+ 
+ out_op_release:
+ 	op_release(new_op);
+diff --git a/fs/seq_file.c b/fs/seq_file.c
+index cb11a34fb8714..1cfae052439d9 100644
+--- a/fs/seq_file.c
++++ b/fs/seq_file.c
+@@ -32,6 +32,9 @@ static void seq_set_overflow(struct seq_file *m)
+ 
+ static void *seq_buf_alloc(unsigned long size)
+ {
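++	/* a read() can return at most MAX_RW_COUNT bytes; never allocate more */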
++	if (unlikely(size > MAX_RW_COUNT))
++		return NULL;
++
+ 	return kvmalloc(size, GFP_KERNEL_ACCOUNT);
+ }
+ 
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index d9d8d7794eff2..af1ea5fcd38db 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -1337,7 +1337,10 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 			goto out_release;
+ 		}
+ 
++		spin_lock(&whiteout->i_lock);
+ 		whiteout->i_state |= I_LINKABLE;
++		spin_unlock(&whiteout->i_lock);
++
+ 		whiteout_ui = ubifs_inode(whiteout);
+ 		whiteout_ui->data = dev;
+ 		whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0));
+@@ -1430,7 +1433,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 
+ 		inc_nlink(whiteout);
+ 		mark_inode_dirty(whiteout);
++
++		spin_lock(&whiteout->i_lock);
+ 		whiteout->i_state &= ~I_LINKABLE;
++		spin_unlock(&whiteout->i_lock);
++
+ 		iput(whiteout);
+ 	}
+ 
+diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
+index 2857e64d673d1..230717384a38e 100644
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -882,6 +882,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
+ 		struct ubifs_dent_node *xent, *pxent = NULL;
+ 
+ 		if (ui->xattr_cnt > ubifs_xattr_max_cnt(c)) {
++			err = -EPERM;
+ 			ubifs_err(c, "Cannot delete inode, it has too many xattrs!");
+ 			goto out_release;
+ 		}
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index d217c382b02de..4d87b36dbc8c5 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -13,6 +13,12 @@
+ /* all clang versions usable with the kernel support KASAN ABI version 5 */
+ #define KASAN_ABI_VERSION 5
+ 
++/*
++ * Note: __has_feature(*_sanitizer) is only true if the feature is enabled,
++ * so there is no need for an additional defined(CONFIG_*) check to avoid
++ * adding redundant attributes in other configurations.
++ */
++
+ #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
+ /* Emulate GCC's __SANITIZE_ADDRESS__ flag */
+ #define __SANITIZE_ADDRESS__
+@@ -45,6 +51,17 @@
+ #define __no_sanitize_undefined
+ #endif
+ 
++/*
++ * Support for __has_feature(coverage_sanitizer) was added in Clang 13 together
++ * with no_sanitize("coverage"). Prior versions of Clang support coverage
++ * instrumentation, but cannot be queried for support by the preprocessor.
++ */
++#if __has_feature(coverage_sanitizer)
++#define __no_sanitize_coverage __attribute__((no_sanitize("coverage")))
++#else
++#define __no_sanitize_coverage
++#endif
++
+ /*
+  * Not all versions of clang implement the type-generic versions
+  * of the builtin overflow checkers. Fortunately, clang implements
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 48750243db4c8..c6ab0dcb9c0c7 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -126,6 +126,12 @@
+ #define __no_sanitize_undefined
+ #endif
+ 
++#if defined(CONFIG_KCOV) && __has_attribute(__no_sanitize_coverage__)
++#define __no_sanitize_coverage __attribute__((no_sanitize_coverage))
++#else
++#define __no_sanitize_coverage
++#endif
++
+ #if GCC_VERSION >= 50100
+ #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+ #endif
+diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
+index e5dd5a4ae9460..652217da3b7cd 100644
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -210,7 +210,7 @@ struct ftrace_likely_data {
+ /* Section for code which can't be instrumented at all */
+ #define noinstr								\
+ 	noinline notrace __attribute((__section__(".noinstr.text")))	\
+-	__no_kcsan __no_sanitize_address
++	__no_kcsan __no_sanitize_address __no_sanitize_coverage
+ 
+ #endif /* __KERNEL__ */
+ 
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index eadaabd18dc7d..be9eadd23659e 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -84,6 +84,7 @@ struct nfs_open_context {
+ #define NFS_CONTEXT_RESEND_WRITES	(1)
+ #define NFS_CONTEXT_BAD			(2)
+ #define NFS_CONTEXT_UNLOCK	(3)
++#define NFS_CONTEXT_FILE_OPEN		(4)
+ 	int error;
+ 
+ 	struct list_head list;
+diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
+index 65d3d83015c39..944da75ff25cf 100644
+--- a/include/linux/pci-ecam.h
++++ b/include/linux/pci-ecam.h
+@@ -55,6 +55,7 @@ struct pci_ecam_ops {
+ struct pci_config_window {
+ 	struct resource			res;
+ 	struct resource			busr;
++	unsigned int			bus_shift;
+ 	void				*priv;
+ 	const struct pci_ecam_ops	*ops;
+ 	union {
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index bd04f722714f6..d11bee5d9347f 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -315,7 +315,7 @@ static inline int rcu_read_lock_any_held(void)
+ #define RCU_LOCKDEP_WARN(c, s)						\
+ 	do {								\
+ 		static bool __section(".data.unlikely") __warned;	\
+-		if (debug_lockdep_rcu_enabled() && !__warned && (c)) {	\
++		if ((c) && debug_lockdep_rcu_enabled() && !__warned) {	\
+ 			__warned = true;				\
+ 			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
+ 		}							\
+diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
+index 3f6a0fcaa10cf..ae60f838ebb92 100644
+--- a/include/linux/sched/signal.h
++++ b/include/linux/sched/signal.h
+@@ -537,6 +537,17 @@ static inline int kill_cad_pid(int sig, int priv)
+ #define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
+ #define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)
+ 
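++/*
++ * Raw range check: is sp on the alternate signal stack?  Unlike
++ * on_sig_stack() below, this ignores SS_AUTODISARM.
++ */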
++static inline int __on_sig_stack(unsigned long sp)
++{
++#ifdef CONFIG_STACK_GROWSUP
++	return sp >= current->sas_ss_sp &&
++		sp - current->sas_ss_sp < current->sas_ss_size;
++#else
++	return sp > current->sas_ss_sp &&
++		sp - current->sas_ss_sp <= current->sas_ss_size;
++#endif
++}
++
+ /*
+  * True if we are on the alternate signal stack.
+  */
+@@ -554,13 +565,7 @@ static inline int on_sig_stack(unsigned long sp)
+ 	if (current->sas_ss_flags & SS_AUTODISARM)
+ 		return 0;
+ 
+-#ifdef CONFIG_STACK_GROWSUP
+-	return sp >= current->sas_ss_sp &&
+-		sp - current->sas_ss_sp < current->sas_ss_size;
+-#else
+-	return sp > current->sas_ss_sp &&
+-		sp - current->sas_ss_sp <= current->sas_ss_size;
+-#endif
++	return __on_sig_stack(sp);
+ }
+ 
+ static inline int sas_ss_flags(unsigned long sp)
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 091f284bd6e93..2bb452a8f134c 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -195,12 +195,6 @@ struct iscsi_conn {
+ 	unsigned long		suspend_tx;	/* suspend Tx */
+ 	unsigned long		suspend_rx;	/* suspend Rx */
+ 
+-	/* abort */
+-	wait_queue_head_t	ehwait;		/* used in eh_abort() */
+-	struct iscsi_tm		tmhdr;
+-	struct timer_list	tmf_timer;
+-	int			tmf_state;	/* see TMF_INITIAL, etc.*/
+-
+ 	/* negotiated params */
+ 	unsigned		max_recv_dlength; /* initiator_max_recv_dsl*/
+ 	unsigned		max_xmit_dlength; /* target_max_recv_dsl */
+@@ -270,6 +264,11 @@ struct iscsi_session {
+ 	 * and recv lock.
+ 	 */
+ 	struct mutex		eh_mutex;
++	/* abort */
++	wait_queue_head_t	ehwait;		/* used in eh_abort() */
++	struct iscsi_tm		tmhdr;
++	struct timer_list	tmf_timer;
++	int			tmf_state;	/* see TMF_INITIAL, etc.*/
+ 
+ 	/* iSCSI session-wide sequencing */
+ 	uint32_t		cmdsn;
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 3974329d4d023..c5d7810fd7926 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -443,6 +443,8 @@ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+ 						int dd_size, uint32_t cid);
++extern void iscsi_put_conn(struct iscsi_cls_conn *conn);
++extern void iscsi_get_conn(struct iscsi_cls_conn *conn);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index f6dddb3a8f4a2..04eb28f7735fb 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -912,6 +912,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
+ 	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
+ 	if (opt == -ENOPARAM) {
+ 		if (strcmp(param->key, "source") == 0) {
++			if (param->type != fs_value_is_string)
++				return invalf(fc, "Non-string source");
+ 			if (fc->source)
+ 				return invalf(fc, "Multiple sources not supported");
+ 			fc->source = param->string;
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index ba39fbb1f8e73..af520ca263603 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -316,14 +316,16 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end)
+ }
+ 
+ static int __jump_label_text_reserved(struct jump_entry *iter_start,
+-		struct jump_entry *iter_stop, void *start, void *end)
++		struct jump_entry *iter_stop, void *start, void *end, bool init)
+ {
+ 	struct jump_entry *iter;
+ 
+ 	iter = iter_start;
+ 	while (iter < iter_stop) {
+-		if (addr_conflict(iter, start, end))
+-			return 1;
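++		/* __init entries only count while init text is still present */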
++		if (init || !jump_entry_is_init(iter)) {
++			if (addr_conflict(iter, start, end))
++				return 1;
++		}
+ 		iter++;
+ 	}
+ 
+@@ -561,7 +563,7 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
+ 
+ 	ret = __jump_label_text_reserved(mod->jump_entries,
+ 				mod->jump_entries + mod->num_jump_entries,
+-				start, end);
++				start, end, mod->state == MODULE_STATE_COMING);
+ 
+ 	module_put(mod);
+ 
+@@ -786,8 +788,9 @@ early_initcall(jump_label_init_module);
+  */
+ int jump_label_text_reserved(void *start, void *end)
+ {
++	bool init = system_state < SYSTEM_RUNNING;
+ 	int ret = __jump_label_text_reserved(__start___jump_table,
+-			__stop___jump_table, start, end);
++			__stop___jump_table, start, end, init);
+ 
+ 	if (ret)
+ 		return ret;
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 745f08fdd7a69..c6b4c66b8fa27 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -35,6 +35,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/cpu.h>
+ #include <linux/jump_label.h>
++#include <linux/static_call.h>
+ #include <linux/perf_event.h>
+ 
+ #include <asm/sections.h>
+@@ -1569,6 +1570,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ 	if (!kernel_text_address((unsigned long) p->addr) ||
+ 	    within_kprobe_blacklist((unsigned long) p->addr) ||
+ 	    jump_label_text_reserved(p->addr, p->addr) ||
++	    static_call_text_reserved(p->addr, p->addr) ||
+ 	    find_bug((unsigned long)p->addr)) {
+ 		ret = -EINVAL;
+ 		goto out;
+diff --git a/kernel/module.c b/kernel/module.c
+index 260d6f3f6d68f..f928037a1b567 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -4410,9 +4410,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ 			ret = fn(data, kallsyms_symbol_name(kallsyms, i),
+ 				 mod, kallsyms_symbol_value(sym));
+ 			if (ret != 0)
+-				break;
++				goto out;
+ 		}
+ 	}
++out:
+ 	mutex_unlock(&module_mutex);
+ 	return ret;
+ }
+diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
+index bf0827d4b6593..cfd06fb5ba6d0 100644
+--- a/kernel/rcu/rcu.h
++++ b/kernel/rcu/rcu.h
+@@ -308,6 +308,8 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
+ 	}
+ }
+ 
++extern void rcu_init_geometry(void);
++
+ /* Returns a pointer to the first leaf rcu_node structure. */
+ #define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])
+ 
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index e26547b34ad33..072e47288f1f4 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -90,6 +90,9 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
+ 	struct srcu_node *snp;
+ 	struct srcu_node *snp_first;
+ 
++	/* Initialize geometry if it has not already been initialized. */
++	rcu_init_geometry();
++
+ 	/* Work out the overall tree geometry. */
+ 	ssp->level[0] = &ssp->node[0];
+ 	for (i = 1; i < rcu_num_lvls; i++)
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index a274622ed6fa2..33bdbe0629d5c 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -4502,11 +4502,25 @@ static void __init rcu_init_one(void)
+  * replace the definitions in tree.h because those are needed to size
+  * the ->node array in the rcu_state structure.
+  */
+-static void __init rcu_init_geometry(void)
++void rcu_init_geometry(void)
+ {
+ 	ulong d;
+ 	int i;
++	static unsigned long old_nr_cpu_ids;
+ 	int rcu_capacity[RCU_NUM_LVLS];
++	static bool initialized;
++
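++	/* may be invoked more than once (e.g. by early SRCU init); only set up once */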
++	if (initialized) {
++		/*
++		 * Warn if setup_nr_cpu_ids() has not yet been invoked,
++		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
++		 */
++		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
++		return;
++	}
++
++	old_nr_cpu_ids = nr_cpu_ids;
++	initialized = true;
+ 
+ 	/*
+ 	 * Initialize any unspecified boot parameters.
+diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
+index b95ae86c40a7d..dd94a602a6d25 100644
+--- a/kernel/rcu/update.c
++++ b/kernel/rcu/update.c
+@@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
+ 
+ noinstr int notrace debug_lockdep_rcu_enabled(void)
+ {
+-	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
++	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
+ 	       current->lockdep_recursion == 0;
+ }
+ EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index e4e4f47cee6a4..eee49ce2d5960 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2524,20 +2524,27 @@ static __always_inline
+ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
+ 				  struct task_struct *p)
+ {
+-	unsigned long min_util;
+-	unsigned long max_util;
++	unsigned long min_util = 0;
++	unsigned long max_util = 0;
+ 
+ 	if (!static_branch_likely(&sched_uclamp_used))
+ 		return util;
+ 
+-	min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
+-	max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
+-
+ 	if (p) {
+-		min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
+-		max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
++		min_util = uclamp_eff_value(p, UCLAMP_MIN);
++		max_util = uclamp_eff_value(p, UCLAMP_MAX);
++
++		/*
++		 * Ignore last runnable task's max clamp, as this task will
++		 * reset it. Similarly, no need to read the rq's min clamp.
++		 */
++		if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
++			goto out;
+ 	}
+ 
++	min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
++	max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
++out:
+ 	/*
+ 	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
+ 	 * RUNNABLE tasks with _different_ clamps, we can end up with an
+diff --git a/kernel/static_call.c b/kernel/static_call.c
+index 2c5950b0b90ef..7eba4912e529d 100644
+--- a/kernel/static_call.c
++++ b/kernel/static_call.c
+@@ -292,13 +292,15 @@ static int addr_conflict(struct static_call_site *site, void *start, void *end)
+ 
+ static int __static_call_text_reserved(struct static_call_site *iter_start,
+ 				       struct static_call_site *iter_stop,
+-				       void *start, void *end)
++				       void *start, void *end, bool init)
+ {
+ 	struct static_call_site *iter = iter_start;
+ 
+ 	while (iter < iter_stop) {
+-		if (addr_conflict(iter, start, end))
+-			return 1;
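++		/* __init entries only count while init text is still present */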
++		if (init || !static_call_is_init(iter)) {
++			if (addr_conflict(iter, start, end))
++				return 1;
++		}
+ 		iter++;
+ 	}
+ 
+@@ -324,7 +326,7 @@ static int __static_call_mod_text_reserved(void *start, void *end)
+ 
+ 	ret = __static_call_text_reserved(mod->static_call_sites,
+ 			mod->static_call_sites + mod->num_static_call_sites,
+-			start, end);
++			start, end, mod->state == MODULE_STATE_COMING);
+ 
+ 	module_put(mod);
+ 
+@@ -459,8 +461,9 @@ static inline int __static_call_mod_text_reserved(void *start, void *end)
+ 
+ int static_call_text_reserved(void *start, void *end)
+ {
++	bool init = system_state < SYSTEM_RUNNING;
+ 	int ret = __static_call_text_reserved(__start_static_call_sites,
+-			__stop_static_call_sites, start, end);
++			__stop_static_call_sites, start, end, init);
+ 
+ 	if (ret)
+ 		return ret;
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index a696120b9f1ee..64ddd97d088aa 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1673,7 +1673,9 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
+ 	if (WARN_ON_ONCE(!field))
+ 		goto out;
+ 
+-	if (is_string_field(field)) {
++	/* Pointers to strings are just pointers and dangerous to dereference */
++	if (is_string_field(field) &&
++	    (field->filter_type != FILTER_PTR_STRING)) {
+ 		flags |= HIST_FIELD_FL_STRING;
+ 
+ 		hist_field->size = MAX_FILTER_STR_VAL;
+@@ -4469,8 +4471,6 @@ static inline void add_to_key(char *compound_key, void *key,
+ 		field = key_field->field;
+ 		if (field->filter_type == FILTER_DYN_STRING)
+ 			size = *(u32 *)(rec + field->offset) >> 16;
+-		else if (field->filter_type == FILTER_PTR_STRING)
+-			size = strlen(key);
+ 		else if (field->filter_type == FILTER_STATIC_STRING)
+ 			size = field->size;
+ 
+diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
+index c0cfcfd486be0..e6327391b6b66 100644
+--- a/lib/decompress_unlz4.c
++++ b/lib/decompress_unlz4.c
+@@ -112,6 +112,9 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
+ 				error("data corrupted");
+ 				goto exit_2;
+ 			}
++		} else if (size < 4) {
++			/* empty or end-of-file */
++			goto exit_3;
+ 		}
+ 
+ 		chunksize = get_unaligned_le32(inp);
+@@ -125,6 +128,10 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
+ 			continue;
+ 		}
+ 
++		if (!fill && chunksize == 0) {
++			/* empty or end-of-file */
++			goto exit_3;
++		}
+ 
+ 		if (posp)
+ 			*posp += 4;
+@@ -184,6 +191,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
+ 		}
+ 	}
+ 
++exit_3:
+ 	ret = 0;
+ exit_2:
+ 	if (!input)
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index dbf44b92651b7..e7e4a3aac3862 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5029,8 +5029,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 			continue;
+ 		}
+ 
+-		refs = min3(pages_per_huge_page(h) - pfn_offset,
+-			    (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
++		/* vaddr may not be aligned to PAGE_SIZE */
++		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
++		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
+ 
+ 		if (pages || vmas)
+ 			record_subpages_vmas(mem_map_offset(page, pfn_offset),
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 226bb05c3b42d..869f1608c98a3 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -3087,7 +3087,9 @@ static void br_multicast_pim(struct net_bridge *br,
+ 	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
+ 		return;
+ 
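++	/* br_multicast_mark_router() must be called under the multicast lock */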
++	spin_lock(&br->multicast_lock);
+ 	br_multicast_mark_router(br, port);
++	spin_unlock(&br->multicast_lock);
+ }
+ 
+ static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
+@@ -3098,7 +3100,9 @@ static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
+ 	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
+ 		return -ENOMSG;
+ 
++	spin_lock(&br->multicast_lock);
+ 	br_multicast_mark_router(br, port);
++	spin_unlock(&br->multicast_lock);
+ 
+ 	return 0;
+ }
+@@ -3166,7 +3170,9 @@ static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
+ 	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
+ 		return;
+ 
++	spin_lock(&br->multicast_lock);
+ 	br_multicast_mark_router(br, port);
++	spin_unlock(&br->multicast_lock);
+ }
+ 
+ static int br_multicast_ipv6_rcv(struct net_bridge *br,
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index 3964ff74ee518..ca10ba2626f27 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -1230,10 +1230,9 @@ static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
+ 	void *kaddr;
+ 
+ 	maxlen = xdr->buf->page_len;
+-	if (base >= maxlen) {
+-		base = maxlen;
+-		maxlen = 0;
+-	} else
++	if (base >= maxlen)
++		return 0;
++	else
+ 		maxlen -= base;
+ 	if (len > maxlen)
+ 		len = maxlen;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index e35760f238a40..87cb0e36eadeb 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1680,7 +1680,8 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
+ 		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
+ 				transport->xprt.addrlen);
+ 		if (err == 0) {
+-			transport->srcport = port;
++			if (transport->xprt.reuseport)
++				transport->srcport = port;
+ 			break;
+ 		}
+ 		last = port;
+diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
+index d9077e91382bf..6ddf646cda659 100644
+--- a/sound/ac97/bus.c
++++ b/sound/ac97/bus.c
+@@ -520,7 +520,7 @@ static int ac97_bus_remove(struct device *dev)
+ 	struct ac97_codec_driver *adrv = to_ac97_driver(dev->driver);
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
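A note on the pm_runtime conversion above: pm_runtime_get_sync() increments the
device's usage counter even when the resume fails, so returning early on error
leaks a reference. pm_runtime_resume_and_get() drops the counter itself on
failure. Both idioms side by side, as a sketch (the worker call is a
placeholder, not code from this driver):

	#include <linux/pm_runtime.h>

	static int do_work_legacy(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* must undo the count by hand */
			return ret;
		}
		/* ... do the work ... */
		pm_runtime_put(dev);
		return 0;
	}

	static int do_work_fixed(struct device *dev)
	{
		int ret = pm_runtime_resume_and_get(dev); /* drops the count on error */
		if (ret < 0)
			return ret;
		/* ... do the work ... */
		pm_runtime_put(dev);
		return 0;
	}
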
+diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
+index 9897bd26a4388..12664c3a14141 100644
+--- a/sound/firewire/Kconfig
++++ b/sound/firewire/Kconfig
+@@ -38,7 +38,7 @@ config SND_OXFW
+ 	   * Mackie(Loud) Onyx 1640i (former model)
+ 	   * Mackie(Loud) Onyx Satellite
+ 	   * Mackie(Loud) Tapco Link.Firewire
+-	   * Mackie(Loud) d.4 pro
++	   * Mackie(Loud) d.2 pro/d.4 pro (built-in FireWire card with OXFW971 ASIC)
+ 	   * Mackie(Loud) U.420/U.420d
+ 	   * TASCAM FireOne
+ 	   * Stanton Controllers & Systems 1 Deck/Mixer
+@@ -84,7 +84,7 @@ config SND_BEBOB
+ 	  * PreSonus FIREBOX/FIREPOD/FP10/Inspire1394
+ 	  * BridgeCo RDAudio1/Audio5
+ 	  * Mackie Onyx 1220/1620/1640 (FireWire I/O Card)
+-	  * Mackie d.2 (FireWire Option) and d.2 Pro
++	  * Mackie d.2 (optional FireWire card with DM1000 ASIC)
+ 	  * Stanton FinalScratch 2 (ScratchAmp)
+ 	  * Tascam IF-FW/DM
+ 	  * Behringer XENIX UFX 1204/1604
+@@ -110,6 +110,7 @@ config SND_BEBOB
+ 	  * M-Audio Ozonic/NRV10/ProfireLightBridge
+ 	  * M-Audio FireWire 1814/ProjectMix IO
+ 	  * Digidesign Mbox 2 Pro
++	  * ToneWeal FW66
+ 
+ 	  To compile this driver as a module, choose M here: the module
+ 	  will be called snd-bebob.
+diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
+index daeecfa8b9aac..67fa0f2178b01 100644
+--- a/sound/firewire/bebob/bebob.c
++++ b/sound/firewire/bebob/bebob.c
+@@ -59,6 +59,7 @@ static DECLARE_BITMAP(devices_used, SNDRV_CARDS);
+ #define VEN_MAUDIO1	0x00000d6c
+ #define VEN_MAUDIO2	0x000007f5
+ #define VEN_DIGIDESIGN	0x00a07e
++#define OUI_SHOUYO	0x002327
+ 
+ #define MODEL_FOCUSRITE_SAFFIRE_BOTH	0x00000000
+ #define MODEL_MAUDIO_AUDIOPHILE_BOTH	0x00010060
+@@ -387,7 +388,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
+ 	SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal),
+ 	/* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */
+ 	SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal),
+-	// Mackie, d.2 (Firewire option card) and d.2 Pro (the card is built-in).
++	// Mackie, d.2 (optional Firewire card with DM1000).
+ 	SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal),
+ 	/* Stanton, ScratchAmp */
+ 	SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal),
+@@ -486,6 +487,8 @@ static const struct ieee1394_device_id bebob_id_table[] = {
+ 			    &maudio_special_spec),
+ 	/* Digidesign Mbox 2 Pro */
+ 	SND_BEBOB_DEV_ENTRY(VEN_DIGIDESIGN, 0x0000a9, &spec_normal),
++	// Toneweal FW66.
++	SND_BEBOB_DEV_ENTRY(OUI_SHOUYO, 0x020002, &spec_normal),
+ 	/* IDs are unknown but able to be supported */
+ 	/*  Apogee, Mini-ME Firewire */
+ 	/*  Apogee, Mini-DAC Firewire */
+diff --git a/sound/firewire/motu/motu-protocol-v2.c b/sound/firewire/motu/motu-protocol-v2.c
+index 784073aa10265..f0a0ecad4d74a 100644
+--- a/sound/firewire/motu/motu-protocol-v2.c
++++ b/sound/firewire/motu/motu-protocol-v2.c
+@@ -86,24 +86,23 @@ static int detect_clock_source_optical_model(struct snd_motu *motu, u32 data,
+ 		*src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
+ 		break;
+ 	case 1:
++		*src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT;
++		break;
++	case 2:
+ 	{
+ 		__be32 reg;
+ 
+ 		// To check the configuration of optical interface.
+-		int err = snd_motu_transaction_read(motu, V2_IN_OUT_CONF_OFFSET,
+-						    &reg, sizeof(reg));
++		int err = snd_motu_transaction_read(motu, V2_IN_OUT_CONF_OFFSET, &reg, sizeof(reg));
+ 		if (err < 0)
+ 			return err;
+ 
+-		if (be32_to_cpu(reg) & 0x00000200)
++		if (((data & V2_OPT_IN_IFACE_MASK) >> V2_OPT_IN_IFACE_SHIFT) == V2_OPT_IFACE_MODE_SPDIF)
+ 			*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT;
+ 		else
+-			*src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT;
++			*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
+ 		break;
+ 	}
+-	case 2:
+-		*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
+-		break;
+ 	case 3:
+ 		*src = SND_MOTU_CLOCK_SOURCE_SPH;
+ 		break;
+diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
+index 9eea25c46dc7e..5490637d278a4 100644
+--- a/sound/firewire/oxfw/oxfw.c
++++ b/sound/firewire/oxfw/oxfw.c
+@@ -355,7 +355,7 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
+ 	 *  Onyx-i series (former models):	0x081216
+ 	 *  Mackie Onyx Satellite:		0x00200f
+ 	 *  Tapco LINK.firewire 4x6:		0x000460
+-	 *  d.4 pro:				Unknown
++	 *  d.2 pro/d.4 pro (built-in card):	Unknown
+ 	 *  U.420:				Unknown
+ 	 *  U.420d:				Unknown
+ 	 */
+diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
+index bc112df10fc57..d1ac9fc994916 100644
+--- a/sound/isa/cmi8330.c
++++ b/sound/isa/cmi8330.c
+@@ -547,7 +547,7 @@ static int snd_cmi8330_probe(struct snd_card *card, int dev)
+ 	}
+ 	if (acard->sb->hardware != SB_HW_16) {
+ 		snd_printk(KERN_ERR PFX "SB16 not found during probe\n");
+-		return err;
++		return -ENODEV;
+ 	}
+ 
+ 	snd_wss_out(acard->wss, CS4231_MISC_INFO, 0x40); /* switch on MODE2 */
+diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
+index 4789345a8fdde..c98ccd421a2ec 100644
+--- a/sound/isa/sb/sb16_csp.c
++++ b/sound/isa/sb/sb16_csp.c
+@@ -1072,10 +1072,14 @@ static void snd_sb_qsound_destroy(struct snd_sb_csp * p)
+ 	card = p->chip->card;	
+ 	
+ 	down_write(&card->controls_rwsem);
+-	if (p->qsound_switch)
++	if (p->qsound_switch) {
+ 		snd_ctl_remove(card, p->qsound_switch);
+-	if (p->qsound_space)
++		p->qsound_switch = NULL;
++	}
++	if (p->qsound_space) {
+ 		snd_ctl_remove(card, p->qsound_space);
++		p->qsound_space = NULL;
++	}
+ 	up_write(&card->controls_rwsem);
+ 
+ 	/* cancel pending transfer of QSound parameters */
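
snd_ctl_remove() frees the kcontrol it removes, so the stale pointers left in
p->qsound_switch and p->qsound_space could be removed (and freed) a second time
if the destroy path re-ran. Clearing them makes the teardown idempotent. The
idiom in general form (struct widget and release_widget() are placeholders):

	struct widget;
	void release_widget(struct widget *w);	/* frees *w */

	struct ctx {
		struct widget *w;	/* cached; ownership ends at release */
	};

	static void teardown(struct ctx *c)
	{
		if (c->w) {
			release_widget(c->w);
			c->w = NULL;	/* a repeated teardown() is now a no-op */
		}
	}
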
+diff --git a/sound/mips/snd-n64.c b/sound/mips/snd-n64.c
+index e35e93157755f..463a6fe589eb2 100644
+--- a/sound/mips/snd-n64.c
++++ b/sound/mips/snd-n64.c
+@@ -338,6 +338,10 @@ static int __init n64audio_probe(struct platform_device *pdev)
+ 	strcpy(card->longname, "N64 Audio");
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++	if (!res) {
++		err = -EINVAL;
++		goto fail_dma_alloc;
++	}
+ 	if (devm_request_irq(&pdev->dev, res->start, n64audio_isr,
+ 				IRQF_SHARED, "N64 Audio", priv)) {
+ 		err = -EBUSY;
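
platform_get_resource() returns NULL when the requested resource does not
exist, and the old code dereferenced res->start unconditionally. The same guard
in isolation (my_isr() and the device name are made up):

	#include <linux/platform_device.h>
	#include <linux/interrupt.h>

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int request_device_irq(struct platform_device *pdev, void *priv)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		if (!res)	/* no IRQ described; res->start would oops */
			return -EINVAL;
		return devm_request_irq(&pdev->dev, res->start, my_isr,
					IRQF_SHARED, "my-dev", priv);
	}
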
+diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
+index 6f2b743b9d75c..6c6dc3fcde607 100644
+--- a/sound/pci/hda/hda_tegra.c
++++ b/sound/pci/hda/hda_tegra.c
+@@ -262,6 +262,9 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
+ 	const char *sname, *drv_name = "tegra-hda";
+ 	struct device_node *np = pdev->dev.of_node;
+ 
++	if (irq_id < 0)
++		return irq_id;
++
+ 	err = hda_tegra_init_chip(chip, pdev);
+ 	if (err)
+ 		return err;
+diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c
+index 9fb51ebafde16..8088f77d5a74b 100644
+--- a/sound/ppc/powermac.c
++++ b/sound/ppc/powermac.c
+@@ -76,7 +76,11 @@ static int snd_pmac_probe(struct platform_device *devptr)
+ 		sprintf(card->shortname, "PowerMac %s", name_ext);
+ 		sprintf(card->longname, "%s (Dev %d) Sub-frame %d",
+ 			card->shortname, chip->device_id, chip->subframe);
+-		if ( snd_pmac_tumbler_init(chip) < 0 || snd_pmac_tumbler_post_init() < 0)
++		err = snd_pmac_tumbler_init(chip);
++		if (err < 0)
++			goto __error;
++		err = snd_pmac_tumbler_post_init();
++		if (err < 0)
+ 			goto __error;
+ 		break;
+ 	case PMAC_AWACS:
+diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
+index 070e3f32859fa..244a35122fbce 100644
+--- a/sound/soc/fsl/fsl_xcvr.c
++++ b/sound/soc/fsl/fsl_xcvr.c
+@@ -1205,6 +1205,10 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
+ 
+ 	rx_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rxfifo");
+ 	tx_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "txfifo");
++	if (!rx_res || !tx_res) {
++		dev_err(dev, "could not find rxfifo or txfifo resource\n");
++		return -EINVAL;
++	}
+ 	xcvr->dma_prms_rx.chan_name = "rx";
+ 	xcvr->dma_prms_tx.chan_name = "tx";
+ 	xcvr->dma_prms_rx.addr = rx_res->start;
+diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c
+index 0843235d73c91..fd3432a1d6ab8 100644
+--- a/sound/soc/img/img-i2s-in.c
++++ b/sound/soc/img/img-i2s-in.c
+@@ -464,7 +464,7 @@ static int img_i2s_in_probe(struct platform_device *pdev)
+ 		if (ret)
+ 			goto err_pm_disable;
+ 	}
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		goto err_suspend;
+ 
+diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
+index dc3d897ad2802..36f1f49e0b76b 100644
+--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
++++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
+@@ -594,7 +594,7 @@ static int kabylake_audio_probe(struct platform_device *pdev)
+ 
+ static const struct platform_device_id kbl_board_ids[] = {
+ 	{
+-		.name = "kbl_da7219_max98357a",
++		.name = "kbl_da7219_mx98357a",
+ 		.driver_data =
+ 			(kernel_ulong_t)&kabylake_audio_card_da7219_m98357a,
+ 	},
+@@ -616,4 +616,4 @@ module_platform_driver(kabylake_audio)
+ MODULE_DESCRIPTION("Audio Machine driver-DA7219 & MAX98357A in I2S mode");
+ MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
+ MODULE_LICENSE("GPL v2");
+-MODULE_ALIAS("platform:kbl_da7219_max98357a");
++MODULE_ALIAS("platform:kbl_da7219_mx98357a");
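
For context on the rename above: platform device IDs are matched through
struct platform_device_id, whose name field is PLATFORM_NAME_SIZE (20) bytes.
"kbl_da7219_max98357a" is 20 characters plus the NUL terminator, one byte too
long, hence the shortened "kbl_da7219_mx98357a"; the kbl_da7219_max98373
machine name below appears to get the same treatment. A compile-time guard one
could imagine for this class of bug:

	#include <linux/build_bug.h>
	#include <linux/mod_devicetable.h>	/* PLATFORM_NAME_SIZE == 20 */

	/* sizeof("kbl_da7219_max98357a") is 21 with the NUL, one byte over;
	 * the shortened id is exactly 20 and fits. */
	static_assert(sizeof("kbl_da7219_mx98357a") <= PLATFORM_NAME_SIZE,
		      "platform_device_id name too long");
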
+diff --git a/sound/soc/intel/boards/sof_da7219_max98373.c b/sound/soc/intel/boards/sof_da7219_max98373.c
+index f3cb0773e70ee..8d1ad892e86b6 100644
+--- a/sound/soc/intel/boards/sof_da7219_max98373.c
++++ b/sound/soc/intel/boards/sof_da7219_max98373.c
+@@ -440,6 +440,7 @@ static const struct platform_device_id board_ids[] = {
+ 	},
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(platform, board_ids);
+ 
+ static struct platform_driver audio = {
+ 	.probe = audio_probe,
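
MODULE_DEVICE_TABLE(platform, board_ids) exports the ID table to modpost, which
turns each entry into a MODULE_ALIAS("platform:...") so udev can autoload the
module when a matching platform device appears; without it the driver only
binds if loaded by hand. The minimal shape (board name invented):

	#include <linux/module.h>
	#include <linux/mod_devicetable.h>

	static const struct platform_device_id my_board_ids[] = {
		{ .name = "my-board" },
		{ }	/* terminator */
	};
	MODULE_DEVICE_TABLE(platform, my_board_ids);
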
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index 55505e207bc00..56e92a1ff34d0 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -942,6 +942,7 @@ static const struct platform_device_id board_ids[] = {
+ 	},
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(platform, board_ids);
+ 
+ static struct platform_driver sof_audio = {
+ 	.probe = sof_audio_probe,
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index dfad2ad129abb..5827a16773c90 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -354,6 +354,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
+ 		.part_id = 0x714,
+ 		.version_id = 3,
+ 		.direction = {false, true},
++		.ignore_pch_dmic = true,
+ 		.dai_name = "rt715-aif2",
+ 		.init = sof_sdw_rt715_sdca_init,
+ 	},
+@@ -361,6 +362,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
+ 		.part_id = 0x715,
+ 		.version_id = 3,
+ 		.direction = {false, true},
++		.ignore_pch_dmic = true,
+ 		.dai_name = "rt715-aif2",
+ 		.init = sof_sdw_rt715_sdca_init,
+ 	},
+@@ -368,6 +370,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
+ 		.part_id = 0x714,
+ 		.version_id = 2,
+ 		.direction = {false, true},
++		.ignore_pch_dmic = true,
+ 		.dai_name = "rt715-aif2",
+ 		.init = sof_sdw_rt715_init,
+ 	},
+@@ -375,6 +378,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
+ 		.part_id = 0x715,
+ 		.version_id = 2,
+ 		.direction = {false, true},
++		.ignore_pch_dmic = true,
+ 		.dai_name = "rt715-aif2",
+ 		.init = sof_sdw_rt715_init,
+ 	},
+@@ -730,7 +734,8 @@ static int create_sdw_dailink(struct device *dev, int *be_index,
+ 			      int *cpu_id, bool *group_generated,
+ 			      struct snd_soc_codec_conf *codec_conf,
+ 			      int codec_count,
+-			      int *codec_conf_index)
++			      int *codec_conf_index,
++			      bool *ignore_pch_dmic)
+ {
+ 	const struct snd_soc_acpi_link_adr *link_next;
+ 	struct snd_soc_dai_link_component *codecs;
+@@ -783,6 +788,9 @@ static int create_sdw_dailink(struct device *dev, int *be_index,
+ 	if (codec_index < 0)
+ 		return codec_index;
+ 
++	if (codec_info_list[codec_index].ignore_pch_dmic)
++		*ignore_pch_dmic = true;
++
+ 	cpu_dai_index = *cpu_id;
+ 	for_each_pcm_streams(stream) {
+ 		char *name, *cpu_name;
+@@ -914,6 +922,7 @@ static int sof_card_dai_links_create(struct device *dev,
+ 	const struct snd_soc_acpi_link_adr *adr_link;
+ 	struct snd_soc_dai_link_component *cpus;
+ 	struct snd_soc_codec_conf *codec_conf;
++	bool ignore_pch_dmic = false;
+ 	int codec_conf_count;
+ 	int codec_conf_index = 0;
+ 	bool group_generated[SDW_MAX_GROUPS];
+@@ -1020,7 +1029,8 @@ static int sof_card_dai_links_create(struct device *dev,
+ 					 sdw_cpu_dai_num, cpus, adr_link,
+ 					 &cpu_id, group_generated,
+ 					 codec_conf, codec_conf_count,
+-					 &codec_conf_index);
++					 &codec_conf_index,
++					 &ignore_pch_dmic);
+ 		if (ret < 0) {
+ 			dev_err(dev, "failed to create dai link %d", be_id);
+ 			return -ENOMEM;
+@@ -1088,6 +1098,10 @@ SSP:
+ DMIC:
+ 	/* dmic */
+ 	if (dmic_num > 0) {
++		if (ignore_pch_dmic) {
++			dev_warn(dev, "Ignoring PCH DMIC\n");
++			goto HDMI;
++		}
+ 		cpus[cpu_id].dai_name = "DMIC01 Pin";
+ 		init_dai_link(dev, links + link_id, be_id, "dmic01",
+ 			      0, 1, // DMIC only supports capture
+@@ -1106,6 +1120,7 @@ DMIC:
+ 		INC_ID(be_id, cpu_id, link_id);
+ 	}
+ 
++HDMI:
+ 	/* HDMI */
+ 	if (hdmi_num > 0) {
+ 		idisp_components = devm_kcalloc(dev, hdmi_num,
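
The sof_sdw changes thread a new per-codec quirk, ignore_pch_dmic, from
codec_info_list through create_sdw_dailink() up to sof_card_dai_links_create(),
where a set flag makes the DMIC section jump to the new HDMI label instead of
creating PCH DMIC links; the RT715 entries set it, presumably because capture
goes through the RT715 rather than the PCH mics. Reduced to its plumbing, the
pattern looks like this (types cut down to the flag):

	#include <linux/types.h>

	struct codec_info {
		bool ignore_pch_dmic;	/* this codec supplies its own mic path */
	};

	static int create_link(const struct codec_info *ci, bool *ignore_pch_dmic)
	{
		if (ci->ignore_pch_dmic)
			*ignore_pch_dmic = true;	/* sticky: any codec can veto */
		/* ... build the SoundWire DAI link ... */
		return 0;
	}

	/* caller: starts with ignore_pch_dmic = false, creates every link,
	 * then skips the PCH DMIC links if the flag ended up set */
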
+diff --git a/sound/soc/intel/boards/sof_sdw_common.h b/sound/soc/intel/boards/sof_sdw_common.h
+index f3cb6796363e7..ea60e8ed215c5 100644
+--- a/sound/soc/intel/boards/sof_sdw_common.h
++++ b/sound/soc/intel/boards/sof_sdw_common.h
+@@ -56,6 +56,7 @@ struct sof_sdw_codec_info {
+ 	int amp_num;
+ 	const u8 acpi_id[ACPI_ID_LEN];
+ 	const bool direction[2]; // playback & capture support
++	const bool ignore_pch_dmic;
+ 	const char *dai_name;
+ 	const struct snd_soc_ops *ops;
+ 
+diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
+index 47dadc9d5d2a0..ba5ff468c265a 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
+@@ -113,7 +113,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
+ 	},
+ 	{
+ 		.id = "DLGS7219",
+-		.drv_name = "kbl_da7219_max98373",
++		.drv_name = "kbl_da7219_mx98373",
+ 		.fw_filename = "intel/dsp_fw_kbl.bin",
+ 		.machine_quirk = snd_soc_acpi_codec_list,
+ 		.quirk_data = &kbl_7219_98373_codecs,
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 6680c12716d44..6d4b08e918dec 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -2796,7 +2796,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
+ 	if (!routes) {
+ 		dev_err(card->dev,
+ 			"ASoC: Could not allocate DAPM route table\n");
+-		return -EINVAL;
++		return -ENOMEM;
+ 	}
+ 
+ 	for (i = 0; i < num_routes; i++) {
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 14d85ca1e4357..4a25a1e39831f 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1695,7 +1695,7 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
+ 	struct snd_soc_dpcm *dpcm;
+ 	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream);
+ 	struct snd_soc_dai *fe_cpu_dai;
+-	int err;
++	int err = 0;
+ 	int i;
+ 
+ 	/* apply symmetry for FE */
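
The one-liner above matters because dpcm_apply_symmetry() can reach its return
statement without either loop having assigned err, in which case the old code
returned an uninitialized value. The general shape (apply_one() is a
placeholder):

	static int apply_one(int i)
	{
		return 0;	/* stand-in for the per-item work */
	}

	static int apply_all(int n)
	{
		int err = 0;	/* the loop below may run zero times */
		int i;

		for (i = 0; i < n; i++) {
			err = apply_one(i);
			if (err < 0)
				return err;
		}
		return err;
	}
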
+diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
+index bca3e7fe27df6..38f4a2a37e0f7 100644
+--- a/sound/usb/mixer_scarlett_gen2.c
++++ b/sound/usb/mixer_scarlett_gen2.c
+@@ -254,10 +254,10 @@ static const struct scarlett2_device_info s6i6_gen2_info = {
+ 	.pad_input_count = 2,
+ 
+ 	.line_out_descrs = {
+-		"Monitor L",
+-		"Monitor R",
+-		"Headphones L",
+-		"Headphones R",
++		"Headphones 1 L",
++		"Headphones 1 R",
++		"Headphones 2 L",
++		"Headphones 2 R",
+ 	},
+ 
+ 	.ports = {
+@@ -356,7 +356,7 @@ static const struct scarlett2_device_info s18i8_gen2_info = {
+ 		},
+ 		[SCARLETT2_PORT_TYPE_PCM] = {
+ 			.id = 0x600,
+-			.num = { 20, 18, 18, 14, 10 },
++			.num = { 8, 18, 18, 14, 10 },
+ 			.src_descr = "PCM %d",
+ 			.src_num_offset = 1,
+ 			.dst_descr = "PCM %02d Capture"
+@@ -1033,11 +1033,10 @@ static int scarlett2_master_volume_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_mixer_data *private = mixer->private_data;
+ 
+-	if (private->vol_updated) {
+-		mutex_lock(&private->data_mutex);
++	mutex_lock(&private->data_mutex);
++	if (private->vol_updated)
+ 		scarlett2_update_volumes(mixer);
+-		mutex_unlock(&private->data_mutex);
+-	}
++	mutex_unlock(&private->data_mutex);
+ 
+ 	ucontrol->value.integer.value[0] = private->master_vol;
+ 	return 0;
+@@ -1051,11 +1050,10 @@ static int scarlett2_volume_ctl_get(struct snd_kcontrol *kctl,
+ 	struct scarlett2_mixer_data *private = mixer->private_data;
+ 	int index = elem->control;
+ 
+-	if (private->vol_updated) {
+-		mutex_lock(&private->data_mutex);
++	mutex_lock(&private->data_mutex);
++	if (private->vol_updated)
+ 		scarlett2_update_volumes(mixer);
+-		mutex_unlock(&private->data_mutex);
+-	}
++	mutex_unlock(&private->data_mutex);
+ 
+ 	ucontrol->value.integer.value[0] = private->vol[index];
+ 	return 0;
+@@ -1186,6 +1184,8 @@ static int scarlett2_sw_hw_enum_ctl_put(struct snd_kcontrol *kctl,
+ 	/* Send SW/HW switch change to the device */
+ 	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_SW_HW_SWITCH,
+ 				       index, val);
++	if (err == 0)
++		err = 1;
+ 
+ unlock:
+ 	mutex_unlock(&private->data_mutex);
+@@ -1246,6 +1246,8 @@ static int scarlett2_level_enum_ctl_put(struct snd_kcontrol *kctl,
+ 	/* Send switch change to the device */
+ 	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
+ 				       index, val);
++	if (err == 0)
++		err = 1;
+ 
+ unlock:
+ 	mutex_unlock(&private->data_mutex);
+@@ -1296,6 +1298,8 @@ static int scarlett2_pad_ctl_put(struct snd_kcontrol *kctl,
+ 	/* Send switch change to the device */
+ 	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PAD_SWITCH,
+ 				       index, val);
++	if (err == 0)
++		err = 1;
+ 
+ unlock:
+ 	mutex_unlock(&private->data_mutex);
+@@ -1319,11 +1323,10 @@ static int scarlett2_button_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_mixer_data *private = mixer->private_data;
+ 
+-	if (private->vol_updated) {
+-		mutex_lock(&private->data_mutex);
++	mutex_lock(&private->data_mutex);
++	if (private->vol_updated)
+ 		scarlett2_update_volumes(mixer);
+-		mutex_unlock(&private->data_mutex);
+-	}
++	mutex_unlock(&private->data_mutex);
+ 
+ 	ucontrol->value.enumerated.item[0] = private->buttons[elem->control];
+ 	return 0;
+@@ -1352,6 +1355,8 @@ static int scarlett2_button_ctl_put(struct snd_kcontrol *kctl,
+ 	/* Send switch change to the device */
+ 	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_BUTTONS,
+ 				       index, val);
++	if (err == 0)
++		err = 1;
+ 
+ unlock:
+ 	mutex_unlock(&private->data_mutex);
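
Two patterns run through the scarlett_gen2 hunks. First, the _get callbacks now
take data_mutex before testing vol_updated, closing the window where the flag
could be set between the unlocked test and the cached read. Second, the
"err = 1" conversions follow the ALSA .put contract: return 1 when the value
changed (so a value-change event is broadcast to listeners), 0 when unchanged,
negative on error; a successful device write previously returned 0 and
suppressed the notification. A sketch of a conforming .put callback
(struct my_priv and device_write() are made up):

	#include <linux/mutex.h>
	#include <sound/control.h>

	struct my_priv {
		struct mutex lock;
		int cached;
	};

	static int device_write(struct my_priv *priv, int val)
	{
		return 0;	/* stand-in for the USB transfer */
	}

	static int my_ctl_put(struct snd_kcontrol *kctl,
			      struct snd_ctl_elem_value *ucontrol)
	{
		struct my_priv *priv = kctl->private_data;
		int val = !!ucontrol->value.integer.value[0];
		int err = 0;

		mutex_lock(&priv->lock);
		if (priv->cached == val)
			goto unlock;		/* err stays 0: value unchanged */
		priv->cached = val;
		err = device_write(priv, val);
		if (err == 0)
			err = 1;		/* changed: ALSA emits a notification */
	unlock:
		mutex_unlock(&priv->lock);
		return err;
	}
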
+diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
+index 22412cd69e985..10868c3fb6561 100644
+--- a/sound/usb/usx2y/usX2Yhwdep.c
++++ b/sound/usb/usx2y/usX2Yhwdep.c
+@@ -29,7 +29,7 @@ static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf)
+ 		   vmf->pgoff);
+ 	
+ 	offset = vmf->pgoff << PAGE_SHIFT;
+-	vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
++	vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
+ 	page = virt_to_page(vaddr);
+ 	get_page(page);
+ 	vmf->page = page;
+@@ -47,7 +47,7 @@ static const struct vm_operations_struct us428ctls_vm_ops = {
+ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct vm_area_struct *area)
+ {
+ 	unsigned long	size = (unsigned long)(area->vm_end - area->vm_start);
+-	struct usX2Ydev	*us428 = hw->private_data;
++	struct usx2ydev	*us428 = hw->private_data;
+ 
+ 	// FIXME this hwdep interface is used twice: fpga download and mmap for controlling Lights etc. Maybe better using 2 hwdep devs?
+ 	// so as long as the device isn't fully initialised yet we return -EBUSY here.
+@@ -66,7 +66,7 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v
+ 		if (!us428->us428ctls_sharedmem)
+ 			return -ENOMEM;
+ 		memset(us428->us428ctls_sharedmem, -1, sizeof(struct us428ctls_sharedmem));
+-		us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
++		us428->us428ctls_sharedmem->ctl_snapshot_last = -2;
+ 	}
+ 	area->vm_ops = &us428ctls_vm_ops;
+ 	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+@@ -77,21 +77,21 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v
+ static __poll_t snd_us428ctls_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait)
+ {
+ 	__poll_t	mask = 0;
+-	struct usX2Ydev	*us428 = hw->private_data;
++	struct usx2ydev	*us428 = hw->private_data;
+ 	struct us428ctls_sharedmem *shm = us428->us428ctls_sharedmem;
+ 	if (us428->chip_status & USX2Y_STAT_CHIP_HUP)
+ 		return EPOLLHUP;
+ 
+ 	poll_wait(file, &us428->us428ctls_wait_queue_head, wait);
+ 
+-	if (shm != NULL && shm->CtlSnapShotLast != shm->CtlSnapShotRed)
++	if (shm != NULL && shm->ctl_snapshot_last != shm->ctl_snapshot_red)
+ 		mask |= EPOLLIN;
+ 
+ 	return mask;
+ }
+ 
+ 
+-static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
++static int snd_usx2y_hwdep_dsp_status(struct snd_hwdep *hw,
+ 				      struct snd_hwdep_dsp_status *info)
+ {
+ 	static const char * const type_ids[USX2Y_TYPE_NUMS] = {
+@@ -99,7 +99,7 @@ static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
+ 		[USX2Y_TYPE_224] = "us224",
+ 		[USX2Y_TYPE_428] = "us428",
+ 	};
+-	struct usX2Ydev	*us428 = hw->private_data;
++	struct usx2ydev	*us428 = hw->private_data;
+ 	int id = -1;
+ 
+ 	switch (le16_to_cpu(us428->dev->descriptor.idProduct)) {
+@@ -124,7 +124,7 @@ static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
+ }
+ 
+ 
+-static int usX2Y_create_usbmidi(struct snd_card *card)
++static int usx2y_create_usbmidi(struct snd_card *card)
+ {
+ 	static const struct snd_usb_midi_endpoint_info quirk_data_1 = {
+ 		.out_ep = 0x06,
+@@ -152,28 +152,28 @@ static int usX2Y_create_usbmidi(struct snd_card *card)
+        		.type = QUIRK_MIDI_FIXED_ENDPOINT,
+ 		.data = &quirk_data_2
+ 	};
+-	struct usb_device *dev = usX2Y(card)->dev;
++	struct usb_device *dev = usx2y(card)->dev;
+ 	struct usb_interface *iface = usb_ifnum_to_if(dev, 0);
+ 	const struct snd_usb_audio_quirk *quirk =
+ 		le16_to_cpu(dev->descriptor.idProduct) == USB_ID_US428 ?
+ 		&quirk_2 : &quirk_1;
+ 
+-	snd_printdd("usX2Y_create_usbmidi \n");
+-	return snd_usbmidi_create(card, iface, &usX2Y(card)->midi_list, quirk);
++	snd_printdd("usx2y_create_usbmidi \n");
++	return snd_usbmidi_create(card, iface, &usx2y(card)->midi_list, quirk);
+ }
+ 
+-static int usX2Y_create_alsa_devices(struct snd_card *card)
++static int usx2y_create_alsa_devices(struct snd_card *card)
+ {
+ 	int err;
+ 
+ 	do {
+-		if ((err = usX2Y_create_usbmidi(card)) < 0) {
+-			snd_printk(KERN_ERR "usX2Y_create_alsa_devices: usX2Y_create_usbmidi error %i \n", err);
++		if ((err = usx2y_create_usbmidi(card)) < 0) {
++			snd_printk(KERN_ERR "usx2y_create_alsa_devices: usx2y_create_usbmidi error %i \n", err);
+ 			break;
+ 		}
+-		if ((err = usX2Y_audio_create(card)) < 0) 
++		if ((err = usx2y_audio_create(card)) < 0) 
+ 			break;
+-		if ((err = usX2Y_hwdep_pcm_new(card)) < 0)
++		if ((err = usx2y_hwdep_pcm_new(card)) < 0)
+ 			break;
+ 		if ((err = snd_card_register(card)) < 0)
+ 			break;
+@@ -182,10 +182,10 @@ static int usX2Y_create_alsa_devices(struct snd_card *card)
+ 	return err;
+ } 
+ 
+-static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
++static int snd_usx2y_hwdep_dsp_load(struct snd_hwdep *hw,
+ 				    struct snd_hwdep_dsp_image *dsp)
+ {
+-	struct usX2Ydev *priv = hw->private_data;
++	struct usx2ydev *priv = hw->private_data;
+ 	struct usb_device* dev = priv->dev;
+ 	int lret, err;
+ 	char *buf;
+@@ -206,19 +206,19 @@ static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
+ 		return err;
+ 	if (dsp->index == 1) {
+ 		msleep(250);				// give the device some time
+-		err = usX2Y_AsyncSeq04_init(priv);
++		err = usx2y_async_seq04_init(priv);
+ 		if (err) {
+-			snd_printk(KERN_ERR "usX2Y_AsyncSeq04_init error \n");
++			snd_printk(KERN_ERR "usx2y_async_seq04_init error \n");
+ 			return err;
+ 		}
+-		err = usX2Y_In04_init(priv);
++		err = usx2y_in04_init(priv);
+ 		if (err) {
+-			snd_printk(KERN_ERR "usX2Y_In04_init error \n");
++			snd_printk(KERN_ERR "usx2y_in04_init error \n");
+ 			return err;
+ 		}
+-		err = usX2Y_create_alsa_devices(hw->card);
++		err = usx2y_create_alsa_devices(hw->card);
+ 		if (err) {
+-			snd_printk(KERN_ERR "usX2Y_create_alsa_devices error %i \n", err);
++			snd_printk(KERN_ERR "usx2y_create_alsa_devices error %i \n", err);
+ 			snd_card_free(hw->card);
+ 			return err;
+ 		}
+@@ -229,7 +229,7 @@ static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
+ }
+ 
+ 
+-int usX2Y_hwdep_new(struct snd_card *card, struct usb_device* device)
++int usx2y_hwdep_new(struct snd_card *card, struct usb_device* device)
+ {
+ 	int err;
+ 	struct snd_hwdep *hw;
+@@ -238,9 +238,9 @@ int usX2Y_hwdep_new(struct snd_card *card, struct usb_device* device)
+ 		return err;
+ 
+ 	hw->iface = SNDRV_HWDEP_IFACE_USX2Y;
+-	hw->private_data = usX2Y(card);
+-	hw->ops.dsp_status = snd_usX2Y_hwdep_dsp_status;
+-	hw->ops.dsp_load = snd_usX2Y_hwdep_dsp_load;
++	hw->private_data = usx2y(card);
++	hw->ops.dsp_status = snd_usx2y_hwdep_dsp_status;
++	hw->ops.dsp_load = snd_usx2y_hwdep_dsp_load;
+ 	hw->ops.mmap = snd_us428ctls_mmap;
+ 	hw->ops.poll = snd_us428ctls_poll;
+ 	hw->exclusive = 1;
+diff --git a/sound/usb/usx2y/usX2Yhwdep.h b/sound/usb/usx2y/usX2Yhwdep.h
+index 457199b5ed03b..34cef625712c6 100644
+--- a/sound/usb/usx2y/usX2Yhwdep.h
++++ b/sound/usb/usx2y/usX2Yhwdep.h
+@@ -2,6 +2,6 @@
+ #ifndef USX2YHWDEP_H
+ #define USX2YHWDEP_H
+ 
+-int usX2Y_hwdep_new(struct snd_card *card, struct usb_device* device);
++int usx2y_hwdep_new(struct snd_card *card, struct usb_device* device);
+ 
+ #endif
+diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
+index 091c071b270af..cff684942c4f0 100644
+--- a/sound/usb/usx2y/usb_stream.c
++++ b/sound/usb/usx2y/usb_stream.c
+@@ -142,8 +142,11 @@ void usb_stream_free(struct usb_stream_kernel *sk)
+ 	if (!s)
+ 		return;
+ 
+-	free_pages_exact(sk->write_page, s->write_size);
+-	sk->write_page = NULL;
++	if (sk->write_page) {
++		free_pages_exact(sk->write_page, s->write_size);
++		sk->write_page = NULL;
++	}
++
+ 	free_pages_exact(s, s->read_size);
+ 	sk->s = NULL;
+ }
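
Unlike kfree(), free_pages_exact() performs no NULL check of its own, so the
guard above makes usb_stream_free() safe when write_page was never set up or
was already released. Generalized (struct stream is illustrative):

	#include <linux/gfp.h>

	struct stream {
		void *write_page;	/* from alloc_pages_exact(), or NULL */
		size_t write_size;
	};

	static void stream_free(struct stream *s)
	{
		if (s->write_page) {
			free_pages_exact(s->write_page, s->write_size);
			s->write_page = NULL;	/* safe to call again */
		}
	}
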
+diff --git a/sound/usb/usx2y/usbus428ctldefs.h b/sound/usb/usx2y/usbus428ctldefs.h
+index 5a7518ea3aeb4..7366a940ffbba 100644
+--- a/sound/usb/usx2y/usbus428ctldefs.h
++++ b/sound/usb/usx2y/usbus428ctldefs.h
+@@ -4,28 +4,28 @@
+  * Copyright (c) 2003 by Karsten Wiese <annabellesgarden@yahoo.de>
+  */
+ 
+-enum E_In84{
+-	eFader0 = 0,
+-	eFader1,
+-	eFader2,
+-	eFader3,
+-	eFader4,
+-	eFader5,
+-	eFader6,
+-	eFader7,
+-	eFaderM,
+-	eTransport,
+-	eModifier = 10,
+-	eFilterSelect,
+-	eSelect,
+-	eMute,
++enum E_IN84 {
++	E_FADER_0 = 0,
++	E_FADER_1,
++	E_FADER_2,
++	E_FADER_3,
++	E_FADER_4,
++	E_FADER_5,
++	E_FADER_6,
++	E_FADER_7,
++	E_FADER_M,
++	E_TRANSPORT,
++	E_MODIFIER = 10,
++	E_FILTER_SELECT,
++	E_SELECT,
++	E_MUTE,
+ 
+-	eSwitch   = 15,
+-	eWheelGain,
+-	eWheelFreq,
+-	eWheelQ,
+-	eWheelPan,
+-	eWheel    = 20
++	E_SWITCH   = 15,
++	E_WHEEL_GAIN,
++	E_WHEEL_FREQ,
++	E_WHEEL_Q,
++	E_WHEEL_PAN,
++	E_WHEEL    = 20
+ };
+ 
+ #define T_RECORD   1
+@@ -39,53 +39,53 @@ enum E_In84{
+ 
+ 
+ struct us428_ctls {
+-	unsigned char   Fader[9];
+-	unsigned char 	Transport;
+-	unsigned char 	Modifier;
+-	unsigned char 	FilterSelect;
+-	unsigned char 	Select;
+-	unsigned char   Mute;
+-	unsigned char   UNKNOWN;
+-	unsigned char   Switch;	     
+-	unsigned char   Wheel[5];
++	unsigned char   fader[9];
++	unsigned char 	transport;
++	unsigned char 	modifier;
++	unsigned char 	filter_select;
++	unsigned char 	select;
++	unsigned char   mute;
++	unsigned char   unknown;
++	unsigned char   wswitch;	     
++	unsigned char   wheel[5];
+ };
+ 
+-struct us428_setByte {
+-	unsigned char Offset,
+-		Value;
++struct us428_set_byte {
++	unsigned char offset,
++		value;
+ };
+ 
+ enum {
+-	eLT_Volume = 0,
+-	eLT_Light
++	ELT_VOLUME = 0,
++	ELT_LIGHT
+ };
+ 
+-struct usX2Y_volume {
+-	unsigned char Channel,
+-		LH,
+-		LL,
+-		RH,
+-		RL;
++struct usx2y_volume {
++	unsigned char channel,
++		lh,
++		ll,
++		rh,
++		rl;
+ };
+ 
+ struct us428_lights {
+-	struct us428_setByte Light[7];
++	struct us428_set_byte light[7];
+ };
+ 
+ struct us428_p4out {
+ 	char type;
+ 	union {
+-		struct usX2Y_volume vol;
++		struct usx2y_volume vol;
+ 		struct us428_lights lights;
+ 	} val;
+ };
+ 
+-#define N_us428_ctl_BUFS 16
+-#define N_us428_p4out_BUFS 16
+-struct us428ctls_sharedmem{
+-	struct us428_ctls	CtlSnapShot[N_us428_ctl_BUFS];
+-	int			CtlSnapShotDiffersAt[N_us428_ctl_BUFS];
+-	int			CtlSnapShotLast, CtlSnapShotRed;
+-	struct us428_p4out	p4out[N_us428_p4out_BUFS];
+-	int			p4outLast, p4outSent;
++#define N_US428_CTL_BUFS 16
++#define N_US428_P4OUT_BUFS 16
++struct us428ctls_sharedmem {
++	struct us428_ctls	ctl_snapshot[N_US428_CTL_BUFS];
++	int			ctl_snapshot_differs_at[N_US428_CTL_BUFS];
++	int			ctl_snapshot_last, ctl_snapshot_red;
++	struct us428_p4out	p4out[N_US428_P4OUT_BUFS];
++	int			p4out_last, p4out_sent;
+ };
+diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
+index 3cd28d24f0a73..cdbb27a96e040 100644
+--- a/sound/usb/usx2y/usbusx2y.c
++++ b/sound/usb/usx2y/usbusx2y.c
+@@ -17,7 +17,7 @@
+ 
+ 2004-10-26 Karsten Wiese
+ 	Version 0.8.6:
+-	wake_up() process waiting in usX2Y_urbs_start() on error.
++	wake_up() process waiting in usx2y_urbs_start() on error.
+ 
+ 2004-10-21 Karsten Wiese
+ 	Version 0.8.5:
+@@ -48,7 +48,7 @@
+ 2004-06-12 Karsten Wiese
+ 	Version 0.6.3:
+ 	Made it thus the following rule is enforced:
+-	"All pcm substreams of one usX2Y have to operate at the same rate & format."
++	"All pcm substreams of one usx2y have to operate at the same rate & format."
+ 
+ 2004-04-06 Karsten Wiese
+ 	Version 0.6.0:
+@@ -150,161 +150,161 @@ module_param_array(enable, bool, NULL, 0444);
+ MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS".");
+ 
+ 
+-static int snd_usX2Y_card_used[SNDRV_CARDS];
++static int snd_usx2y_card_used[SNDRV_CARDS];
+ 
+-static void usX2Y_usb_disconnect(struct usb_device* usb_device, void* ptr);
+-static void snd_usX2Y_card_private_free(struct snd_card *card);
++static void usx2y_usb_disconnect(struct usb_device* usb_device, void* ptr);
++static void snd_usx2y_card_private_free(struct snd_card *card);
+ 
+ /* 
+  * pipe 4 is used for switching the lamps, setting samplerate, volumes ....   
+  */
+-static void i_usX2Y_Out04Int(struct urb *urb)
++static void i_usx2y_out04_int(struct urb *urb)
+ {
+ #ifdef CONFIG_SND_DEBUG
+ 	if (urb->status) {
+ 		int 		i;
+-		struct usX2Ydev *usX2Y = urb->context;
+-		for (i = 0; i < 10 && usX2Y->AS04.urb[i] != urb; i++);
+-		snd_printdd("i_usX2Y_Out04Int() urb %i status=%i\n", i, urb->status);
++		struct usx2ydev *usx2y = urb->context;
++		for (i = 0; i < 10 && usx2y->as04.urb[i] != urb; i++);
++		snd_printdd("i_usx2y_out04_int() urb %i status=%i\n", i, urb->status);
+ 	}
+ #endif
+ }
+ 
+-static void i_usX2Y_In04Int(struct urb *urb)
++static void i_usx2y_in04_int(struct urb *urb)
+ {
+ 	int			err = 0;
+-	struct usX2Ydev		*usX2Y = urb->context;
+-	struct us428ctls_sharedmem	*us428ctls = usX2Y->us428ctls_sharedmem;
++	struct usx2ydev		*usx2y = urb->context;
++	struct us428ctls_sharedmem	*us428ctls = usx2y->us428ctls_sharedmem;
+ 
+-	usX2Y->In04IntCalls++;
++	usx2y->in04_int_calls++;
+ 
+ 	if (urb->status) {
+ 		snd_printdd("Interrupt Pipe 4 came back with status=%i\n", urb->status);
+ 		return;
+ 	}
+ 
+-	//	printk("%i:0x%02X ", 8, (int)((unsigned char*)usX2Y->In04Buf)[8]); Master volume shows 0 here if fader is at max during boot ?!?
++	//	printk("%i:0x%02X ", 8, (int)((unsigned char*)usx2y->in04_buf)[8]); Master volume shows 0 here if fader is at max during boot ?!?
+ 	if (us428ctls) {
+ 		int diff = -1;
+-		if (-2 == us428ctls->CtlSnapShotLast) {
++		if (-2 == us428ctls->ctl_snapshot_last) {
+ 			diff = 0;
+-			memcpy(usX2Y->In04Last, usX2Y->In04Buf, sizeof(usX2Y->In04Last));
+-			us428ctls->CtlSnapShotLast = -1;
++			memcpy(usx2y->in04_last, usx2y->in04_buf, sizeof(usx2y->in04_last));
++			us428ctls->ctl_snapshot_last = -1;
+ 		} else {
+ 			int i;
+ 			for (i = 0; i < 21; i++) {
+-				if (usX2Y->In04Last[i] != ((char*)usX2Y->In04Buf)[i]) {
++				if (usx2y->in04_last[i] != ((char*)usx2y->in04_buf)[i]) {
+ 					if (diff < 0)
+ 						diff = i;
+-					usX2Y->In04Last[i] = ((char*)usX2Y->In04Buf)[i];
++					usx2y->in04_last[i] = ((char*)usx2y->in04_buf)[i];
+ 				}
+ 			}
+ 		}
+ 		if (0 <= diff) {
+-			int n = us428ctls->CtlSnapShotLast + 1;
+-			if (n >= N_us428_ctl_BUFS  ||  n < 0)
++			int n = us428ctls->ctl_snapshot_last + 1;
++			if (n >= N_US428_CTL_BUFS  ||  n < 0)
+ 				n = 0;
+-			memcpy(us428ctls->CtlSnapShot + n, usX2Y->In04Buf, sizeof(us428ctls->CtlSnapShot[0]));
+-			us428ctls->CtlSnapShotDiffersAt[n] = diff;
+-			us428ctls->CtlSnapShotLast = n;
+-			wake_up(&usX2Y->us428ctls_wait_queue_head);
++			memcpy(us428ctls->ctl_snapshot + n, usx2y->in04_buf, sizeof(us428ctls->ctl_snapshot[0]));
++			us428ctls->ctl_snapshot_differs_at[n] = diff;
++			us428ctls->ctl_snapshot_last = n;
++			wake_up(&usx2y->us428ctls_wait_queue_head);
+ 		}
+ 	}
+ 	
+ 	
+-	if (usX2Y->US04) {
+-		if (0 == usX2Y->US04->submitted)
++	if (usx2y->us04) {
++		if (0 == usx2y->us04->submitted)
+ 			do {
+-				err = usb_submit_urb(usX2Y->US04->urb[usX2Y->US04->submitted++], GFP_ATOMIC);
+-			} while (!err && usX2Y->US04->submitted < usX2Y->US04->len);
++				err = usb_submit_urb(usx2y->us04->urb[usx2y->us04->submitted++], GFP_ATOMIC);
++			} while (!err && usx2y->us04->submitted < usx2y->us04->len);
+ 	} else
+-		if (us428ctls && us428ctls->p4outLast >= 0 && us428ctls->p4outLast < N_us428_p4out_BUFS) {
+-			if (us428ctls->p4outLast != us428ctls->p4outSent) {
+-				int j, send = us428ctls->p4outSent + 1;
+-				if (send >= N_us428_p4out_BUFS)
++		if (us428ctls && us428ctls->p4out_last >= 0 && us428ctls->p4out_last < N_US428_P4OUT_BUFS) {
++			if (us428ctls->p4out_last != us428ctls->p4out_sent) {
++				int j, send = us428ctls->p4out_sent + 1;
++				if (send >= N_US428_P4OUT_BUFS)
+ 					send = 0;
+-				for (j = 0; j < URBS_AsyncSeq  &&  !err; ++j)
+-					if (0 == usX2Y->AS04.urb[j]->status) {
++				for (j = 0; j < URBS_ASYNC_SEQ  &&  !err; ++j)
++					if (0 == usx2y->as04.urb[j]->status) {
+ 						struct us428_p4out *p4out = us428ctls->p4out + send;	// FIXME if more than 1 p4out is new, 1 gets lost.
+-						usb_fill_bulk_urb(usX2Y->AS04.urb[j], usX2Y->dev,
+-								  usb_sndbulkpipe(usX2Y->dev, 0x04), &p4out->val.vol,
+-								  p4out->type == eLT_Light ? sizeof(struct us428_lights) : 5,
+-								  i_usX2Y_Out04Int, usX2Y);
+-						err = usb_submit_urb(usX2Y->AS04.urb[j], GFP_ATOMIC);
+-						us428ctls->p4outSent = send;
++						usb_fill_bulk_urb(usx2y->as04.urb[j], usx2y->dev,
++								  usb_sndbulkpipe(usx2y->dev, 0x04), &p4out->val.vol,
++								  p4out->type == ELT_LIGHT ? sizeof(struct us428_lights) : 5,
++								  i_usx2y_out04_int, usx2y);
++						err = usb_submit_urb(usx2y->as04.urb[j], GFP_ATOMIC);
++						us428ctls->p4out_sent = send;
+ 						break;
+ 					}
+ 			}
+ 		}
+ 
+ 	if (err)
+-		snd_printk(KERN_ERR "In04Int() usb_submit_urb err=%i\n", err);
++		snd_printk(KERN_ERR "in04_int() usb_submit_urb err=%i\n", err);
+ 
+-	urb->dev = usX2Y->dev;
++	urb->dev = usx2y->dev;
+ 	usb_submit_urb(urb, GFP_ATOMIC);
+ }
+ 
+ /*
+  * Prepare some urbs
+  */
+-int usX2Y_AsyncSeq04_init(struct usX2Ydev *usX2Y)
++int usx2y_async_seq04_init(struct usx2ydev *usx2y)
+ {
+ 	int	err = 0,
+ 		i;
+ 
+-	usX2Y->AS04.buffer = kmalloc_array(URBS_AsyncSeq,
+-					   URB_DataLen_AsyncSeq, GFP_KERNEL);
+-	if (NULL == usX2Y->AS04.buffer) {
++	usx2y->as04.buffer = kmalloc_array(URBS_ASYNC_SEQ,
++					   URB_DATA_LEN_ASYNC_SEQ, GFP_KERNEL);
++	if (NULL == usx2y->as04.buffer) {
+ 		err = -ENOMEM;
+ 	} else
+-		for (i = 0; i < URBS_AsyncSeq; ++i) {
+-			if (NULL == (usX2Y->AS04.urb[i] = usb_alloc_urb(0, GFP_KERNEL))) {
++		for (i = 0; i < URBS_ASYNC_SEQ; ++i) {
++			if (NULL == (usx2y->as04.urb[i] = usb_alloc_urb(0, GFP_KERNEL))) {
+ 				err = -ENOMEM;
+ 				break;
+ 			}
+-			usb_fill_bulk_urb(	usX2Y->AS04.urb[i], usX2Y->dev,
+-						usb_sndbulkpipe(usX2Y->dev, 0x04),
+-						usX2Y->AS04.buffer + URB_DataLen_AsyncSeq*i, 0,
+-						i_usX2Y_Out04Int, usX2Y
++			usb_fill_bulk_urb(	usx2y->as04.urb[i], usx2y->dev,
++						usb_sndbulkpipe(usx2y->dev, 0x04),
++						usx2y->as04.buffer + URB_DATA_LEN_ASYNC_SEQ*i, 0,
++						i_usx2y_out04_int, usx2y
+ 				);
+-			err = usb_urb_ep_type_check(usX2Y->AS04.urb[i]);
++			err = usb_urb_ep_type_check(usx2y->as04.urb[i]);
+ 			if (err < 0)
+ 				break;
+ 		}
+ 	return err;
+ }
+ 
+-int usX2Y_In04_init(struct usX2Ydev *usX2Y)
++int usx2y_in04_init(struct usx2ydev *usx2y)
+ {
+-	if (! (usX2Y->In04urb = usb_alloc_urb(0, GFP_KERNEL)))
++	if (! (usx2y->in04_urb = usb_alloc_urb(0, GFP_KERNEL)))
+ 		return -ENOMEM;
+ 
+-	if (! (usX2Y->In04Buf = kmalloc(21, GFP_KERNEL)))
++	if (! (usx2y->in04_buf = kmalloc(21, GFP_KERNEL)))
+ 		return -ENOMEM;
+ 	 
+-	init_waitqueue_head(&usX2Y->In04WaitQueue);
+-	usb_fill_int_urb(usX2Y->In04urb, usX2Y->dev, usb_rcvintpipe(usX2Y->dev, 0x4),
+-			 usX2Y->In04Buf, 21,
+-			 i_usX2Y_In04Int, usX2Y,
++	init_waitqueue_head(&usx2y->in04_wait_queue);
++	usb_fill_int_urb(usx2y->in04_urb, usx2y->dev, usb_rcvintpipe(usx2y->dev, 0x4),
++			 usx2y->in04_buf, 21,
++			 i_usx2y_in04_int, usx2y,
+ 			 10);
+-	if (usb_urb_ep_type_check(usX2Y->In04urb))
++	if (usb_urb_ep_type_check(usx2y->in04_urb))
+ 		return -EINVAL;
+-	return usb_submit_urb(usX2Y->In04urb, GFP_KERNEL);
++	return usb_submit_urb(usx2y->in04_urb, GFP_KERNEL);
+ }
+ 
+-static void usX2Y_unlinkSeq(struct snd_usX2Y_AsyncSeq *S)
++static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s)
+ {
+ 	int	i;
+-	for (i = 0; i < URBS_AsyncSeq; ++i) {
+-		usb_kill_urb(S->urb[i]);
+-		usb_free_urb(S->urb[i]);
+-		S->urb[i] = NULL;
++	for (i = 0; i < URBS_ASYNC_SEQ; ++i) {
++		usb_kill_urb(s->urb[i]);
++		usb_free_urb(s->urb[i]);
++		s->urb[i] = NULL;
+ 	}
+-	kfree(S->buffer);
++	kfree(s->buffer);
+ }
+ 
+ 
+-static const struct usb_device_id snd_usX2Y_usb_id_table[] = {
++static const struct usb_device_id snd_usx2y_usb_id_table[] = {
+ 	{
+ 		.match_flags =	USB_DEVICE_ID_MATCH_DEVICE,
+ 		.idVendor =	0x1604,
+@@ -323,7 +323,7 @@ static const struct usb_device_id snd_usX2Y_usb_id_table[] = {
+ 	{ /* terminator */ }
+ };
+ 
+-static int usX2Y_create_card(struct usb_device *device,
++static int usx2y_create_card(struct usb_device *device,
+ 			     struct usb_interface *intf,
+ 			     struct snd_card **cardp)
+ {
+@@ -332,20 +332,20 @@ static int usX2Y_create_card(struct usb_device *device,
+ 	int err;
+ 
+ 	for (dev = 0; dev < SNDRV_CARDS; ++dev)
+-		if (enable[dev] && !snd_usX2Y_card_used[dev])
++		if (enable[dev] && !snd_usx2y_card_used[dev])
+ 			break;
+ 	if (dev >= SNDRV_CARDS)
+ 		return -ENODEV;
+ 	err = snd_card_new(&intf->dev, index[dev], id[dev], THIS_MODULE,
+-			   sizeof(struct usX2Ydev), &card);
++			   sizeof(struct usx2ydev), &card);
+ 	if (err < 0)
+ 		return err;
+-	snd_usX2Y_card_used[usX2Y(card)->card_index = dev] = 1;
+-	card->private_free = snd_usX2Y_card_private_free;
+-	usX2Y(card)->dev = device;
+-	init_waitqueue_head(&usX2Y(card)->prepare_wait_queue);
+-	mutex_init(&usX2Y(card)->pcm_mutex);
+-	INIT_LIST_HEAD(&usX2Y(card)->midi_list);
++	snd_usx2y_card_used[usx2y(card)->card_index = dev] = 1;
++	card->private_free = snd_usx2y_card_private_free;
++	usx2y(card)->dev = device;
++	init_waitqueue_head(&usx2y(card)->prepare_wait_queue);
++	mutex_init(&usx2y(card)->pcm_mutex);
++	INIT_LIST_HEAD(&usx2y(card)->midi_list);
+ 	strcpy(card->driver, "USB "NAME_ALLCAPS"");
+ 	sprintf(card->shortname, "TASCAM "NAME_ALLCAPS"");
+ 	sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)",
+@@ -353,14 +353,14 @@ static int usX2Y_create_card(struct usb_device *device,
+ 		le16_to_cpu(device->descriptor.idVendor),
+ 		le16_to_cpu(device->descriptor.idProduct),
+ 		0,//us428(card)->usbmidi.ifnum,
+-		usX2Y(card)->dev->bus->busnum, usX2Y(card)->dev->devnum
++		usx2y(card)->dev->bus->busnum, usx2y(card)->dev->devnum
+ 		);
+ 	*cardp = card;
+ 	return 0;
+ }
+ 
+ 
+-static int usX2Y_usb_probe(struct usb_device *device,
++static int usx2y_usb_probe(struct usb_device *device,
+ 			   struct usb_interface *intf,
+ 			   const struct usb_device_id *device_id,
+ 			   struct snd_card **cardp)
+@@ -375,10 +375,10 @@ static int usX2Y_usb_probe(struct usb_device *device,
+ 	     le16_to_cpu(device->descriptor.idProduct) != USB_ID_US428))
+ 		return -EINVAL;
+ 
+-	err = usX2Y_create_card(device, intf, &card);
++	err = usx2y_create_card(device, intf, &card);
+ 	if (err < 0)
+ 		return err;
+-	if ((err = usX2Y_hwdep_new(card, device)) < 0  ||
++	if ((err = usx2y_hwdep_new(card, device)) < 0  ||
+ 	    (err = snd_card_register(card)) < 0) {
+ 		snd_card_free(card);
+ 		return err;
+@@ -390,64 +390,64 @@ static int usX2Y_usb_probe(struct usb_device *device,
+ /*
+  * new 2.5 USB kernel API
+  */
+-static int snd_usX2Y_probe(struct usb_interface *intf, const struct usb_device_id *id)
++static int snd_usx2y_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ {
+ 	struct snd_card *card;
+ 	int err;
+ 
+-	err = usX2Y_usb_probe(interface_to_usbdev(intf), intf, id, &card);
++	err = usx2y_usb_probe(interface_to_usbdev(intf), intf, id, &card);
+ 	if (err < 0)
+ 		return err;
+ 	dev_set_drvdata(&intf->dev, card);
+ 	return 0;
+ }
+ 
+-static void snd_usX2Y_disconnect(struct usb_interface *intf)
++static void snd_usx2y_disconnect(struct usb_interface *intf)
+ {
+-	usX2Y_usb_disconnect(interface_to_usbdev(intf),
++	usx2y_usb_disconnect(interface_to_usbdev(intf),
+ 				 usb_get_intfdata(intf));
+ }
+ 
+-MODULE_DEVICE_TABLE(usb, snd_usX2Y_usb_id_table);
+-static struct usb_driver snd_usX2Y_usb_driver = {
++MODULE_DEVICE_TABLE(usb, snd_usx2y_usb_id_table);
++static struct usb_driver snd_usx2y_usb_driver = {
+ 	.name =		"snd-usb-usx2y",
+-	.probe =	snd_usX2Y_probe,
+-	.disconnect =	snd_usX2Y_disconnect,
+-	.id_table =	snd_usX2Y_usb_id_table,
++	.probe =	snd_usx2y_probe,
++	.disconnect =	snd_usx2y_disconnect,
++	.id_table =	snd_usx2y_usb_id_table,
+ };
+ 
+-static void snd_usX2Y_card_private_free(struct snd_card *card)
++static void snd_usx2y_card_private_free(struct snd_card *card)
+ {
+-	kfree(usX2Y(card)->In04Buf);
+-	usb_free_urb(usX2Y(card)->In04urb);
+-	if (usX2Y(card)->us428ctls_sharedmem)
+-		free_pages_exact(usX2Y(card)->us428ctls_sharedmem,
+-				 sizeof(*usX2Y(card)->us428ctls_sharedmem));
+-	if (usX2Y(card)->card_index >= 0  &&  usX2Y(card)->card_index < SNDRV_CARDS)
+-		snd_usX2Y_card_used[usX2Y(card)->card_index] = 0;
++	kfree(usx2y(card)->in04_buf);
++	usb_free_urb(usx2y(card)->in04_urb);
++	if (usx2y(card)->us428ctls_sharedmem)
++		free_pages_exact(usx2y(card)->us428ctls_sharedmem,
++				 sizeof(*usx2y(card)->us428ctls_sharedmem));
++	if (usx2y(card)->card_index >= 0  &&  usx2y(card)->card_index < SNDRV_CARDS)
++		snd_usx2y_card_used[usx2y(card)->card_index] = 0;
+ }
+ 
+ /*
+  * Frees the device.
+  */
+-static void usX2Y_usb_disconnect(struct usb_device *device, void* ptr)
++static void usx2y_usb_disconnect(struct usb_device *device, void* ptr)
+ {
+ 	if (ptr) {
+ 		struct snd_card *card = ptr;
+-		struct usX2Ydev *usX2Y = usX2Y(card);
++		struct usx2ydev *usx2y = usx2y(card);
+ 		struct list_head *p;
+-		usX2Y->chip_status = USX2Y_STAT_CHIP_HUP;
+-		usX2Y_unlinkSeq(&usX2Y->AS04);
+-		usb_kill_urb(usX2Y->In04urb);
++		usx2y->chip_status = USX2Y_STAT_CHIP_HUP;
++		usx2y_unlinkseq(&usx2y->as04);
++		usb_kill_urb(usx2y->in04_urb);
+ 		snd_card_disconnect(card);
+ 		/* release the midi resources */
+-		list_for_each(p, &usX2Y->midi_list) {
++		list_for_each(p, &usx2y->midi_list) {
+ 			snd_usbmidi_disconnect(p);
+ 		}
+-		if (usX2Y->us428ctls_sharedmem) 
+-			wake_up(&usX2Y->us428ctls_wait_queue_head);
++		if (usx2y->us428ctls_sharedmem) 
++			wake_up(&usx2y->us428ctls_wait_queue_head);
+ 		snd_card_free(card);
+ 	}
+ }
+ 
+-module_usb_driver(snd_usX2Y_usb_driver);
++module_usb_driver(snd_usx2y_usb_driver);
+diff --git a/sound/usb/usx2y/usbusx2y.h b/sound/usb/usx2y/usbusx2y.h
+index 144b85f57bd2a..c330af628bccd 100644
+--- a/sound/usb/usx2y/usbusx2y.h
++++ b/sound/usb/usx2y/usbusx2y.h
+@@ -8,14 +8,14 @@
+ #define NRURBS	        2	
+ 
+ 
+-#define URBS_AsyncSeq 10
+-#define URB_DataLen_AsyncSeq 32
+-struct snd_usX2Y_AsyncSeq {
+-	struct urb	*urb[URBS_AsyncSeq];
++#define URBS_ASYNC_SEQ 10
++#define URB_DATA_LEN_ASYNC_SEQ 32
++struct snd_usx2y_async_seq {
++	struct urb	*urb[URBS_ASYNC_SEQ];
+ 	char		*buffer;
+ };
+ 
+-struct snd_usX2Y_urbSeq {
++struct snd_usx2y_urb_seq {
+ 	int	submitted;
+ 	int	len;
+ 	struct urb	*urb[];
+@@ -23,17 +23,17 @@ struct snd_usX2Y_urbSeq {
+ 
+ #include "usx2yhwdeppcm.h"
+ 
+-struct usX2Ydev {
++struct usx2ydev {
+ 	struct usb_device	*dev;
+ 	int			card_index;
+ 	int			stride;
+-	struct urb		*In04urb;
+-	void			*In04Buf;
+-	char			In04Last[24];
+-	unsigned		In04IntCalls;
+-	struct snd_usX2Y_urbSeq	*US04;
+-	wait_queue_head_t	In04WaitQueue;
+-	struct snd_usX2Y_AsyncSeq	AS04;
++	struct urb		*in04_urb;
++	void			*in04_buf;
++	char			in04_last[24];
++	unsigned		in04_int_calls;
++	struct snd_usx2y_urb_seq	*us04;
++	wait_queue_head_t	in04_wait_queue;
++	struct snd_usx2y_async_seq	as04;
+ 	unsigned int		rate,
+ 				format;
+ 	int			chip_status;
+@@ -41,9 +41,9 @@ struct usX2Ydev {
+ 	struct us428ctls_sharedmem	*us428ctls_sharedmem;
+ 	int			wait_iso_frame;
+ 	wait_queue_head_t	us428ctls_wait_queue_head;
+-	struct snd_usX2Y_hwdep_pcm_shm	*hwdep_pcm_shm;
+-	struct snd_usX2Y_substream	*subs[4];
+-	struct snd_usX2Y_substream	* volatile  prepare_subs;
++	struct snd_usx2y_hwdep_pcm_shm	*hwdep_pcm_shm;
++	struct snd_usx2y_substream	*subs[4];
++	struct snd_usx2y_substream	* volatile  prepare_subs;
+ 	wait_queue_head_t	prepare_wait_queue;
+ 	struct list_head	midi_list;
+ 	struct list_head	pcm_list;
+@@ -51,21 +51,21 @@ struct usX2Ydev {
+ };
+ 
+ 
+-struct snd_usX2Y_substream {
+-	struct usX2Ydev	*usX2Y;
++struct snd_usx2y_substream {
++	struct usx2ydev	*usx2y;
+ 	struct snd_pcm_substream *pcm_substream;
+ 
+ 	int			endpoint;		
+ 	unsigned int		maxpacksize;		/* max packet size in bytes */
+ 
+ 	atomic_t		state;
+-#define state_STOPPED	0
+-#define state_STARTING1 1
+-#define state_STARTING2 2
+-#define state_STARTING3 3
+-#define state_PREPARED	4
+-#define state_PRERUNNING  6
+-#define state_RUNNING	8
++#define STATE_STOPPED	0
++#define STATE_STARTING1 1
++#define STATE_STARTING2 2
++#define STATE_STARTING3 3
++#define STATE_PREPARED	4
++#define STATE_PRERUNNING  6
++#define STATE_RUNNING	8
+ 
+ 	int			hwptr;			/* free frame position in the buffer (only for playback) */
+ 	int			hwptr_done;		/* processed frame position in the buffer */
+@@ -77,12 +77,12 @@ struct snd_usX2Y_substream {
+ };
+ 
+ 
+-#define usX2Y(c) ((struct usX2Ydev *)(c)->private_data)
++#define usx2y(c) ((struct usx2ydev *)(c)->private_data)
+ 
+-int usX2Y_audio_create(struct snd_card *card);
++int usx2y_audio_create(struct snd_card *card);
+ 
+-int usX2Y_AsyncSeq04_init(struct usX2Ydev *usX2Y);
+-int usX2Y_In04_init(struct usX2Ydev *usX2Y);
++int usx2y_async_seq04_init(struct usx2ydev *usx2y);
++int usx2y_in04_init(struct usx2ydev *usx2y);
+ 
+ #define NAME_ALLCAPS "US-X2Y"
+ 
+diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
+index ecaf41265dcd0..8033bb7255d5c 100644
+--- a/sound/usb/usx2y/usbusx2yaudio.c
++++ b/sound/usb/usx2y/usbusx2yaudio.c
+@@ -54,13 +54,13 @@
+ #endif
+ 
+ 
+-static int usX2Y_urb_capt_retire(struct snd_usX2Y_substream *subs)
++static int usx2y_urb_capt_retire(struct snd_usx2y_substream *subs)
+ {
+ 	struct urb	*urb = subs->completed_urb;
+ 	struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+ 	unsigned char	*cp;
+ 	int 		i, len, lens = 0, hwptr_done = subs->hwptr_done;
+-	struct usX2Ydev	*usX2Y = subs->usX2Y;
++	struct usx2ydev	*usx2y = subs->usx2y;
+ 
+ 	for (i = 0; i < nr_of_packs(); i++) {
+ 		cp = (unsigned char*)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+@@ -70,7 +70,7 @@ static int usX2Y_urb_capt_retire(struct snd_usX2Y_substream *subs)
+ 				   urb->iso_frame_desc[i].status);
+ 			return urb->iso_frame_desc[i].status;
+ 		}
+-		len = urb->iso_frame_desc[i].actual_length / usX2Y->stride;
++		len = urb->iso_frame_desc[i].actual_length / usx2y->stride;
+ 		if (! len) {
+ 			snd_printd("0 == len ERROR!\n");
+ 			continue;
+@@ -79,12 +79,12 @@ static int usX2Y_urb_capt_retire(struct snd_usX2Y_substream *subs)
+ 		/* copy a data chunk */
+ 		if ((hwptr_done + len) > runtime->buffer_size) {
+ 			int cnt = runtime->buffer_size - hwptr_done;
+-			int blen = cnt * usX2Y->stride;
+-			memcpy(runtime->dma_area + hwptr_done * usX2Y->stride, cp, blen);
+-			memcpy(runtime->dma_area, cp + blen, len * usX2Y->stride - blen);
++			int blen = cnt * usx2y->stride;
++			memcpy(runtime->dma_area + hwptr_done * usx2y->stride, cp, blen);
++			memcpy(runtime->dma_area, cp + blen, len * usx2y->stride - blen);
+ 		} else {
+-			memcpy(runtime->dma_area + hwptr_done * usX2Y->stride, cp,
+-			       len * usX2Y->stride);
++			memcpy(runtime->dma_area + hwptr_done * usx2y->stride, cp,
++			       len * usx2y->stride);
+ 		}
+ 		lens += len;
+ 		if ((hwptr_done += len) >= runtime->buffer_size)
+@@ -110,18 +110,18 @@ static int usX2Y_urb_capt_retire(struct snd_usX2Y_substream *subs)
+  * it directly from the buffer.  thus the data is once copied to
+  * a temporary buffer and urb points to that.
+  */
+-static int usX2Y_urb_play_prepare(struct snd_usX2Y_substream *subs,
++static int usx2y_urb_play_prepare(struct snd_usx2y_substream *subs,
+ 				  struct urb *cap_urb,
+ 				  struct urb *urb)
+ {
+ 	int count, counts, pack;
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
++	struct usx2ydev *usx2y = subs->usx2y;
+ 	struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+ 
+ 	count = 0;
+ 	for (pack = 0; pack <  nr_of_packs(); pack++) {
+ 		/* calculate the size of a packet */
+-		counts = cap_urb->iso_frame_desc[pack].actual_length / usX2Y->stride;
++		counts = cap_urb->iso_frame_desc[pack].actual_length / usx2y->stride;
+ 		count += counts;
+ 		if (counts < 43 || counts > 50) {
+ 			snd_printk(KERN_ERR "should not be here with counts=%i\n", counts);
+@@ -134,7 +134,7 @@ static int usX2Y_urb_play_prepare(struct snd_usX2Y_substream *subs,
+ 			0;
+ 		urb->iso_frame_desc[pack].length = cap_urb->iso_frame_desc[pack].actual_length;
+ 	}
+-	if (atomic_read(&subs->state) >= state_PRERUNNING)
++	if (atomic_read(&subs->state) >= STATE_PRERUNNING)
+ 		if (subs->hwptr + count > runtime->buffer_size) {
+ 			/* err, the transferred area goes over buffer boundary.
+ 			 * copy the data to the temp buffer.
+@@ -143,20 +143,20 @@ static int usX2Y_urb_play_prepare(struct snd_usX2Y_substream *subs,
+ 			len = runtime->buffer_size - subs->hwptr;
+ 			urb->transfer_buffer = subs->tmpbuf;
+ 			memcpy(subs->tmpbuf, runtime->dma_area +
+-			       subs->hwptr * usX2Y->stride, len * usX2Y->stride);
+-			memcpy(subs->tmpbuf + len * usX2Y->stride,
+-			       runtime->dma_area, (count - len) * usX2Y->stride);
++			       subs->hwptr * usx2y->stride, len * usx2y->stride);
++			memcpy(subs->tmpbuf + len * usx2y->stride,
++			       runtime->dma_area, (count - len) * usx2y->stride);
+ 			subs->hwptr += count;
+ 			subs->hwptr -= runtime->buffer_size;
+ 		} else {
+ 			/* set the buffer pointer */
+-			urb->transfer_buffer = runtime->dma_area + subs->hwptr * usX2Y->stride;
++			urb->transfer_buffer = runtime->dma_area + subs->hwptr * usx2y->stride;
+ 			if ((subs->hwptr += count) >= runtime->buffer_size)
+ 				subs->hwptr -= runtime->buffer_size;
+ 		}
+ 	else
+ 		urb->transfer_buffer = subs->tmpbuf;
+-	urb->transfer_buffer_length = count * usX2Y->stride;
++	urb->transfer_buffer_length = count * usx2y->stride;
+ 	return 0;
+ }
+ 
+@@ -165,10 +165,10 @@ static int usX2Y_urb_play_prepare(struct snd_usX2Y_substream *subs,
+  *
+  * update the current position and call callback if a period is processed.
+  */
+-static void usX2Y_urb_play_retire(struct snd_usX2Y_substream *subs, struct urb *urb)
++static void usx2y_urb_play_retire(struct snd_usx2y_substream *subs, struct urb *urb)
+ {
+ 	struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+-	int		len = urb->actual_length / subs->usX2Y->stride;
++	int		len = urb->actual_length / subs->usx2y->stride;
+ 
+ 	subs->transfer_done += len;
+ 	subs->hwptr_done +=  len;
+@@ -180,14 +180,14 @@ static void usX2Y_urb_play_retire(struct snd_usX2Y_substream *subs, struct urb *
+ 	}
+ }
+ 
+-static int usX2Y_urb_submit(struct snd_usX2Y_substream *subs, struct urb *urb, int frame)
++static int usx2y_urb_submit(struct snd_usx2y_substream *subs, struct urb *urb, int frame)
+ {
+ 	int err;
+ 	if (!urb)
+ 		return -ENODEV;
+ 	urb->start_frame = (frame + NRURBS * nr_of_packs());  // let hcd do rollover sanity checks
+ 	urb->hcpriv = NULL;
+-	urb->dev = subs->usX2Y->dev; /* we need to set this at each time */
++	urb->dev = subs->usx2y->dev; /* we need to set this at each time */
+ 	if ((err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
+ 		snd_printk(KERN_ERR "usb_submit_urb() returned %i\n", err);
+ 		return err;
+@@ -195,8 +195,8 @@ static int usX2Y_urb_submit(struct snd_usX2Y_substream *subs, struct urb *urb, i
+ 	return 0;
+ }
+ 
+-static inline int usX2Y_usbframe_complete(struct snd_usX2Y_substream *capsubs,
+-					  struct snd_usX2Y_substream *playbacksubs,
++static inline int usx2y_usbframe_complete(struct snd_usx2y_substream *capsubs,
++					  struct snd_usx2y_substream *playbacksubs,
+ 					  int frame)
+ {
+ 	int err, state;
+@@ -204,25 +204,25 @@ static inline int usX2Y_usbframe_complete(struct snd_usX2Y_substream *capsubs,
+ 
+ 	state = atomic_read(&playbacksubs->state);
+ 	if (NULL != urb) {
+-		if (state == state_RUNNING)
+-			usX2Y_urb_play_retire(playbacksubs, urb);
+-		else if (state >= state_PRERUNNING)
++		if (state == STATE_RUNNING)
++			usx2y_urb_play_retire(playbacksubs, urb);
++		else if (state >= STATE_PRERUNNING)
+ 			atomic_inc(&playbacksubs->state);
+ 	} else {
+ 		switch (state) {
+-		case state_STARTING1:
++		case STATE_STARTING1:
+ 			urb = playbacksubs->urb[0];
+ 			atomic_inc(&playbacksubs->state);
+ 			break;
+-		case state_STARTING2:
++		case STATE_STARTING2:
+ 			urb = playbacksubs->urb[1];
+ 			atomic_inc(&playbacksubs->state);
+ 			break;
+ 		}
+ 	}
+ 	if (urb) {
+-		if ((err = usX2Y_urb_play_prepare(playbacksubs, capsubs->completed_urb, urb)) ||
+-		    (err = usX2Y_urb_submit(playbacksubs, urb, frame))) {
++		if ((err = usx2y_urb_play_prepare(playbacksubs, capsubs->completed_urb, urb)) ||
++		    (err = usx2y_urb_submit(playbacksubs, urb, frame))) {
+ 			return err;
+ 		}
+ 	}
+@@ -230,13 +230,13 @@ static inline int usX2Y_usbframe_complete(struct snd_usX2Y_substream *capsubs,
+ 	playbacksubs->completed_urb = NULL;
+ 
+ 	state = atomic_read(&capsubs->state);
+-	if (state >= state_PREPARED) {
+-		if (state == state_RUNNING) {
+-			if ((err = usX2Y_urb_capt_retire(capsubs)))
++	if (state >= STATE_PREPARED) {
++		if (state == STATE_RUNNING) {
++			if ((err = usx2y_urb_capt_retire(capsubs)))
+ 				return err;
+-		} else if (state >= state_PRERUNNING)
++		} else if (state >= STATE_PRERUNNING)
+ 			atomic_inc(&capsubs->state);
+-		if ((err = usX2Y_urb_submit(capsubs, capsubs->completed_urb, frame)))
++		if ((err = usx2y_urb_submit(capsubs, capsubs->completed_urb, frame)))
+ 			return err;
+ 	}
+ 	capsubs->completed_urb = NULL;
+@@ -244,21 +244,21 @@ static inline int usX2Y_usbframe_complete(struct snd_usX2Y_substream *capsubs,
+ }
+ 
+ 
+-static void usX2Y_clients_stop(struct usX2Ydev *usX2Y)
++static void usx2y_clients_stop(struct usx2ydev *usx2y)
+ {
+ 	int s, u;
+ 
+ 	for (s = 0; s < 4; s++) {
+-		struct snd_usX2Y_substream *subs = usX2Y->subs[s];
++		struct snd_usx2y_substream *subs = usx2y->subs[s];
+ 		if (subs) {
+ 			snd_printdd("%i %p state=%i\n", s, subs, atomic_read(&subs->state));
+-			atomic_set(&subs->state, state_STOPPED);
++			atomic_set(&subs->state, STATE_STOPPED);
+ 		}
+ 	}
+ 	for (s = 0; s < 4; s++) {
+-		struct snd_usX2Y_substream *subs = usX2Y->subs[s];
++		struct snd_usx2y_substream *subs = usx2y->subs[s];
+ 		if (subs) {
+-			if (atomic_read(&subs->state) >= state_PRERUNNING)
++			if (atomic_read(&subs->state) >= STATE_PRERUNNING)
+ 				snd_pcm_stop_xrun(subs->pcm_substream);
+ 			for (u = 0; u < NRURBS; u++) {
+ 				struct urb *urb = subs->urb[u];
+@@ -268,60 +268,60 @@ static void usX2Y_clients_stop(struct usX2Ydev *usX2Y)
+ 			}
+ 		}
+ 	}
+-	usX2Y->prepare_subs = NULL;
+-	wake_up(&usX2Y->prepare_wait_queue);
++	usx2y->prepare_subs = NULL;
++	wake_up(&usx2y->prepare_wait_queue);
+ }
+ 
+-static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
+-				   struct snd_usX2Y_substream *subs, struct urb *urb)
++static void usx2y_error_urb_status(struct usx2ydev *usx2y,
++				   struct snd_usx2y_substream *subs, struct urb *urb)
+ {
+ 	snd_printk(KERN_ERR "ep=%i stalled with status=%i\n", subs->endpoint, urb->status);
+ 	urb->status = 0;
+-	usX2Y_clients_stop(usX2Y);
++	usx2y_clients_stop(usx2y);
+ }
+ 
+-static void i_usX2Y_urb_complete(struct urb *urb)
++static void i_usx2y_urb_complete(struct urb *urb)
+ {
+-	struct snd_usX2Y_substream *subs = urb->context;
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
++	struct snd_usx2y_substream *subs = urb->context;
++	struct usx2ydev *usx2y = subs->usx2y;
+ 
+-	if (unlikely(atomic_read(&subs->state) < state_PREPARED)) {
++	if (unlikely(atomic_read(&subs->state) < STATE_PREPARED)) {
+ 		snd_printdd("hcd_frame=%i ep=%i%s status=%i start_frame=%i\n",
+-			    usb_get_current_frame_number(usX2Y->dev),
++			    usb_get_current_frame_number(usx2y->dev),
+ 			    subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
+ 			    urb->status, urb->start_frame);
+ 		return;
+ 	}
+ 	if (unlikely(urb->status)) {
+-		usX2Y_error_urb_status(usX2Y, subs, urb);
++		usx2y_error_urb_status(usx2y, subs, urb);
+ 		return;
+ 	}
+ 
+ 	subs->completed_urb = urb;
+ 
+ 	{
+-		struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
+-			*playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
++		struct snd_usx2y_substream *capsubs = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE],
++			*playbacksubs = usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+ 		if (capsubs->completed_urb &&
+-		    atomic_read(&capsubs->state) >= state_PREPARED &&
++		    atomic_read(&capsubs->state) >= STATE_PREPARED &&
+ 		    (playbacksubs->completed_urb ||
+-		     atomic_read(&playbacksubs->state) < state_PREPARED)) {
+-			if (!usX2Y_usbframe_complete(capsubs, playbacksubs, urb->start_frame))
+-				usX2Y->wait_iso_frame += nr_of_packs();
++		     atomic_read(&playbacksubs->state) < STATE_PREPARED)) {
++			if (!usx2y_usbframe_complete(capsubs, playbacksubs, urb->start_frame))
++				usx2y->wait_iso_frame += nr_of_packs();
+ 			else {
+ 				snd_printdd("\n");
+-				usX2Y_clients_stop(usX2Y);
++				usx2y_clients_stop(usx2y);
+ 			}
+ 		}
+ 	}
+ }
+ 
+-static void usX2Y_urbs_set_complete(struct usX2Ydev * usX2Y,
++static void usx2y_urbs_set_complete(struct usx2ydev * usx2y,
+ 				    void (*complete)(struct urb *))
+ {
+ 	int s, u;
+ 	for (s = 0; s < 4; s++) {
+-		struct snd_usX2Y_substream *subs = usX2Y->subs[s];
++		struct snd_usx2y_substream *subs = usx2y->subs[s];
+ 		if (NULL != subs)
+ 			for (u = 0; u < NRURBS; u++) {
+ 				struct urb * urb = subs->urb[u];
+@@ -331,30 +331,30 @@ static void usX2Y_urbs_set_complete(struct usX2Ydev * usX2Y,
+ 	}
+ }
+ 
+-static void usX2Y_subs_startup_finish(struct usX2Ydev * usX2Y)
++static void usx2y_subs_startup_finish(struct usx2ydev * usx2y)
+ {
+-	usX2Y_urbs_set_complete(usX2Y, i_usX2Y_urb_complete);
+-	usX2Y->prepare_subs = NULL;
++	usx2y_urbs_set_complete(usx2y, i_usx2y_urb_complete);
++	usx2y->prepare_subs = NULL;
+ }
+ 
+-static void i_usX2Y_subs_startup(struct urb *urb)
++static void i_usx2y_subs_startup(struct urb *urb)
+ {
+-	struct snd_usX2Y_substream *subs = urb->context;
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
+-	struct snd_usX2Y_substream *prepare_subs = usX2Y->prepare_subs;
++	struct snd_usx2y_substream *subs = urb->context;
++	struct usx2ydev *usx2y = subs->usx2y;
++	struct snd_usx2y_substream *prepare_subs = usx2y->prepare_subs;
+ 	if (NULL != prepare_subs)
+ 		if (urb->start_frame == prepare_subs->urb[0]->start_frame) {
+-			usX2Y_subs_startup_finish(usX2Y);
++			usx2y_subs_startup_finish(usx2y);
+ 			atomic_inc(&prepare_subs->state);
+-			wake_up(&usX2Y->prepare_wait_queue);
++			wake_up(&usx2y->prepare_wait_queue);
+ 		}
+ 
+-	i_usX2Y_urb_complete(urb);
++	i_usx2y_urb_complete(urb);
+ }
+ 
+-static void usX2Y_subs_prepare(struct snd_usX2Y_substream *subs)
++static void usx2y_subs_prepare(struct snd_usx2y_substream *subs)
+ {
+-	snd_printdd("usX2Y_substream_prepare(%p) ep=%i urb0=%p urb1=%p\n",
++	snd_printdd("usx2y_substream_prepare(%p) ep=%i urb0=%p urb1=%p\n",
+ 		    subs, subs->endpoint, subs->urb[0], subs->urb[1]);
+ 	/* reset the pointer */
+ 	subs->hwptr = 0;
+@@ -363,7 +363,7 @@ static void usX2Y_subs_prepare(struct snd_usX2Y_substream *subs)
+ }
+ 
+ 
+-static void usX2Y_urb_release(struct urb **urb, int free_tb)
++static void usx2y_urb_release(struct urb **urb, int free_tb)
+ {
+ 	if (*urb) {
+ 		usb_kill_urb(*urb);
+@@ -376,13 +376,13 @@ static void usX2Y_urb_release(struct urb **urb, int free_tb)
+ /*
+  * release a substreams urbs
+  */
+-static void usX2Y_urbs_release(struct snd_usX2Y_substream *subs)
++static void usx2y_urbs_release(struct snd_usx2y_substream *subs)
+ {
+ 	int i;
+-	snd_printdd("usX2Y_urbs_release() %i\n", subs->endpoint);
++	snd_printdd("usx2y_urbs_release() %i\n", subs->endpoint);
+ 	for (i = 0; i < NRURBS; i++)
+-		usX2Y_urb_release(subs->urb + i,
+-				  subs != subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK]);
++		usx2y_urb_release(subs->urb + i,
++				  subs != subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK]);
+ 
+ 	kfree(subs->tmpbuf);
+ 	subs->tmpbuf = NULL;
+@@ -390,12 +390,12 @@ static void usX2Y_urbs_release(struct snd_usX2Y_substream *subs)
+ /*
+  * initialize a substream's urbs
+  */
+-static int usX2Y_urbs_allocate(struct snd_usX2Y_substream *subs)
++static int usx2y_urbs_allocate(struct snd_usx2y_substream *subs)
+ {
+ 	int i;
+ 	unsigned int pipe;
+-	int is_playback = subs == subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+-	struct usb_device *dev = subs->usX2Y->dev;
++	int is_playback = subs == subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
++	struct usb_device *dev = subs->usx2y->dev;
+ 
+ 	pipe = is_playback ? usb_sndisocpipe(dev, subs->endpoint) :
+ 			usb_rcvisocpipe(dev, subs->endpoint);
+@@ -417,7 +417,7 @@ static int usX2Y_urbs_allocate(struct snd_usX2Y_substream *subs)
+ 		}
+ 		*purb = usb_alloc_urb(nr_of_packs(), GFP_KERNEL);
+ 		if (NULL == *purb) {
+-			usX2Y_urbs_release(subs);
++			usx2y_urbs_release(subs);
+ 			return -ENOMEM;
+ 		}
+ 		if (!is_playback && !(*purb)->transfer_buffer) {
+@@ -426,7 +426,7 @@ static int usX2Y_urbs_allocate(struct snd_usX2Y_substream *subs)
+ 				kmalloc_array(subs->maxpacksize,
+ 					      nr_of_packs(), GFP_KERNEL);
+ 			if (NULL == (*purb)->transfer_buffer) {
+-				usX2Y_urbs_release(subs);
++				usx2y_urbs_release(subs);
+ 				return -ENOMEM;
+ 			}
+ 		}
+@@ -435,43 +435,43 @@ static int usX2Y_urbs_allocate(struct snd_usX2Y_substream *subs)
+ 		(*purb)->number_of_packets = nr_of_packs();
+ 		(*purb)->context = subs;
+ 		(*purb)->interval = 1;
+-		(*purb)->complete = i_usX2Y_subs_startup;
++		(*purb)->complete = i_usx2y_subs_startup;
+ 	}
+ 	return 0;
+ }
+ 
+-static void usX2Y_subs_startup(struct snd_usX2Y_substream *subs)
++static void usx2y_subs_startup(struct snd_usx2y_substream *subs)
+ {
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
+-	usX2Y->prepare_subs = subs;
++	struct usx2ydev *usx2y = subs->usx2y;
++	usx2y->prepare_subs = subs;
+ 	subs->urb[0]->start_frame = -1;
+ 	wmb();
+-	usX2Y_urbs_set_complete(usX2Y, i_usX2Y_subs_startup);
++	usx2y_urbs_set_complete(usx2y, i_usx2y_subs_startup);
+ }
+ 
+-static int usX2Y_urbs_start(struct snd_usX2Y_substream *subs)
++static int usx2y_urbs_start(struct snd_usx2y_substream *subs)
+ {
+ 	int i, err;
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
++	struct usx2ydev *usx2y = subs->usx2y;
+ 
+-	if ((err = usX2Y_urbs_allocate(subs)) < 0)
++	if ((err = usx2y_urbs_allocate(subs)) < 0)
+ 		return err;
+ 	subs->completed_urb = NULL;
+ 	for (i = 0; i < 4; i++) {
+-		struct snd_usX2Y_substream *subs = usX2Y->subs[i];
+-		if (subs != NULL && atomic_read(&subs->state) >= state_PREPARED)
++		struct snd_usx2y_substream *subs = usx2y->subs[i];
++		if (subs != NULL && atomic_read(&subs->state) >= STATE_PREPARED)
+ 			goto start;
+ 	}
+ 
+  start:
+-	usX2Y_subs_startup(subs);
++	usx2y_subs_startup(subs);
+ 	for (i = 0; i < NRURBS; i++) {
+ 		struct urb *urb = subs->urb[i];
+ 		if (usb_pipein(urb->pipe)) {
+ 			unsigned long pack;
+ 			if (0 == i)
+-				atomic_set(&subs->state, state_STARTING3);
+-			urb->dev = usX2Y->dev;
++				atomic_set(&subs->state, STATE_STARTING3);
++			urb->dev = usx2y->dev;
+ 			for (pack = 0; pack < nr_of_packs(); pack++) {
+ 				urb->iso_frame_desc[pack].offset = subs->maxpacksize * pack;
+ 				urb->iso_frame_desc[pack].length = subs->maxpacksize;
+@@ -483,22 +483,22 @@ static int usX2Y_urbs_start(struct snd_usX2Y_substream *subs)
+ 				goto cleanup;
+ 			} else
+ 				if (i == 0)
+-					usX2Y->wait_iso_frame = urb->start_frame;
++					usx2y->wait_iso_frame = urb->start_frame;
+ 			urb->transfer_flags = 0;
+ 		} else {
+-			atomic_set(&subs->state, state_STARTING1);
++			atomic_set(&subs->state, STATE_STARTING1);
+ 			break;
+ 		}
+ 	}
+ 	err = 0;
+-	wait_event(usX2Y->prepare_wait_queue, NULL == usX2Y->prepare_subs);
+-	if (atomic_read(&subs->state) != state_PREPARED)
++	wait_event(usx2y->prepare_wait_queue, NULL == usx2y->prepare_subs);
++	if (atomic_read(&subs->state) != STATE_PREPARED)
+ 		err = -EPIPE;
+ 
+  cleanup:
+ 	if (err) {
+-		usX2Y_subs_startup_finish(usX2Y);
+-		usX2Y_clients_stop(usX2Y);		// something is completely wroong > stop evrything
++		usx2y_subs_startup_finish(usx2y);
++		usx2y_clients_stop(usx2y);		// something is completely wrong -> stop everything
+ 	}
+ 	return err;
+ }
+@@ -506,33 +506,33 @@ static int usX2Y_urbs_start(struct snd_usX2Y_substream *subs)
+ /*
+  * return the current pcm pointer.  just return the hwptr_done value.
+  */
+-static snd_pcm_uframes_t snd_usX2Y_pcm_pointer(struct snd_pcm_substream *substream)
++static snd_pcm_uframes_t snd_usx2y_pcm_pointer(struct snd_pcm_substream *substream)
+ {
+-	struct snd_usX2Y_substream *subs = substream->runtime->private_data;
++	struct snd_usx2y_substream *subs = substream->runtime->private_data;
+ 	return subs->hwptr_done;
+ }
+ /*
+  * start/stop substream
+  */
+-static int snd_usX2Y_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
++static int snd_usx2y_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+ {
+-	struct snd_usX2Y_substream *subs = substream->runtime->private_data;
++	struct snd_usx2y_substream *subs = substream->runtime->private_data;
+ 
+ 	switch (cmd) {
+ 	case SNDRV_PCM_TRIGGER_START:
+-		snd_printdd("snd_usX2Y_pcm_trigger(START)\n");
+-		if (atomic_read(&subs->state) == state_PREPARED &&
+-		    atomic_read(&subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE]->state) >= state_PREPARED) {
+-			atomic_set(&subs->state, state_PRERUNNING);
++		snd_printdd("snd_usx2y_pcm_trigger(START)\n");
++		if (atomic_read(&subs->state) == STATE_PREPARED &&
++		    atomic_read(&subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE]->state) >= STATE_PREPARED) {
++			atomic_set(&subs->state, STATE_PRERUNNING);
+ 		} else {
+ 			snd_printdd("\n");
+ 			return -EPIPE;
+ 		}
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_STOP:
+-		snd_printdd("snd_usX2Y_pcm_trigger(STOP)\n");
+-		if (atomic_read(&subs->state) >= state_PRERUNNING)
+-			atomic_set(&subs->state, state_PREPARED);
++		snd_printdd("snd_usx2y_pcm_trigger(STOP)\n");
++		if (atomic_read(&subs->state) >= STATE_PRERUNNING)
++			atomic_set(&subs->state, STATE_PREPARED);
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -553,7 +553,7 @@ static const struct s_c2
+ {
+ 	char c1, c2;
+ }
+-	SetRate44100[] =
++	setrate_44100[] =
+ {
+ 	{ 0x14, 0x08},	// this line sets 44100, well actually a little less
+ 	{ 0x18, 0x40},	// only tascam / frontier design knows the further lines .......
+@@ -589,7 +589,7 @@ static const struct s_c2
+ 	{ 0x18, 0x7C},
+ 	{ 0x18, 0x7E}
+ };
+-static const struct s_c2 SetRate48000[] =
++static const struct s_c2 setrate_48000[] =
+ {
+ 	{ 0x14, 0x09},	// this line sets 48000, well actually a little less
+ 	{ 0x18, 0x40},	// only tascam / frontier design knows the further lines .......
+@@ -625,26 +625,26 @@ static const struct s_c2 SetRate48000[] =
+ 	{ 0x18, 0x7C},
+ 	{ 0x18, 0x7E}
+ };
+-#define NOOF_SETRATE_URBS ARRAY_SIZE(SetRate48000)
++#define NOOF_SETRATE_URBS ARRAY_SIZE(setrate_48000)
+ 
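Each entry in the setrate tables above is a two-byte message for the device; usx2y_rate_set() below wraps every entry in an asynchronous bulk URB to endpoint 4. A minimal synchronous sketch of the same transfer, for illustration only (the helper name and the use of usb_bulk_msg() are assumptions, not part of this patch):

	/* Hedged sketch: send one { c1, c2 } table entry as a 2-byte bulk
	 * transfer to endpoint 4.  kmalloc() is used because usb_bulk_msg()
	 * needs a DMA-able buffer; the timeout is in milliseconds. */
	static int send_setrate_entry(struct usb_device *dev, char c1, char c2)
	{
		char *buf;
		int actual = 0, err;

		buf = kmalloc(2, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		buf[0] = c1;
		buf[1] = c2;
		err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 4),
				   buf, 2, &actual, 1000);
		kfree(buf);
		return err;
	}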
+-static void i_usX2Y_04Int(struct urb *urb)
++static void i_usx2y_04int(struct urb *urb)
+ {
+-	struct usX2Ydev *usX2Y = urb->context;
++	struct usx2ydev *usx2y = urb->context;
+ 	
+ 	if (urb->status)
+-		snd_printk(KERN_ERR "snd_usX2Y_04Int() urb->status=%i\n", urb->status);
+-	if (0 == --usX2Y->US04->len)
+-		wake_up(&usX2Y->In04WaitQueue);
++		snd_printk(KERN_ERR "snd_usx2y_04int() urb->status=%i\n", urb->status);
++	if (0 == --usx2y->us04->len)
++		wake_up(&usx2y->in04_wait_queue);
+ }
+ 
+-static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate)
++static int usx2y_rate_set(struct usx2ydev *usx2y, int rate)
+ {
+ 	int			err = 0, i;
+-	struct snd_usX2Y_urbSeq	*us = NULL;
++	struct snd_usx2y_urb_seq	*us = NULL;
+ 	int			*usbdata = NULL;
+-	const struct s_c2	*ra = rate == 48000 ? SetRate48000 : SetRate44100;
++	const struct s_c2	*ra = rate == 48000 ? setrate_48000 : setrate_44100;
+ 
+-	if (usX2Y->rate != rate) {
++	if (usx2y->rate != rate) {
+ 		us = kzalloc(sizeof(*us) + sizeof(struct urb*) * NOOF_SETRATE_URBS, GFP_KERNEL);
+ 		if (NULL == us) {
+ 			err = -ENOMEM;
+@@ -663,17 +663,17 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate)
+ 			}
+ 			((char*)(usbdata + i))[0] = ra[i].c1;
+ 			((char*)(usbdata + i))[1] = ra[i].c2;
+-			usb_fill_bulk_urb(us->urb[i], usX2Y->dev, usb_sndbulkpipe(usX2Y->dev, 4),
+-					  usbdata + i, 2, i_usX2Y_04Int, usX2Y);
++			usb_fill_bulk_urb(us->urb[i], usx2y->dev, usb_sndbulkpipe(usx2y->dev, 4),
++					  usbdata + i, 2, i_usx2y_04int, usx2y);
+ 		}
+ 		err = usb_urb_ep_type_check(us->urb[0]);
+ 		if (err < 0)
+ 			goto cleanup;
+ 		us->submitted =	0;
+ 		us->len =	NOOF_SETRATE_URBS;
+-		usX2Y->US04 =	us;
+-		wait_event_timeout(usX2Y->In04WaitQueue, 0 == us->len, HZ);
+-		usX2Y->US04 =	NULL;
++		usx2y->us04 =	us;
++		wait_event_timeout(usx2y->in04_wait_queue, 0 == us->len, HZ);
++		usx2y->us04 =	NULL;
+ 		if (us->len)
+ 			err = -ENODEV;
+ 	cleanup:
+@@ -690,11 +690,11 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate)
+ 				}
+ 				usb_free_urb(urb);
+ 			}
+-			usX2Y->US04 = NULL;
++			usx2y->us04 = NULL;
+ 			kfree(usbdata);
+ 			kfree(us);
+ 			if (!err)
+-				usX2Y->rate = rate;
++				usx2y->rate = rate;
+ 		}
+ 	}
+ 
+@@ -702,53 +702,53 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate)
+ }
+ 
+ 
+-static int usX2Y_format_set(struct usX2Ydev *usX2Y, snd_pcm_format_t format)
++static int usx2y_format_set(struct usx2ydev *usx2y, snd_pcm_format_t format)
+ {
+ 	int alternate, err;
+ 	struct list_head* p;
+ 	if (format == SNDRV_PCM_FORMAT_S24_3LE) {
+ 		alternate = 2;
+-		usX2Y->stride = 6;
++		usx2y->stride = 6;
+ 	} else {
+ 		alternate = 1;
+-		usX2Y->stride = 4;
++		usx2y->stride = 4;
+ 	}
+-	list_for_each(p, &usX2Y->midi_list) {
++	list_for_each(p, &usx2y->midi_list) {
+ 		snd_usbmidi_input_stop(p);
+ 	}
+-	usb_kill_urb(usX2Y->In04urb);
+-	if ((err = usb_set_interface(usX2Y->dev, 0, alternate))) {
++	usb_kill_urb(usx2y->in04_urb);
++	if ((err = usb_set_interface(usx2y->dev, 0, alternate))) {
+ 		snd_printk(KERN_ERR "usb_set_interface error \n");
+ 		return err;
+ 	}
+-	usX2Y->In04urb->dev = usX2Y->dev;
+-	err = usb_submit_urb(usX2Y->In04urb, GFP_KERNEL);
+-	list_for_each(p, &usX2Y->midi_list) {
++	usx2y->in04_urb->dev = usx2y->dev;
++	err = usb_submit_urb(usx2y->in04_urb, GFP_KERNEL);
++	list_for_each(p, &usx2y->midi_list) {
+ 		snd_usbmidi_input_start(p);
+ 	}
+-	usX2Y->format = format;
+-	usX2Y->rate = 0;
++	usx2y->format = format;
++	usx2y->rate = 0;
+ 	return err;
+ }
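The stride chosen above is the size of one audio frame in bytes: with the device's two channels that is 2 x 3 bytes for S24_3LE (stride 6) and 2 x 2 bytes for 16-bit samples (stride 4). A one-line sketch of that relation (the helper name is illustrative, not from this patch):

	/* Sketch: bytes per PCM frame for the two formats handled above,
	 * assuming 2 channels: 2 * 3 = 6 for S24_3LE, else 2 * 2 = 4. */
	static int bytes_per_frame(snd_pcm_format_t format)
	{
		return format == SNDRV_PCM_FORMAT_S24_3LE ? 2 * 3 : 2 * 2;
	}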
+ 
+ 
+-static int snd_usX2Y_pcm_hw_params(struct snd_pcm_substream *substream,
++static int snd_usx2y_pcm_hw_params(struct snd_pcm_substream *substream,
+ 				   struct snd_pcm_hw_params *hw_params)
+ {
+ 	int			err = 0;
+ 	unsigned int		rate = params_rate(hw_params);
+ 	snd_pcm_format_t	format = params_format(hw_params);
+ 	struct snd_card *card = substream->pstr->pcm->card;
+-	struct usX2Ydev	*dev = usX2Y(card);
++	struct usx2ydev	*dev = usx2y(card);
+ 	int i;
+ 
+-	mutex_lock(&usX2Y(card)->pcm_mutex);
+-	snd_printdd("snd_usX2Y_hw_params(%p, %p)\n", substream, hw_params);
+-	/* all pcm substreams off one usX2Y have to operate at the same
++	mutex_lock(&usx2y(card)->pcm_mutex);
++	snd_printdd("snd_usx2y_hw_params(%p, %p)\n", substream, hw_params);
++	/* all pcm substreams of one usx2y have to operate at the same
+ 	 * rate & format
+ 	 */
+ 	for (i = 0; i < dev->pcm_devs * 2; i++) {
+-		struct snd_usX2Y_substream *subs = dev->subs[i];
++		struct snd_usx2y_substream *subs = dev->subs[i];
+ 		struct snd_pcm_substream *test_substream;
+ 
+ 		if (!subs)
+@@ -767,39 +767,39 @@ static int snd_usX2Y_pcm_hw_params(struct snd_pcm_substream *substream,
+ 	}
+ 
+  error:
+-	mutex_unlock(&usX2Y(card)->pcm_mutex);
++	mutex_unlock(&usx2y(card)->pcm_mutex);
+ 	return err;
+ }
+ 
+ /*
+  * free the buffer
+  */
+-static int snd_usX2Y_pcm_hw_free(struct snd_pcm_substream *substream)
++static int snd_usx2y_pcm_hw_free(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+-	struct snd_usX2Y_substream *subs = runtime->private_data;
+-	mutex_lock(&subs->usX2Y->pcm_mutex);
+-	snd_printdd("snd_usX2Y_hw_free(%p)\n", substream);
++	struct snd_usx2y_substream *subs = runtime->private_data;
++	mutex_lock(&subs->usx2y->pcm_mutex);
++	snd_printdd("snd_usx2y_hw_free(%p)\n", substream);
+ 
+ 	if (SNDRV_PCM_STREAM_PLAYBACK == substream->stream) {
+-		struct snd_usX2Y_substream *cap_subs = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
+-		atomic_set(&subs->state, state_STOPPED);
+-		usX2Y_urbs_release(subs);
++		struct snd_usx2y_substream *cap_subs = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
++		atomic_set(&subs->state, STATE_STOPPED);
++		usx2y_urbs_release(subs);
+ 		if (!cap_subs->pcm_substream ||
+ 		    !cap_subs->pcm_substream->runtime ||
+ 		    !cap_subs->pcm_substream->runtime->status ||
+ 		    cap_subs->pcm_substream->runtime->status->state < SNDRV_PCM_STATE_PREPARED) {
+-			atomic_set(&cap_subs->state, state_STOPPED);
+-			usX2Y_urbs_release(cap_subs);
++			atomic_set(&cap_subs->state, STATE_STOPPED);
++			usx2y_urbs_release(cap_subs);
+ 		}
+ 	} else {
+-		struct snd_usX2Y_substream *playback_subs = subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+-		if (atomic_read(&playback_subs->state) < state_PREPARED) {
+-			atomic_set(&subs->state, state_STOPPED);
+-			usX2Y_urbs_release(subs);
++		struct snd_usx2y_substream *playback_subs = subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
++		if (atomic_read(&playback_subs->state) < STATE_PREPARED) {
++			atomic_set(&subs->state, STATE_STOPPED);
++			usx2y_urbs_release(subs);
+ 		}
+ 	}
+-	mutex_unlock(&subs->usX2Y->pcm_mutex);
++	mutex_unlock(&subs->usx2y->pcm_mutex);
+ 	return 0;
+ }
+ /*
+@@ -807,40 +807,40 @@ static int snd_usX2Y_pcm_hw_free(struct snd_pcm_substream *substream)
+  *
+  * set format and initialize urbs
+  */
+-static int snd_usX2Y_pcm_prepare(struct snd_pcm_substream *substream)
++static int snd_usx2y_pcm_prepare(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+-	struct snd_usX2Y_substream *subs = runtime->private_data;
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
+-	struct snd_usX2Y_substream *capsubs = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
++	struct snd_usx2y_substream *subs = runtime->private_data;
++	struct usx2ydev *usx2y = subs->usx2y;
++	struct snd_usx2y_substream *capsubs = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ 	int err = 0;
+-	snd_printdd("snd_usX2Y_pcm_prepare(%p)\n", substream);
++	snd_printdd("snd_usx2y_pcm_prepare(%p)\n", substream);
+ 
+-	mutex_lock(&usX2Y->pcm_mutex);
+-	usX2Y_subs_prepare(subs);
++	mutex_lock(&usx2y->pcm_mutex);
++	usx2y_subs_prepare(subs);
+ // Start hardware streams
+ // SyncStream first....
+-	if (atomic_read(&capsubs->state) < state_PREPARED) {
+-		if (usX2Y->format != runtime->format)
+-			if ((err = usX2Y_format_set(usX2Y, runtime->format)) < 0)
++	if (atomic_read(&capsubs->state) < STATE_PREPARED) {
++		if (usx2y->format != runtime->format)
++			if ((err = usx2y_format_set(usx2y, runtime->format)) < 0)
+ 				goto up_prepare_mutex;
+-		if (usX2Y->rate != runtime->rate)
+-			if ((err = usX2Y_rate_set(usX2Y, runtime->rate)) < 0)
++		if (usx2y->rate != runtime->rate)
++			if ((err = usx2y_rate_set(usx2y, runtime->rate)) < 0)
+ 				goto up_prepare_mutex;
+ 		snd_printdd("starting capture pipe for %s\n", subs == capsubs ? "self" : "playpipe");
+-		if (0 > (err = usX2Y_urbs_start(capsubs)))
++		if (0 > (err = usx2y_urbs_start(capsubs)))
+ 			goto up_prepare_mutex;
+ 	}
+ 
+-	if (subs != capsubs && atomic_read(&subs->state) < state_PREPARED)
+-		err = usX2Y_urbs_start(subs);
++	if (subs != capsubs && atomic_read(&subs->state) < STATE_PREPARED)
++		err = usx2y_urbs_start(subs);
+ 
+  up_prepare_mutex:
+-	mutex_unlock(&usX2Y->pcm_mutex);
++	mutex_unlock(&usx2y->pcm_mutex);
+ 	return err;
+ }
+ 
+-static const struct snd_pcm_hardware snd_usX2Y_2c =
++static const struct snd_pcm_hardware snd_usx2y_2c =
+ {
+ 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
+ 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
+@@ -862,16 +862,16 @@ static const struct snd_pcm_hardware snd_usX2Y_2c =
+ 
+ 
+ 
+-static int snd_usX2Y_pcm_open(struct snd_pcm_substream *substream)
++static int snd_usx2y_pcm_open(struct snd_pcm_substream *substream)
+ {
+-	struct snd_usX2Y_substream	*subs = ((struct snd_usX2Y_substream **)
++	struct snd_usx2y_substream	*subs = ((struct snd_usx2y_substream **)
+ 					 snd_pcm_substream_chip(substream))[substream->stream];
+ 	struct snd_pcm_runtime	*runtime = substream->runtime;
+ 
+-	if (subs->usX2Y->chip_status & USX2Y_STAT_CHIP_MMAP_PCM_URBS)
++	if (subs->usx2y->chip_status & USX2Y_STAT_CHIP_MMAP_PCM_URBS)
+ 		return -EBUSY;
+ 
+-	runtime->hw = snd_usX2Y_2c;
++	runtime->hw = snd_usx2y_2c;
+ 	runtime->private_data = subs;
+ 	subs->pcm_substream = substream;
+ 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 1000, 200000);
+@@ -880,10 +880,10 @@ static int snd_usX2Y_pcm_open(struct snd_pcm_substream *substream)
+ 
+ 
+ 
+-static int snd_usX2Y_pcm_close(struct snd_pcm_substream *substream)
++static int snd_usx2y_pcm_close(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+-	struct snd_usX2Y_substream *subs = runtime->private_data;
++	struct snd_usx2y_substream *subs = runtime->private_data;
+ 
+ 	subs->pcm_substream = NULL;
+ 
+@@ -891,75 +891,75 @@ static int snd_usX2Y_pcm_close(struct snd_pcm_substream *substream)
+ }
+ 
+ 
+-static const struct snd_pcm_ops snd_usX2Y_pcm_ops =
++static const struct snd_pcm_ops snd_usx2y_pcm_ops =
+ {
+-	.open =		snd_usX2Y_pcm_open,
+-	.close =	snd_usX2Y_pcm_close,
+-	.hw_params =	snd_usX2Y_pcm_hw_params,
+-	.hw_free =	snd_usX2Y_pcm_hw_free,
+-	.prepare =	snd_usX2Y_pcm_prepare,
+-	.trigger =	snd_usX2Y_pcm_trigger,
+-	.pointer =	snd_usX2Y_pcm_pointer,
++	.open =		snd_usx2y_pcm_open,
++	.close =	snd_usx2y_pcm_close,
++	.hw_params =	snd_usx2y_pcm_hw_params,
++	.hw_free =	snd_usx2y_pcm_hw_free,
++	.prepare =	snd_usx2y_pcm_prepare,
++	.trigger =	snd_usx2y_pcm_trigger,
++	.pointer =	snd_usx2y_pcm_pointer,
+ };
+ 
+ 
+ /*
+  * free a usb stream instance
+  */
+-static void usX2Y_audio_stream_free(struct snd_usX2Y_substream **usX2Y_substream)
++static void usx2y_audio_stream_free(struct snd_usx2y_substream **usx2y_substream)
+ {
+ 	int stream;
+ 
+ 	for_each_pcm_streams(stream) {
+-		kfree(usX2Y_substream[stream]);
+-		usX2Y_substream[stream] = NULL;
++		kfree(usx2y_substream[stream]);
++		usx2y_substream[stream] = NULL;
+ 	}
+ }
+ 
+-static void snd_usX2Y_pcm_private_free(struct snd_pcm *pcm)
++static void snd_usx2y_pcm_private_free(struct snd_pcm *pcm)
+ {
+-	struct snd_usX2Y_substream **usX2Y_stream = pcm->private_data;
+-	if (usX2Y_stream)
+-		usX2Y_audio_stream_free(usX2Y_stream);
++	struct snd_usx2y_substream **usx2y_stream = pcm->private_data;
++	if (usx2y_stream)
++		usx2y_audio_stream_free(usx2y_stream);
+ }
+ 
+-static int usX2Y_audio_stream_new(struct snd_card *card, int playback_endpoint, int capture_endpoint)
++static int usx2y_audio_stream_new(struct snd_card *card, int playback_endpoint, int capture_endpoint)
+ {
+ 	struct snd_pcm *pcm;
+ 	int err, i;
+-	struct snd_usX2Y_substream **usX2Y_substream =
+-		usX2Y(card)->subs + 2 * usX2Y(card)->pcm_devs;
++	struct snd_usx2y_substream **usx2y_substream =
++		usx2y(card)->subs + 2 * usx2y(card)->pcm_devs;
+ 
+ 	for (i = playback_endpoint ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+ 	     i <= SNDRV_PCM_STREAM_CAPTURE; ++i) {
+-		usX2Y_substream[i] = kzalloc(sizeof(struct snd_usX2Y_substream), GFP_KERNEL);
+-		if (!usX2Y_substream[i])
++		usx2y_substream[i] = kzalloc(sizeof(struct snd_usx2y_substream), GFP_KERNEL);
++		if (!usx2y_substream[i])
+ 			return -ENOMEM;
+ 
+-		usX2Y_substream[i]->usX2Y = usX2Y(card);
++		usx2y_substream[i]->usx2y = usx2y(card);
+ 	}
+ 
+ 	if (playback_endpoint)
+-		usX2Y_substream[SNDRV_PCM_STREAM_PLAYBACK]->endpoint = playback_endpoint;
+-	usX2Y_substream[SNDRV_PCM_STREAM_CAPTURE]->endpoint = capture_endpoint;
++		usx2y_substream[SNDRV_PCM_STREAM_PLAYBACK]->endpoint = playback_endpoint;
++	usx2y_substream[SNDRV_PCM_STREAM_CAPTURE]->endpoint = capture_endpoint;
+ 
+-	err = snd_pcm_new(card, NAME_ALLCAPS" Audio", usX2Y(card)->pcm_devs,
++	err = snd_pcm_new(card, NAME_ALLCAPS" Audio", usx2y(card)->pcm_devs,
+ 			  playback_endpoint ? 1 : 0, 1,
+ 			  &pcm);
+ 	if (err < 0) {
+-		usX2Y_audio_stream_free(usX2Y_substream);
++		usx2y_audio_stream_free(usx2y_substream);
+ 		return err;
+ 	}
+ 
+ 	if (playback_endpoint)
+-		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usX2Y_pcm_ops);
+-	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usX2Y_pcm_ops);
++		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usx2y_pcm_ops);
++	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usx2y_pcm_ops);
+ 
+-	pcm->private_data = usX2Y_substream;
+-	pcm->private_free = snd_usX2Y_pcm_private_free;
++	pcm->private_data = usx2y_substream;
++	pcm->private_free = snd_usx2y_pcm_private_free;
+ 	pcm->info_flags = 0;
+ 
+-	sprintf(pcm->name, NAME_ALLCAPS" Audio #%d", usX2Y(card)->pcm_devs);
++	sprintf(pcm->name, NAME_ALLCAPS" Audio #%d", usx2y(card)->pcm_devs);
+ 
+ 	if (playback_endpoint) {
+ 		snd_pcm_set_managed_buffer(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
+@@ -972,7 +972,7 @@ static int usX2Y_audio_stream_new(struct snd_card *card, int playback_endpoint,
+ 				   SNDRV_DMA_TYPE_CONTINUOUS,
+ 				   NULL,
+ 				   64*1024, 128*1024);
+-	usX2Y(card)->pcm_devs++;
++	usx2y(card)->pcm_devs++;
+ 
+ 	return 0;
+ }
+@@ -980,18 +980,18 @@ static int usX2Y_audio_stream_new(struct snd_card *card, int playback_endpoint,
+ /*
+  * create a chip instance and set its names.
+  */
+-int usX2Y_audio_create(struct snd_card *card)
++int usx2y_audio_create(struct snd_card *card)
+ {
+ 	int err = 0;
+ 	
+-	INIT_LIST_HEAD(&usX2Y(card)->pcm_list);
++	INIT_LIST_HEAD(&usx2y(card)->pcm_list);
+ 
+-	if (0 > (err = usX2Y_audio_stream_new(card, 0xA, 0x8)))
++	if (0 > (err = usx2y_audio_stream_new(card, 0xA, 0x8)))
+ 		return err;
+-	if (le16_to_cpu(usX2Y(card)->dev->descriptor.idProduct) == USB_ID_US428)
+-	     if (0 > (err = usX2Y_audio_stream_new(card, 0, 0xA)))
++	if (le16_to_cpu(usx2y(card)->dev->descriptor.idProduct) == USB_ID_US428)
++	     if (0 > (err = usx2y_audio_stream_new(card, 0, 0xA)))
+ 		     return err;
+-	if (le16_to_cpu(usX2Y(card)->dev->descriptor.idProduct) != USB_ID_US122)
+-		err = usX2Y_rate_set(usX2Y(card), 44100);	// Lets us428 recognize output-volume settings, disturbs us122.
++	if (le16_to_cpu(usx2y(card)->dev->descriptor.idProduct) != USB_ID_US122)
++		err = usx2y_rate_set(usx2y(card), 44100);	// Lets us428 recognize output-volume settings, disturbs us122.
+ 	return err;
+ }
+diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
+index 8253669c6a7d7..399470e51c411 100644
+--- a/sound/usb/usx2y/usx2yhwdeppcm.c
++++ b/sound/usb/usx2y/usx2yhwdeppcm.c
+@@ -47,17 +47,17 @@
+ #include <sound/hwdep.h>
+ 
+ 
+-static int usX2Y_usbpcm_urb_capt_retire(struct snd_usX2Y_substream *subs)
++static int usx2y_usbpcm_urb_capt_retire(struct snd_usx2y_substream *subs)
+ {
+ 	struct urb	*urb = subs->completed_urb;
+ 	struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+ 	int 		i, lens = 0, hwptr_done = subs->hwptr_done;
+-	struct usX2Ydev	*usX2Y = subs->usX2Y;
+-	if (0 > usX2Y->hwdep_pcm_shm->capture_iso_start) { //FIXME
+-		int head = usX2Y->hwdep_pcm_shm->captured_iso_head + 1;
+-		if (head >= ARRAY_SIZE(usX2Y->hwdep_pcm_shm->captured_iso))
++	struct usx2ydev	*usx2y = subs->usx2y;
++	if (0 > usx2y->hwdep_pcm_shm->capture_iso_start) { //FIXME
++		int head = usx2y->hwdep_pcm_shm->captured_iso_head + 1;
++		if (head >= ARRAY_SIZE(usx2y->hwdep_pcm_shm->captured_iso))
+ 			head = 0;
+-		usX2Y->hwdep_pcm_shm->capture_iso_start = head;
++		usx2y->hwdep_pcm_shm->capture_iso_start = head;
+ 		snd_printdd("cap start %i\n", head);
+ 	}
+ 	for (i = 0; i < nr_of_packs(); i++) {
+@@ -65,7 +65,7 @@ static int usX2Y_usbpcm_urb_capt_retire(struct snd_usX2Y_substream *subs)
+ 			snd_printk(KERN_ERR "active frame status %i. Most probably some hardware problem.\n", urb->iso_frame_desc[i].status);
+ 			return urb->iso_frame_desc[i].status;
+ 		}
+-		lens += urb->iso_frame_desc[i].actual_length / usX2Y->stride;
++		lens += urb->iso_frame_desc[i].actual_length / usx2y->stride;
+ 	}
+ 	if ((hwptr_done += lens) >= runtime->buffer_size)
+ 		hwptr_done -= runtime->buffer_size;
+@@ -79,10 +79,10 @@ static int usX2Y_usbpcm_urb_capt_retire(struct snd_usX2Y_substream *subs)
+ 	return 0;
+ }
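The retire path above advances the hardware pointer modulo the PCM ring buffer. A small sketch of the wraparound arithmetic, with made-up numbers:

	/* Sketch: with buffer_size = 1024 frames, hwptr_done = 1000 and
	 * lens = 48, 1000 + 48 = 1048 >= 1024, so the pointer wraps to 24. */
	static int advance_hwptr(int hwptr_done, int lens, int buffer_size)
	{
		hwptr_done += lens;
		if (hwptr_done >= buffer_size)
			hwptr_done -= buffer_size;
		return hwptr_done;
	}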
+ 
+-static inline int usX2Y_iso_frames_per_buffer(struct snd_pcm_runtime *runtime,
+-					      struct usX2Ydev * usX2Y)
++static inline int usx2y_iso_frames_per_buffer(struct snd_pcm_runtime *runtime,
++					      struct usx2ydev * usx2y)
+ {
+-	return (runtime->buffer_size * 1000) / usX2Y->rate + 1;	//FIXME: so far only correct period_size == 2^x ?
++	return (runtime->buffer_size * 1000) / usx2y->rate + 1;	//FIXME: so far only correct period_size == 2^x ?
+ }
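Since each isochronous USB frame carries roughly 1 ms of audio, the helper above converts a buffer length in PCM frames into a count of USB frames: for a 1024-frame buffer at 48000 Hz that is 1024 * 1000 / 48000 + 1 = 22 frames, the trailing +1 compensating for the truncating integer division.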
+ 
+ /*
+@@ -95,17 +95,17 @@ static inline int usX2Y_iso_frames_per_buffer(struct snd_pcm_runtime *runtime,
+  * it directly from the buffer.  thus the data is once copied to
+  * a temporary buffer and urb points to that.
+  */
+-static int usX2Y_hwdep_urb_play_prepare(struct snd_usX2Y_substream *subs,
++static int usx2y_hwdep_urb_play_prepare(struct snd_usx2y_substream *subs,
+ 					struct urb *urb)
+ {
+ 	int count, counts, pack;
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
+-	struct snd_usX2Y_hwdep_pcm_shm *shm = usX2Y->hwdep_pcm_shm;
++	struct usx2ydev *usx2y = subs->usx2y;
++	struct snd_usx2y_hwdep_pcm_shm *shm = usx2y->hwdep_pcm_shm;
+ 	struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+ 
+ 	if (0 > shm->playback_iso_start) {
+ 		shm->playback_iso_start = shm->captured_iso_head -
+-			usX2Y_iso_frames_per_buffer(runtime, usX2Y);
++			usx2y_iso_frames_per_buffer(runtime, usx2y);
+ 		if (0 > shm->playback_iso_start)
+ 			shm->playback_iso_start += ARRAY_SIZE(shm->captured_iso);
+ 		shm->playback_iso_head = shm->playback_iso_start;
+@@ -114,7 +114,7 @@ static int usX2Y_hwdep_urb_play_prepare(struct snd_usX2Y_substream *subs,
+ 	count = 0;
+ 	for (pack = 0; pack < nr_of_packs(); pack++) {
+ 		/* calculate the size of a packet */
+-		counts = shm->captured_iso[shm->playback_iso_head].length / usX2Y->stride;
++		counts = shm->captured_iso[shm->playback_iso_head].length / usx2y->stride;
+ 		if (counts < 43 || counts > 50) {
+ 			snd_printk(KERN_ERR "should not be here with counts=%i\n", counts);
+ 			return -EPIPE;
+@@ -122,26 +122,26 @@ static int usX2Y_hwdep_urb_play_prepare(struct snd_usX2Y_substream *subs,
+ 		/* set up descriptor */
+ 		urb->iso_frame_desc[pack].offset = shm->captured_iso[shm->playback_iso_head].offset;
+ 		urb->iso_frame_desc[pack].length = shm->captured_iso[shm->playback_iso_head].length;
+-		if (atomic_read(&subs->state) != state_RUNNING)
++		if (atomic_read(&subs->state) != STATE_RUNNING)
+ 			memset((char *)urb->transfer_buffer + urb->iso_frame_desc[pack].offset, 0,
+ 			       urb->iso_frame_desc[pack].length);
+ 		if (++shm->playback_iso_head >= ARRAY_SIZE(shm->captured_iso))
+ 			shm->playback_iso_head = 0;
+ 		count += counts;
+ 	}
+-	urb->transfer_buffer_length = count * usX2Y->stride;
++	urb->transfer_buffer_length = count * usx2y->stride;
+ 	return 0;
+ }
+ 
+ 
+-static inline void usX2Y_usbpcm_urb_capt_iso_advance(struct snd_usX2Y_substream *subs,
++static inline void usx2y_usbpcm_urb_capt_iso_advance(struct snd_usx2y_substream *subs,
+ 						     struct urb *urb)
+ {
+ 	int pack;
+ 	for (pack = 0; pack < nr_of_packs(); ++pack) {
+ 		struct usb_iso_packet_descriptor *desc = urb->iso_frame_desc + pack;
+ 		if (NULL != subs) {
+-			struct snd_usX2Y_hwdep_pcm_shm *shm = subs->usX2Y->hwdep_pcm_shm;
++			struct snd_usx2y_hwdep_pcm_shm *shm = subs->usx2y->hwdep_pcm_shm;
+ 			int head = shm->captured_iso_head + 1;
+ 			if (head >= ARRAY_SIZE(shm->captured_iso))
+ 				head = 0;
+@@ -157,9 +157,9 @@ static inline void usX2Y_usbpcm_urb_capt_iso_advance(struct snd_usX2Y_substream
+ 	}
+ }
+ 
+-static inline int usX2Y_usbpcm_usbframe_complete(struct snd_usX2Y_substream *capsubs,
+-						 struct snd_usX2Y_substream *capsubs2,
+-						 struct snd_usX2Y_substream *playbacksubs,
++static inline int usx2y_usbpcm_usbframe_complete(struct snd_usx2y_substream *capsubs,
++						 struct snd_usx2y_substream *capsubs2,
++						 struct snd_usx2y_substream *playbacksubs,
+ 						 int frame)
+ {
+ 	int err, state;
+@@ -167,25 +167,25 @@ static inline int usX2Y_usbpcm_usbframe_complete(struct snd_usX2Y_substream *cap
+ 
+ 	state = atomic_read(&playbacksubs->state);
+ 	if (NULL != urb) {
+-		if (state == state_RUNNING)
+-			usX2Y_urb_play_retire(playbacksubs, urb);
+-		else if (state >= state_PRERUNNING)
++		if (state == STATE_RUNNING)
++			usx2y_urb_play_retire(playbacksubs, urb);
++		else if (state >= STATE_PRERUNNING)
+ 			atomic_inc(&playbacksubs->state);
+ 	} else {
+ 		switch (state) {
+-		case state_STARTING1:
++		case STATE_STARTING1:
+ 			urb = playbacksubs->urb[0];
+ 			atomic_inc(&playbacksubs->state);
+ 			break;
+-		case state_STARTING2:
++		case STATE_STARTING2:
+ 			urb = playbacksubs->urb[1];
+ 			atomic_inc(&playbacksubs->state);
+ 			break;
+ 		}
+ 	}
+ 	if (urb) {
+-		if ((err = usX2Y_hwdep_urb_play_prepare(playbacksubs, urb)) ||
+-		    (err = usX2Y_urb_submit(playbacksubs, urb, frame))) {
++		if ((err = usx2y_hwdep_urb_play_prepare(playbacksubs, urb)) ||
++		    (err = usx2y_urb_submit(playbacksubs, urb, frame))) {
+ 			return err;
+ 		}
+ 	}
+@@ -193,19 +193,19 @@ static inline int usX2Y_usbpcm_usbframe_complete(struct snd_usX2Y_substream *cap
+ 	playbacksubs->completed_urb = NULL;
+ 
+ 	state = atomic_read(&capsubs->state);
+-	if (state >= state_PREPARED) {
+-		if (state == state_RUNNING) {
+-			if ((err = usX2Y_usbpcm_urb_capt_retire(capsubs)))
++	if (state >= STATE_PREPARED) {
++		if (state == STATE_RUNNING) {
++			if ((err = usx2y_usbpcm_urb_capt_retire(capsubs)))
+ 				return err;
+-		} else if (state >= state_PRERUNNING)
++		} else if (state >= STATE_PRERUNNING)
+ 			atomic_inc(&capsubs->state);
+-		usX2Y_usbpcm_urb_capt_iso_advance(capsubs, capsubs->completed_urb);
++		usx2y_usbpcm_urb_capt_iso_advance(capsubs, capsubs->completed_urb);
+ 		if (NULL != capsubs2)
+-			usX2Y_usbpcm_urb_capt_iso_advance(NULL, capsubs2->completed_urb);
+-		if ((err = usX2Y_urb_submit(capsubs, capsubs->completed_urb, frame)))
++			usx2y_usbpcm_urb_capt_iso_advance(NULL, capsubs2->completed_urb);
++		if ((err = usx2y_urb_submit(capsubs, capsubs->completed_urb, frame)))
+ 			return err;
+ 		if (NULL != capsubs2)
+-			if ((err = usX2Y_urb_submit(capsubs2, capsubs2->completed_urb, frame)))
++			if ((err = usx2y_urb_submit(capsubs2, capsubs2->completed_urb, frame)))
+ 				return err;
+ 	}
+ 	capsubs->completed_urb = NULL;
+@@ -215,42 +215,42 @@ static inline int usX2Y_usbpcm_usbframe_complete(struct snd_usX2Y_substream *cap
+ }
+ 
+ 
+-static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
++static void i_usx2y_usbpcm_urb_complete(struct urb *urb)
+ {
+-	struct snd_usX2Y_substream *subs = urb->context;
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
+-	struct snd_usX2Y_substream *capsubs, *capsubs2, *playbacksubs;
++	struct snd_usx2y_substream *subs = urb->context;
++	struct usx2ydev *usx2y = subs->usx2y;
++	struct snd_usx2y_substream *capsubs, *capsubs2, *playbacksubs;
+ 
+-	if (unlikely(atomic_read(&subs->state) < state_PREPARED)) {
++	if (unlikely(atomic_read(&subs->state) < STATE_PREPARED)) {
+ 		snd_printdd("hcd_frame=%i ep=%i%s status=%i start_frame=%i\n",
+-			    usb_get_current_frame_number(usX2Y->dev),
++			    usb_get_current_frame_number(usx2y->dev),
+ 			    subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
+ 			    urb->status, urb->start_frame);
+ 		return;
+ 	}
+ 	if (unlikely(urb->status)) {
+-		usX2Y_error_urb_status(usX2Y, subs, urb);
++		usx2y_error_urb_status(usx2y, subs, urb);
+ 		return;
+ 	}
+ 
+ 	subs->completed_urb = urb;
+-	capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
+-	capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
+-	playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+-	if (capsubs->completed_urb && atomic_read(&capsubs->state) >= state_PREPARED &&
++	capsubs = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
++	capsubs2 = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
++	playbacksubs = usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
++	if (capsubs->completed_urb && atomic_read(&capsubs->state) >= STATE_PREPARED &&
+ 	    (NULL == capsubs2 || capsubs2->completed_urb) &&
+-	    (playbacksubs->completed_urb || atomic_read(&playbacksubs->state) < state_PREPARED)) {
+-		if (!usX2Y_usbpcm_usbframe_complete(capsubs, capsubs2, playbacksubs, urb->start_frame))
+-			usX2Y->wait_iso_frame += nr_of_packs();
++	    (playbacksubs->completed_urb || atomic_read(&playbacksubs->state) < STATE_PREPARED)) {
++		if (!usx2y_usbpcm_usbframe_complete(capsubs, capsubs2, playbacksubs, urb->start_frame))
++			usx2y->wait_iso_frame += nr_of_packs();
+ 		else {
+ 			snd_printdd("\n");
+-			usX2Y_clients_stop(usX2Y);
++			usx2y_clients_stop(usx2y);
+ 		}
+ 	}
+ }
+ 
+ 
+-static void usX2Y_hwdep_urb_release(struct urb **urb)
++static void usx2y_hwdep_urb_release(struct urb **urb)
+ {
+ 	usb_kill_urb(*urb);
+ 	usb_free_urb(*urb);
+@@ -260,49 +260,49 @@ static void usX2Y_hwdep_urb_release(struct urb **urb)
+ /*
+  * release a substream
+  */
+-static void usX2Y_usbpcm_urbs_release(struct snd_usX2Y_substream *subs)
++static void usx2y_usbpcm_urbs_release(struct snd_usx2y_substream *subs)
+ {
+ 	int i;
+-	snd_printdd("snd_usX2Y_urbs_release() %i\n", subs->endpoint);
++	snd_printdd("snd_usx2y_urbs_release() %i\n", subs->endpoint);
+ 	for (i = 0; i < NRURBS; i++)
+-		usX2Y_hwdep_urb_release(subs->urb + i);
++		usx2y_hwdep_urb_release(subs->urb + i);
+ }
+ 
+-static void usX2Y_usbpcm_subs_startup_finish(struct usX2Ydev * usX2Y)
++static void usx2y_usbpcm_subs_startup_finish(struct usx2ydev * usx2y)
+ {
+-	usX2Y_urbs_set_complete(usX2Y, i_usX2Y_usbpcm_urb_complete);
+-	usX2Y->prepare_subs = NULL;
++	usx2y_urbs_set_complete(usx2y, i_usx2y_usbpcm_urb_complete);
++	usx2y->prepare_subs = NULL;
+ }
+ 
+-static void i_usX2Y_usbpcm_subs_startup(struct urb *urb)
++static void i_usx2y_usbpcm_subs_startup(struct urb *urb)
+ {
+-	struct snd_usX2Y_substream *subs = urb->context;
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
+-	struct snd_usX2Y_substream *prepare_subs = usX2Y->prepare_subs;
++	struct snd_usx2y_substream *subs = urb->context;
++	struct usx2ydev *usx2y = subs->usx2y;
++	struct snd_usx2y_substream *prepare_subs = usx2y->prepare_subs;
+ 	if (NULL != prepare_subs &&
+ 	    urb->start_frame == prepare_subs->urb[0]->start_frame) {
+ 		atomic_inc(&prepare_subs->state);
+-		if (prepare_subs == usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE]) {
+-			struct snd_usX2Y_substream *cap_subs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
++		if (prepare_subs == usx2y->subs[SNDRV_PCM_STREAM_CAPTURE]) {
++			struct snd_usx2y_substream *cap_subs2 = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
+ 			if (cap_subs2 != NULL)
+ 				atomic_inc(&cap_subs2->state);
+ 		}
+-		usX2Y_usbpcm_subs_startup_finish(usX2Y);
+-		wake_up(&usX2Y->prepare_wait_queue);
++		usx2y_usbpcm_subs_startup_finish(usx2y);
++		wake_up(&usx2y->prepare_wait_queue);
+ 	}
+ 
+-	i_usX2Y_usbpcm_urb_complete(urb);
++	i_usx2y_usbpcm_urb_complete(urb);
+ }
+ 
+ /*
+  * initialize a substream's urbs
+  */
+-static int usX2Y_usbpcm_urbs_allocate(struct snd_usX2Y_substream *subs)
++static int usx2y_usbpcm_urbs_allocate(struct snd_usx2y_substream *subs)
+ {
+ 	int i;
+ 	unsigned int pipe;
+-	int is_playback = subs == subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+-	struct usb_device *dev = subs->usX2Y->dev;
++	int is_playback = subs == subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
++	struct usb_device *dev = subs->usx2y->dev;
+ 
+ 	pipe = is_playback ? usb_sndisocpipe(dev, subs->endpoint) :
+ 			usb_rcvisocpipe(dev, subs->endpoint);
+@@ -319,21 +319,21 @@ static int usX2Y_usbpcm_urbs_allocate(struct snd_usX2Y_substream *subs)
+ 		}
+ 		*purb = usb_alloc_urb(nr_of_packs(), GFP_KERNEL);
+ 		if (NULL == *purb) {
+-			usX2Y_usbpcm_urbs_release(subs);
++			usx2y_usbpcm_urbs_release(subs);
+ 			return -ENOMEM;
+ 		}
+ 		(*purb)->transfer_buffer = is_playback ?
+-			subs->usX2Y->hwdep_pcm_shm->playback : (
++			subs->usx2y->hwdep_pcm_shm->playback : (
+ 				subs->endpoint == 0x8 ?
+-				subs->usX2Y->hwdep_pcm_shm->capture0x8 :
+-				subs->usX2Y->hwdep_pcm_shm->capture0xA);
++				subs->usx2y->hwdep_pcm_shm->capture0x8 :
++				subs->usx2y->hwdep_pcm_shm->capture0xA);
+ 
+ 		(*purb)->dev = dev;
+ 		(*purb)->pipe = pipe;
+ 		(*purb)->number_of_packets = nr_of_packs();
+ 		(*purb)->context = subs;
+ 		(*purb)->interval = 1;
+-		(*purb)->complete = i_usX2Y_usbpcm_subs_startup;
++		(*purb)->complete = i_usx2y_usbpcm_subs_startup;
+ 	}
+ 	return 0;
+ }
+@@ -341,91 +341,91 @@ static int usX2Y_usbpcm_urbs_allocate(struct snd_usX2Y_substream *subs)
+ /*
+  * free the buffer
+  */
+-static int snd_usX2Y_usbpcm_hw_free(struct snd_pcm_substream *substream)
++static int snd_usx2y_usbpcm_hw_free(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+-	struct snd_usX2Y_substream *subs = runtime->private_data,
+-		*cap_subs2 = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
+-	mutex_lock(&subs->usX2Y->pcm_mutex);
+-	snd_printdd("snd_usX2Y_usbpcm_hw_free(%p)\n", substream);
++	struct snd_usx2y_substream *subs = runtime->private_data,
++		*cap_subs2 = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
++	mutex_lock(&subs->usx2y->pcm_mutex);
++	snd_printdd("snd_usx2y_usbpcm_hw_free(%p)\n", substream);
+ 
+ 	if (SNDRV_PCM_STREAM_PLAYBACK == substream->stream) {
+-		struct snd_usX2Y_substream *cap_subs = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
+-		atomic_set(&subs->state, state_STOPPED);
+-		usX2Y_usbpcm_urbs_release(subs);
++		struct snd_usx2y_substream *cap_subs = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
++		atomic_set(&subs->state, STATE_STOPPED);
++		usx2y_usbpcm_urbs_release(subs);
+ 		if (!cap_subs->pcm_substream ||
+ 		    !cap_subs->pcm_substream->runtime ||
+ 		    !cap_subs->pcm_substream->runtime->status ||
+ 		    cap_subs->pcm_substream->runtime->status->state < SNDRV_PCM_STATE_PREPARED) {
+-			atomic_set(&cap_subs->state, state_STOPPED);
++			atomic_set(&cap_subs->state, STATE_STOPPED);
+ 			if (NULL != cap_subs2)
+-				atomic_set(&cap_subs2->state, state_STOPPED);
+-			usX2Y_usbpcm_urbs_release(cap_subs);
++				atomic_set(&cap_subs2->state, STATE_STOPPED);
++			usx2y_usbpcm_urbs_release(cap_subs);
+ 			if (NULL != cap_subs2)
+-				usX2Y_usbpcm_urbs_release(cap_subs2);
++				usx2y_usbpcm_urbs_release(cap_subs2);
+ 		}
+ 	} else {
+-		struct snd_usX2Y_substream *playback_subs = subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+-		if (atomic_read(&playback_subs->state) < state_PREPARED) {
+-			atomic_set(&subs->state, state_STOPPED);
++		struct snd_usx2y_substream *playback_subs = subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
++		if (atomic_read(&playback_subs->state) < STATE_PREPARED) {
++			atomic_set(&subs->state, STATE_STOPPED);
+ 			if (NULL != cap_subs2)
+-				atomic_set(&cap_subs2->state, state_STOPPED);
+-			usX2Y_usbpcm_urbs_release(subs);
++				atomic_set(&cap_subs2->state, STATE_STOPPED);
++			usx2y_usbpcm_urbs_release(subs);
+ 			if (NULL != cap_subs2)
+-				usX2Y_usbpcm_urbs_release(cap_subs2);
++				usx2y_usbpcm_urbs_release(cap_subs2);
+ 		}
+ 	}
+-	mutex_unlock(&subs->usX2Y->pcm_mutex);
++	mutex_unlock(&subs->usx2y->pcm_mutex);
+ 	return 0;
+ }
+ 
+-static void usX2Y_usbpcm_subs_startup(struct snd_usX2Y_substream *subs)
++static void usx2y_usbpcm_subs_startup(struct snd_usx2y_substream *subs)
+ {
+-	struct usX2Ydev * usX2Y = subs->usX2Y;
+-	usX2Y->prepare_subs = subs;
++	struct usx2ydev * usx2y = subs->usx2y;
++	usx2y->prepare_subs = subs;
+ 	subs->urb[0]->start_frame = -1;
+-	smp_wmb();	// Make sure above modifications are seen by i_usX2Y_subs_startup()
+-	usX2Y_urbs_set_complete(usX2Y, i_usX2Y_usbpcm_subs_startup);
++	smp_wmb();	// Make sure above modifications are seen by i_usx2y_subs_startup()
++	usx2y_urbs_set_complete(usx2y, i_usx2y_usbpcm_subs_startup);
+ }
+ 
+-static int usX2Y_usbpcm_urbs_start(struct snd_usX2Y_substream *subs)
++static int usx2y_usbpcm_urbs_start(struct snd_usx2y_substream *subs)
+ {
+ 	int	p, u, err,
+ 		stream = subs->pcm_substream->stream;
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
++	struct usx2ydev *usx2y = subs->usx2y;
+ 
+ 	if (SNDRV_PCM_STREAM_CAPTURE == stream) {
+-		usX2Y->hwdep_pcm_shm->captured_iso_head = -1;
+-		usX2Y->hwdep_pcm_shm->captured_iso_frames = 0;
++		usx2y->hwdep_pcm_shm->captured_iso_head = -1;
++		usx2y->hwdep_pcm_shm->captured_iso_frames = 0;
+ 	}
+ 
+ 	for (p = 0; 3 >= (stream + p); p += 2) {
+-		struct snd_usX2Y_substream *subs = usX2Y->subs[stream + p];
++		struct snd_usx2y_substream *subs = usx2y->subs[stream + p];
+ 		if (subs != NULL) {
+-			if ((err = usX2Y_usbpcm_urbs_allocate(subs)) < 0)
++			if ((err = usx2y_usbpcm_urbs_allocate(subs)) < 0)
+ 				return err;
+ 			subs->completed_urb = NULL;
+ 		}
+ 	}
+ 
+ 	for (p = 0; p < 4; p++) {
+-		struct snd_usX2Y_substream *subs = usX2Y->subs[p];
+-		if (subs != NULL && atomic_read(&subs->state) >= state_PREPARED)
++		struct snd_usx2y_substream *subs = usx2y->subs[p];
++		if (subs != NULL && atomic_read(&subs->state) >= STATE_PREPARED)
+ 			goto start;
+ 	}
+ 
+  start:
+-	usX2Y_usbpcm_subs_startup(subs);
++	usx2y_usbpcm_subs_startup(subs);
+ 	for (u = 0; u < NRURBS; u++) {
+ 		for (p = 0; 3 >= (stream + p); p += 2) {
+-			struct snd_usX2Y_substream *subs = usX2Y->subs[stream + p];
++			struct snd_usx2y_substream *subs = usx2y->subs[stream + p];
+ 			if (subs != NULL) {
+ 				struct urb *urb = subs->urb[u];
+ 				if (usb_pipein(urb->pipe)) {
+ 					unsigned long pack;
+ 					if (0 == u)
+-						atomic_set(&subs->state, state_STARTING3);
+-					urb->dev = usX2Y->dev;
++						atomic_set(&subs->state, STATE_STARTING3);
++					urb->dev = usx2y->dev;
+ 					for (pack = 0; pack < nr_of_packs(); pack++) {
+ 						urb->iso_frame_desc[pack].offset = subs->maxpacksize * (pack + u * nr_of_packs());
+ 						urb->iso_frame_desc[pack].length = subs->maxpacksize;
+@@ -438,25 +438,25 @@ static int usX2Y_usbpcm_urbs_start(struct snd_usX2Y_substream *subs)
+ 					}  else {
+ 						snd_printdd("%i\n", urb->start_frame);
+ 						if (u == 0)
+-							usX2Y->wait_iso_frame = urb->start_frame;
++							usx2y->wait_iso_frame = urb->start_frame;
+ 					}
+ 					urb->transfer_flags = 0;
+ 				} else {
+-					atomic_set(&subs->state, state_STARTING1);
++					atomic_set(&subs->state, STATE_STARTING1);
+ 					break;
+ 				}			
+ 			}
+ 		}
+ 	}
+ 	err = 0;
+-	wait_event(usX2Y->prepare_wait_queue, NULL == usX2Y->prepare_subs);
+-	if (atomic_read(&subs->state) != state_PREPARED)
++	wait_event(usx2y->prepare_wait_queue, NULL == usx2y->prepare_subs);
++	if (atomic_read(&subs->state) != STATE_PREPARED)
+ 		err = -EPIPE;
+ 		
+  cleanup:
+ 	if (err) {
+-		usX2Y_subs_startup_finish(usX2Y);	// Call it now
+-		usX2Y_clients_stop(usX2Y);		// something is completely wroong > stop evrything			
++		usx2y_subs_startup_finish(usx2y);	// Call it now
++		usx2y_clients_stop(usx2y);		// something is completely wrong -> stop everything
+ 	}
+ 	return err;
+ }
+@@ -466,69 +466,69 @@ static int usX2Y_usbpcm_urbs_start(struct snd_usX2Y_substream *subs)
+  *
+  * set format and initialize urbs
+  */
+-static int snd_usX2Y_usbpcm_prepare(struct snd_pcm_substream *substream)
++static int snd_usx2y_usbpcm_prepare(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+-	struct snd_usX2Y_substream *subs = runtime->private_data;
+-	struct usX2Ydev *usX2Y = subs->usX2Y;
+-	struct snd_usX2Y_substream *capsubs = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
++	struct snd_usx2y_substream *subs = runtime->private_data;
++	struct usx2ydev *usx2y = subs->usx2y;
++	struct snd_usx2y_substream *capsubs = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ 	int err = 0;
+-	snd_printdd("snd_usX2Y_pcm_prepare(%p)\n", substream);
++	snd_printdd("snd_usx2y_pcm_prepare(%p)\n", substream);
+ 
+-	if (NULL == usX2Y->hwdep_pcm_shm) {
+-		usX2Y->hwdep_pcm_shm = alloc_pages_exact(sizeof(struct snd_usX2Y_hwdep_pcm_shm),
++	if (NULL == usx2y->hwdep_pcm_shm) {
++		usx2y->hwdep_pcm_shm = alloc_pages_exact(sizeof(struct snd_usx2y_hwdep_pcm_shm),
+ 							 GFP_KERNEL);
+-		if (!usX2Y->hwdep_pcm_shm)
++		if (!usx2y->hwdep_pcm_shm)
+ 			return -ENOMEM;
+-		memset(usX2Y->hwdep_pcm_shm, 0, sizeof(struct snd_usX2Y_hwdep_pcm_shm));
++		memset(usx2y->hwdep_pcm_shm, 0, sizeof(struct snd_usx2y_hwdep_pcm_shm));
+ 	}
+ 
+-	mutex_lock(&usX2Y->pcm_mutex);
+-	usX2Y_subs_prepare(subs);
++	mutex_lock(&usx2y->pcm_mutex);
++	usx2y_subs_prepare(subs);
+ // Start hardware streams
+ // SyncStream first....
+-	if (atomic_read(&capsubs->state) < state_PREPARED) {
+-		if (usX2Y->format != runtime->format)
+-			if ((err = usX2Y_format_set(usX2Y, runtime->format)) < 0)
++	if (atomic_read(&capsubs->state) < STATE_PREPARED) {
++		if (usx2y->format != runtime->format)
++			if ((err = usx2y_format_set(usx2y, runtime->format)) < 0)
+ 				goto up_prepare_mutex;
+-		if (usX2Y->rate != runtime->rate)
+-			if ((err = usX2Y_rate_set(usX2Y, runtime->rate)) < 0)
++		if (usx2y->rate != runtime->rate)
++			if ((err = usx2y_rate_set(usx2y, runtime->rate)) < 0)
+ 				goto up_prepare_mutex;
+ 		snd_printdd("starting capture pipe for %s\n", subs == capsubs ?
+ 			    "self" : "playpipe");
+-		if (0 > (err = usX2Y_usbpcm_urbs_start(capsubs)))
++		if (0 > (err = usx2y_usbpcm_urbs_start(capsubs)))
+ 			goto up_prepare_mutex;
+ 	}
+ 
+ 	if (subs != capsubs) {
+-		usX2Y->hwdep_pcm_shm->playback_iso_start = -1;
+-		if (atomic_read(&subs->state) < state_PREPARED) {
+-			while (usX2Y_iso_frames_per_buffer(runtime, usX2Y) >
+-			       usX2Y->hwdep_pcm_shm->captured_iso_frames) {
++		usx2y->hwdep_pcm_shm->playback_iso_start = -1;
++		if (atomic_read(&subs->state) < STATE_PREPARED) {
++			while (usx2y_iso_frames_per_buffer(runtime, usx2y) >
++			       usx2y->hwdep_pcm_shm->captured_iso_frames) {
+ 				snd_printdd("Wait: iso_frames_per_buffer=%i,"
+ 					    "captured_iso_frames=%i\n",
+-					    usX2Y_iso_frames_per_buffer(runtime, usX2Y),
+-					    usX2Y->hwdep_pcm_shm->captured_iso_frames);
++					    usx2y_iso_frames_per_buffer(runtime, usx2y),
++					    usx2y->hwdep_pcm_shm->captured_iso_frames);
+ 				if (msleep_interruptible(10)) {
+ 					err = -ERESTARTSYS;
+ 					goto up_prepare_mutex;
+ 				}
+ 			} 
+-			if (0 > (err = usX2Y_usbpcm_urbs_start(subs)))
++			if (0 > (err = usx2y_usbpcm_urbs_start(subs)))
+ 				goto up_prepare_mutex;
+ 		}
+ 		snd_printdd("Ready: iso_frames_per_buffer=%i,captured_iso_frames=%i\n",
+-			    usX2Y_iso_frames_per_buffer(runtime, usX2Y),
+-			    usX2Y->hwdep_pcm_shm->captured_iso_frames);
++			    usx2y_iso_frames_per_buffer(runtime, usx2y),
++			    usx2y->hwdep_pcm_shm->captured_iso_frames);
+ 	} else
+-		usX2Y->hwdep_pcm_shm->capture_iso_start = -1;
++		usx2y->hwdep_pcm_shm->capture_iso_start = -1;
+ 
+  up_prepare_mutex:
+-	mutex_unlock(&usX2Y->pcm_mutex);
++	mutex_unlock(&usx2y->pcm_mutex);
+ 	return err;
+ }
+ 
+-static const struct snd_pcm_hardware snd_usX2Y_4c =
++static const struct snd_pcm_hardware snd_usx2y_4c =
+ {
+ 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
+ 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
+@@ -549,17 +549,17 @@ static const struct snd_pcm_hardware snd_usX2Y_4c =
+ 
+ 
+ 
+-static int snd_usX2Y_usbpcm_open(struct snd_pcm_substream *substream)
++static int snd_usx2y_usbpcm_open(struct snd_pcm_substream *substream)
+ {
+-	struct snd_usX2Y_substream	*subs = ((struct snd_usX2Y_substream **)
++	struct snd_usx2y_substream	*subs = ((struct snd_usx2y_substream **)
+ 					 snd_pcm_substream_chip(substream))[substream->stream];
+ 	struct snd_pcm_runtime	*runtime = substream->runtime;
+ 
+-	if (!(subs->usX2Y->chip_status & USX2Y_STAT_CHIP_MMAP_PCM_URBS))
++	if (!(subs->usx2y->chip_status & USX2Y_STAT_CHIP_MMAP_PCM_URBS))
+ 		return -EBUSY;
+ 
+-	runtime->hw = SNDRV_PCM_STREAM_PLAYBACK == substream->stream ? snd_usX2Y_2c :
+-		(subs->usX2Y->subs[3] ? snd_usX2Y_4c : snd_usX2Y_2c);
++	runtime->hw = SNDRV_PCM_STREAM_PLAYBACK == substream->stream ? snd_usx2y_2c :
++		(subs->usx2y->subs[3] ? snd_usx2y_4c : snd_usx2y_2c);
+ 	runtime->private_data = subs;
+ 	subs->pcm_substream = substream;
+ 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 1000, 200000);
+@@ -567,35 +567,35 @@ static int snd_usX2Y_usbpcm_open(struct snd_pcm_substream *substream)
+ }
+ 
+ 
+-static int snd_usX2Y_usbpcm_close(struct snd_pcm_substream *substream)
++static int snd_usx2y_usbpcm_close(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+-	struct snd_usX2Y_substream *subs = runtime->private_data;
++	struct snd_usx2y_substream *subs = runtime->private_data;
+ 
+ 	subs->pcm_substream = NULL;
+ 	return 0;
+ }
+ 
+ 
+-static const struct snd_pcm_ops snd_usX2Y_usbpcm_ops =
++static const struct snd_pcm_ops snd_usx2y_usbpcm_ops =
+ {
+-	.open =		snd_usX2Y_usbpcm_open,
+-	.close =	snd_usX2Y_usbpcm_close,
+-	.hw_params =	snd_usX2Y_pcm_hw_params,
+-	.hw_free =	snd_usX2Y_usbpcm_hw_free,
+-	.prepare =	snd_usX2Y_usbpcm_prepare,
+-	.trigger =	snd_usX2Y_pcm_trigger,
+-	.pointer =	snd_usX2Y_pcm_pointer,
++	.open =		snd_usx2y_usbpcm_open,
++	.close =	snd_usx2y_usbpcm_close,
++	.hw_params =	snd_usx2y_pcm_hw_params,
++	.hw_free =	snd_usx2y_usbpcm_hw_free,
++	.prepare =	snd_usx2y_usbpcm_prepare,
++	.trigger =	snd_usx2y_pcm_trigger,
++	.pointer =	snd_usx2y_pcm_pointer,
+ };
+ 
+ 
+-static int usX2Y_pcms_busy_check(struct snd_card *card)
++static int usx2y_pcms_busy_check(struct snd_card *card)
+ {
+-	struct usX2Ydev	*dev = usX2Y(card);
++	struct usx2ydev	*dev = usx2y(card);
+ 	int i;
+ 
+ 	for (i = 0; i < dev->pcm_devs * 2; i++) {
+-		struct snd_usX2Y_substream *subs = dev->subs[i];
++		struct snd_usx2y_substream *subs = dev->subs[i];
+ 		if (subs && subs->pcm_substream &&
+ 		    SUBSTREAM_BUSY(subs->pcm_substream))
+ 			return -EBUSY;
+@@ -603,102 +603,102 @@ static int usX2Y_pcms_busy_check(struct snd_card *card)
+ 	return 0;
+ }
+ 
+-static int snd_usX2Y_hwdep_pcm_open(struct snd_hwdep *hw, struct file *file)
++static int snd_usx2y_hwdep_pcm_open(struct snd_hwdep *hw, struct file *file)
+ {
+ 	struct snd_card *card = hw->card;
+ 	int err;
+ 
+-	mutex_lock(&usX2Y(card)->pcm_mutex);
+-	err = usX2Y_pcms_busy_check(card);
++	mutex_lock(&usx2y(card)->pcm_mutex);
++	err = usx2y_pcms_busy_check(card);
+ 	if (!err)
+-		usX2Y(card)->chip_status |= USX2Y_STAT_CHIP_MMAP_PCM_URBS;
+-	mutex_unlock(&usX2Y(card)->pcm_mutex);
++		usx2y(card)->chip_status |= USX2Y_STAT_CHIP_MMAP_PCM_URBS;
++	mutex_unlock(&usx2y(card)->pcm_mutex);
+ 	return err;
+ }
+ 
+ 
+-static int snd_usX2Y_hwdep_pcm_release(struct snd_hwdep *hw, struct file *file)
++static int snd_usx2y_hwdep_pcm_release(struct snd_hwdep *hw, struct file *file)
+ {
+ 	struct snd_card *card = hw->card;
+ 	int err;
+ 
+-	mutex_lock(&usX2Y(card)->pcm_mutex);
+-	err = usX2Y_pcms_busy_check(card);
++	mutex_lock(&usx2y(card)->pcm_mutex);
++	err = usx2y_pcms_busy_check(card);
+ 	if (!err)
+-		usX2Y(hw->card)->chip_status &= ~USX2Y_STAT_CHIP_MMAP_PCM_URBS;
+-	mutex_unlock(&usX2Y(card)->pcm_mutex);
++		usx2y(hw->card)->chip_status &= ~USX2Y_STAT_CHIP_MMAP_PCM_URBS;
++	mutex_unlock(&usx2y(card)->pcm_mutex);
+ 	return err;
+ }
+ 
+ 
+-static void snd_usX2Y_hwdep_pcm_vm_open(struct vm_area_struct *area)
++static void snd_usx2y_hwdep_pcm_vm_open(struct vm_area_struct *area)
+ {
+ }
+ 
+ 
+-static void snd_usX2Y_hwdep_pcm_vm_close(struct vm_area_struct *area)
++static void snd_usx2y_hwdep_pcm_vm_close(struct vm_area_struct *area)
+ {
+ }
+ 
+ 
+-static vm_fault_t snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf)
++static vm_fault_t snd_usx2y_hwdep_pcm_vm_fault(struct vm_fault *vmf)
+ {
+ 	unsigned long offset;
+ 	void *vaddr;
+ 
+ 	offset = vmf->pgoff << PAGE_SHIFT;
+-	vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
++	vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
+ 	vmf->page = virt_to_page(vaddr);
+ 	get_page(vmf->page);
+ 	return 0;
+ }
+ 
+ 
+-static const struct vm_operations_struct snd_usX2Y_hwdep_pcm_vm_ops = {
+-	.open = snd_usX2Y_hwdep_pcm_vm_open,
+-	.close = snd_usX2Y_hwdep_pcm_vm_close,
+-	.fault = snd_usX2Y_hwdep_pcm_vm_fault,
++static const struct vm_operations_struct snd_usx2y_hwdep_pcm_vm_ops = {
++	.open = snd_usx2y_hwdep_pcm_vm_open,
++	.close = snd_usx2y_hwdep_pcm_vm_close,
++	.fault = snd_usx2y_hwdep_pcm_vm_fault,
+ };
+ 
+ 
+-static int snd_usX2Y_hwdep_pcm_mmap(struct snd_hwdep * hw, struct file *filp, struct vm_area_struct *area)
++static int snd_usx2y_hwdep_pcm_mmap(struct snd_hwdep * hw, struct file *filp, struct vm_area_struct *area)
+ {
+ 	unsigned long	size = (unsigned long)(area->vm_end - area->vm_start);
+-	struct usX2Ydev	*usX2Y = hw->private_data;
++	struct usx2ydev	*usx2y = hw->private_data;
+ 
+-	if (!(usX2Y->chip_status & USX2Y_STAT_CHIP_INIT))
++	if (!(usx2y->chip_status & USX2Y_STAT_CHIP_INIT))
+ 		return -EBUSY;
+ 
+ 	/* if userspace tries to mmap beyond end of our buffer, fail */ 
+-	if (size > PAGE_ALIGN(sizeof(struct snd_usX2Y_hwdep_pcm_shm))) {
+-		snd_printd("%lu > %lu\n", size, (unsigned long)sizeof(struct snd_usX2Y_hwdep_pcm_shm)); 
++	if (size > PAGE_ALIGN(sizeof(struct snd_usx2y_hwdep_pcm_shm))) {
++		snd_printd("%lu > %lu\n", size, (unsigned long)sizeof(struct snd_usx2y_hwdep_pcm_shm)); 
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!usX2Y->hwdep_pcm_shm) {
++	if (!usx2y->hwdep_pcm_shm) {
+ 		return -ENODEV;
+ 	}
+-	area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops;
++	area->vm_ops = &snd_usx2y_hwdep_pcm_vm_ops;
+ 	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ 	area->vm_private_data = hw->private_data;
+ 	return 0;
+ }
+ 
+ 
+-static void snd_usX2Y_hwdep_pcm_private_free(struct snd_hwdep *hwdep)
++static void snd_usx2y_hwdep_pcm_private_free(struct snd_hwdep *hwdep)
+ {
+-	struct usX2Ydev *usX2Y = hwdep->private_data;
+-	if (NULL != usX2Y->hwdep_pcm_shm)
+-		free_pages_exact(usX2Y->hwdep_pcm_shm, sizeof(struct snd_usX2Y_hwdep_pcm_shm));
++	struct usx2ydev *usx2y = hwdep->private_data;
++	if (NULL != usx2y->hwdep_pcm_shm)
++		free_pages_exact(usx2y->hwdep_pcm_shm, sizeof(struct snd_usx2y_hwdep_pcm_shm));
+ }
+ 
+ 
+-int usX2Y_hwdep_pcm_new(struct snd_card *card)
++int usx2y_hwdep_pcm_new(struct snd_card *card)
+ {
+ 	int err;
+ 	struct snd_hwdep *hw;
+ 	struct snd_pcm *pcm;
+-	struct usb_device *dev = usX2Y(card)->dev;
++	struct usb_device *dev = usx2y(card)->dev;
+ 	if (1 != nr_of_packs())
+ 		return 0;
+ 
+@@ -706,11 +706,11 @@ int usX2Y_hwdep_pcm_new(struct snd_card *card)
+ 		return err;
+ 
+ 	hw->iface = SNDRV_HWDEP_IFACE_USX2Y_PCM;
+-	hw->private_data = usX2Y(card);
+-	hw->private_free = snd_usX2Y_hwdep_pcm_private_free;
+-	hw->ops.open = snd_usX2Y_hwdep_pcm_open;
+-	hw->ops.release = snd_usX2Y_hwdep_pcm_release;
+-	hw->ops.mmap = snd_usX2Y_hwdep_pcm_mmap;
++	hw->private_data = usx2y(card);
++	hw->private_free = snd_usx2y_hwdep_pcm_private_free;
++	hw->ops.open = snd_usx2y_hwdep_pcm_open;
++	hw->ops.release = snd_usx2y_hwdep_pcm_release;
++	hw->ops.mmap = snd_usx2y_hwdep_pcm_mmap;
+ 	hw->exclusive = 1;
+ 	sprintf(hw->name, "/dev/bus/usb/%03d/%03d/hwdeppcm", dev->bus->busnum, dev->devnum);
+ 
+@@ -718,10 +718,10 @@ int usX2Y_hwdep_pcm_new(struct snd_card *card)
+ 	if (err < 0) {
+ 		return err;
+ 	}
+-	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usX2Y_usbpcm_ops);
+-	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usX2Y_usbpcm_ops);
++	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usx2y_usbpcm_ops);
++	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usx2y_usbpcm_ops);
+ 
+-	pcm->private_data = usX2Y(card)->subs;
++	pcm->private_data = usx2y(card)->subs;
+ 	pcm->info_flags = 0;
+ 
+ 	sprintf(pcm->name, NAME_ALLCAPS" hwdep Audio");
+@@ -739,7 +739,7 @@ int usX2Y_hwdep_pcm_new(struct snd_card *card)
+ 
+ #else
+ 
+-int usX2Y_hwdep_pcm_new(struct snd_card *card)
++int usx2y_hwdep_pcm_new(struct snd_card *card)
+ {
+ 	return 0;
+ }
+diff --git a/sound/usb/usx2y/usx2yhwdeppcm.h b/sound/usb/usx2y/usx2yhwdeppcm.h
+index eb5a46466f0e6..731b1c5a34741 100644
+--- a/sound/usb/usx2y/usx2yhwdeppcm.h
++++ b/sound/usb/usx2y/usx2yhwdeppcm.h
+@@ -4,7 +4,7 @@
+ #define MAXSTRIDE 3
+ 
+ #define SSS (((MAXPACK*MAXBUFFERMS*MAXSTRIDE + 4096) / 4096) * 4096)
+-struct snd_usX2Y_hwdep_pcm_shm {
++struct snd_usx2y_hwdep_pcm_shm {
+ 	char playback[SSS];
+ 	char capture0x8[SSS];
+ 	char capture0xA[SSS];
+@@ -20,4 +20,4 @@ struct snd_usX2Y_hwdep_pcm_shm {
+ 	int capture_iso_start;
+ };
+ 
+-int usX2Y_hwdep_pcm_new(struct snd_card *card);
++int usx2y_hwdep_pcm_new(struct snd_card *card);
+diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
+index 23dc5014e7119..a61be9c075656 100644
+--- a/tools/perf/util/scripting-engines/trace-event-python.c
++++ b/tools/perf/util/scripting-engines/trace-event-python.c
+@@ -687,7 +687,7 @@ static void set_sample_datasrc_in_dict(PyObject *dict,
+ 			_PyUnicode_FromString(decode));
+ }
+ 
+-static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
++static void regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
+ {
+ 	unsigned int i = 0, r;
+ 	int printed = 0;
+@@ -695,7 +695,7 @@ static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
+ 	bf[0] = 0;
+ 
+ 	if (!regs || !regs->regs)
+-		return 0;
++		return;
+ 
+ 	for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
+ 		u64 val = regs->regs[i++];
+@@ -704,8 +704,6 @@ static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
+ 				     "%5s:0x%" PRIx64 " ",
+ 				     perf_reg_name(r), val);
+ 	}
+-
+-	return printed;
+ }
+ 
+ static void set_regs_in_dict(PyObject *dict,
+@@ -713,7 +711,16 @@ static void set_regs_in_dict(PyObject *dict,
+ 			     struct evsel *evsel)
+ {
+ 	struct perf_event_attr *attr = &evsel->core.attr;
+-	char bf[512];
++
++	/*
++	 * The value 28 is a constant size sufficient to print one
++	 * register value; it corresponds to:
++	 * 16 chars to specify a 64-bit register in hexadecimal,
++	 * 2 chars for prepending "0x" to the hexadecimal value, and
++	 * 10 chars for the register name.
++	 */
++	int size = __sw_hweight64(attr->sample_regs_intr) * 28;
++	char bf[size];
+ 
+ 	regs_map(&sample->intr_regs, attr->sample_regs_intr, bf, sizeof(bf));
+ 
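[Editorial note] For illustration, the arithmetic behind the new variable-length buffer: with the 28-byte-per-register estimate above, a mask of 16 sampled registers needs 16 * 28 = 448 bytes (which the old fixed char bf[512] happened to fit), while a 40-register mask needs 1120 bytes and would have overflowed it. A minimal userspace sketch of the sizing, with __builtin_popcountll standing in for the kernel's __sw_hweight64():

	#include <stdint.h>

	/* Bytes needed to format every register named in the sample mask. */
	static inline int regs_buf_size(uint64_t sample_regs_mask)
	{
		return __builtin_popcountll(sample_regs_mask) * 28;
	}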
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
+index fc5bf4870d8e6..01e827c31169d 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
+@@ -50,8 +50,6 @@ static int no_handler_test(void)
+ 
+ 	event_close(&event);
+ 
+-	dump_ebb_state();
+-
+ 	/* The real test is that we never took an EBB at 0x0 */
+ 
+ 	return 0;
+diff --git a/tools/testing/selftests/timers/rtcpie.c b/tools/testing/selftests/timers/rtcpie.c
+index 47b5bad1b3933..4ef2184f15588 100644
+--- a/tools/testing/selftests/timers/rtcpie.c
++++ b/tools/testing/selftests/timers/rtcpie.c
+@@ -18,6 +18,8 @@
+ #include <stdlib.h>
+ #include <errno.h>
+ 
++#include "../kselftest.h"
++
+ /*
+  * This expects the new RTC class driver framework, working with
+  * clocks that will often not be clones of what the PC-AT had.
+@@ -35,8 +37,14 @@ int main(int argc, char **argv)
+ 	switch (argc) {
+ 	case 2:
+ 		rtc = argv[1];
+-		/* FALLTHROUGH */
++		break;
+ 	case 1:
++		fd = open(default_rtc, O_RDONLY);
++		if (fd == -1) {
++			printf("Default RTC %s does not exist. Test Skipped!\n", default_rtc);
++			exit(KSFT_SKIP);
++		}
++		close(fd);
+ 		break;
+ 	default:
+ 		fprintf(stderr, "usage:  rtctest [rtcdev] [d]\n");
+diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
+index f08f5e82460b1..0be80c213f7f2 100644
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -186,7 +186,6 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
+ 		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+ 			r = kvm_io_bus_unregister_dev(kvm,
+ 				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
+-			kvm_iodevice_destructor(&dev->dev);
+ 
+ 			/*
+ 			 * On failure, unregister destroys all devices on the
+@@ -196,6 +195,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
+ 			 */
+ 			if (r)
+ 				break;
++			kvm_iodevice_destructor(&dev->dev);
+ 		}
+ 	}
+ 
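[Editorial note] The reordering above matters because, per the comment retained in the hunk, a failed kvm_io_bus_unregister_dev() already destroys every device on the bus; destroying the device first and then hitting that failure path would free it twice. A standalone model of the hazard (hypothetical names, not KVM code):

	#include <stdio.h>

	struct dev { int destroyed; };

	static void destructor(struct dev *d)
	{
		d->destroyed++;		/* a second call models a double free */
	}

	/* On failure, unregister destroys all devices itself. */
	static int unregister_dev(struct dev *d, int fail)
	{
		if (fail) {
			destructor(d);
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct dev d = { 0 };

		/* Fixed ordering: destroy only after a successful unregister. */
		if (unregister_dev(&d, 1) == 0)
			destructor(&d);
		printf("destroyed %d time(s)\n", d.destroyed);	/* 1, not 2 */
		return 0;
	}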


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-07-19 11:16 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-07-19 11:16 UTC (permalink / raw
  To: gentoo-commits

commit:     ba205b2adcbb5f1df739bcfaa5502cb8304108e4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jul 19 11:16:07 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jul 19 11:16:07 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ba205b2a

Linux patch 5.12.18

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1017_linux-5.12.18.patch | 11124 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11128 insertions(+)

diff --git a/0000_README b/0000_README
index 10634ea..15e30b2 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch:  1016_linux-5.12.17.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.17
 
+Patch:  1017_linux-5.12.18.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.18
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1017_linux-5.12.18.patch b/1017_linux-5.12.18.patch
new file mode 100644
index 0000000..31b4fe1
--- /dev/null
+++ b/1017_linux-5.12.18.patch
@@ -0,0 +1,11124 @@
+diff --git a/Documentation/Makefile b/Documentation/Makefile
+index 9c42dde97671f..c3feb657b6548 100644
+--- a/Documentation/Makefile
++++ b/Documentation/Makefile
+@@ -76,7 +76,7 @@ quiet_cmd_sphinx = SPHINX  $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
+ 	PYTHONDONTWRITEBYTECODE=1 \
+ 	BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
+ 	$(PYTHON3) $(srctree)/scripts/jobserver-exec \
+-	$(SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
++	$(CONFIG_SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
+ 	$(SPHINXBUILD) \
+ 	-b $2 \
+ 	-c $(abspath $(srctree)/$(src)) \
+diff --git a/Makefile b/Makefile
+index f1d0775925cc6..b708d7c665e12 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
+index 57420356ce4c1..6b3daba60987e 100644
+--- a/arch/alpha/include/uapi/asm/socket.h
++++ b/arch/alpha/include/uapi/asm/socket.h
+@@ -127,6 +127,8 @@
+ #define SO_PREFER_BUSY_POLL	69
+ #define SO_BUSY_POLL_BUDGET	70
+ 
++#define SO_NETNS_COOKIE		71
++
+ #if !defined(__KERNEL__)
+ 
+ #if __BITS_PER_LONG == 64
+diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
+index 61c97d3b58c70..c995d1f4594f6 100644
+--- a/arch/arm64/include/asm/tlb.h
++++ b/arch/arm64/include/asm/tlb.h
+@@ -28,6 +28,10 @@ static void tlb_flush(struct mmu_gather *tlb);
+  */
+ static inline int tlb_get_level(struct mmu_gather *tlb)
+ {
++	/* The TTL field is only valid for the leaf entry. */
++	if (tlb->freed_tables)
++		return 0;
++
+ 	if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
+ 				   tlb->cleared_puds ||
+ 				   tlb->cleared_p4ds))
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index e89d63cd92d10..ab73622b14dd6 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -425,6 +425,8 @@ config MACH_INGENIC_SOC
+ 	select MIPS_GENERIC
+ 	select MACH_INGENIC
+ 	select SYS_SUPPORTS_ZBOOT_UART16550
++	select CPU_SUPPORTS_CPUFREQ
++	select MIPS_EXTERNAL_TIMER
+ 
+ config LANTIQ
+ 	bool "Lantiq based platforms"
+diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
+index 8877c62609de5..3a4eaf1f3f487 100644
+--- a/arch/mips/boot/dts/ingenic/ci20.dts
++++ b/arch/mips/boot/dts/ingenic/ci20.dts
+@@ -525,10 +525,10 @@
+ 
+ &tcu {
+ 	/*
+-	 * 750 kHz for the system timer and 3 MHz for the clocksource,
++	 * 750 kHz for the system timer and clocksource,
+ 	 * use channel #0 for the system timer, #1 for the clocksource.
+ 	 */
+ 	assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+ 					  <&tcu TCU_CLK_OST>;
+-	assigned-clock-rates = <750000>, <3000000>, <3000000>;
++	assigned-clock-rates = <750000>, <750000>, <3000000>;
+ };
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 336e02b3b3cea..3d71081afc55f 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -64,6 +64,8 @@
+ 	((MIPS_ISA_REV >= (ge)) && (MIPS_ISA_REV < (lt)))
+ #define __isa_range_or_flag(ge, lt, flag) \
+ 	(__isa_range(ge, lt) || ((MIPS_ISA_REV < (lt)) && __isa(flag)))
++#define __isa_range_and_ase(ge, lt, ase) \
++	(__isa_range(ge, lt) && __ase(ase))
+ 
+ /*
+  * SMP assumption: Options of CPU 0 are a superset of all processors.
+@@ -421,7 +423,7 @@
+ #endif
+ 
+ #ifndef cpu_has_mipsmt
+-#define cpu_has_mipsmt		__isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
++#define cpu_has_mipsmt		__isa_range_and_ase(2, 6, MIPS_ASE_MIPSMT)
+ #endif
+ 
+ #ifndef cpu_has_vp
+diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
+index 10e3be870df78..c2144409c0c40 100644
+--- a/arch/mips/include/asm/hugetlb.h
++++ b/arch/mips/include/asm/hugetlb.h
+@@ -46,7 +46,13 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ 					 unsigned long addr, pte_t *ptep)
+ {
+-	flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
++	/*
++	 * Clear the huge pte entry first, so that other SMP threads will
++	 * not see the old pte entry after flush_tlb_page finishes and
++	 * before the new huge pte entry is set
++	 */
++	huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
++	flush_tlb_page(vma, addr);
+ }
+ 
+ #define __HAVE_ARCH_HUGE_PTE_NONE
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index 9c8099a6ffed1..acdf8c69220b0 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -2077,7 +2077,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
+ ({ int __res;								\
+ 	__asm__ __volatile__(						\
+ 		".set\tpush\n\t"					\
+-		".set\tmips32r2\n\t"					\
++		".set\tmips32r5\n\t"					\
+ 		_ASM_SET_VIRT						\
+ 		"mfgc0\t%0, " #source ", %1\n\t"			\
+ 		".set\tpop"						\
+@@ -2090,7 +2090,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
+ ({ unsigned long long __res;						\
+ 	__asm__ __volatile__(						\
+ 		".set\tpush\n\t"					\
+-		".set\tmips64r2\n\t"					\
++		".set\tmips64r5\n\t"					\
+ 		_ASM_SET_VIRT						\
+ 		"dmfgc0\t%0, " #source ", %1\n\t"			\
+ 		".set\tpop"						\
+@@ -2103,7 +2103,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
+ do {									\
+ 	__asm__ __volatile__(						\
+ 		".set\tpush\n\t"					\
+-		".set\tmips32r2\n\t"					\
++		".set\tmips32r5\n\t"					\
+ 		_ASM_SET_VIRT						\
+ 		"mtgc0\t%z0, " #register ", %1\n\t"			\
+ 		".set\tpop"						\
+@@ -2115,7 +2115,7 @@ do {									\
+ do {									\
+ 	__asm__ __volatile__(						\
+ 		".set\tpush\n\t"					\
+-		".set\tmips64r2\n\t"					\
++		".set\tmips64r5\n\t"					\
+ 		_ASM_SET_VIRT						\
+ 		"dmtgc0\t%z0, " #register ", %1\n\t"			\
+ 		".set\tpop"						\
+diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
+index 8b18424b31208..d0cf997b4ba84 100644
+--- a/arch/mips/include/asm/pgalloc.h
++++ b/arch/mips/include/asm/pgalloc.h
+@@ -59,11 +59,15 @@ do {							\
+ 
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+-	pmd_t *pmd;
++	pmd_t *pmd = NULL;
++	struct page *pg;
+ 
+-	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
+-	if (pmd)
++	pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
++	if (pg) {
++		pgtable_pmd_page_ctor(pg);
++		pmd = (pmd_t *)page_address(pg);
+ 		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
++	}
+ 	return pmd;
+ }
+ 
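[Editorial note] The rewritten allocator does two new things: __GFP_ACCOUNT charges the PMD page to the caller's memory cgroup, and pgtable_pmd_page_ctor() initializes the per-page state for split page-table locks. For comparison, a sketch in the style of the generic pmd_alloc_one() helper of this era (assumed from asm-generic/pgalloc.h, not from this patch) — note that it also checks the constructor's return value, which can fail on configurations where the split ptlock is allocated separately:

	static inline pmd_t *pmd_alloc_one_sketch(struct mm_struct *mm,
						  unsigned long addr)
	{
		struct page *page;

		page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, 0);
		if (!page)
			return NULL;
		if (!pgtable_pmd_page_ctor(page)) {	/* split-ptlock init */
			__free_pages(page, 0);
			return NULL;
		}
		return (pmd_t *)page_address(page);
	}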
+diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
+index 2d949969313b6..cdf404a831b25 100644
+--- a/arch/mips/include/uapi/asm/socket.h
++++ b/arch/mips/include/uapi/asm/socket.h
+@@ -138,6 +138,8 @@
+ #define SO_PREFER_BUSY_POLL	69
+ #define SO_BUSY_POLL_BUDGET	70
+ 
++#define SO_NETNS_COOKIE		71
++
+ #if !defined(__KERNEL__)
+ 
+ #if __BITS_PER_LONG == 64
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 0ef240adefb59..630fcb4cb30e7 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1840,6 +1840,11 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+ 		 */
+ 		case PRID_COMP_INGENIC_D0:
+ 			c->isa_level &= ~MIPS_CPU_ISA_M32R2;
++
++			/* FPU is not properly detected on JZ4760(B). */
++			if (c->processor_id == 0x2ed0024f)
++				c->options |= MIPS_CPU_FPU;
++
+ 			fallthrough;
+ 
+ 		/*
+diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
+index a8f57bf012854..a791863ed352f 100644
+--- a/arch/mips/loongson64/numa.c
++++ b/arch/mips/loongson64/numa.c
+@@ -118,6 +118,9 @@ static void __init node_mem_init(unsigned int node)
+ 		if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
+ 			memblock_reserve((node_addrspace_offset | 0xfe000000),
+ 					 32 << 20);
++
++		/* Reserve pfn range 0~node[0]->node_start_pfn */
++		memblock_reserve(0, PAGE_SIZE * start_pfn);
+ 	}
+ }
+ 
+diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
+index f60904329bbcc..5b5351cdcb338 100644
+--- a/arch/parisc/include/uapi/asm/socket.h
++++ b/arch/parisc/include/uapi/asm/socket.h
+@@ -119,6 +119,8 @@
+ #define SO_PREFER_BUSY_POLL	0x4043
+ #define SO_BUSY_POLL_BUDGET	0x4044
+ 
++#define SO_NETNS_COOKIE		0x4045
++
+ #if !defined(__KERNEL__)
+ 
+ #if __BITS_PER_LONG == 64
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index aecfde829d5da..6274b864f363d 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -46,6 +46,8 @@
+ #    define SMPWMB      eieio
+ #endif
+ 
++/* clang defines this macro for a builtin, which will not work with runtime patching */
++#undef __lwsync
+ #define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+ #define dma_rmb()	__lwsync()
+ #define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index bb368257b55cb..178525cf686a5 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -199,9 +199,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
+ {
+ 	int is_exec = TRAP(regs) == 0x400;
+ 
+-	/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
+-	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
+-				      DSISR_PROTFAULT))) {
++	if (is_exec) {
+ 		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
+ 				    address >= TASK_SIZE ? "exec-protected" : "user",
+ 				    address,
+diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
+index 5f5fe63a3d1ce..7ba0840fc3b55 100644
+--- a/arch/powerpc/platforms/powernv/vas-window.c
++++ b/arch/powerpc/platforms/powernv/vas-window.c
+@@ -1093,9 +1093,9 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
+ 		/*
+ 		 * Process closes window during exit. In the case of
+ 		 * multithread application, the child thread can open
+-		 * window and can exit without closing it. Expects parent
+-		 * thread to use and close the window. So do not need
+-		 * to take pid reference for parent thread.
++		 * window and can exit without closing it, so take a tgid
++		 * reference until the window is closed to make sure the
++		 * tgid is not reused.
+ 		 */
+ 		txwin->tgid = find_get_pid(task_tgid_vnr(current));
+ 		/*
+@@ -1339,8 +1339,9 @@ int vas_win_close(struct vas_window *window)
+ 	/* if send window, drop reference to matching receive window */
+ 	if (window->tx_win) {
+ 		if (window->user_win) {
+-			/* Drop references to pid and mm */
++			/* Drop references to pid, tgid and mm */
+ 			put_pid(window->pid);
++			put_pid(window->tgid);
+ 			if (window->mm) {
+ 				mm_context_remove_vas_window(window->mm);
+ 				mmdrop(window->mm);
+diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
+index 848a22fbac20e..92675dc380fa2 100644
+--- a/arch/sparc/include/uapi/asm/socket.h
++++ b/arch/sparc/include/uapi/asm/socket.h
+@@ -120,6 +120,8 @@
+ #define SO_PREFER_BUSY_POLL	 0x0048
+ #define SO_BUSY_POLL_BUDGET	 0x0049
+ 
++#define SO_NETNS_COOKIE          0x0050
++
+ #if !defined(__KERNEL__)
+ 
+ 
+diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
+index 656460636ad34..e83af7bc75919 100644
+--- a/block/blk-rq-qos.c
++++ b/block/blk-rq-qos.c
+@@ -266,8 +266,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+ 	if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
+ 		return;
+ 
+-	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
+-	has_sleeper = !wq_has_single_sleeper(&rqw->wait);
++	has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
++						 TASK_UNINTERRUPTIBLE);
+ 	do {
+ 		/* The memory barrier in set_task_state saves us here. */
+ 		if (data.got_token)
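[Editorial note] The point of this change is atomicity: the old code enqueued the waiter and only then asked wq_has_single_sleeper(), outside the waitqueue lock, leaving a window in which a concurrent waker could slip between the two steps and a wakeup could be missed. A sketch of the assumed helper semantics after the change (a model only, not the scheduler's actual code): the emptiness check now happens under the same waitqueue lock that performs the enqueue.

	static bool prepare_to_wait_exclusive_model(struct wait_queue_head *wq_head,
						    struct wait_queue_entry *wq_entry,
						    int state)
	{
		bool was_empty;

		spin_lock_irq(&wq_head->lock);
		was_empty = list_empty(&wq_head->head);	/* checked atomically... */
		__add_wait_queue_entry_tail(wq_head, wq_entry);	/* ...with the add */
		set_current_state(state);
		spin_unlock_irq(&wq_head->lock);

		return was_empty;	/* caller: has_sleeper = !was_empty */
	}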
+diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
+index cb69b737cb499..56b695136977a 100644
+--- a/drivers/ata/ahci_sunxi.c
++++ b/drivers/ata/ahci_sunxi.c
+@@ -200,7 +200,7 @@ static void ahci_sunxi_start_engine(struct ata_port *ap)
+ }
+ 
+ static const struct ata_port_info ahci_sunxi_port_info = {
+-	.flags		= AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
++	.flags		= AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM,
+ 	.pio_mask	= ATA_PIO4,
+ 	.udma_mask	= ATA_UDMA6,
+ 	.port_ops	= &ahci_platform_ops,
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index eef637fd90b32..a59554e5b8b0f 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -3279,7 +3279,7 @@ static void __exit ia_module_exit(void)
+ {
+ 	pci_unregister_driver(&ia_driver);
+ 
+-        del_timer(&ia_timer);
++	del_timer_sync(&ia_timer);
+ }
+ 
+ module_init(ia_module_init);
+diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
+index 5c7e4df159b91..bc5a6ab6fa4b4 100644
+--- a/drivers/atm/nicstar.c
++++ b/drivers/atm/nicstar.c
+@@ -299,7 +299,7 @@ static void __exit nicstar_cleanup(void)
+ {
+ 	XPRINTK("nicstar: nicstar_cleanup() called.\n");
+ 
+-	del_timer(&ns_timer);
++	del_timer_sync(&ns_timer);
+ 
+ 	pci_unregister_driver(&nicstar_driver);
+ 
+@@ -527,6 +527,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
+ 	/* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
+ 	writel(0x00000000, card->membase + VPM);
+ 
++	card->intcnt = 0;
++	if (request_irq
++	    (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
++		pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
++		error = 9;
++		ns_init_card_error(card, error);
++		return error;
++	}
++
+ 	/* Initialize TSQ */
+ 	card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
+ 					   NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+@@ -753,15 +762,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
+ 
+ 	card->efbie = 1;
+ 
+-	card->intcnt = 0;
+-	if (request_irq
+-	    (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
+-		printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
+-		error = 9;
+-		ns_init_card_error(card, error);
+-		return error;
+-	}
+-
+ 	/* Register device */
+ 	card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
+ 					-1, NULL);
+@@ -839,10 +839,12 @@ static void ns_init_card_error(ns_dev *card, int error)
+ 			dev_kfree_skb_any(hb);
+ 	}
+ 	if (error >= 12) {
+-		kfree(card->rsq.org);
++		dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
++				card->rsq.org, card->rsq.dma);
+ 	}
+ 	if (error >= 11) {
+-		kfree(card->tsq.org);
++		dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
++				card->tsq.org, card->tsq.dma);
+ 	}
+ 	if (error >= 10) {
+ 		free_irq(card->pcidev->irq, card);
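[Editorial note] Two distinct fixes land in this hunk: the IRQ is now requested before the TSQ/RSQ queues are allocated (so the numbered error path unwinds in the right order), and the error path frees the queues with the allocator they came from. Buffers obtained with dma_alloc_coherent() carry a DMA handle and must be released with the matching call; a hedged sketch of the pairing, using the names from the driver:

	/* Allocation (as in ns_init_card): size, kernel VA and DMA handle. */
	card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
					   NS_TSQSIZE + NS_TSQ_ALIGNMENT,
					   &card->tsq.dma, GFP_KERNEL);

	/* Matching release — kfree(card->tsq.org) would be wrong here. */
	dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
			  card->tsq.org, card->tsq.dma);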
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index ddc7b86725cd7..a97e5c476adf1 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -270,6 +270,8 @@ static const struct usb_device_id blacklist_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0cf3, 0xe500), .driver_info = BTUSB_QCA_ROME |
++						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME |
+@@ -1747,6 +1749,13 @@ static void btusb_work(struct work_struct *work)
+ 			 * which work with WBS at all.
+ 			 */
+ 			new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
++			/* mSBC frames do not need to be aligned to the
++			 * SCO packet boundary. If Alt 3 is supported, use
++			 * it for HCI payloads >= 60 bytes so the air
++			 * packet data fills 60 bytes.
++			 */
++			if (new_alts == 1 && btusb_find_altsetting(data, 3))
++				new_alts = 3;
+ 		}
+ 
+ 		if (btusb_switch_alt_setting(hdev, new_alts) < 0)
+@@ -3377,11 +3386,6 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
+ 	struct btmtk_wmt_hdr *hdr;
+ 	int err;
+ 
+-	/* Submit control IN URB on demand to process the WMT event */
+-	err = btusb_mtk_submit_wmt_recv_urb(hdev);
+-	if (err < 0)
+-		return err;
+-
+ 	/* Send the WMT command and wait until the WMT event returns */
+ 	hlen = sizeof(*hdr) + wmt_params->dlen;
+ 	if (hlen > 255)
+@@ -3407,6 +3411,11 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
+ 		goto err_free_wc;
+ 	}
+ 
++	/* Submit control IN URB on demand to process the WMT event */
++	err = btusb_mtk_submit_wmt_recv_urb(hdev);
++	if (err < 0)
++		return err;
++
+ 	/* The vendor specific WMT commands are all answered by a vendor
+ 	 * specific event and will have the Command Status or Command
+ 	 * Complete as with usual HCI command flow control.
+@@ -4127,6 +4136,11 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
+ 	sent += size;
+ 	count -= size;
+ 
++	/* ep2 needs time to switch from the ACL function to the DFU
++	 * function, so add a 20ms delay here.
++	 */
++	msleep(20);
++
+ 	while (count) {
+ 		size = min_t(size_t, count, QCA_DFU_PACKET_LEN);
+ 
+@@ -4219,9 +4233,15 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
+ 	int err;
+ 
+ 	if (((ver->flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
+-		snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x_%04x.bin",
+-			 le32_to_cpu(ver->rom_version),
+-			 le16_to_cpu(ver->board_id));
++		/* if board_id equals 0, use the default nvm without a suffix */
++		if (le16_to_cpu(ver->board_id) == 0x0) {
++			snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
++				 le32_to_cpu(ver->rom_version));
++		} else {
++			snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x_%04x.bin",
++				le32_to_cpu(ver->rom_version),
++				le16_to_cpu(ver->board_id));
++		}
+ 	} else {
+ 		snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
+ 			 le32_to_cpu(ver->rom_version));
+diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
+index 32c334e34d553..e4ff3b50de7f3 100644
+--- a/drivers/char/ipmi/ipmi_watchdog.c
++++ b/drivers/char/ipmi/ipmi_watchdog.c
+@@ -371,16 +371,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg  *smi_msg,
+ 	data[0] = 0;
+ 	WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
+ 
+-	if ((ipmi_version_major > 1)
+-	    || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+-		/* This is an IPMI 1.5-only feature. */
+-		data[0] |= WDOG_DONT_STOP_ON_SET;
+-	} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+-		/*
+-		 * In ipmi 1.0, setting the timer stops the watchdog, we
+-		 * need to start it back up again.
+-		 */
+-		hbnow = 1;
++	if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
++		if ((ipmi_version_major > 1) ||
++		    ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
++			/* This is an IPMI 1.5-only feature. */
++			data[0] |= WDOG_DONT_STOP_ON_SET;
++		} else {
++			/*
++			 * In ipmi 1.0, setting the timer stops the watchdog, we
++			 * need to start it back up again.
++			 */
++			hbnow = 1;
++		}
+ 	}
+ 
+ 	data[1] = 0;
+diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
+index 9cfd00cf4e69a..81c0bc1e78af8 100644
+--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
+@@ -75,6 +75,7 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
+ 	DEF_RATE(".oco",       CLK_OCO,            8 * 1000 * 1000),
+ 
+ 	/* Core Clock Outputs */
++	DEF_FIXED("za2",       R8A77995_CLK_ZA2,   CLK_PLL0D3,     2, 1),
+ 	DEF_FIXED("z2",        R8A77995_CLK_Z2,    CLK_PLL0D3,     1, 1),
+ 	DEF_FIXED("ztr",       R8A77995_CLK_ZTR,   CLK_PLL1,       6, 1),
+ 	DEF_FIXED("zt",        R8A77995_CLK_ZT,    CLK_PLL1,       4, 1),
+diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
+index 3abafd78f7c8a..8b4e43659023a 100644
+--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
++++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
+@@ -128,10 +128,8 @@ static int rcar_usb2_clock_sel_resume(struct device *dev)
+ static int rcar_usb2_clock_sel_remove(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+-	struct usb2_clock_sel_priv *priv = platform_get_drvdata(pdev);
+ 
+ 	of_clk_del_provider(dev->of_node);
+-	clk_hw_unregister(&priv->hw);
+ 	pm_runtime_put(dev);
+ 	pm_runtime_disable(dev);
+ 
+@@ -164,9 +162,6 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->rsts))
+ 		return PTR_ERR(priv->rsts);
+ 
+-	pm_runtime_enable(dev);
+-	pm_runtime_get_sync(dev);
+-
+ 	clk = devm_clk_get(dev, "usb_extal");
+ 	if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
+ 		priv->extal = !!clk_get_rate(clk);
+@@ -183,6 +178,8 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
+ 		return -ENOENT;
+ 	}
+ 
++	pm_runtime_enable(dev);
++	pm_runtime_get_sync(dev);
+ 	platform_set_drvdata(pdev, priv);
+ 	dev_set_drvdata(dev, priv);
+ 
+@@ -193,11 +190,20 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
+ 	init.num_parents = 0;
+ 	priv->hw.init = &init;
+ 
+-	clk = clk_register(NULL, &priv->hw);
+-	if (IS_ERR(clk))
+-		return PTR_ERR(clk);
++	ret = devm_clk_hw_register(NULL, &priv->hw);
++	if (ret)
++		goto pm_put;
++
++	ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &priv->hw);
++	if (ret)
++		goto pm_put;
++
++	return 0;
+ 
+-	return of_clk_add_hw_provider(np, of_clk_hw_simple_get, &priv->hw);
++pm_put:
++	pm_runtime_put(dev);
++	pm_runtime_disable(dev);
++	return ret;
+ }
+ 
+ static const struct dev_pm_ops rcar_usb2_clock_sel_pm_ops = {
+diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
+index 4b31beefc9fc2..dc3f92678407b 100644
+--- a/drivers/clk/tegra/clk-periph-gate.c
++++ b/drivers/clk/tegra/clk-periph-gate.c
+@@ -48,18 +48,9 @@ static int clk_periph_is_enabled(struct clk_hw *hw)
+ 	return state;
+ }
+ 
+-static int clk_periph_enable(struct clk_hw *hw)
++static void clk_periph_enable_locked(struct clk_hw *hw)
+ {
+ 	struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+-	unsigned long flags = 0;
+-
+-	spin_lock_irqsave(&periph_ref_lock, flags);
+-
+-	gate->enable_refcnt[gate->clk_num]++;
+-	if (gate->enable_refcnt[gate->clk_num] > 1) {
+-		spin_unlock_irqrestore(&periph_ref_lock, flags);
+-		return 0;
+-	}
+ 
+ 	write_enb_set(periph_clk_to_bit(gate), gate);
+ 	udelay(2);
+@@ -78,6 +69,32 @@ static int clk_periph_enable(struct clk_hw *hw)
+ 		udelay(1);
+ 		writel_relaxed(0, gate->clk_base + LVL2_CLK_GATE_OVRE);
+ 	}
++}
++
++static void clk_periph_disable_locked(struct clk_hw *hw)
++{
++	struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
++
++	/*
++	 * If the peripheral is on the APB bus, read the APB bus to
++	 * flush the write operation. This avoids the peripheral being
++	 * accessed after its clock is disabled
++	 */
++	if (gate->flags & TEGRA_PERIPH_ON_APB)
++		tegra_read_chipid();
++
++	write_enb_clr(periph_clk_to_bit(gate), gate);
++}
++
++static int clk_periph_enable(struct clk_hw *hw)
++{
++	struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
++	unsigned long flags = 0;
++
++	spin_lock_irqsave(&periph_ref_lock, flags);
++
++	if (!gate->enable_refcnt[gate->clk_num]++)
++		clk_periph_enable_locked(hw);
+ 
+ 	spin_unlock_irqrestore(&periph_ref_lock, flags);
+ 
+@@ -91,21 +108,28 @@ static void clk_periph_disable(struct clk_hw *hw)
+ 
+ 	spin_lock_irqsave(&periph_ref_lock, flags);
+ 
+-	gate->enable_refcnt[gate->clk_num]--;
+-	if (gate->enable_refcnt[gate->clk_num] > 0) {
+-		spin_unlock_irqrestore(&periph_ref_lock, flags);
+-		return;
+-	}
++	WARN_ON(!gate->enable_refcnt[gate->clk_num]);
++
++	if (--gate->enable_refcnt[gate->clk_num] == 0)
++		clk_periph_disable_locked(hw);
++
++	spin_unlock_irqrestore(&periph_ref_lock, flags);
++}
++
++static void clk_periph_disable_unused(struct clk_hw *hw)
++{
++	struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
++	unsigned long flags = 0;
++
++	spin_lock_irqsave(&periph_ref_lock, flags);
+ 
+ 	/*
+-	 * If peripheral is in the APB bus then read the APB bus to
+-	 * flush the write operation in apb bus. This will avoid the
+-	 * peripheral access after disabling clock
++	 * Some clocks are duplicated and some of them are marked as critical,
++	 * like fuse and fuse_burn for example, thus the enable_refcnt will
++	 * be non-zero here if the "unused" duplicate is disabled by CCF.
+ 	 */
+-	if (gate->flags & TEGRA_PERIPH_ON_APB)
+-		tegra_read_chipid();
+-
+-	write_enb_clr(periph_clk_to_bit(gate), gate);
++	if (!gate->enable_refcnt[gate->clk_num])
++		clk_periph_disable_locked(hw);
+ 
+ 	spin_unlock_irqrestore(&periph_ref_lock, flags);
+ }
+@@ -114,6 +138,7 @@ const struct clk_ops tegra_clk_periph_gate_ops = {
+ 	.is_enabled = clk_periph_is_enabled,
+ 	.enable = clk_periph_enable,
+ 	.disable = clk_periph_disable,
++	.disable_unused = clk_periph_disable_unused,
+ };
+ 
+ struct clk *tegra_clk_register_periph_gate(const char *name,
+@@ -148,9 +173,6 @@ struct clk *tegra_clk_register_periph_gate(const char *name,
+ 	gate->enable_refcnt = enable_refcnt;
+ 	gate->regs = pregs;
+ 
+-	if (read_enb(gate) & periph_clk_to_bit(gate))
+-		enable_refcnt[clk_num]++;
+-
+ 	/* Data in .init is copied by clk_register(), so stack variable OK */
+ 	gate->hw.init = &init;
+ 
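[Editorial note] The comment in clk_periph_disable_unused() is the heart of this hunk: some Tegra gates are registered twice (fuse and fuse_burn share one hardware bit and one refcount slot), so the common clock framework's "disable unused clocks" sweep may hit a duplicate whose sibling is enabled and critical. A standalone model of why the shared refcount must gate the hardware write (illustrative names only):

	#include <stdio.h>

	static int shared_refcnt;
	static int hw_gate_on = 1;	/* left enabled by the bootloader */

	static void clk_enable_model(void)
	{
		if (!shared_refcnt++)
			hw_gate_on = 1;
	}

	static void clk_disable_unused_model(void)
	{
		/* Only gate the hardware if no duplicate holds a reference. */
		if (!shared_refcnt)
			hw_gate_on = 0;
	}

	int main(void)
	{
		clk_enable_model();		/* critical duplicate in use */
		clk_disable_unused_model();	/* CCF sweeps the unused copy */
		printf("gate stays %s\n", hw_gate_on ? "on" : "off");
		return 0;
	}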
+diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
+index 67620c7ecd9ee..79ca3aa072b70 100644
+--- a/drivers/clk/tegra/clk-periph.c
++++ b/drivers/clk/tegra/clk-periph.c
+@@ -100,6 +100,15 @@ static void clk_periph_disable(struct clk_hw *hw)
+ 	gate_ops->disable(gate_hw);
+ }
+ 
++static void clk_periph_disable_unused(struct clk_hw *hw)
++{
++	struct tegra_clk_periph *periph = to_clk_periph(hw);
++	const struct clk_ops *gate_ops = periph->gate_ops;
++	struct clk_hw *gate_hw = &periph->gate.hw;
++
++	gate_ops->disable_unused(gate_hw);
++}
++
+ static void clk_periph_restore_context(struct clk_hw *hw)
+ {
+ 	struct tegra_clk_periph *periph = to_clk_periph(hw);
+@@ -126,6 +135,7 @@ const struct clk_ops tegra_clk_periph_ops = {
+ 	.is_enabled = clk_periph_is_enabled,
+ 	.enable = clk_periph_enable,
+ 	.disable = clk_periph_disable,
++	.disable_unused = clk_periph_disable_unused,
+ 	.restore_context = clk_periph_restore_context,
+ };
+ 
+@@ -135,6 +145,7 @@ static const struct clk_ops tegra_clk_periph_nodiv_ops = {
+ 	.is_enabled = clk_periph_is_enabled,
+ 	.enable = clk_periph_enable,
+ 	.disable = clk_periph_disable,
++	.disable_unused = clk_periph_disable_unused,
+ 	.restore_context = clk_periph_restore_context,
+ };
+ 
+diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
+index c5cc0a2dac6ff..d709ecb7d8d7e 100644
+--- a/drivers/clk/tegra/clk-pll.c
++++ b/drivers/clk/tegra/clk-pll.c
+@@ -1131,7 +1131,8 @@ static int clk_pllu_enable(struct clk_hw *hw)
+ 	if (pll->lock)
+ 		spin_lock_irqsave(pll->lock, flags);
+ 
+-	_clk_pll_enable(hw);
++	if (!clk_pll_is_enabled(hw))
++		_clk_pll_enable(hw);
+ 
+ 	ret = clk_pll_wait_for_lock(pll);
+ 	if (ret < 0)
+@@ -1748,15 +1749,13 @@ static int clk_pllu_tegra114_enable(struct clk_hw *hw)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (clk_pll_is_enabled(hw))
+-		return 0;
+-
+ 	input_rate = clk_hw_get_rate(__clk_get_hw(osc));
+ 
+ 	if (pll->lock)
+ 		spin_lock_irqsave(pll->lock, flags);
+ 
+-	_clk_pll_enable(hw);
++	if (!clk_pll_is_enabled(hw))
++		_clk_pll_enable(hw);
+ 
+ 	ret = clk_pll_wait_for_lock(pll);
+ 	if (ret < 0)
+diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
+index bdf6f4a516176..74c1d894cca86 100644
+--- a/drivers/clk/tegra/clk-tegra124-emc.c
++++ b/drivers/clk/tegra/clk-tegra124-emc.c
+@@ -249,8 +249,10 @@ static int emc_set_timing(struct tegra_clk_emc *tegra,
+ 	div = timing->parent_rate / (timing->rate / 2) - 2;
+ 
+ 	err = tegra->prepare_timing_change(emc, timing->rate);
+-	if (err)
++	if (err) {
++		clk_disable_unprepare(timing->parent);
+ 		return err;
++	}
+ 
+ 	spin_lock_irqsave(tegra->lock, flags);
+ 
+diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
+index d0177824c518b..f4881764bf8f4 100644
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -352,7 +352,7 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
+ 	do {								\
+ 		_val = read_sysreg(reg);				\
+ 		_retries--;						\
+-	} while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries);	\
++	} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries);	\
+ 									\
+ 	WARN_ON_ONCE(!_retries);					\
+ 	_val;								\
+diff --git a/drivers/extcon/extcon-intel-mrfld.c b/drivers/extcon/extcon-intel-mrfld.c
+index f47016fb28a84..cd1a5f230077c 100644
+--- a/drivers/extcon/extcon-intel-mrfld.c
++++ b/drivers/extcon/extcon-intel-mrfld.c
+@@ -197,6 +197,7 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
+ 	struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ 	struct regmap *regmap = pmic->regmap;
+ 	struct mrfld_extcon_data *data;
++	unsigned int status;
+ 	unsigned int id;
+ 	int irq, ret;
+ 
+@@ -244,6 +245,14 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
+ 	/* Get initial state */
+ 	mrfld_extcon_role_detect(data);
+ 
++	/*
++	 * The cached status value is used for cable detection; see comments
++	 * in mrfld_extcon_cable_detect(). We need to sync the cached value
++	 * with the real state of the hardware.
++	 */
++	regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
++	data->status = status;
++
+ 	mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
+ 	mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);
+ 
+diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
+index 0078260fbabea..172c751a4f6c2 100644
+--- a/drivers/firmware/qemu_fw_cfg.c
++++ b/drivers/firmware/qemu_fw_cfg.c
+@@ -299,15 +299,13 @@ static int fw_cfg_do_platform_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
++static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a,
++			      char *buf)
+ {
+ 	return sprintf(buf, "%u\n", fw_cfg_rev);
+ }
+ 
+-static const struct {
+-	struct attribute attr;
+-	ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
+-} fw_cfg_rev_attr = {
++static const struct kobj_attribute fw_cfg_rev_attr = {
+ 	.attr = { .name = "rev", .mode = S_IRUSR },
+ 	.show = fw_cfg_showrev,
+ };
+diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
+index 657a70c5fc996..9e34bbbce26e2 100644
+--- a/drivers/fpga/stratix10-soc.c
++++ b/drivers/fpga/stratix10-soc.c
+@@ -454,6 +454,7 @@ static int s10_remove(struct platform_device *pdev)
+ 	struct s10_priv *priv = mgr->priv;
+ 
+ 	fpga_mgr_unregister(mgr);
++	fpga_mgr_free(mgr);
+ 	stratix10_svc_free_channel(priv->chan);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index ac0a432a9bf79..3c3f05d1f4daf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -49,12 +49,6 @@ static struct {
+ 	spinlock_t mem_limit_lock;
+ } kfd_mem_limit;
+ 
+-/* Struct used for amdgpu_amdkfd_bo_validate */
+-struct amdgpu_vm_parser {
+-	uint32_t        domain;
+-	bool            wait;
+-};
+-
+ static const char * const domain_bit_to_string[] = {
+ 		"CPU",
+ 		"GTT",
+@@ -337,11 +331,9 @@ validate_fail:
+ 	return ret;
+ }
+ 
+-static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
++static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+ {
+-	struct amdgpu_vm_parser *p = param;
+-
+-	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
++	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
+ }
+ 
+ /* vm_validate_pt_pd_bos - Validate page table and directory BOs
+@@ -355,20 +347,15 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
+ {
+ 	struct amdgpu_bo *pd = vm->root.base.bo;
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
+-	struct amdgpu_vm_parser param;
+ 	int ret;
+ 
+-	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
+-	param.wait = false;
+-
+-	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
+-					&param);
++	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
+ 	if (ret) {
+ 		pr_err("failed to validate PT BOs\n");
+ 		return ret;
+ 	}
+ 
+-	ret = amdgpu_amdkfd_validate(&param, pd);
++	ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
+ 	if (ret) {
+ 		pr_err("failed to validate PD\n");
+ 		return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 85d90e857693a..1b69aa74056db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2818,7 +2818,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
+ 		AMD_IP_BLOCK_TYPE_IH,
+ 	};
+ 
+-	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
++	for (i = 0; i < adev->num_ip_blocks; i++) {
+ 		int j;
+ 		struct amdgpu_ip_block *block;
+ 
+@@ -3141,8 +3141,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+ 	int ret = 0;
+ 
+ 	/*
+-	 * By default timeout for non compute jobs is 10000.
+-	 * And there is no timeout enforced on compute jobs.
++	 * By default the timeout for non-compute jobs is 10000
++	 * and 60000 for compute jobs.
+ 	 * In SR-IOV or passthrough mode, timeout for compute
+ 	 * jobs are 60000 by default.
+ 	 */
+@@ -3151,10 +3151,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+ 	if (amdgpu_sriov_vf(adev))
+ 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
+ 					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
+-	else if (amdgpu_passthrough(adev))
+-		adev->compute_timeout =  msecs_to_jiffies(60000);
+ 	else
+-		adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
++		adev->compute_timeout =  msecs_to_jiffies(60000);
+ 
+ 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ 		while ((timeout_setting = strsep(&input, ",")) &&
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index e92e7dea71da1..41ebc9d21e5c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -272,9 +272,9 @@ module_param_named(msi, amdgpu_msi, int, 0444);
+  *   for SDMA and Video.
+  *
+  * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
+- * jobs is 10000. And there is no timeout enforced on compute jobs.
++ * jobs is 10000. The timeout for compute is 60000.
+  */
+-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; "
++MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
+ 		"for passthrough or sriov, 10000 for all jobs."
+ 		" 0: keep default value. negative: infinity timeout), "
+ 		"format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+@@ -1123,6 +1123,7 @@ static const struct pci_device_id pciidlist[] = {
+ 	{0x1002, 0x73E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ 	{0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ 	{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
++	{0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ 	{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ 
+ 	{0, 0, 0}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 97c11aa47ad0b..7892958d5f599 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -131,7 +131,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ 	struct amdgpu_device *adev = ring->adev;
+ 	struct amdgpu_ib *ib = &ibs[0];
+ 	struct dma_fence *tmp = NULL;
+-	bool skip_preamble, need_ctx_switch;
++	bool need_ctx_switch;
+ 	unsigned patch_offset = ~0;
+ 	struct amdgpu_vm *vm;
+ 	uint64_t fence_ctx;
+@@ -228,7 +228,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ 	if (need_ctx_switch)
+ 		status |= AMDGPU_HAVE_CTX_SWITCH;
+ 
+-	skip_preamble = ring->current_ctx == fence_ctx;
+ 	if (job && ring->funcs->emit_cntxcntl) {
+ 		status |= job->preamble_status;
+ 		status |= job->preemption_status;
+@@ -246,14 +245,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ 	for (i = 0; i < num_ibs; ++i) {
+ 		ib = &ibs[i];
+ 
+-		/* drop preamble IBs if we don't have a context switch */
+-		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
+-		    skip_preamble &&
+-		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
+-		    !amdgpu_mcbp &&
+-		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
+-			continue;
+-
+ 		if (job && ring->funcs->emit_frame_cntl) {
+ 			if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
+ 				amdgpu_ring_emit_frame_cntl(ring, false, secure);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+index 7c11bce4514bd..1ad7191994ac5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+@@ -89,6 +89,8 @@ struct amdgpu_nbio_funcs {
+ 	void (*enable_aspm)(struct amdgpu_device *adev,
+ 			    bool enable);
+ 	void (*program_aspm)(struct amdgpu_device *adev);
++	void (*apply_lc_spc_mode_wa)(struct amdgpu_device *adev);
++	void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev);
+ };
+ 
+ struct amdgpu_nbio {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+index 1838144936580..bda4438c39256 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+@@ -21,6 +21,11 @@
+ #ifndef __AMDGPU_UMC_H__
+ #define __AMDGPU_UMC_H__
+ 
++/*
++ * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
++ * is the index of 4KB block
++ */
++#define ADDR_OF_4KB_BLOCK(addr)			(((addr) & ~0xffULL) << 4)
+ /*
+  * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
+  * is the index of 8KB block
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+index 05ddec7ba7e2e..754b11dea6f04 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+@@ -51,6 +51,8 @@
+ #define mmBIF_MMSCH1_DOORBELL_RANGE		0x01d8
+ #define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX	2
+ 
++#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
++
+ static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
+ {
+ 	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+@@ -463,6 +465,43 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+ 		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+ }
+ 
++static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
++{
++	uint32_t reg_data = 0;
++	uint32_t link_width = 0;
++
++	if (!((adev->asic_type >= CHIP_NAVI10) &&
++	     (adev->asic_type <= CHIP_NAVI12)))
++		return;
++
++	reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL);
++	link_width = (reg_data & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
++		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
++
++	/*
++	 * Program PCIE_LC_CNTL6.LC_SPC_MODE_8GT to 0x2 (4 symbols per clock data)
++	 * if link_width is 0x3 (x4)
++	 */
++	if (0x3 == link_width) {
++		reg_data = RREG32_PCIE(smnPCIE_LC_CNTL6);
++		reg_data &= ~PCIE_LC_CNTL6__LC_SPC_MODE_8GT_MASK;
++		reg_data |= (0x2 << PCIE_LC_CNTL6__LC_SPC_MODE_8GT__SHIFT);
++		WREG32_PCIE(smnPCIE_LC_CNTL6, reg_data);
++	}
++}
++
++static void nbio_v2_3_apply_l1_link_width_reconfig_wa(struct amdgpu_device *adev)
++{
++	uint32_t reg_data = 0;
++
++	if (adev->asic_type != CHIP_NAVI10)
++		return;
++
++	reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL);
++	reg_data |= PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK;
++	WREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL, reg_data);
++}
++
+ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
+ 	.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
+ 	.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
+@@ -484,4 +523,6 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
+ 	.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
+ 	.enable_aspm = nbio_v2_3_enable_aspm,
+ 	.program_aspm =  nbio_v2_3_program_aspm,
++	.apply_lc_spc_mode_wa = nbio_v2_3_apply_lc_spc_mode_wa,
++	.apply_l1_link_width_reconfig_wa = nbio_v2_3_apply_l1_link_width_reconfig_wa,
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index c625c5d8ed890..3677788d47711 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -1073,6 +1073,12 @@ static int nv_common_hw_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	if (adev->nbio.funcs->apply_lc_spc_mode_wa)
++		adev->nbio.funcs->apply_lc_spc_mode_wa(adev);
++
++	if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
++		adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);
++
+ 	/* enable pcie gen2/3 link */
+ 	nv_pcie_gen3_enable(adev);
+ 	/* enable aspm */
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index c8c22c1d1e655..03a6f57c425ab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -142,7 +142,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+-	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
++	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+ };
+ 
+@@ -268,7 +268,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+-	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
++	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+index 32c6aa03d2673..f884d43d4ff00 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -145,9 +145,6 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
+ 	struct amdgpu_firmware_info *info = NULL;
+ 	const struct common_firmware_header *header = NULL;
+ 
+-	if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
+-		return 0;
+-
+ 	DRM_DEBUG("\n");
+ 
+ 	switch (adev->asic_type) {
+@@ -182,6 +179,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
+ 		       (void *)&adev->sdma.instance[0],
+ 		       sizeof(struct amdgpu_sdma_instance));
+ 
++	if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
++		return 0;
++
+ 	DRM_DEBUG("psp_load == '%s'\n",
+ 		  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+index a064c097690c7..66fd797a6f0ec 100644
+--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
++++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+@@ -233,7 +233,7 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
+ 		err_addr &= ~((0x1ULL << lsb) - 1);
+ 
+ 		/* translate umc channel address to soc pa, 3 parts are included */
+-		retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
++		retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+ 				ADDR_OF_256B_BLOCK(channel_index) |
+ 				OFFSET_IN_256B_BLOCK(err_addr);
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index a4266c4bca135..b2e55917c308e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -486,9 +486,6 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
+ 	if (retval == -ETIME)
+ 		qpd->reset_wavefronts = true;
+ 
+-
+-	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+-
+ 	list_del(&q->list);
+ 	if (list_empty(&qpd->queues_list)) {
+ 		if (qpd->reset_wavefronts) {
+@@ -523,6 +520,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ 	int retval;
+ 	uint64_t sdma_val = 0;
+ 	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
++	struct mqd_manager *mqd_mgr =
++		dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
+ 
+ 	/* Get the SDMA queue stats */
+ 	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
+@@ -540,6 +539,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ 		pdd->sdma_past_activity_counter += sdma_val;
+ 	dqm_unlock(dqm);
+ 
++	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
++
+ 	return retval;
+ }
+ 
+@@ -1632,7 +1633,7 @@ static int set_trap_handler(struct device_queue_manager *dqm,
+ static int process_termination_nocpsch(struct device_queue_manager *dqm,
+ 		struct qcm_process_device *qpd)
+ {
+-	struct queue *q, *next;
++	struct queue *q;
+ 	struct device_process_node *cur, *next_dpn;
+ 	int retval = 0;
+ 	bool found = false;
+@@ -1640,12 +1641,19 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
+ 	dqm_lock(dqm);
+ 
+ 	/* Clear all user mode queues */
+-	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
++	while (!list_empty(&qpd->queues_list)) {
++		struct mqd_manager *mqd_mgr;
+ 		int ret;
+ 
++		q = list_first_entry(&qpd->queues_list, struct queue, list);
++		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
++				q->properties.type)];
+ 		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
+ 		if (ret)
+ 			retval = ret;
++		dqm_unlock(dqm);
++		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
++		dqm_lock(dqm);
+ 	}
+ 
+ 	/* Unregister process */
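
This hunk, the get_wave_state() hunk below, and the process_termination_cpsch hunk further down all follow the same rule: free_mqd() and the wave-state callback must run without the dqm lock held (under it we cannot reclaim or take locks others hold while reclaiming), and once the lock is dropped a list_for_each_entry_safe() cached next pointer is no longer trustworthy. A minimal standalone sketch of the drain pattern, with hypothetical types in place of the kfd structures:

/* Minimal sketch of the drain pattern used here: always re-read the
 * list head after re-acquiring the lock instead of caching a next
 * pointer. Types and the free routine are hypothetical stand-ins. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int payload; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void drain(void)
{
	pthread_mutex_lock(&lock);
	while (head) {			/* re-evaluated under the lock */
		struct node *n = head;

		head = n->next;		/* unlink while still locked */
		pthread_mutex_unlock(&lock);
		free(n);		/* may sleep/reclaim: must be unlocked */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->payload = i;
		n->next = head;
		head = n;
	}
	drain();
	printf("drained: %s\n", head ? "no" : "yes");
	return 0;
}

Re-reading the list head under the re-acquired lock is what makes dropping the lock inside the loop body safe.
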
+@@ -1677,36 +1685,34 @@ static int get_wave_state(struct device_queue_manager *dqm,
+ 			  u32 *save_area_used_size)
+ {
+ 	struct mqd_manager *mqd_mgr;
+-	int r;
+ 
+ 	dqm_lock(dqm);
+ 
+-	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
+-	    q->properties.is_active || !q->device->cwsr_enabled) {
+-		r = -EINVAL;
+-		goto dqm_unlock;
+-	}
+-
+ 	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
+ 
+-	if (!mqd_mgr->get_wave_state) {
+-		r = -EINVAL;
+-		goto dqm_unlock;
++	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
++	    q->properties.is_active || !q->device->cwsr_enabled ||
++	    !mqd_mgr->get_wave_state) {
++		dqm_unlock(dqm);
++		return -EINVAL;
+ 	}
+ 
+-	r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
+-			ctl_stack_used_size, save_area_used_size);
+-
+-dqm_unlock:
+ 	dqm_unlock(dqm);
+-	return r;
++
++	/*
++	 * get_wave_state is outside the dqm lock to prevent circular locking
++	 * and the queue should be protected against destruction by the process
++	 * lock.
++	 */
++	return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
++			ctl_stack_used_size, save_area_used_size);
+ }
+ 
+ static int process_termination_cpsch(struct device_queue_manager *dqm,
+ 		struct qcm_process_device *qpd)
+ {
+ 	int retval;
+-	struct queue *q, *next;
++	struct queue *q;
+ 	struct kernel_queue *kq, *kq_next;
+ 	struct mqd_manager *mqd_mgr;
+ 	struct device_process_node *cur, *next_dpn;
+@@ -1763,24 +1769,26 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+ 		qpd->reset_wavefronts = false;
+ 	}
+ 
+-	dqm_unlock(dqm);
+-
+-	/* Outside the DQM lock because under the DQM lock we can't do
+-	 * reclaim or take other locks that others hold while reclaiming.
+-	 */
+-	if (found)
+-		kfd_dec_compute_active(dqm->dev);
+-
+ 	/* Lastly, free mqd resources.
+ 	 * Do free_mqd() after dqm_unlock to avoid circular locking.
+ 	 */
+-	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
++	while (!list_empty(&qpd->queues_list)) {
++		q = list_first_entry(&qpd->queues_list, struct queue, list);
+ 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ 				q->properties.type)];
+ 		list_del(&q->list);
+ 		qpd->queue_count--;
++		dqm_unlock(dqm);
+ 		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
++		dqm_lock(dqm);
+ 	}
++	dqm_unlock(dqm);
++
++	/* Outside the DQM lock because under the DQM lock we can't do
++	 * reclaim or take other locks that others hold while reclaiming.
++	 */
++	if (found)
++		kfd_dec_compute_active(dqm->dev);
+ 
+ 	return retval;
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0858e0c7b7a1d..9cac833bc9d6a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3867,6 +3867,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
+ 	scaling_info->src_rect.x = state->src_x >> 16;
+ 	scaling_info->src_rect.y = state->src_y >> 16;
+ 
++	/*
++	 * For reasons we don't (yet) fully understand a non-zero
++	 * src_y coordinate into an NV12 buffer can cause a
++	 * system hang. To avoid hangs (and maybe be overly cautious)
++	 * let's reject both non-zero src_x and src_y.
++	 *
++	 * We currently know of only one use-case to reproduce a
++	 * scenario with non-zero src_x and src_y for NV12, which
++	 * is to gesture the YouTube Android app into full screen
++	 * on ChromeOS.
++	 */
++	if (state->fb &&
++	    state->fb->format->format == DRM_FORMAT_NV12 &&
++	    (scaling_info->src_rect.x != 0 ||
++	     scaling_info->src_rect.y != 0))
++		return -EINVAL;
++
+ 	/*
+ 	 * For reasons we don't (yet) fully understand a non-zero
+ 	 * src_y coordinate into an NV12 buffer can cause a
+@@ -8939,7 +8956,8 @@ skip_modeset:
+ 	BUG_ON(dm_new_crtc_state->stream == NULL);
+ 
+ 	/* Scaling or underscan settings */
+-	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
++	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
++				drm_atomic_crtc_needs_modeset(new_crtc_state))
+ 		update_stream_scaling_settings(
+ 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+ 
+@@ -9498,6 +9516,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 			dm_old_crtc_state->dsc_force_changed == false)
+ 			continue;
+ 
++		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
++		if (ret)
++			goto fail;
++
+ 		if (!new_crtc_state->enable)
+ 			continue;
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 52cc817052802..250adc92dfd0f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -529,6 +529,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
+ #define MAX_COLOR_LEGACY_LUT_ENTRIES 256
+ 
+ void amdgpu_dm_init_color_mod(void);
++int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
+ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
+ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
+ 				      struct dc_plane_state *dc_plane_state);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+index 157fe4efbb599..a022e5bb30a5c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+@@ -284,6 +284,37 @@ static int __set_input_tf(struct dc_transfer_func *func,
+ 	return res ? 0 : -ENOMEM;
+ }
+ 
++/**
++ * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
++ * the expected size.
++ * Returns 0 on success.
++ */
++int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
++{
++	const struct drm_color_lut *lut = NULL;
++	uint32_t size = 0;
++
++	lut = __extract_blob_lut(crtc_state->degamma_lut, &size);
++	if (lut && size != MAX_COLOR_LUT_ENTRIES) {
++		DRM_DEBUG_DRIVER(
++			"Invalid Degamma LUT size. Should be %u but got %u.\n",
++			MAX_COLOR_LUT_ENTRIES, size);
++		return -EINVAL;
++	}
++
++	lut = __extract_blob_lut(crtc_state->gamma_lut, &size);
++	if (lut && size != MAX_COLOR_LUT_ENTRIES &&
++	    size != MAX_COLOR_LEGACY_LUT_ENTRIES) {
++		DRM_DEBUG_DRIVER(
++			"Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n",
++			MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES,
++			size);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ /**
+  * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+  * @crtc: amdgpu_dm crtc state
+@@ -317,14 +348,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+ 	bool is_legacy;
+ 	int r;
+ 
+-	degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
+-	if (degamma_lut && degamma_size != MAX_COLOR_LUT_ENTRIES)
+-		return -EINVAL;
++	r = amdgpu_dm_verify_lut_sizes(&crtc->base);
++	if (r)
++		return r;
+ 
++	degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
+ 	regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);
+-	if (regamma_lut && regamma_size != MAX_COLOR_LUT_ENTRIES &&
+-	    regamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES)
+-		return -EINVAL;
+ 
+ 	has_degamma =
+ 		degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index 9f9fda3118d1f..500bcd0ecf4d2 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -1944,7 +1944,7 @@ static enum bp_result get_integrated_info_v2_1(
+ 		info_v2_1->edp1_info.edp_pwr_down_bloff_to_vary_bloff;
+ 	info->edp1_info.edp_panel_bpc =
+ 		info_v2_1->edp1_info.edp_panel_bpc;
+-	info->edp1_info.edp_bootup_bl_level =
++	info->edp1_info.edp_bootup_bl_level = info_v2_1->edp1_info.edp_bootup_bl_level;
+ 
+ 	info->edp2_info.edp_backlight_pwm_hz =
+ 	le16_to_cpu(info_v2_1->edp2_info.edp_backlight_pwm_hz);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index b85f67341a9a0..c957d7d055ba3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1726,6 +1726,8 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
+ 		link->type = dc_connection_single;
+ 		link->local_sink = link->remote_sinks[0];
+ 		link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
++		dc_sink_retain(link->local_sink);
++		dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
+ 	} else if (mst_enable == true &&
+ 			link->type == dc_connection_single &&
+ 			link->remote_sinks[0] != NULL) {
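
The added dc_sink_retain() covers the second reference created when remote_sinks[0] is aliased into local_sink. A toy refcount sketch of the rule being enforced; the real dc_sink has its own refcounting, so this stand-in is only illustrative:

/* Two pointers that can be released independently must hold two
 * references, or the second release frees memory still in use.
 * Hypothetical minimal refcount type. */
#include <stdio.h>
#include <stdlib.h>

struct sink { int refs; };

static struct sink *sink_create(void)
{
	struct sink *s = calloc(1, sizeof(*s));

	s->refs = 1;
	return s;
}

static void sink_retain(struct sink *s)  { s->refs++; }

static void sink_release(struct sink *s)
{
	if (--s->refs == 0)
		free(s);
}

int main(void)
{
	struct sink *remote = sink_create();
	struct sink *local = remote;	/* aliasing, as with local_sink */

	sink_retain(local);		/* the fix: one ref per owner */
	sink_release(remote);
	sink_release(local);		/* safe; a double-free without the retain */
	printf("ok\n");
	return 0;
}
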
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 0c26c2ade782c..749189eb20bab 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -652,124 +652,23 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli
+ 	}
+ }
+ 
+-static void calculate_viewport(struct pipe_ctx *pipe_ctx)
++/*
++ * This is a preliminary vp size calculation to allow us to check taps support.
++ * The result is completely overridden afterwards.
++ */
++static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
+ {
+-	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+-	const struct dc_stream_state *stream = pipe_ctx->stream;
+ 	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+-	struct rect surf_src = plane_state->src_rect;
+-	struct rect clip, dest;
+-	int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+-			|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+-	int split_count = 0;
+-	int split_idx = 0;
+-	bool orthogonal_rotation, flip_y_start, flip_x_start;
+-
+-	calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+ 
+-	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+-		stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+-		split_count = 0;
+-		split_idx = 0;
+-	}
+-
+-	/* The actual clip is an intersection between stream
+-	 * source and surface clip
+-	 */
+-	dest = plane_state->dst_rect;
+-	clip.x = stream->src.x > plane_state->clip_rect.x ?
+-			stream->src.x : plane_state->clip_rect.x;
+-
+-	clip.width = stream->src.x + stream->src.width <
+-			plane_state->clip_rect.x + plane_state->clip_rect.width ?
+-			stream->src.x + stream->src.width - clip.x :
+-			plane_state->clip_rect.x + plane_state->clip_rect.width - clip.x ;
+-
+-	clip.y = stream->src.y > plane_state->clip_rect.y ?
+-			stream->src.y : plane_state->clip_rect.y;
+-
+-	clip.height = stream->src.y + stream->src.height <
+-			plane_state->clip_rect.y + plane_state->clip_rect.height ?
+-			stream->src.y + stream->src.height - clip.y :
+-			plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
+-
+-	/*
+-	 * Need to calculate how scan origin is shifted in vp space
+-	 * to correctly rotate clip and dst
+-	 */
+-	get_vp_scan_direction(
+-			plane_state->rotation,
+-			plane_state->horizontal_mirror,
+-			&orthogonal_rotation,
+-			&flip_y_start,
+-			&flip_x_start);
+-
+-	if (orthogonal_rotation) {
+-		swap(clip.x, clip.y);
+-		swap(clip.width, clip.height);
+-		swap(dest.x, dest.y);
+-		swap(dest.width, dest.height);
+-	}
+-	if (flip_x_start) {
+-		clip.x = dest.x + dest.width - clip.x - clip.width;
+-		dest.x = 0;
+-	}
+-	if (flip_y_start) {
+-		clip.y = dest.y + dest.height - clip.y - clip.height;
+-		dest.y = 0;
+-	}
+-
+-	/* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
+-	 * num_pixels = clip.num_pix * scl_ratio
+-	 */
+-	data->viewport.x = surf_src.x + (clip.x - dest.x) * surf_src.width / dest.width;
+-	data->viewport.width = clip.width * surf_src.width / dest.width;
+-
+-	data->viewport.y = surf_src.y + (clip.y - dest.y) * surf_src.height / dest.height;
+-	data->viewport.height = clip.height * surf_src.height / dest.height;
+-
+-	/* Handle split */
+-	if (split_count) {
+-		/* extra pixels in the division remainder need to go to pipes after
+-		 * the extra pixel index minus one(epimo) defined here as:
+-		 */
+-		int epimo = 0;
+-
+-		if (orthogonal_rotation) {
+-			if (flip_y_start)
+-				split_idx = split_count - split_idx;
+-
+-			epimo = split_count - data->viewport.height % (split_count + 1);
+-
+-			data->viewport.y += (data->viewport.height / (split_count + 1)) * split_idx;
+-			if (split_idx > epimo)
+-				data->viewport.y += split_idx - epimo - 1;
+-			data->viewport.height = data->viewport.height / (split_count + 1) + (split_idx > epimo ? 1 : 0);
+-		} else {
+-			if (flip_x_start)
+-				split_idx = split_count - split_idx;
+-
+-			epimo = split_count - data->viewport.width % (split_count + 1);
+-
+-			data->viewport.x += (data->viewport.width / (split_count + 1)) * split_idx;
+-			if (split_idx > epimo)
+-				data->viewport.x += split_idx - epimo - 1;
+-			data->viewport.width = data->viewport.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
+-		}
++	data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width));
++	data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height));
++	data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width));
++	data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height));
++	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
++			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
++		swap(data->viewport.width, data->viewport.height);
++		swap(data->viewport_c.width, data->viewport_c.height);
+ 	}
+-
+-	/* Round down, compensate in init */
+-	data->viewport_c.x = data->viewport.x / vpc_div;
+-	data->viewport_c.y = data->viewport.y / vpc_div;
+-	data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+-	data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+-
+-	/* Round up, assume original video size always even dimensions */
+-	data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
+-	data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+-
+-	data->viewport_unadjusted = data->viewport;
+-	data->viewport_c_unadjusted = data->viewport_c;
+ }
+ 
+ static void calculate_recout(struct pipe_ctx *pipe_ctx)
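
The new calculate_viewport_size() is simply the recout scaled back into source space, rounded up. A float approximation of the same arithmetic; the driver does this in 31.32 fixed point via dc_fixpt_mul_int()/dc_fixpt_ceil(), and the numbers here are invented:

/* Float approximation of the preliminary viewport sizing. */
#include <stdio.h>
#include <math.h>

int main(void)
{
	double ratio_h = 1.5;	/* e.g. 2880 source pixels onto a 1920 recout */
	double ratio_v = 1.5;
	int recout_w = 1920, recout_h = 1080;

	int vp_w = (int)ceil(ratio_h * recout_w);	/* 2880 */
	int vp_h = (int)ceil(ratio_v * recout_h);	/* 1620 */

	printf("viewport %dx%d\n", vp_w, vp_h);
	return 0;
}
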
+@@ -778,26 +677,21 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
+ 	const struct dc_stream_state *stream = pipe_ctx->stream;
+ 	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ 	struct rect surf_clip = plane_state->clip_rect;
+-	bool pri_split_tb = pipe_ctx->bottom_pipe &&
+-			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state &&
+-			stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+-	bool sec_split_tb = pipe_ctx->top_pipe &&
+-			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state &&
+-			stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+-	int split_count = 0;
+-	int split_idx = 0;
++	bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
++	int split_count, split_idx;
+ 
+ 	calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
++	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
++		split_idx = 0;
+ 
+ 	/*
+ 	 * Only the leftmost ODM pipe should be offset by a nonzero distance
+ 	 */
+-	if (!pipe_ctx->prev_odm_pipe) {
++	if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
+ 		data->recout.x = stream->dst.x;
+ 		if (stream->src.x < surf_clip.x)
+ 			data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
+ 						/ stream->src.width;
+-
+ 	} else
+ 		data->recout.x = 0;
+ 
+@@ -818,26 +712,36 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
+ 	if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
+ 		data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
+ 
+-	/* Handle h & v split, handle rotation using viewport */
+-	if (sec_split_tb) {
+-		data->recout.y += data->recout.height / 2;
+-		/* Floor primary pipe, ceil 2ndary pipe */
+-		data->recout.height = (data->recout.height + 1) / 2;
+-	} else if (pri_split_tb)
++	/* Handle h & v split */
++	if (split_tb) {
++		ASSERT(data->recout.height % 2 == 0);
+ 		data->recout.height /= 2;
+-	else if (split_count) {
+-		/* extra pixels in the division remainder need to go to pipes after
+-		 * the extra pixel index minus one(epimo) defined here as:
+-		 */
+-		int epimo = split_count - data->recout.width % (split_count + 1);
+-
+-		/*no recout offset due to odm */
++	} else if (split_count) {
+ 		if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
++			/* extra pixels in the division remainder need to go to pipes after
++			 * the extra pixel index minus one (epimo), defined here as:
++			 */
++			int epimo = split_count - data->recout.width % (split_count + 1);
++
+ 			data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
+ 			if (split_idx > epimo)
+ 				data->recout.x += split_idx - epimo - 1;
++			ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0);
++			data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
++		} else {
++			/* odm */
++			if (split_idx == split_count) {
++				/* rightmost pipe is the remainder recout */
++				data->recout.width -= data->h_active * split_count - data->recout.x;
++
++				/* ODM combine cases with MPO we can get negative widths */
++				if (data->recout.width < 0)
++					data->recout.width = 0;
++
++				data->recout.x = 0;
++			} else
++				data->recout.width = data->h_active - data->recout.x;
+ 		}
+-		data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
+ 	}
+ }
+ 
+@@ -891,9 +795,15 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
+ 			pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
+ }
+ 
+-static inline void adjust_vp_and_init_for_seamless_clip(
++
++/*
++ * We completely calculate vp offset, size and inits here based entirely on scaling
++ * ratios and recout for pixel perfect pipe combine.
++ */
++static void calculate_init_and_vp(
+ 		bool flip_scan_dir,
+-		int recout_skip,
++		int recout_offset_within_recout_full,
++		int recout_size,
+ 		int src_size,
+ 		int taps,
+ 		struct fixed31_32 ratio,
+@@ -901,91 +811,87 @@ static inline void adjust_vp_and_init_for_seamless_clip(
+ 		int *vp_offset,
+ 		int *vp_size)
+ {
+-	if (!flip_scan_dir) {
+-		/* Adjust for viewport end clip-off */
+-		if ((*vp_offset + *vp_size) < src_size) {
+-			int vp_clip = src_size - *vp_size - *vp_offset;
+-			int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
+-
+-			int_part = int_part > 0 ? int_part : 0;
+-			*vp_size += int_part < vp_clip ? int_part : vp_clip;
+-		}
+-
+-		/* Adjust for non-0 viewport offset */
+-		if (*vp_offset) {
+-			int int_part;
+-
+-			*init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
+-			int_part = dc_fixpt_floor(*init) - *vp_offset;
+-			if (int_part < taps) {
+-				int int_adj = *vp_offset >= (taps - int_part) ?
+-							(taps - int_part) : *vp_offset;
+-				*vp_offset -= int_adj;
+-				*vp_size += int_adj;
+-				int_part += int_adj;
+-			} else if (int_part > taps) {
+-				*vp_offset += int_part - taps;
+-				*vp_size -= int_part - taps;
+-				int_part = taps;
+-			}
+-			init->value &= 0xffffffff;
+-			*init = dc_fixpt_add_int(*init, int_part);
+-		}
+-	} else {
+-		/* Adjust for non-0 viewport offset */
+-		if (*vp_offset) {
+-			int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
+-
+-			int_part = int_part > 0 ? int_part : 0;
+-			*vp_size += int_part < *vp_offset ? int_part : *vp_offset;
+-			*vp_offset -= int_part < *vp_offset ? int_part : *vp_offset;
+-		}
++	struct fixed31_32 temp;
++	int int_part;
+ 
+-		/* Adjust for viewport end clip-off */
+-		if ((*vp_offset + *vp_size) < src_size) {
+-			int int_part;
+-			int end_offset = src_size - *vp_offset - *vp_size;
+-
+-			/*
+-			 * this is init if vp had no offset, keep in mind this is from the
+-			 * right side of vp due to scan direction
+-			 */
+-			*init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
+-			/*
+-			 * this is the difference between first pixel of viewport available to read
+-			 * and init position, takning into account scan direction
+-			 */
+-			int_part = dc_fixpt_floor(*init) - end_offset;
+-			if (int_part < taps) {
+-				int int_adj = end_offset >= (taps - int_part) ?
+-							(taps - int_part) : end_offset;
+-				*vp_size += int_adj;
+-				int_part += int_adj;
+-			} else if (int_part > taps) {
+-				*vp_size += int_part - taps;
+-				int_part = taps;
+-			}
+-			init->value &= 0xffffffff;
+-			*init = dc_fixpt_add_int(*init, int_part);
+-		}
++	/*
++	 * The first of the taps starts sampling pixel number <init_int_part>, corresponding
++	 * to recout pixel 1. The next recout pixel samples the int part of
++	 * <init + scaling ratio>, and so on.
++	 * All following calculations are based on this logic.
++	 *
++	 * Init calculated according to formula:
++	 * 	init = (scaling_ratio + number_of_taps + 1) / 2
++	 * 	init_bot = init + scaling_ratio
++	 * 	to get pixel perfect combine add the fraction from calculating vp offset
++	 */
++	temp = dc_fixpt_mul_int(ratio, recout_offset_within_recout_full);
++	*vp_offset = dc_fixpt_floor(temp);
++	temp.value &= 0xffffffff;
++	*init = dc_fixpt_truncate(dc_fixpt_add(dc_fixpt_div_int(
++			dc_fixpt_add_int(ratio, taps + 1), 2), temp), 19);
++	/*
++	 * If the viewport has a non-zero offset and there are more taps than
++	 * covered by init, then we should decrease the offset and increase init
++	 * so we never sample outside of the viewport.
++	 */
++	int_part = dc_fixpt_floor(*init);
++	if (int_part < taps) {
++		int_part = taps - int_part;
++		if (int_part > *vp_offset)
++			int_part = *vp_offset;
++		*vp_offset -= int_part;
++		*init = dc_fixpt_add_int(*init, int_part);
+ 	}
++	/*
++	 * If the taps sample outside of the viewport at the end of recout and there
++	 * are more pixels available in the surface, we should increase the viewport
++	 * size; regardless, set vp to only what is actually used.
++	 */
++	temp = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_size - 1));
++	*vp_size = dc_fixpt_floor(temp);
++	if (*vp_size + *vp_offset > src_size)
++		*vp_size = src_size - *vp_offset;
++
++	/* We did all the math assuming we scan in the same direction as the display
++	 * does; however, mirror/rotation changes how the vp scans vs. how it is
++	 * offset. If the scan direction is flipped we simply calculate the offset
++	 * from the other side of the plane.
++	 * Note that outside of viewport all scaling hardware works in recout space.
++	 */
++	if (flip_scan_dir)
++		*vp_offset = src_size - *vp_offset - *vp_size;
+ }
+ 
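
The steps above can be followed with concrete numbers. This float sketch mirrors the three stages of calculate_init_and_vp(): derive vp offset and init from the recout offset, trade offset for init so the taps never sample left of the viewport, then size the viewport by where the last recout pixel samples. The driver performs all of this in 31.32 fixed point truncated to a u0.19 fraction; the inputs here are invented:

/* Float approximation of calculate_init_and_vp(). */
#include <stdio.h>
#include <math.h>

int main(void)
{
	double ratio = 2.0;	/* 2:1 downscale */
	int taps = 4, recout_offset = 7, recout_size = 960, src_size = 1934;

	double temp = ratio * recout_offset;		/* 14.0 */
	int vp_offset = (int)floor(temp);
	double init = (ratio + taps + 1) / 2.0 + (temp - vp_offset);

	/* never sample left of the viewport: trade offset for init */
	int int_part = (int)floor(init);
	if (int_part < taps) {
		int adj = taps - int_part;

		if (adj > vp_offset)
			adj = vp_offset;
		vp_offset -= adj;
		init += adj;
	}

	/* size the viewport by where the last recout pixel samples */
	int vp_size = (int)floor(init + ratio * (recout_size - 1));
	if (vp_size + vp_offset > src_size)
		vp_size = src_size - vp_offset;

	printf("init=%.2f vp_offset=%d vp_size=%d\n", init, vp_offset, vp_size);
	return 0;	/* init=4.50 vp_offset=13 vp_size=1921 */
}
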
+-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
++static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
+ {
+ 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ 	const struct dc_stream_state *stream = pipe_ctx->stream;
+-	struct pipe_ctx *odm_pipe = pipe_ctx;
+ 	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+-	struct rect src = pipe_ctx->plane_state->src_rect;
+-	int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
++	struct rect src = plane_state->src_rect;
+ 	int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+-			|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
++				|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
++	int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y;
+ 	bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
+-	int odm_idx = 0;
+ 
++	calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+ 	/*
+-	 * Need to calculate the scan direction for viewport to make adjustments
++	 * recout full is what the recout would have been if we didn't clip
++	 * the source plane at all. We only care about the left (ro_lb) and top (ro_tb)
++	 * offsets of recout within recout full because those are the directions
++	 * we scan from and therefore the only ones that affect inits.
++	 */
++	recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
++			* stream->dst.width / stream->src.width;
++	recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
++			* stream->dst.height / stream->src.height;
++	if (pipe_ctx->prev_odm_pipe && split_idx)
++		ro_lb = data->h_active * split_idx - recout_full_x;
++	else
++		ro_lb = data->recout.x - recout_full_x;
++	ro_tb = data->recout.y - recout_full_y;
++	ASSERT(ro_lb >= 0 && ro_tb >= 0);
++
++	/*
++	 * Work in recout rotation since that requires less transformations
+ 	 */
+ 	get_vp_scan_direction(
+ 			plane_state->rotation,
+@@ -994,145 +900,62 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
+ 			&flip_vert_scan_dir,
+ 			&flip_horz_scan_dir);
+ 
+-	/* Calculate src rect rotation adjusted to recout space */
+-	surf_size_h = src.x + src.width;
+-	surf_size_v = src.y + src.height;
+-	if (flip_horz_scan_dir)
+-		src.x = 0;
+-	if (flip_vert_scan_dir)
+-		src.y = 0;
+ 	if (orthogonal_rotation) {
+-		swap(src.x, src.y);
+ 		swap(src.width, src.height);
++		swap(flip_vert_scan_dir, flip_horz_scan_dir);
+ 	}
+ 
+-	/*modified recout_skip_h calculation due to odm having no recout offset*/
+-	while (odm_pipe->prev_odm_pipe) {
+-		odm_idx++;
+-		odm_pipe = odm_pipe->prev_odm_pipe;
+-	}
+-	/*odm_pipe is the leftmost pipe in the ODM group*/
+-	recout_skip_h = odm_idx * data->recout.width;
+-
+-	/* Recout matching initial vp offset = recout_offset - (stream dst offset +
+-	 *			((surf dst offset - stream src offset) * 1/ stream scaling ratio)
+-	 *			- (surf surf_src offset * 1/ full scl ratio))
+-	 */
+-	recout_skip_h += odm_pipe->plane_res.scl_data.recout.x
+-				- (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+-					* stream->dst.width / stream->src.width -
+-					src.x * plane_state->dst_rect.width / src.width
+-					* stream->dst.width / stream->src.width);
+-
+-
+-	recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+-					* stream->dst.height / stream->src.height -
+-					src.y * plane_state->dst_rect.height / src.height
+-					* stream->dst.height / stream->src.height);
+-	if (orthogonal_rotation)
+-		swap(recout_skip_h, recout_skip_v);
+-	/*
+-	 * Init calculated according to formula:
+-	 * 	init = (scaling_ratio + number_of_taps + 1) / 2
+-	 * 	init_bot = init + scaling_ratio
+-	 * 	init_c = init + truncated_vp_c_offset(from calculate viewport)
+-	 */
+-	data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
+-			dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
+-
+-	data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
+-			dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
+-
+-	data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
+-			dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
+-
+-	data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
+-			dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
+-
+-	/*
+-	 * Taps, inits and scaling ratios are in recout space need to rotate
+-	 * to viewport rotation before adjustment
+-	 */
+-	adjust_vp_and_init_for_seamless_clip(
++	calculate_init_and_vp(
+ 			flip_horz_scan_dir,
+-			recout_skip_h,
+-			surf_size_h,
+-			orthogonal_rotation ? data->taps.v_taps : data->taps.h_taps,
+-			orthogonal_rotation ? data->ratios.vert : data->ratios.horz,
+-			orthogonal_rotation ? &data->inits.v : &data->inits.h,
++			ro_lb,
++			data->recout.width,
++			src.width,
++			data->taps.h_taps,
++			data->ratios.horz,
++			&data->inits.h,
+ 			&data->viewport.x,
+ 			&data->viewport.width);
+-	adjust_vp_and_init_for_seamless_clip(
++	calculate_init_and_vp(
+ 			flip_horz_scan_dir,
+-			recout_skip_h,
+-			surf_size_h / vpc_div,
+-			orthogonal_rotation ? data->taps.v_taps_c : data->taps.h_taps_c,
+-			orthogonal_rotation ? data->ratios.vert_c : data->ratios.horz_c,
+-			orthogonal_rotation ? &data->inits.v_c : &data->inits.h_c,
++			ro_lb,
++			data->recout.width,
++			src.width / vpc_div,
++			data->taps.h_taps_c,
++			data->ratios.horz_c,
++			&data->inits.h_c,
+ 			&data->viewport_c.x,
+ 			&data->viewport_c.width);
+-	adjust_vp_and_init_for_seamless_clip(
++	calculate_init_and_vp(
+ 			flip_vert_scan_dir,
+-			recout_skip_v,
+-			surf_size_v,
+-			orthogonal_rotation ? data->taps.h_taps : data->taps.v_taps,
+-			orthogonal_rotation ? data->ratios.horz : data->ratios.vert,
+-			orthogonal_rotation ? &data->inits.h : &data->inits.v,
++			ro_tb,
++			data->recout.height,
++			src.height,
++			data->taps.v_taps,
++			data->ratios.vert,
++			&data->inits.v,
+ 			&data->viewport.y,
+ 			&data->viewport.height);
+-	adjust_vp_and_init_for_seamless_clip(
++	calculate_init_and_vp(
+ 			flip_vert_scan_dir,
+-			recout_skip_v,
+-			surf_size_v / vpc_div,
+-			orthogonal_rotation ? data->taps.h_taps_c : data->taps.v_taps_c,
+-			orthogonal_rotation ? data->ratios.horz_c : data->ratios.vert_c,
+-			orthogonal_rotation ? &data->inits.h_c : &data->inits.v_c,
++			ro_tb,
++			data->recout.height,
++			src.height / vpc_div,
++			data->taps.v_taps_c,
++			data->ratios.vert_c,
++			&data->inits.v_c,
+ 			&data->viewport_c.y,
+ 			&data->viewport_c.height);
+-
+-	/* Interlaced inits based on final vert inits */
+-	data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
+-	data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
+-
+-}
+-
+-/*
+- * When handling 270 rotation in mixed SLS mode, we have
+- * stream->timing.h_border_left that is non zero.  If we are doing
+- * pipe-splitting, this h_border_left value gets added to recout.x and when it
+- * calls calculate_inits_and_adj_vp() and
+- * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a
+- * pipe to be incorrect.
+- *
+- * To fix this, instead of using stream->timing.h_border_left, we can use
+- * stream->dst.x to represent the border instead.  So we will set h_border_left
+- * to 0 and shift the appropriate amount in stream->dst.x.  We will then
+- * perform all calculations in resource_build_scaling_params() based on this
+- * and then restore the h_border_left and stream->dst.x to their original
+- * values.
+- *
+- * shift_border_left_to_dst() will shift the amount of h_border_left to
+- * stream->dst.x and set h_border_left to 0.  restore_border_left_from_dst()
+- * will restore h_border_left and stream->dst.x back to their original values
+- * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
+- * original h_border_left value in its calculation.
+- */
+-static int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
+-{
+-	int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
+-
+-	if (store_h_border_left) {
+-		pipe_ctx->stream->timing.h_border_left = 0;
+-		pipe_ctx->stream->dst.x += store_h_border_left;
++	if (orthogonal_rotation) {
++		swap(data->viewport.x, data->viewport.y);
++		swap(data->viewport.width, data->viewport.height);
++		swap(data->viewport_c.x, data->viewport_c.y);
++		swap(data->viewport_c.width, data->viewport_c.height);
+ 	}
+-	return store_h_border_left;
+-}
+-
+-static void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
+-					 int store_h_border_left)
+-{
+-	pipe_ctx->stream->dst.x -= store_h_border_left;
+-	pipe_ctx->stream->timing.h_border_left = store_h_border_left;
++	data->viewport.x += src.x;
++	data->viewport.y += src.y;
++	ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0);
++	data->viewport_c.x += src.x / vpc_div;
++	data->viewport_c.y += src.y / vpc_div;
+ }
+ 
+ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+@@ -1140,48 +963,42 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+ 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ 	bool res = false;
+-	int store_h_border_left = shift_border_left_to_dst(pipe_ctx);
+ 	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+-	/* Important: scaling ratio calculation requires pixel format,
+-	 * lb depth calculation requires recout and taps require scaling ratios.
+-	 * Inits require viewport, taps, ratios and recout of split pipe
+-	 */
++
+ 	pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
+ 			pipe_ctx->plane_state->format);
+ 
+-	calculate_scaling_ratios(pipe_ctx);
+-
+-	calculate_viewport(pipe_ctx);
++	/* Timing borders are part of vactive that we are also supposed to skip in addition
++	 * to any stream dst offset. Since dm logic assumes dst is in addressable
++	 * space we need to add the left and top borders to dst offsets temporarily.
++	 * TODO: fix in DM, stream dst is supposed to be in vactive
++	 */
++	pipe_ctx->stream->dst.x += timing->h_border_left;
++	pipe_ctx->stream->dst.y += timing->v_border_top;
+ 
+-	if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
+-		pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) {
+-		if (store_h_border_left) {
+-			restore_border_left_from_dst(pipe_ctx,
+-				store_h_border_left);
+-		}
+-		return false;
+-	}
++	/* Calculate H and V active size */
++	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
++			timing->h_border_left + timing->h_border_right;
++	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
++		timing->v_border_top + timing->v_border_bottom;
++	if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
++		pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
+ 
++	/* depends on h_active */
+ 	calculate_recout(pipe_ctx);
++	/* depends on pixel format */
++	calculate_scaling_ratios(pipe_ctx);
++	/* depends on scaling ratios and recout, does not calculate offset yet */
++	calculate_viewport_size(pipe_ctx);
+ 
+-	/**
++	/*
++	 * LB calculations depend on vp size, h/v_active and scaling ratios
+ 	 * Setting line buffer pixel depth to 24bpp yields banding
+ 	 * on certain displays, such as the Sharp 4k
+ 	 */
+ 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+ 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
+ 
+-	pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
+-	pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
+-
+-	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
+-		store_h_border_left + timing->h_border_right;
+-	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
+-		timing->v_border_top + timing->v_border_bottom;
+-	if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
+-		pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
+-
+-	/* Taps calculations */
+ 	if (pipe_ctx->plane_res.xfm != NULL)
+ 		res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ 				pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+@@ -1208,9 +1025,31 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+ 					&plane_state->scaling_quality);
+ 	}
+ 
++	/*
++	 * Depends on recout, scaling ratios, h_active and taps
++	 * May need to re-check lb size after this in some obscure scenario
++	 */
+ 	if (res)
+-		/* May need to re-check lb size after this in some obscure scenario */
+-		calculate_inits_and_adj_vp(pipe_ctx);
++		calculate_inits_and_viewports(pipe_ctx);
++
++	/*
++	 * Handle side by side and top bottom 3d recout offsets after vp calculation
++	 * since 3d is special and needs to calculate vp as if there is no recout offset
++	 * This may break with rotation, good thing we aren't mixing hw rotation and 3d
++	 */
++	if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == plane_state) {
++		ASSERT(plane_state->rotation == ROTATION_ANGLE_0 ||
++			(pipe_ctx->stream->view_format != VIEW_3D_FORMAT_TOP_AND_BOTTOM &&
++				pipe_ctx->stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE));
++		if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
++			pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
++		else if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
++			pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
++	}
++
++	if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
++			pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
++		res = false;
+ 
+ 	DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d  Recout: height:%d width:%d x:%d y:%d  HACTIVE:%d VACTIVE:%d\n"
+ 			"src_rect: height:%d width:%d x:%d y:%d  dst_rect: height:%d width:%d x:%d y:%d  clip_rect: height:%d width:%d x:%d y:%d\n",
+@@ -1239,8 +1078,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+ 			plane_state->clip_rect.x,
+ 			plane_state->clip_rect.y);
+ 
+-	if (store_h_border_left)
+-		restore_border_left_from_dst(pipe_ctx, store_h_border_left);
++	pipe_ctx->stream->dst.x -= timing->h_border_left;
++	pipe_ctx->stream->dst.y -= timing->v_border_top;
+ 
+ 	return res;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 80757a0ea7c65..2c3ce5d657180 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -270,11 +270,6 @@ struct dc_edid_caps {
+ 	struct dc_panel_patch panel_patch;
+ };
+ 
+-struct view {
+-	uint32_t width;
+-	uint32_t height;
+-};
+-
+ struct dc_mode_flags {
+ 	/* note: part of refresh rate flag*/
+ 	uint32_t INTERLACE :1;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+index efa86d5c68470..a33f522a2648a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+@@ -496,10 +496,13 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
+ 	int vtaps_c = scl_data->taps.v_taps_c;
+ 	int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
+ 	int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
+-	enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;
+ 
+-	if (dpp->base.ctx->dc->debug.use_max_lb)
+-		return mem_cfg;
++	if (dpp->base.ctx->dc->debug.use_max_lb) {
++		if (scl_data->format == PIXEL_FORMAT_420BPP8
++				|| scl_data->format == PIXEL_FORMAT_420BPP10)
++			return LB_MEMORY_CONFIG_3;
++		return LB_MEMORY_CONFIG_0;
++	}
+ 
+ 	dpp->base.caps->dscl_calc_lb_num_partitions(
+ 			scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);
+@@ -628,8 +631,10 @@ static void dpp1_dscl_set_manual_ratio_init(
+ 		SCL_V_INIT_INT, init_int);
+ 
+ 	if (REG(SCL_VERT_FILTER_INIT_BOT)) {
+-		init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5;
+-		init_int = dc_fixpt_floor(data->inits.v_bot);
++		struct fixed31_32 bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
++
++		init_frac = dc_fixpt_u0d19(bot) << 5;
++		init_int = dc_fixpt_floor(bot);
+ 		REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
+ 			SCL_V_INIT_FRAC_BOT, init_frac,
+ 			SCL_V_INIT_INT_BOT, init_int);
+@@ -642,8 +647,10 @@ static void dpp1_dscl_set_manual_ratio_init(
+ 		SCL_V_INIT_INT_C, init_int);
+ 
+ 	if (REG(SCL_VERT_FILTER_INIT_BOT_C)) {
+-		init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5;
+-		init_int = dc_fixpt_floor(data->inits.v_c_bot);
++		struct fixed31_32 bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
++
++		init_frac = dc_fixpt_u0d19(bot) << 5;
++		init_int = dc_fixpt_floor(bot);
+ 		REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
+ 			SCL_V_INIT_FRAC_BOT_C, init_frac,
+ 			SCL_V_INIT_INT_BOT_C, init_int);
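
With inits.v_bot/v_c_bot removed from scl_inits (see the transform.h hunk further down), the bottom-field init is now derived on the spot as init + ratio. A float sketch of that derivation and of the frac/int split written to the registers; the u0.19-fraction-shifted-by-5 packing mirrors the code above, but the exact register field layout is an assumption:

/* Sketch of the inline bottom-field init: bot = init + ratio. */
#include <stdio.h>
#include <stdint.h>
#include <math.h>

int main(void)
{
	double init_v = 3.5, ratio_v = 2.0;
	double bot = init_v + ratio_v;			/* 5.5 */

	/* u0.19 fraction, shifted by 5 as in the register write above */
	uint32_t init_frac = (uint32_t)((bot - floor(bot)) * (1 << 19)) << 5;
	uint32_t init_int = (uint32_t)floor(bot);

	printf("bot=%.2f frac=0x%x int=%u\n", bot, init_frac, init_int);
	return 0;
}
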
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index aece1103331dd..d8a03d8256235 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -243,7 +243,7 @@ void dcn20_dccg_init(struct dce_hwseq *hws)
+ 	REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);
+ 
+ 	/* This value is dependent on the hardware pipeline delay so set once per SOC */
+-	REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
++	REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
+ }
+ 
+ void dcn20_disable_vga(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index 3e3c898848bd8..2b6eb8c5614b8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -2284,12 +2284,14 @@ int dcn20_populate_dml_pipes_from_context(
+ 
+ 			pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
+ 					|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
+-			pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y;
+-			pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c_unadjusted.y;
+-			pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport_unadjusted.width;
+-			pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c_unadjusted.width;
+-			pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport_unadjusted.height;
+-			pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c_unadjusted.height;
++			pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
++			pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
++			pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
++			pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
++			pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
++			pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
++			pipes[pipe_cnt].pipe.src.viewport_width_max = pln->src_rect.width;
++			pipes[pipe_cnt].pipe.src.viewport_height_max = pln->src_rect.height;
+ 			pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
+ 			pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
+ 			pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+index bc07082c13575..6e326290f2147 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+@@ -64,6 +64,7 @@ typedef struct {
+ #define BPP_INVALID 0
+ #define BPP_BLENDED_PIPE 0xffffffff
+ #define DCN30_MAX_DSC_IMAGE_WIDTH 5184
++#define DCN30_MAX_FMT_420_BUFFER_WIDTH 4096
+ 
+ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
+ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+@@ -2052,7 +2053,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ 			v->DISPCLKWithoutRamping,
+ 			v->DISPCLKDPPCLKVCOSpeed);
+ 	v->MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
+-			v->soc.clock_limits[mode_lib->soc.num_states].dispclk_mhz,
++			v->soc.clock_limits[mode_lib->soc.num_states - 1].dispclk_mhz,
+ 			v->DISPCLKDPPCLKVCOSpeed);
+ 	if (v->DISPCLKWithoutRampingRoundedToDFSGranularity
+ 			> v->MaxDispclkRoundedToDFSGranularity) {
+@@ -3957,20 +3958,20 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 			for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ 				v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ 						* (1.0 + v->DISPCLKRampingMargin / 100.0);
+-				if ((v->PlaneRequiredDISPCLKWithoutODMCombine >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
+-						&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
++				if ((v->PlaneRequiredDISPCLKWithoutODMCombine >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
++						&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
+ 					v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ 				}
+ 				v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ 						* (1 + v->DISPCLKRampingMargin / 100.0);
+-				if ((v->PlaneRequiredDISPCLKWithODMCombine2To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
+-						&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
++				if ((v->PlaneRequiredDISPCLKWithODMCombine2To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
++						&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
+ 					v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ 				}
+ 				v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ 						* (1 + v->DISPCLKRampingMargin / 100.0);
+-				if ((v->PlaneRequiredDISPCLKWithODMCombine4To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
+-						&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
++				if ((v->PlaneRequiredDISPCLKWithODMCombine4To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
++						&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
+ 					v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ 				}
+ 
+@@ -3987,19 +3988,30 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 				} else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
+ 					v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 					v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
+-				} else if (v->DSCEnabled[k] && (v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH)) {
+-					v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+-					v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
+ 				} else {
+ 					v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 					v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
+-					/*420 format workaround*/
+-					if (v->HActive[k] > 4096 && v->OutputFormat[k] == dm_420) {
++				}
++				if (v->DSCEnabled[k] && v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH
++						&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
++					if (v->HActive[k] / 2 > DCN30_MAX_DSC_IMAGE_WIDTH) {
++						v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
++						v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
++					} else {
++						v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
++						v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
++					}
++				}
++				if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN30_MAX_FMT_420_BUFFER_WIDTH
++						&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
++					if (v->HActive[k] / 2 > DCN30_MAX_FMT_420_BUFFER_WIDTH) {
++						v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
++						v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
++					} else {
+ 						v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
+ 					}
+ 				}
+-
+ 				if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
+ 					v->MPCCombine[i][j][k] = false;
+ 					v->NoOfDPP[i][j][k] = 4;
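
The reworked decision can be tabulated. The sketch below reproduces the shape of the new 4:2:0 branch for DCN30_MAX_FMT_420_BUFFER_WIDTH (4096): ODM stays off while HActive fits the buffer, 2-to-1 combine is used while half of HActive fits, and only wider timings fall back to 4-to-1. The DSC width check with DCN30_MAX_DSC_IMAGE_WIDTH follows the same shape:

/* Worked example of the new ODM combine decision for 4:2:0 output. */
#include <stdio.h>

#define MAX_420_WIDTH 4096	/* DCN30_MAX_FMT_420_BUFFER_WIDTH */

static const char *odm_for(int h_active)
{
	if (h_active <= MAX_420_WIDTH)
		return "disabled";
	if (h_active / 2 <= MAX_420_WIDTH)
		return "2to1";
	return "4to1";
}

int main(void)
{
	int widths[] = { 3840, 5120, 7680, 10240 };

	for (int i = 0; i < 4; i++)
		printf("HActive=%d -> odm %s\n", widths[i], odm_for(widths[i]));
	return 0;	/* disabled, 2to1, 2to1, 4to1 */
}
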
+@@ -4281,42 +4293,8 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 		}
+ 	}
+ 
+-	for (i = 0; i < v->soc.num_states; i++) {
+-		v->DSCCLKRequiredMoreThanSupported[i] = false;
+-		for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+-			if (v->BlendingAndTiming[k] == k) {
+-				if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
+-					if (v->OutputFormat[k] == dm_420) {
+-						v->DSCFormatFactor = 2;
+-					} else if (v->OutputFormat[k] == dm_444) {
+-						v->DSCFormatFactor = 1;
+-					} else if (v->OutputFormat[k] == dm_n422) {
+-						v->DSCFormatFactor = 2;
+-					} else {
+-						v->DSCFormatFactor = 1;
+-					}
+-					if (v->RequiresDSC[i][k] == true) {
+-						if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
+-							if (v->PixelClockBackEnd[k] / 12.0 / v->DSCFormatFactor
+-									> (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
+-								v->DSCCLKRequiredMoreThanSupported[i] = true;
+-							}
+-						} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+-							if (v->PixelClockBackEnd[k] / 6.0 / v->DSCFormatFactor
+-									> (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
+-								v->DSCCLKRequiredMoreThanSupported[i] = true;
+-							}
+-						} else {
+-							if (v->PixelClockBackEnd[k] / 3.0 / v->DSCFormatFactor
+-									> (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
+-								v->DSCCLKRequiredMoreThanSupported[i] = true;
+-							}
+-						}
+-					}
+-				}
+-			}
+-		}
+-	}
++	/* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */
++
+ 	for (i = 0; i < v->soc.num_states; i++) {
+ 		v->NotEnoughDSCUnits[i] = false;
+ 		v->TotalDSCUnitsRequired = 0.0;
+@@ -5319,7 +5297,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 		for (j = 0; j < 2; j++) {
+ 			if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1
+ 					&& v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1
+-					&& v->NotEnoughDSCUnits[i] == 0 && v->DSCCLKRequiredMoreThanSupported[i] == 0
++					&& v->NotEnoughDSCUnits[i] == 0
+ 					&& v->DTBCLKRequiredMoreThanSupported[i] == 0
+ 					&& v->ROBSupport[i][j] == 1 && v->DISPCLK_DPPCLK_Support[i][j] == 1 && v->TotalAvailablePipesSupport[i][j] == 1
+ 					&& EnoughWritebackUnits == 1 && WritebackModeSupport == 1
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+index 0c5128187e089..ea01994a61338 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+@@ -253,6 +253,8 @@ struct _vcs_dpi_display_pipe_source_params_st {
+ 	unsigned int viewport_y_c;
+ 	unsigned int viewport_width_c;
+ 	unsigned int viewport_height_c;
++	unsigned int viewport_width_max;
++	unsigned int viewport_height_max;
+ 	unsigned int data_pitch;
+ 	unsigned int data_pitch_c;
+ 	unsigned int meta_pitch;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+index bc0485a59018a..c1bf3cab2f156 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+@@ -627,6 +627,19 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
+ 				}
+ 			}
+ 		}
++		if (src->viewport_width_max) {
++			int hdiv_c = src->source_format >= dm_420_8 && src->source_format <= dm_422_10 ? 2 : 1;
++			int vdiv_c = src->source_format >= dm_420_8 && src->source_format <= dm_420_12 ? 2 : 1;
++
++			if (mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] > src->viewport_width_max)
++				mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_max;
++			if (mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] > src->viewport_height_max)
++				mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_max;
++			if (mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] > src->viewport_width_max / hdiv_c)
++				mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_max / hdiv_c;
++			if (mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] > src->viewport_height_max / vdiv_c)
++				mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_max / vdiv_c;
++		}
+ 
+ 		if (pipes[k].pipe.src.immediate_flip) {
+ 			mode_lib->vba.ImmediateFlipSupport = true;
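
The hdiv_c/vdiv_c selection in the clamp above encodes chroma subsampling: 4:2:0 halves the chroma viewport in both axes, 4:2:2 only horizontally, 4:4:4 not at all. A standalone sketch with hypothetical enum values standing in for the dm_420_8..dm_422_10 range tests:

/* Sketch of the chroma divisor selection; the enum is a hypothetical
 * stand-in whose ordering matches the range tests above. */
#include <stdio.h>

enum fmt { FMT_444, FMT_420_8, FMT_420_10, FMT_420_12, FMT_422_8, FMT_422_10 };

static void divisors(enum fmt f, int *hdiv_c, int *vdiv_c)
{
	*hdiv_c = (f >= FMT_420_8 && f <= FMT_422_10) ? 2 : 1;
	*vdiv_c = (f >= FMT_420_8 && f <= FMT_420_12) ? 2 : 1;
}

int main(void)
{
	int h, v;

	divisors(FMT_420_8, &h, &v);
	printf("420: chroma max = luma_max/%d x luma_max/%d\n", h, v);
	divisors(FMT_422_8, &h, &v);
	printf("422: chroma max = luma_max/%d x luma_max/%d\n", h, v);
	return 0;
}
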
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+index 2947d1b155129..2a0db2b03047e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+@@ -162,9 +162,7 @@ struct scl_inits {
+ 	struct fixed31_32 h;
+ 	struct fixed31_32 h_c;
+ 	struct fixed31_32 v;
+-	struct fixed31_32 v_bot;
+ 	struct fixed31_32 v_c;
+-	struct fixed31_32 v_c_bot;
+ };
+ 
+ struct scaler_data {
+@@ -173,8 +171,6 @@ struct scaler_data {
+ 	struct scaling_taps taps;
+ 	struct rect viewport;
+ 	struct rect viewport_c;
+-	struct rect viewport_unadjusted;
+-	struct rect viewport_c_unadjusted;
+ 	struct rect recout;
+ 	struct scaling_ratios ratios;
+ 	struct scl_inits inits;
+diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
+index 87812d81fed3c..52fbf575e0004 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
++++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
+@@ -164,7 +164,7 @@ enum irq_type
+ };
+ 
+ #define DAL_VALID_IRQ_SRC_NUM(src) \
+-	((src) <= DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
++	((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
+ 
+ /* Number of Page Flip IRQ Sources. */
+ #define DAL_PFLIP_IRQ_SRC_NUM \
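
The one-character change from '<=' to '<' in DAL_VALID_IRQ_SRC_NUM is a classic bounds fix: an index equal to the element count is already one past the end of any array sized by DAL_IRQ_SOURCES_NUMBER. A minimal demonstration:

/* Why the valid-range test must use '<', not '<='. */
#include <stdio.h>

#define SRC_INVALID 0
#define SRC_NUMBER  4	/* stand-in for DAL_IRQ_SOURCES_NUMBER */

static int table[SRC_NUMBER];

static int valid(int src)
{
	return src < SRC_NUMBER && src > SRC_INVALID;	/* fixed form */
}

int main(void)
{
	/* with '<=' this passed and table[4] overflowed the array */
	printf("src=%d valid? %s\n", SRC_NUMBER,
	       valid(SRC_NUMBER) ? "yes" : "no");
	table[1] = 1;	/* in-bounds use */
	printf("%d\n", table[1]);
	return 0;
}
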
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+index 20e554e771d16..fa8aeec304ef4 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+@@ -260,7 +260,6 @@ enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ 	struct mod_hdcp_output output;
+ 	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ 
+-	memset(hdcp, 0, sizeof(struct mod_hdcp));
+ 	memset(&output, 0, sizeof(output));
+ 	hdcp->config = *config;
+ 	HDCP_TOP_INTERFACE_TRACE(hdcp);
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+index 73ca49f05bd32..eb56526ec32c6 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+@@ -29,8 +29,10 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
+ {
+ 	uint64_t n = 0;
+ 	uint8_t count = 0;
++	u8 bksv[sizeof(n)] = { };
+ 
+-	memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(uint64_t));
++	memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
++	n = *(uint64_t *)bksv;
+ 
+ 	while (n) {
+ 		count++;
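
The old code memcpy'd sizeof(uint64_t) bytes out of bksv, which per the HDCP spec is only a 40-bit, 5-byte value, so three bytes past the source array were read. The fix stages the copy through a zeroed 8-byte buffer. A standalone sketch of the fixed flow, including the set-bit count that validate_bksv() goes on to check; a valid HDCP1 KSV has exactly 20 bits set, and the sample value below is invented:

/* Sketch of the fixed validate_bksv() flow: copy the 5-byte KSV into a
 * zero-padded 8-byte buffer so no out-of-bounds bytes are read, then
 * count the set bits. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t bksv_msg[5] = { 0xff, 0xff, 0x0f, 0x00, 0x00 };  /* 20 ones */
	uint8_t buf[sizeof(uint64_t)] = { 0 };	/* zero padding, no over-read */
	uint64_t n;
	int count = 0;

	memcpy(buf, bksv_msg, sizeof(bksv_msg));
	memcpy(&n, buf, sizeof(n));		/* portable reinterpretation */

	while (n) {
		count += n & 1;
		n >>= 1;
	}
	printf("set bits: %d -> %s\n", count, count == 20 ? "valid" : "invalid");
	return 0;
}
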
+diff --git a/drivers/gpu/drm/amd/include/navi10_enum.h b/drivers/gpu/drm/amd/include/navi10_enum.h
+index d5ead9680c6ed..84bcb96f76ea4 100644
+--- a/drivers/gpu/drm/amd/include/navi10_enum.h
++++ b/drivers/gpu/drm/amd/include/navi10_enum.h
+@@ -430,7 +430,7 @@ ARRAY_2D_DEPTH                           = 0x00000001,
+  */
+ 
+ typedef enum ENUM_NUM_SIMD_PER_CU {
+-NUM_SIMD_PER_CU                          = 0x00000004,
++NUM_SIMD_PER_CU                          = 0x00000002,
+ } ENUM_NUM_SIMD_PER_CU;
+ 
+ /*
+diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
+index 351a85088d0ec..f1e8bc39b16d3 100644
+--- a/drivers/gpu/drm/arm/malidp_planes.c
++++ b/drivers/gpu/drm/arm/malidp_planes.c
+@@ -922,6 +922,11 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
+ 	.atomic_disable = malidp_de_plane_disable,
+ };
+ 
++static const uint64_t linear_only_modifiers[] = {
++	DRM_FORMAT_MOD_LINEAR,
++	DRM_FORMAT_MOD_INVALID
++};
++
+ int malidp_de_planes_init(struct drm_device *drm)
+ {
+ 	struct malidp_drm *malidp = drm->dev_private;
+@@ -985,8 +990,8 @@ int malidp_de_planes_init(struct drm_device *drm)
+ 		 */
+ 		ret = drm_universal_plane_init(drm, &plane->base, crtcs,
+ 				&malidp_de_plane_funcs, formats, n,
+-				(id == DE_SMART) ? NULL : modifiers, plane_type,
+-				NULL);
++				(id == DE_SMART) ? linear_only_modifiers : modifiers,
++				plane_type, NULL);
+ 
+ 		if (ret < 0)
+ 			goto cleanup;
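
Here the driver stops passing NULL for the SMART layer and supplies an explicit linear-only modifier list. A hedged fragment of the convention this assumes: modifier arrays handed to drm_universal_plane_init() are terminated with DRM_FORMAT_MOD_INVALID, and a NULL list reads as "no modifier support" rather than "linear only":

static const uint64_t linear_only[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,		/* required sentinel */
};

/* NULL here would now mean "no modifiers", not "linear only" */
ret = drm_universal_plane_init(drm, plane, crtcs, &funcs, formats, n,
			       linear_only, type, NULL);
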
+diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
+index 88121c0e0d05c..cd93c44f26627 100644
+--- a/drivers/gpu/drm/ast/ast_dp501.c
++++ b/drivers/gpu/drm/ast/ast_dp501.c
+@@ -189,6 +189,9 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
+ 	u32 i, data;
+ 	u32 boot_address;
+ 
++	if (ast->config_mode != ast_use_p2a)
++		return false;
++
+ 	data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
+ 	if (data) {
+ 		boot_address = get_fw_base(ast);
+@@ -207,6 +210,9 @@ static bool ast_launch_m68k(struct drm_device *dev)
+ 	u8 *fw_addr = NULL;
+ 	u8 jreg;
+ 
++	if (ast->config_mode != ast_use_p2a)
++		return false;
++
+ 	data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
+ 	if (!data) {
+ 
+@@ -271,25 +277,55 @@ u8 ast_get_dp501_max_clk(struct drm_device *dev)
+ 	struct ast_private *ast = to_ast_private(dev);
+ 	u32 boot_address, offset, data;
+ 	u8 linkcap[4], linkrate, linklanes, maxclk = 0xff;
++	u32 *plinkcap;
+ 
+-	boot_address = get_fw_base(ast);
+-
+-	/* validate FW version */
+-	offset = 0xf000;
+-	data = ast_mindwm(ast, boot_address + offset);
+-	if ((data & 0xf0) != 0x10) /* version: 1x */
+-		return maxclk;
+-
+-	/* Read Link Capability */
+-	offset  = 0xf014;
+-	*(u32 *)linkcap = ast_mindwm(ast, boot_address + offset);
+-	if (linkcap[2] == 0) {
+-		linkrate = linkcap[0];
+-		linklanes = linkcap[1];
+-		data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
+-		if (data > 0xff)
+-			data = 0xff;
+-		maxclk = (u8)data;
++	if (ast->config_mode == ast_use_p2a) {
++		boot_address = get_fw_base(ast);
++
++		/* validate FW version */
++		offset = AST_DP501_GBL_VERSION;
++		data = ast_mindwm(ast, boot_address + offset);
++		if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
++			return maxclk;
++
++		/* Read Link Capability */
++		offset  = AST_DP501_LINKRATE;
++		plinkcap = (u32 *)linkcap;
++		*plinkcap  = ast_mindwm(ast, boot_address + offset);
++		if (linkcap[2] == 0) {
++			linkrate = linkcap[0];
++			linklanes = linkcap[1];
++			data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
++			if (data > 0xff)
++				data = 0xff;
++			maxclk = (u8)data;
++		}
++	} else {
++		if (!ast->dp501_fw_buf)
++			return AST_DP501_DEFAULT_DCLK;	/* 1024x768 as default */
++
++		/* dummy read */
++		offset = 0x0000;
++		data = readl(ast->dp501_fw_buf + offset);
++
++		/* validate FW version */
++		offset = AST_DP501_GBL_VERSION;
++		data = readl(ast->dp501_fw_buf + offset);
++		if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
++			return maxclk;
++
++		/* Read Link Capability */
++		offset = AST_DP501_LINKRATE;
++		plinkcap = (u32 *)linkcap;
++		*plinkcap = readl(ast->dp501_fw_buf + offset);
++		if (linkcap[2] == 0) {
++			linkrate = linkcap[0];
++			linklanes = linkcap[1];
++			data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
++			if (data > 0xff)
++				data = 0xff;
++			maxclk = (u8)data;
++		}
+ 	}
+ 	return maxclk;
+ }
+@@ -298,26 +334,57 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+ {
+ 	struct ast_private *ast = to_ast_private(dev);
+ 	u32 i, boot_address, offset, data;
++	u32 *pEDIDidx;
+ 
+-	boot_address = get_fw_base(ast);
+-
+-	/* validate FW version */
+-	offset = 0xf000;
+-	data = ast_mindwm(ast, boot_address + offset);
+-	if ((data & 0xf0) != 0x10)
+-		return false;
+-
+-	/* validate PnP Monitor */
+-	offset = 0xf010;
+-	data = ast_mindwm(ast, boot_address + offset);
+-	if (!(data & 0x01))
+-		return false;
++	if (ast->config_mode == ast_use_p2a) {
++		boot_address = get_fw_base(ast);
+ 
+-	/* Read EDID */
+-	offset = 0xf020;
+-	for (i = 0; i < 128; i += 4) {
+-		data = ast_mindwm(ast, boot_address + offset + i);
+-		*(u32 *)(ediddata + i) = data;
++		/* validate FW version */
++		offset = AST_DP501_GBL_VERSION;
++		data = ast_mindwm(ast, boot_address + offset);
++		if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1)
++			return false;
++
++		/* validate PnP Monitor */
++		offset = AST_DP501_PNPMONITOR;
++		data = ast_mindwm(ast, boot_address + offset);
++		if (!(data & AST_DP501_PNP_CONNECTED))
++			return false;
++
++		/* Read EDID */
++		offset = AST_DP501_EDID_DATA;
++		for (i = 0; i < 128; i += 4) {
++			data = ast_mindwm(ast, boot_address + offset + i);
++			pEDIDidx = (u32 *)(ediddata + i);
++			*pEDIDidx = data;
++		}
++	} else {
++		if (!ast->dp501_fw_buf)
++			return false;
++
++		/* dummy read */
++		offset = 0x0000;
++		data = readl(ast->dp501_fw_buf + offset);
++
++		/* validate FW version */
++		offset = AST_DP501_GBL_VERSION;
++		data = readl(ast->dp501_fw_buf + offset);
++		if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1)
++			return false;
++
++		/* validate PnP Monitor */
++		offset = AST_DP501_PNPMONITOR;
++		data = readl(ast->dp501_fw_buf + offset);
++		if (!(data & AST_DP501_PNP_CONNECTED))
++			return false;
++
++		/* Read EDID */
++		offset = AST_DP501_EDID_DATA;
++		for (i = 0; i < 128; i += 4) {
++			data = readl(ast->dp501_fw_buf + offset + i);
++			pEDIDidx = (u32 *)(ediddata + i);
++			*pEDIDidx = data;
++		}
+ 	}
+ 
+ 	return true;
+diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
+index f871fc36c2f7a..a3f67a34f6168 100644
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -121,6 +121,7 @@ struct ast_private {
+ 
+ 	void __iomem *regs;
+ 	void __iomem *ioregs;
++	void __iomem *dp501_fw_buf;
+ 
+ 	enum ast_chip chip;
+ 	bool vga2_clone;
+@@ -298,6 +299,17 @@ int ast_mode_config_init(struct ast_private *ast);
+ #define AST_MM_ALIGN_SHIFT 4
+ #define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
+ 
++#define AST_DP501_FW_VERSION_MASK	GENMASK(7, 4)
++#define AST_DP501_FW_VERSION_1		BIT(4)
++#define AST_DP501_PNP_CONNECTED		BIT(1)
++
++#define AST_DP501_DEFAULT_DCLK	65
++
++#define AST_DP501_GBL_VERSION	0xf000
++#define AST_DP501_PNPMONITOR	0xf010
++#define AST_DP501_LINKRATE	0xf014
++#define AST_DP501_EDID_DATA	0xf020
++
+ int ast_mm_init(struct ast_private *ast);
+ 
+ /* ast post */
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index c29cc7f19863a..2aff2e6cf450c 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -99,7 +99,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
+ 	if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
+ 		/* Double check it's actually working */
+ 		data = ast_read32(ast, 0xf004);
+-		if (data != 0xFFFFFFFF) {
++		if ((data != 0xFFFFFFFF) && (data != 0x00)) {
+ 			/* P2A works, grab silicon revision */
+ 			ast->config_mode = ast_use_p2a;
+ 
+@@ -450,6 +450,14 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
++	/* map reserved buffer */
++	ast->dp501_fw_buf = NULL;
++	if (dev->vram_mm->vram_size < pci_resource_len(pdev, 0)) {
++		ast->dp501_fw_buf = pci_iomap_range(pdev, 0, dev->vram_mm->vram_size, 0);
++		if (!ast->dp501_fw_buf)
++			drm_info(dev, "failed to map reserved buffer!\n");
++	}
++
+ 	ret = ast_mode_config_init(ast);
+ 	if (ret)
+ 		return ERR_PTR(ret);
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+index d0c65610ebb5c..f56ff97c98990 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+@@ -2369,9 +2369,9 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
+ 	clk_prepare_enable(clk);
+ 
+ 	pm_runtime_enable(dev);
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0) {
+-		dev_err(dev, "pm_runtime_get_sync failed\n");
++		dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ 		pm_runtime_disable(dev);
+ 		goto clk_disable;
+ 	}
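
The conversion above is part of a tree-wide cleanup: pm_runtime_get_sync() bumps the device's usage count even when the resume fails, so every error path must drop it by hand, while pm_runtime_resume_and_get() leaves the count balanced on failure. A sketch of the two patterns (a fragment, not buildable on its own):

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the count bump */
		return ret;
	}

	/* equivalent, and harder to get wrong: */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;			/* count already balanced */
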
+diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
+index 76373e31df92d..b31281f76117c 100644
+--- a/drivers/gpu/drm/bridge/cdns-dsi.c
++++ b/drivers/gpu/drm/bridge/cdns-dsi.c
+@@ -1028,7 +1028,7 @@ static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
+ 	struct mipi_dsi_packet packet;
+ 	int ret, i, tx_len, rx_len;
+ 
+-	ret = pm_runtime_get_sync(host->dev);
++	ret = pm_runtime_resume_and_get(host->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
+index d734d9402c350..c1926154eda84 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
+@@ -1209,6 +1209,7 @@ static struct i2c_device_id lt9611_id[] = {
+ 	{ "lontium,lt9611", 0 },
+ 	{}
+ };
++MODULE_DEVICE_TABLE(i2c, lt9611_id);
+ 
+ static const struct of_device_id lt9611_match_table[] = {
+ 	{ .compatible = "lontium,lt9611" },
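
Adding MODULE_DEVICE_TABLE() above fixes module autoloading: without it, the ID table is only consulted for in-kernel matching and never lands in the module's alias table that udev uses to load the driver when the device is enumerated. A minimal sketch with hypothetical IDs:

static const struct i2c_device_id demo_id[] = {
	{ "vendor,demo", 0 },
	{ }				/* sentinel */
};
MODULE_DEVICE_TABLE(i2c, demo_id);	/* emits the modalias entries */
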
+diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
+index 66b67402f1acd..c65ca860712d2 100644
+--- a/drivers/gpu/drm/bridge/nwl-dsi.c
++++ b/drivers/gpu/drm/bridge/nwl-dsi.c
+@@ -21,6 +21,7 @@
+ #include <linux/sys_soc.h>
+ #include <linux/time64.h>
+ 
++#include <drm/drm_atomic_state_helper.h>
+ #include <drm/drm_bridge.h>
+ #include <drm/drm_mipi_dsi.h>
+ #include <drm/drm_of.h>
+@@ -742,7 +743,9 @@ static int nwl_dsi_disable(struct nwl_dsi *dsi)
+ 	return 0;
+ }
+ 
+-static void nwl_dsi_bridge_disable(struct drm_bridge *bridge)
++static void
++nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
++			      struct drm_bridge_state *old_bridge_state)
+ {
+ 	struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ 	int ret;
+@@ -803,17 +806,6 @@ static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
+ 	return 0;
+ }
+ 
+-static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+-				      const struct drm_display_mode *mode,
+-				      struct drm_display_mode *adjusted_mode)
+-{
+-	/* At least LCDIF + NWL needs active high sync */
+-	adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+-	adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+-
+-	return true;
+-}
+-
+ static enum drm_mode_status
+ nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ 			  const struct drm_display_info *info,
+@@ -831,6 +823,24 @@ nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ 	return MODE_OK;
+ }
+ 
++static int nwl_dsi_bridge_atomic_check(struct drm_bridge *bridge,
++				       struct drm_bridge_state *bridge_state,
++				       struct drm_crtc_state *crtc_state,
++				       struct drm_connector_state *conn_state)
++{
++	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
++
++	/* At least LCDIF + NWL needs active high sync */
++	adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
++	adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
++
++	/* Do a full modeset if crtc_state->active is changed to be true. */
++	if (crtc_state->active_changed && crtc_state->active)
++		crtc_state->mode_changed = true;
++
++	return 0;
++}
++
+ static void
+ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ 			const struct drm_display_mode *mode,
+@@ -862,7 +872,9 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ 	drm_mode_debug_printmodeline(adjusted_mode);
+ }
+ 
+-static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
++static void
++nwl_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
++				 struct drm_bridge_state *old_bridge_state)
+ {
+ 	struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ 	int ret;
+@@ -897,7 +909,9 @@ static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+ 	}
+ }
+ 
+-static void nwl_dsi_bridge_enable(struct drm_bridge *bridge)
++static void
++nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
++			     struct drm_bridge_state *old_bridge_state)
+ {
+ 	struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ 	int ret;
+@@ -942,14 +956,17 @@ static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
+ }
+ 
+ static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
+-	.pre_enable = nwl_dsi_bridge_pre_enable,
+-	.enable     = nwl_dsi_bridge_enable,
+-	.disable    = nwl_dsi_bridge_disable,
+-	.mode_fixup = nwl_dsi_bridge_mode_fixup,
+-	.mode_set   = nwl_dsi_bridge_mode_set,
+-	.mode_valid = nwl_dsi_bridge_mode_valid,
+-	.attach	    = nwl_dsi_bridge_attach,
+-	.detach	    = nwl_dsi_bridge_detach,
++	.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
++	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
++	.atomic_reset		= drm_atomic_helper_bridge_reset,
++	.atomic_check		= nwl_dsi_bridge_atomic_check,
++	.atomic_pre_enable	= nwl_dsi_bridge_atomic_pre_enable,
++	.atomic_enable		= nwl_dsi_bridge_atomic_enable,
++	.atomic_disable		= nwl_dsi_bridge_atomic_disable,
++	.mode_set		= nwl_dsi_bridge_mode_set,
++	.mode_valid		= nwl_dsi_bridge_mode_valid,
++	.attach			= nwl_dsi_bridge_attach,
++	.detach			= nwl_dsi_bridge_detach,
+ };
+ 
+ static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
+diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
+index eedbb48815b7b..c7d4d43cdee07 100644
+--- a/drivers/gpu/drm/drm_dp_helper.c
++++ b/drivers/gpu/drm/drm_dp_helper.c
+@@ -679,7 +679,14 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
+ 	    !(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ 		return 0;
+ 
++	/* Some branches advertise having 0 downstream ports, despite also advertising they have a
++	 * downstream port present. The DP spec isn't clear on whether this is allowed, but since
++	 * some branches do it, we need to handle it regardless.
++	 */
+ 	len = drm_dp_downstream_port_count(dpcd);
++	if (!len)
++		return 0;
++
+ 	if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
+ 		len *= 4;
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 97a785aa8839b..9ebc558fd5163 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -3913,7 +3913,7 @@ static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
+ 	if (size < sizeof(struct dp_sdp))
+ 		return -EINVAL;
+ 
+-	memset(vsc, 0, size);
++	memset(vsc, 0, sizeof(*vsc));
+ 
+ 	if (sdp->sdp_header.HB0 != 0)
+ 		return -EINVAL;
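
The memset fix above is the classic "clear the object, not the caller's buffer length" bug: size describes the source SDP buffer and may exceed sizeof(*vsc), so clearing size bytes scribbles past the destination. A runnable illustration with hypothetical types and names:

#include <string.h>
#include <stdio.h>

struct small { int a, b; };

static void unpack(struct small *out, const void *src, size_t size)
{
	if (size < sizeof(struct small))
		return;
	memset(out, 0, sizeof(*out));	/* was: memset(out, 0, size) */
	memcpy(out, src, sizeof(*out));
}

int main(void)
{
	unsigned char blob[64] = { 1, 0, 0, 0, 2 };
	struct small s;

	unpack(&s, blob, sizeof(blob));
	printf("a=%d b=%d\n", s.a, s.b);
	return 0;
}
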
+diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+index fd8870edde0ed..63f38be391b6f 100644
+--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
++++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+@@ -341,7 +341,7 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+ 	if (priv->update_clk_rate) {
+ 		mutex_lock(&priv->clk_mutex);
+ 		clk_set_rate(priv->pix_clk,
+-			     crtc_state->adjusted_mode.clock * 1000);
++			     crtc_state->adjusted_mode.crtc_clock * 1000);
+ 		priv->update_clk_rate = false;
+ 		mutex_unlock(&priv->clk_mutex);
+ 	}
+@@ -413,7 +413,7 @@ static void ingenic_drm_plane_enable(struct ingenic_drm *priv,
+ 	unsigned int en_bit;
+ 
+ 	if (priv->soc_info->has_osd) {
+-		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
++		if (plane != &priv->f0)
+ 			en_bit = JZ_LCD_OSDC_F1EN;
+ 		else
+ 			en_bit = JZ_LCD_OSDC_F0EN;
+@@ -428,7 +428,7 @@ void ingenic_drm_plane_disable(struct device *dev, struct drm_plane *plane)
+ 	unsigned int en_bit;
+ 
+ 	if (priv->soc_info->has_osd) {
+-		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
++		if (plane != &priv->f0)
+ 			en_bit = JZ_LCD_OSDC_F1EN;
+ 		else
+ 			en_bit = JZ_LCD_OSDC_F0EN;
+@@ -455,8 +455,7 @@ void ingenic_drm_plane_config(struct device *dev,
+ 
+ 	ingenic_drm_plane_enable(priv, plane);
+ 
+-	if (priv->soc_info->has_osd &&
+-	    plane->type == DRM_PLANE_TYPE_PRIMARY) {
++	if (priv->soc_info->has_osd && plane != &priv->f0) {
+ 		switch (fourcc) {
+ 		case DRM_FORMAT_XRGB1555:
+ 			ctrl |= JZ_LCD_OSDCTRL_RGB555;
+@@ -504,7 +503,7 @@ void ingenic_drm_plane_config(struct device *dev,
+ 	}
+ 
+ 	if (priv->soc_info->has_osd) {
+-		if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
++		if (plane != &priv->f0) {
+ 			xy_reg = JZ_REG_LCD_XYP1;
+ 			size_reg = JZ_REG_LCD_SIZE1;
+ 		} else {
+@@ -554,7 +553,7 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
+ 		height = state->src_h >> 16;
+ 		cpp = state->fb->format->cpp[0];
+ 
+-		if (!priv->soc_info->has_osd || plane->type == DRM_PLANE_TYPE_OVERLAY)
++		if (!priv->soc_info->has_osd || plane == &priv->f0)
+ 			hwdesc = &priv->dma_hwdescs->hwdesc_f0;
+ 		else
+ 			hwdesc = &priv->dma_hwdescs->hwdesc_f1;
+diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
+index e52777ef85fd7..1cab0a743e987 100644
+--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
++++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
+@@ -760,7 +760,7 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d)
+ 
+ 	err = drm_universal_plane_init(drm, plane, 1, &ingenic_ipu_plane_funcs,
+ 				       soc_info->formats, soc_info->num_formats,
+-				       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
++				       NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
+ 	if (err) {
+ 		dev_err(dev, "Failed to init plane: %i\n", err);
+ 		return err;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 8b0de90156c6c..69d23ce56d2c6 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -259,7 +259,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
+ 		drm_connector_list_iter_end(&conn_iter);
+ 	}
+ 
+-	ret = pm_runtime_get_sync(crtc->dev->dev);
++	ret = pm_runtime_resume_and_get(crtc->dev->dev);
+ 	if (ret < 0) {
+ 		DRM_ERROR("Failed to enable power domain: %d\n", ret);
+ 		return ret;
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+index 3d729270bde15..4a5b518288b06 100644
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+@@ -88,8 +88,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
+ 	if (mdp4_kms->rev > 1)
+ 		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
+ 
+-	dev->mode_config.allow_fb_modifiers = true;
+-
+ out:
+ 	pm_runtime_put_sync(dev->dev);
+ 
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+index da3cc1d8c3312..ee1dbb2b84af4 100644
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+@@ -347,6 +347,12 @@ enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
+ 	return mdp4_plane->pipe;
+ }
+ 
++static const uint64_t supported_format_modifiers[] = {
++	DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
++	DRM_FORMAT_MOD_LINEAR,
++	DRM_FORMAT_MOD_INVALID
++};
++
+ /* initialize plane */
+ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ 		enum mdp4_pipe pipe_id, bool private_plane)
+@@ -375,7 +381,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ 	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+ 				 mdp4_plane->formats, mdp4_plane->nformats,
+-				 NULL, type, NULL);
++				 supported_format_modifiers, type, NULL);
+ 	if (ret)
+ 		goto fail;
+ 
+diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
+index 0143d539f8f82..ee22cd25d3e3d 100644
+--- a/drivers/gpu/drm/mxsfb/Kconfig
++++ b/drivers/gpu/drm/mxsfb/Kconfig
+@@ -10,7 +10,6 @@ config DRM_MXSFB
+ 	depends on COMMON_CLK
+ 	select DRM_MXS
+ 	select DRM_KMS_HELPER
+-	select DRM_KMS_FB_HELPER
+ 	select DRM_KMS_CMA_HELPER
+ 	select DRM_PANEL
+ 	select DRM_PANEL_BRIDGE
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index 17831ee897ea4..1376992482b55 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -700,7 +700,6 @@ nouveau_display_create(struct drm_device *dev)
+ 
+ 	dev->mode_config.preferred_depth = 24;
+ 	dev->mode_config.prefer_shadow = 1;
+-	dev->mode_config.allow_fb_modifiers = true;
+ 
+ 	if (drm->client.device.info.chipset < 0x11)
+ 		dev->mode_config.async_page_flip = false;
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 652af7a134bd0..1d03ec7636047 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1325,6 +1325,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
+ 	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+ 	if (obj->import_attach) {
+ 		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
++		drm_gem_object_put(obj);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index efeb115ae70ec..40aece37e0b4e 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -386,13 +386,13 @@ radeon_pci_shutdown(struct pci_dev *pdev)
+ 	if (radeon_device_is_virtual())
+ 		radeon_pci_remove(pdev);
+ 
+-#ifdef CONFIG_PPC64
++#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
+ 	/*
+ 	 * Some adapters need to be suspended before a
+ 	 * shutdown occurs in order to prevent an error
+-	 * during kexec.
+-	 * Make this power specific becauase it breaks
+-	 * some non-power boards.
++	 * during kexec, shutdown or reboot.
++	 * Make this power and Loongson specific because
++	 * it breaks some other boards.
+ 	 */
+ 	radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
+ #endif
+diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+index d8c47ee3cad37..520a0a0cd2b56 100644
+--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
++++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+@@ -243,7 +243,6 @@ struct dw_mipi_dsi_rockchip {
+ 	struct dw_mipi_dsi *dmd;
+ 	const struct rockchip_dw_dsi_chip_data *cdata;
+ 	struct dw_mipi_dsi_plat_data pdata;
+-	int devcnt;
+ };
+ 
+ struct dphy_pll_parameter_map {
+@@ -1141,9 +1140,6 @@ static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
+ {
+ 	struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
+ 
+-	if (dsi->devcnt == 0)
+-		component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
+-
+ 	dw_mipi_dsi_remove(dsi->dmd);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+index 80053d91a301f..a6fe03c3748aa 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
++++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+@@ -349,8 +349,8 @@ static const struct vop_win_phy rk3066_win0_data = {
+ 	.nformats = ARRAY_SIZE(formats_win_full),
+ 	.format_modifiers = format_modifiers_win_full,
+ 	.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
+-	.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
+-	.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
++	.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 4),
++	.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 19),
+ 	.act_info = VOP_REG(RK3066_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ 	.dsp_info = VOP_REG(RK3066_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ 	.dsp_st = VOP_REG(RK3066_WIN0_DSP_ST, 0x1fff1fff, 0),
+@@ -361,13 +361,12 @@ static const struct vop_win_phy rk3066_win0_data = {
+ };
+ 
+ static const struct vop_win_phy rk3066_win1_data = {
+-	.scl = &rk3066_win_scl,
+ 	.data_formats = formats_win_full,
+ 	.nformats = ARRAY_SIZE(formats_win_full),
+ 	.format_modifiers = format_modifiers_win_full,
+ 	.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
+-	.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
+-	.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
++	.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 7),
++	.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 23),
+ 	.act_info = VOP_REG(RK3066_WIN1_ACT_INFO, 0x1fff1fff, 0),
+ 	.dsp_info = VOP_REG(RK3066_WIN1_DSP_INFO, 0x0fff0fff, 0),
+ 	.dsp_st = VOP_REG(RK3066_WIN1_DSP_ST, 0x1fff1fff, 0),
+@@ -382,8 +381,8 @@ static const struct vop_win_phy rk3066_win2_data = {
+ 	.nformats = ARRAY_SIZE(formats_win_lite),
+ 	.format_modifiers = format_modifiers_win_lite,
+ 	.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
+-	.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
+-	.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
++	.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 10),
++	.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 27),
+ 	.dsp_info = VOP_REG(RK3066_WIN2_DSP_INFO, 0x0fff0fff, 0),
+ 	.dsp_st = VOP_REG(RK3066_WIN2_DSP_ST, 0x1fff1fff, 0),
+ 	.yrgb_mst = VOP_REG(RK3066_WIN2_MST, 0xffffffff, 0),
+@@ -408,6 +407,9 @@ static const struct vop_common rk3066_common = {
+ 	.dither_down_en = VOP_REG(RK3066_DSP_CTRL0, 0x1, 11),
+ 	.dither_down_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 10),
+ 	.dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
++	.dither_up = VOP_REG(RK3066_DSP_CTRL0, 0x1, 9),
++	.dsp_lut_en = VOP_REG(RK3066_SYS_CTRL1, 0x1, 31),
++	.data_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 25),
+ };
+ 
+ static const struct vop_win_data rk3066_vop_win_data[] = {
+@@ -505,7 +507,10 @@ static const struct vop_common rk3188_common = {
+ 	.dither_down_sel = VOP_REG(RK3188_DSP_CTRL0, 0x1, 27),
+ 	.dither_down_en = VOP_REG(RK3188_DSP_CTRL0, 0x1, 11),
+ 	.dither_down_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 10),
+-	.dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x3, 24),
++	.dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 24),
++	.dither_up = VOP_REG(RK3188_DSP_CTRL0, 0x1, 9),
++	.dsp_lut_en = VOP_REG(RK3188_SYS_CTRL, 0x1, 28),
++	.data_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 25),
+ };
+ 
+ static const struct vop_win_data rk3188_vop_win_data[] = {
+diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
+index c1ac3e4003c6f..1b2fdf7f3ccd8 100644
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -116,7 +116,8 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
+ 	rmb(); /* for list_empty to work without lock */
+ 
+ 	if (list_empty(&entity->list) ||
+-	    spsc_queue_count(&entity->job_queue) == 0)
++	    spsc_queue_count(&entity->job_queue) == 0 ||
++	    entity->stopped)
+ 		return true;
+ 
+ 	return false;
+@@ -221,11 +222,16 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
+ {
+ 	struct drm_sched_job *job;
++	struct dma_fence *f;
+ 	int r;
+ 
+ 	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+ 		struct drm_sched_fence *s_fence = job->s_fence;
+ 
++		/* Wait for all dependencies to avoid data corruption */
++		while ((f = job->sched->ops->dependency(job, entity)))
++			dma_fence_wait(f, false);
++
+ 		drm_sched_fence_scheduled(s_fence);
+ 		dma_fence_set_error(&s_fence->finished, -ESRCH);
+ 
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index 92637b70c9bf6..16244e9669b90 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -896,9 +896,33 @@ EXPORT_SYMBOL(drm_sched_init);
+  */
+ void drm_sched_fini(struct drm_gpu_scheduler *sched)
+ {
++	struct drm_sched_entity *s_entity;
++	int i;
++
+ 	if (sched->thread)
+ 		kthread_stop(sched->thread);
+ 
++	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
++		struct drm_sched_rq *rq = &sched->sched_rq[i];
++
++		if (!rq)
++			continue;
++
++		spin_lock(&rq->lock);
++		list_for_each_entry(s_entity, &rq->entities, list)
++			/*
++			 * Prevents reinsertion and marks job_queue as idle;
++			 * the entity will be removed from the rq in
++			 * drm_sched_entity_fini eventually.
++			 */
++			s_entity->stopped = true;
++		spin_unlock(&rq->lock);
++
++	}
++
++	/* Wake up everyone stuck in drm_sched_entity_flush for this scheduler */
++	wake_up_all(&sched->job_scheduled);
++
+ 	/* Confirm no work left behind accessing device structures */
+ 	cancel_delayed_work_sync(&sched->work_tdr);
+ 
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index 134986dc2783f..2c0510bbd0fbd 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -947,6 +947,11 @@ static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
+ 	.atomic_disable = tegra_cursor_atomic_disable,
+ };
+ 
++static const uint64_t linear_modifiers[] = {
++	DRM_FORMAT_MOD_LINEAR,
++	DRM_FORMAT_MOD_INVALID
++};
++
+ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
+ 						      struct tegra_dc *dc)
+ {
+@@ -975,7 +980,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
+ 
+ 	err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
+ 				       &tegra_plane_funcs, formats,
+-				       num_formats, NULL,
++				       num_formats, linear_modifiers,
+ 				       DRM_PLANE_TYPE_CURSOR, NULL);
+ 	if (err < 0) {
+ 		kfree(plane);
+@@ -1094,7 +1099,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
+ 
+ 	err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
+ 				       &tegra_plane_funcs, formats,
+-				       num_formats, NULL, type, NULL);
++				       num_formats, linear_modifiers,
++				       type, NULL);
+ 	if (err < 0) {
+ 		kfree(plane);
+ 		return ERR_PTR(err);
+diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
+index e9ce7d6992d21..e3d4fd76b6a0b 100644
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -1122,8 +1122,6 @@ static int host1x_drm_probe(struct host1x_device *dev)
+ 	drm->mode_config.max_width = 4096;
+ 	drm->mode_config.max_height = 4096;
+ 
+-	drm->mode_config.allow_fb_modifiers = true;
+-
+ 	drm->mode_config.normalize_zpos = true;
+ 
+ 	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index 1f36b67cd6ce9..18f5009ce90e3 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -1035,7 +1035,7 @@ static const struct vc4_pv_data bcm2711_pv3_data = {
+ 	.fifo_depth = 64,
+ 	.pixels_per_clock = 1,
+ 	.encoder_types = {
+-		[0] = VC4_ENCODER_TYPE_VEC,
++		[PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
+ 	},
+ };
+ 
+@@ -1076,6 +1076,9 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
+ 		struct vc4_encoder *vc4_encoder;
+ 		int i;
+ 
++		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
++			continue;
++
+ 		vc4_encoder = to_vc4_encoder(encoder);
+ 		for (i = 0; i < ARRAY_SIZE(pv_data->encoder_types); i++) {
+ 			if (vc4_encoder->type == encoder_types[i]) {
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
+index a7500716cf3f1..5dceadc61600e 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.h
++++ b/drivers/gpu/drm/vc4/vc4_drv.h
+@@ -825,7 +825,7 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
+ void vc4_crtc_reset(struct drm_crtc *crtc);
+ void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
+ void vc4_crtc_get_margins(struct drm_crtc_state *state,
+-			  unsigned int *right, unsigned int *left,
++			  unsigned int *left, unsigned int *right,
+ 			  unsigned int *top, unsigned int *bottom);
+ 
+ /* vc4_debugfs.c */
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index e94730beb15b7..188b74c9e9fff 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -745,7 +745,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ 	unsigned long pixel_rate, hsm_rate;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(&vc4_hdmi->pdev->dev);
++	ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+ 	if (ret < 0) {
+ 		DRM_ERROR("Failed to retain power domain: %d\n", ret);
+ 		return;
+@@ -2012,6 +2012,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+ 	if (vc4_hdmi->variant->reset)
+ 		vc4_hdmi->variant->reset(vc4_hdmi);
+ 
++	if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
++	     of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
++	    HDMI_READ(HDMI_VID_CTL) & VC4_HD_VID_CTL_ENABLE) {
++		clk_prepare_enable(vc4_hdmi->pixel_clock);
++		clk_prepare_enable(vc4_hdmi->hsm_clock);
++		clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
++	}
++
+ 	pm_runtime_enable(dev);
+ 
+ 	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
+index c0122d83b6511..2fc7f4b5fa098 100644
+--- a/drivers/gpu/drm/vc4/vc4_txp.c
++++ b/drivers/gpu/drm/vc4/vc4_txp.c
+@@ -507,7 +507,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
+ 		return ret;
+ 
+ 	encoder = &txp->connector.encoder;
+-	encoder->possible_crtcs |= drm_crtc_mask(crtc);
++	encoder->possible_crtcs = drm_crtc_mask(crtc);
+ 
+ 	ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
+ 			       dev_name(dev), txp);
+diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
+index b375394193be8..37a21a88d674c 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
++++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
+@@ -234,6 +234,7 @@ err_scanouts:
+ err_vbufs:
+ 	vgdev->vdev->config->del_vqs(vgdev->vdev);
+ err_vqs:
++	dev->dev_private = NULL;
+ 	kfree(vgdev);
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
+index 90ebaedc11fdf..aa8594190b509 100644
+--- a/drivers/gpu/drm/zte/Kconfig
++++ b/drivers/gpu/drm/zte/Kconfig
+@@ -3,7 +3,6 @@ config DRM_ZTE
+ 	tristate "DRM Support for ZTE SoCs"
+ 	depends on DRM && ARCH_ZX
+ 	select DRM_KMS_CMA_HELPER
+-	select DRM_KMS_FB_HELPER
+ 	select DRM_KMS_HELPER
+ 	select SND_SOC_HDMI_CODEC if SND_SOC
+ 	select VIDEOMODE_HELPERS
+diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
+index 237a8c0d6c244..30271d9e470de 100644
+--- a/drivers/hwtracing/coresight/coresight-core.c
++++ b/drivers/hwtracing/coresight/coresight-core.c
+@@ -1367,7 +1367,7 @@ static int coresight_fixup_device_conns(struct coresight_device *csdev)
+ 		}
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int coresight_remove_match(struct device *dev, void *data)
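
The coresight change above stops swallowing errors: the loop records failures in ret, but the function then returned a hard-coded 0. A trivial runnable sketch of the pattern:

#include <stdio.h>

static int fixup_all(int nconns)
{
	int ret = 0;

	for (int i = 0; i < nconns; i++)
		if (i == 1)		/* pretend one connection fails */
			ret = -19;	/* -ENODEV */

	return ret;			/* was: return 0 */
}

int main(void)
{
	printf("%d\n", fixup_all(3));	/* prints -19, not 0 */
	return 0;
}
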
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+index 45b85edfc6900..cd0fb7bfba684 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+@@ -530,7 +530,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
+ 		buf_ptr = buf->data_pages[cur] + offset;
+ 		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
+ 
+-		if (lost && *barrier) {
++		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
+ 			*buf_ptr = *barrier;
+ 			barrier++;
+ 		}
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index bb46f794f3240..8d94a6bfcac12 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2793,7 +2793,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
+ 
+ 	cma_init_resolve_route_work(work, id_priv);
+ 
+-	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
++	if (!route->path_rec)
++		route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
+ 	if (!route->path_rec) {
+ 		ret = -ENOMEM;
+ 		goto err1;
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index d109bb3822a5f..c9403743346e1 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -295,6 +295,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ 	if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
+ 		pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
+ 			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
++		ret = -EINVAL;
+ 		goto free_dma;
+ 	}
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
+index 9f63947bab123..fe2b7d223183f 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mr.c
++++ b/drivers/infiniband/sw/rxe/rxe_mr.c
+@@ -135,7 +135,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+ 	if (IS_ERR(umem)) {
+ 		pr_warn("err %d from rxe_umem_get\n",
+ 			(int)PTR_ERR(umem));
+-		err = -EINVAL;
++		err = PTR_ERR(umem);
+ 		goto err1;
+ 	}
+ 
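
Using PTR_ERR() above preserves the real cause (for example -EFAULT or -ENOMEM from pinning user memory) instead of collapsing everything to -EINVAL. A fragment of the idiom, assuming the 5.12-era ib_umem_get() signature:

	umem = ib_umem_get(pd->ibpd.device, start, length, access);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);	/* was: err = -EINVAL */
		goto err1;
	}
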
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 18266f07c58d9..de3fc05fd2e8a 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -35,10 +35,10 @@ static const struct kernel_param_ops sg_tablesize_ops = {
+ 	.get = param_get_int,
+ };
+ 
+-static int isert_sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE;
++static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE;
+ module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
+ MODULE_PARM_DESC(sg_tablesize,
+-		 "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 256, max: 4096)");
++		 "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)");
+ 
+ static DEFINE_MUTEX(device_list_mutex);
+ static LIST_HEAD(device_list);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index 6c5af13db4e0d..ca8cfebe26ca7 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -65,9 +65,6 @@
+  */
+ #define ISER_RX_SIZE		(ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
+ 
+-/* Default I/O size is 1MB */
+-#define ISCSI_ISER_DEF_SG_TABLESIZE 256
+-
+ /* Minimum I/O size is 512KB */
+ #define ISCSI_ISER_MIN_SG_TABLESIZE 128
+ 
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+index 8caad0a2322bf..51c60f5428761 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
++++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+@@ -47,12 +47,15 @@ enum {
+ 	MAX_PATHS_NUM = 128,
+ 
+ 	/*
+-	 * With the size of struct rtrs_permit allocated on the client, 4K
+-	 * is the maximum number of rtrs_permits we can allocate. This number is
+-	 * also used on the client to allocate the IU for the user connection
+-	 * to receive the RDMA addresses from the server.
++	 * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
++	 * and the minimum chunk size is 4096 (2^12).
++	 * So the maximum sess_queue_depth is 65536 (2^16) in theory.
++	 * But mempool_create, create_qp and ib_post_send fail with
++	 * "cannot allocate memory" error if sess_queue_depth is too big.
++	 * Therefore the practical max value of sess_queue_depth is
++	 * somewhere between 1 and 65536 and it depends on the system.
+ 	 */
+-	MAX_SESS_QUEUE_DEPTH = 4096,
++	MAX_SESS_QUEUE_DEPTH = 65536,
+ 
+ 	RTRS_HB_INTERVAL_MS = 5000,
+ 	RTRS_HB_MISSED_MAX = 5,
+diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
+index ec71063fff76a..e1822e87ec3d2 100644
+--- a/drivers/ipack/carriers/tpci200.c
++++ b/drivers/ipack/carriers/tpci200.c
+@@ -596,8 +596,11 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
+ 
+ out_err_bus_register:
+ 	tpci200_uninstall(tpci200);
++	/* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
++	tpci200->info->cfg_regs = NULL;
+ out_err_install:
+-	iounmap(tpci200->info->cfg_regs);
++	if (tpci200->info->cfg_regs)
++		iounmap(tpci200->info->cfg_regs);
+ out_err_ioremap:
+ 	pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
+ out_err_pci_request:
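
The tpci200 fix avoids a double iounmap: tpci200_uninstall() already unmaps cfg_regs, so falling through to the next error label unmapped it again. NULLing the pointer and testing it before the second unmap is the usual cure; a fragment with hypothetical names:

out_err_bus_register:
	demo_uninstall(dev);		/* iounmaps dev->cfg_regs itself */
	dev->cfg_regs = NULL;		/* make the fallthrough a no-op */
out_err_install:
	if (dev->cfg_regs)
		iounmap(dev->cfg_regs);
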
+diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
+index 56bd2e9db6ed6..e501cb03f211d 100644
+--- a/drivers/isdn/hardware/mISDN/hfcpci.c
++++ b/drivers/isdn/hardware/mISDN/hfcpci.c
+@@ -2342,7 +2342,7 @@ static void __exit
+ HFC_cleanup(void)
+ {
+ 	if (timer_pending(&hfc_tl))
+-		del_timer(&hfc_tl);
++		del_timer_sync(&hfc_tl);
+ 
+ 	pci_unregister_driver(&hfc_driver);
+ }
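
del_timer_sync() is needed in module exit because del_timer() only removes a pending timer and does not wait for a handler that is already running on another CPU; unloading the module while the handler runs would be a use-after-free. Sketch of the shape (fragment, hypothetical names):

static void __exit demo_cleanup(void)
{
	del_timer_sync(&demo_timer);	/* waits for a running handler */
	pci_unregister_driver(&demo_driver);
}
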
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index 4f72b6f66c3ae..9e9f1ee141652 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -532,7 +532,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
+ 
+ 	region.bdev = wc->ssd_dev->bdev;
+ 	region.sector = 0;
+-	region.count = PAGE_SIZE >> SECTOR_SHIFT;
++	region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT;
+ 
+ 	if (unlikely(region.sector + region.count > wc->metadata_sectors))
+ 		region.count = wc->metadata_sectors - region.sector;
+@@ -1301,8 +1301,12 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
+ 			writecache_flush(wc);
+ 			if (writecache_has_error(wc))
+ 				goto unlock_error;
++			if (unlikely(wc->cleaner))
++				goto unlock_remap_origin;
+ 			goto unlock_submit;
+ 		} else {
++			if (dm_bio_get_target_bio_nr(bio))
++				goto unlock_remap_origin;
+ 			writecache_offload_bio(wc, bio);
+ 			goto unlock_return;
+ 		}
+@@ -1360,14 +1364,18 @@ read_next_block:
+ 	} else {
+ 		do {
+ 			bool found_entry = false;
++			bool search_used = false;
+ 			if (writecache_has_error(wc))
+ 				goto unlock_error;
+ 			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
+ 			if (e) {
+-				if (!writecache_entry_is_committed(wc, e))
++				if (!writecache_entry_is_committed(wc, e)) {
++					search_used = true;
+ 					goto bio_copy;
++				}
+ 				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
+ 					wc->overwrote_committed = true;
++					search_used = true;
+ 					goto bio_copy;
+ 				}
+ 				found_entry = true;
+@@ -1377,7 +1385,7 @@ read_next_block:
+ 			}
+ 			e = writecache_pop_from_freelist(wc, (sector_t)-1);
+ 			if (unlikely(!e)) {
+-				if (!found_entry) {
++				if (!WC_MODE_PMEM(wc) && !found_entry) {
+ direct_write:
+ 					e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
+ 					if (e) {
+@@ -1404,13 +1412,31 @@ bio_copy:
+ 				sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
+ 
+ 				while (bio_size < bio->bi_iter.bi_size) {
+-					struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+-					if (!f)
+-						break;
+-					write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
+-									(bio_size >> SECTOR_SHIFT), wc->seq_count);
+-					writecache_insert_entry(wc, f);
+-					wc->uncommitted_blocks++;
++					if (!search_used) {
++						struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
++						if (!f)
++							break;
++						write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
++										(bio_size >> SECTOR_SHIFT), wc->seq_count);
++						writecache_insert_entry(wc, f);
++						wc->uncommitted_blocks++;
++					} else {
++						struct wc_entry *f;
++						struct rb_node *next = rb_next(&e->rb_node);
++						if (!next)
++							break;
++						f = container_of(next, struct wc_entry, rb_node);
++						if (f != e + 1)
++							break;
++						if (read_original_sector(wc, f) !=
++						    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
++							break;
++						if (unlikely(f->write_in_progress))
++							break;
++						if (writecache_entry_is_committed(wc, f))
++							wc->overwrote_committed = true;
++						e = f;
++					}
+ 					bio_size += wc->block_size;
+ 					current_cache_sec += wc->block_size >> SECTOR_SHIFT;
+ 				}
+@@ -2463,7 +2489,7 @@ overflow:
+ 		goto bad;
+ 	}
+ 
+-	ti->num_flush_bios = 1;
++	ti->num_flush_bios = WC_MODE_PMEM(wc) ? 1 : 2;
+ 	ti->flush_supported = true;
+ 	ti->num_discard_bios = 1;
+ 
+diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
+index 039d17b289384..ee4626d085574 100644
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -1390,6 +1390,13 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
+ 		return -ENXIO;
+ 	}
+ 
++	/*
++	 * Devices that have zones with a capacity smaller than the zone size
++	 * (e.g. NVMe zoned namespaces) are not supported.
++	 */
++	if (blkz->capacity != blkz->len)
++		return -ENXIO;
++
+ 	switch (blkz->type) {
+ 	case BLK_ZONE_TYPE_CONVENTIONAL:
+ 		set_bit(DMZ_RND, &zone->flags);
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 3f3be9408afa7..05747d2736a77 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1239,8 +1239,8 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ 
+ /*
+  * A target may call dm_accept_partial_bio only from the map routine.  It is
+- * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
+- * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
++ * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
++ * operations and REQ_OP_ZONE_APPEND (zone append writes).
+  *
+  * dm_accept_partial_bio informs the dm that the target only wants to process
+  * additional n_sectors sectors of the bio and the rest of the data should be
+@@ -1270,9 +1270,13 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+ {
+ 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
++
+ 	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
++	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
++	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
+ 	BUG_ON(bi_size > *tio->len_ptr);
+ 	BUG_ON(n_sectors > bi_size);
++
+ 	*tio->len_ptr -= bi_size - n_sectors;
+ 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
+ }
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index eff04fa23dfad..9e4d1212f4c16 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -549,7 +549,8 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ 		delete_at(n, index);
+ 	}
+ 
+-	*new_root = shadow_root(&spine);
++	if (!r)
++		*new_root = shadow_root(&spine);
+ 	exit_shadow_spine(&spine);
+ 
+ 	return r;
+diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
+index bf4c5e2ccb6ff..e0acae7a3815d 100644
+--- a/drivers/md/persistent-data/dm-space-map-disk.c
++++ b/drivers/md/persistent-data/dm-space-map-disk.c
+@@ -171,6 +171,14 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
+ 	 * Any block we allocate has to be free in both the old and current ll.
+ 	 */
+ 	r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
++	if (r == -ENOSPC) {
++		/*
++		 * There's no free block between smd->begin and the end of the metadata device.
++		 * We search before smd->begin in case something has been freed.
++		 */
++		r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, 0, smd->begin, b);
++	}
++
+ 	if (r)
+ 		return r;
+ 
+@@ -199,7 +207,6 @@ static int sm_disk_commit(struct dm_space_map *sm)
+ 		return r;
+ 
+ 	memcpy(&smd->old_ll, &smd->ll, sizeof(smd->old_ll));
+-	smd->begin = 0;
+ 	smd->nr_allocated_this_transaction = 0;
+ 
+ 	r = sm_disk_get_nr_free(sm, &nr_free);
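
The -ENOSPC retry above implements a wrap-around search: since commit-time no longer resets smd->begin to 0, blocks freed behind the allocation cursor would otherwise never be found again. A runnable toy model of the two-pass search:

#include <stdio.h>

#define ENOSPC 28
#define NBLK 8

static int find_free(const int *used, int begin, int end, int *out)
{
	for (int i = begin; i < end; i++)
		if (!used[i]) { *out = i; return 0; }
	return -ENOSPC;
}

int main(void)
{
	int used[NBLK] = { 1, 0, 1, 1, 1, 1, 1, 1 };
	int begin = 4, b = -1;
	int r = find_free(used, begin, NBLK, &b);

	if (r == -ENOSPC)		/* wrap: retry before the cursor */
		r = find_free(used, 0, begin, &b);
	printf("r=%d block=%d\n", r, b);	/* r=0 block=1 */
	return 0;
}
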
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 9e3c64ec2026f..da439ac857963 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -452,6 +452,14 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
+ 	 * Any block we allocate has to be free in both the old and current ll.
+ 	 */
+ 	r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
++	if (r == -ENOSPC) {
++		/*
++		 * There's no free block between smm->begin and the end of the metadata device.
++		 * We search before smm->begin in case something has been freed.
++		 */
++		r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, 0, smm->begin, b);
++	}
++
+ 	if (r)
+ 		return r;
+ 
+@@ -503,7 +511,6 @@ static int sm_metadata_commit(struct dm_space_map *sm)
+ 		return r;
+ 
+ 	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
+-	smm->begin = 0;
+ 	smm->allocated_this_transaction = 0;
+ 
+ 	return 0;
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index fde0c51f04069..220240f62a666 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -1880,21 +1880,33 @@ static int ccs_pm_get_init(struct ccs_sensor *sensor)
+ 	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ 	int rval;
+ 
++	/*
++	 * We can't use pm_runtime_resume_and_get() here, as the driver
++	 * relies on the returned value to detect whether the device was
++	 * already active or not.
++	 */
+ 	rval = pm_runtime_get_sync(&client->dev);
+-	if (rval < 0) {
+-		pm_runtime_put_noidle(&client->dev);
++	if (rval < 0)
++		goto error;
+ 
+-		return rval;
+-	} else if (!rval) {
+-		rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->
+-					       ctrl_handler);
+-		if (rval)
+-			return rval;
++	/* Device was already active, so don't set controls */
++	if (rval == 1)
++		return 0;
+ 
+-		return v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
+-	}
++	/* Restore V4L2 controls to the previously suspended device */
++	rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->ctrl_handler);
++	if (rval)
++		goto error;
+ 
++	rval = v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
++	if (rval)
++		goto error;
++
++	/* Keep PM runtime usage_count incremented on success */
+ 	return 0;
++error:
++	pm_runtime_put(&client->dev);
++	return rval;
+ }
+ 
+ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
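
The reworked ccs_pm_get_init() leans on a subtlety the new comment calls out: pm_runtime_get_sync() returns 1 when the device was already powered, and the driver uses that to skip re-applying V4L2 controls, which is exactly the information pm_runtime_resume_and_get() discards. Sketch of the shape (fragment):

	rval = pm_runtime_get_sync(&client->dev);
	if (rval < 0)
		goto error;	/* resume failed */
	if (rval == 1)
		return 0;	/* was already active: controls still set */
	/* freshly resumed: restore controls here, keep the count held */
	return 0;
error:
	pm_runtime_put(&client->dev);
	return rval;
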
+diff --git a/drivers/media/i2c/ccs/ccs-limits.c b/drivers/media/i2c/ccs/ccs-limits.c
+index f5511789ac837..4969fa425317d 100644
+--- a/drivers/media/i2c/ccs/ccs-limits.c
++++ b/drivers/media/i2c/ccs/ccs-limits.c
+@@ -1,5 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+ /* Copyright (C) 2019--2020 Intel Corporation */
++/*
++ * Generated by Documentation/driver-api/media/drivers/ccs/mk-ccs-regs;
++ * do not modify.
++ */
+ 
+ #include "ccs-limits.h"
+ #include "ccs-regs.h"
+diff --git a/drivers/media/i2c/ccs/ccs-limits.h b/drivers/media/i2c/ccs/ccs-limits.h
+index 1efa43c23a2eb..551d3ee9d04e1 100644
+--- a/drivers/media/i2c/ccs/ccs-limits.h
++++ b/drivers/media/i2c/ccs/ccs-limits.h
+@@ -1,5 +1,9 @@
+ /* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+ /* Copyright (C) 2019--2020 Intel Corporation */
++/*
++ * Generated by Documentation/driver-api/media/drivers/ccs/mk-ccs-regs;
++ * do not modify.
++ */
+ 
+ #ifndef __CCS_LIMITS_H__
+ #define __CCS_LIMITS_H__
+diff --git a/drivers/media/i2c/ccs/ccs-regs.h b/drivers/media/i2c/ccs/ccs-regs.h
+index 4b3e5df2121f8..6ce84c5ecf207 100644
+--- a/drivers/media/i2c/ccs/ccs-regs.h
++++ b/drivers/media/i2c/ccs/ccs-regs.h
+@@ -1,5 +1,9 @@
+ /* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+ /* Copyright (C) 2019--2020 Intel Corporation */
++/*
++ * Generated by Documentation/driver-api/media/drivers/ccs/mk-ccs-regs;
++ * do not modify.
++ */
+ 
+ #ifndef __CCS_REGS_H__
+ #define __CCS_REGS_H__
+@@ -202,7 +206,7 @@
+ #define CCS_R_OP_PIX_CLK_DIV					(0x0308 | CCS_FL_16BIT)
+ #define CCS_R_OP_SYS_CLK_DIV					(0x030a | CCS_FL_16BIT)
+ #define CCS_R_OP_PRE_PLL_CLK_DIV				(0x030c | CCS_FL_16BIT)
+-#define CCS_R_OP_PLL_MULTIPLIER					(0x031e | CCS_FL_16BIT)
++#define CCS_R_OP_PLL_MULTIPLIER					(0x030e | CCS_FL_16BIT)
+ #define CCS_R_PLL_MODE						0x0310
+ #define CCS_PLL_MODE_SHIFT					0U
+ #define CCS_PLL_MODE_MASK					0x1
+diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
+index ecb491d5f2ab8..d1e0716bdfffd 100644
+--- a/drivers/media/i2c/saa6588.c
++++ b/drivers/media/i2c/saa6588.c
+@@ -380,7 +380,7 @@ static void saa6588_configure(struct saa6588 *s)
+ 
+ /* ---------------------------------------------------------------------- */
+ 
+-static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
++static long saa6588_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+ {
+ 	struct saa6588 *s = to_saa6588(sd);
+ 	struct saa6588_command *a = arg;
+@@ -433,7 +433,7 @@ static int saa6588_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
+ /* ----------------------------------------------------------------------- */
+ 
+ static const struct v4l2_subdev_core_ops saa6588_core_ops = {
+-	.ioctl = saa6588_ioctl,
++	.command = saa6588_command,
+ };
+ 
+ static const struct v4l2_subdev_tuner_ops saa6588_tuner_ops = {
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index 1f62a9d8ea1d3..0e9df8b35ac66 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -3179,7 +3179,7 @@ static int radio_release(struct file *file)
+ 
+ 	btv->radio_user--;
+ 
+-	bttv_call_all(btv, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
++	bttv_call_all(btv, core, command, SAA6588_CMD_CLOSE, &cmd);
+ 
+ 	if (btv->radio_user == 0)
+ 		btv->has_radio_tuner = 0;
+@@ -3260,7 +3260,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
+ 	cmd.result = -ENODEV;
+ 	radio_enable(btv);
+ 
+-	bttv_call_all(btv, core, ioctl, SAA6588_CMD_READ, &cmd);
++	bttv_call_all(btv, core, command, SAA6588_CMD_READ, &cmd);
+ 
+ 	return cmd.result;
+ }
+@@ -3281,7 +3281,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
+ 	cmd.instance = file;
+ 	cmd.event_list = wait;
+ 	cmd.poll_mask = res;
+-	bttv_call_all(btv, core, ioctl, SAA6588_CMD_POLL, &cmd);
++	bttv_call_all(btv, core, command, SAA6588_CMD_POLL, &cmd);
+ 
+ 	return cmd.poll_mask;
+ }
+diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
+index 0f9d6b9edb90a..374c8e1087de1 100644
+--- a/drivers/media/pci/saa7134/saa7134-video.c
++++ b/drivers/media/pci/saa7134/saa7134-video.c
+@@ -1181,7 +1181,7 @@ static int video_release(struct file *file)
+ 
+ 	saa_call_all(dev, tuner, standby);
+ 	if (vdev->vfl_type == VFL_TYPE_RADIO)
+-		saa_call_all(dev, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
++		saa_call_all(dev, core, command, SAA6588_CMD_CLOSE, &cmd);
+ 	mutex_unlock(&dev->lock);
+ 
+ 	return 0;
+@@ -1200,7 +1200,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
+ 	cmd.result = -ENODEV;
+ 
+ 	mutex_lock(&dev->lock);
+-	saa_call_all(dev, core, ioctl, SAA6588_CMD_READ, &cmd);
++	saa_call_all(dev, core, command, SAA6588_CMD_READ, &cmd);
+ 	mutex_unlock(&dev->lock);
+ 
+ 	return cmd.result;
+@@ -1216,7 +1216,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
+ 	cmd.event_list = wait;
+ 	cmd.poll_mask = 0;
+ 	mutex_lock(&dev->lock);
+-	saa_call_all(dev, core, ioctl, SAA6588_CMD_POLL, &cmd);
++	saa_call_all(dev, core, command, SAA6588_CMD_POLL, &cmd);
+ 	mutex_unlock(&dev->lock);
+ 
+ 	return rc | cmd.poll_mask;
+diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
+index d19bad997f30c..bf3c3e76b9213 100644
+--- a/drivers/media/platform/davinci/vpbe_display.c
++++ b/drivers/media/platform/davinci/vpbe_display.c
+@@ -47,7 +47,7 @@ static int venc_is_second_field(struct vpbe_display *disp_dev)
+ 
+ 	ret = v4l2_subdev_call(vpbe_dev->venc,
+ 			       core,
+-			       ioctl,
++			       command,
+ 			       VENC_GET_FLD,
+ 			       &val);
+ 	if (ret < 0) {
+diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
+index 8caa084e57046..bde241c26d795 100644
+--- a/drivers/media/platform/davinci/vpbe_venc.c
++++ b/drivers/media/platform/davinci/vpbe_venc.c
+@@ -521,9 +521,7 @@ static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
+ 	return ret;
+ }
+ 
+-static long venc_ioctl(struct v4l2_subdev *sd,
+-			unsigned int cmd,
+-			void *arg)
++static long venc_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+ {
+ 	u32 val;
+ 
+@@ -542,7 +540,7 @@ static long venc_ioctl(struct v4l2_subdev *sd,
+ }
+ 
+ static const struct v4l2_subdev_core_ops venc_core_ops = {
+-	.ioctl      = venc_ioctl,
++	.command      = venc_command,
+ };
+ 
+ static const struct v4l2_subdev_video_ops venc_video_ops = {
+diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
+index 3fe3edd808765..afae0afe3f810 100644
+--- a/drivers/media/rc/bpf-lirc.c
++++ b/drivers/media/rc/bpf-lirc.c
+@@ -326,7 +326,8 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
+ 	}
+ 
+ 	if (attr->query.prog_cnt != 0 && prog_ids && cnt)
+-		ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);
++		ret = bpf_prog_array_copy_to_user(progs, prog_ids,
++						  attr->query.prog_cnt);
+ 
+ unlock:
+ 	mutex_unlock(&ir_raw_handler_lock);
+diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
+index fba06932a9e0e..1c13e493322cc 100644
+--- a/drivers/media/usb/dvb-usb/dtv5100.c
++++ b/drivers/media/usb/dvb-usb/dtv5100.c
+@@ -26,6 +26,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
+ 			   u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
+ {
+ 	struct dtv5100_state *st = d->priv;
++	unsigned int pipe;
+ 	u8 request;
+ 	u8 type;
+ 	u16 value;
+@@ -34,6 +35,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
+ 	switch (wlen) {
+ 	case 1:
+ 		/* write { reg }, read { value } */
++		pipe = usb_rcvctrlpipe(d->udev, 0);
+ 		request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_READ :
+ 							DTV5100_TUNER_READ);
+ 		type = USB_TYPE_VENDOR | USB_DIR_IN;
+@@ -41,6 +43,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
+ 		break;
+ 	case 2:
+ 		/* write { reg, value } */
++		pipe = usb_sndctrlpipe(d->udev, 0);
+ 		request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_WRITE :
+ 							DTV5100_TUNER_WRITE);
+ 		type = USB_TYPE_VENDOR | USB_DIR_OUT;
+@@ -54,7 +57,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
+ 
+ 	memcpy(st->data, rbuf, rlen);
+ 	msleep(1); /* avoid I2C errors */
+-	return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
++	return usb_control_msg(d->udev, pipe, request,
+ 			       type, value, index, st->data, rlen,
+ 			       DTV5100_USB_TIMEOUT);
+ }
+@@ -141,7 +144,7 @@ static int dtv5100_probe(struct usb_interface *intf,
+ 
+ 	/* initialize non qt1010/zl10353 part? */
+ 	for (i = 0; dtv5100_init[i].request; i++) {
+-		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
++		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ 				      dtv5100_init[i].request,
+ 				      USB_TYPE_VENDOR | USB_DIR_OUT,
+ 				      dtv5100_init[i].value,
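[Editor's note] The dtv5100 pipe fixes above follow one rule: the control pipe handed to usb_control_msg() must match the direction encoded in the request type. A minimal sketch of that pairing (function and parameter names are illustrative, not the driver's):

	static int vendor_ctrl_xfer(struct usb_device *udev, u8 request,
				    u16 value, u16 index, void *buf, u16 len,
				    bool dir_in)
	{
		/* read -> receive pipe + DIR_IN, write -> send pipe + DIR_OUT */
		unsigned int pipe = dir_in ? usb_rcvctrlpipe(udev, 0)
					   : usb_sndctrlpipe(udev, 0);
		u8 type = USB_TYPE_VENDOR | (dir_in ? USB_DIR_IN : USB_DIR_OUT);

		return usb_control_msg(udev, pipe, request, type, value, index,
				       buf, len, 1000 /* ms timeout */);
	}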
+diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
+index 9491110709718..32504ebcfd4de 100644
+--- a/drivers/media/usb/gspca/sq905.c
++++ b/drivers/media/usb/gspca/sq905.c
+@@ -116,7 +116,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
+ 	}
+ 
+ 	ret = usb_control_msg(gspca_dev->dev,
+-			      usb_sndctrlpipe(gspca_dev->dev, 0),
++			      usb_rcvctrlpipe(gspca_dev->dev, 0),
+ 			      USB_REQ_SYNCH_FRAME,                /* request */
+ 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 			      SQ905_PING, 0, gspca_dev->usb_buf, 1,
+diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
+index ace3da40006e7..971dee0a56dae 100644
+--- a/drivers/media/usb/gspca/sunplus.c
++++ b/drivers/media/usb/gspca/sunplus.c
+@@ -242,6 +242,10 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ 		gspca_err(gspca_dev, "reg_r: buffer overflow\n");
+ 		return;
+ 	}
++	if (len == 0) {
++		gspca_err(gspca_dev, "reg_r: zero-length read\n");
++		return;
++	}
+ 	if (gspca_dev->usb_err < 0)
+ 		return;
+ 	ret = usb_control_msg(gspca_dev->dev,
+@@ -250,7 +254,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ 			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 			0,		/* value */
+ 			index,
+-			len ? gspca_dev->usb_buf : NULL, len,
++			gspca_dev->usb_buf, len,
+ 			500);
+ 	if (ret < 0) {
+ 		pr_err("reg_r err %d\n", ret);
+@@ -727,7 +731,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
+ 		case MegaImageVI:
+ 			reg_w_riv(gspca_dev, 0xf0, 0, 0);
+ 			spca504B_WaitCmdStatus(gspca_dev);
+-			reg_r(gspca_dev, 0xf0, 4, 0);
++			reg_w_riv(gspca_dev, 0xf0, 4, 0);
+ 			spca504B_WaitCmdStatus(gspca_dev);
+ 			break;
+ 		default:
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index f2f565281e63f..9046d8ba7c634 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -124,10 +124,37 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
+ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
+ 	struct uvc_streaming_control *ctrl)
+ {
++	static const struct usb_device_id elgato_cam_link_4k = {
++		USB_DEVICE(0x0fd9, 0x0066)
++	};
+ 	struct uvc_format *format = NULL;
+ 	struct uvc_frame *frame = NULL;
+ 	unsigned int i;
+ 
++	/*
++	 * The response of the Elgato Cam Link 4K is incorrect: The second byte
++	 * contains bFormatIndex (instead of being the second byte of bmHint).
++	 * The first byte is always zero. The third byte is always 1.
++	 *
++	 * The UVC 1.5 class specification defines the first five bits in the
++	 * bmHint bitfield. The remaining bits are reserved and should be zero.
++	 * Therefore a valid bmHint will be less than 32.
++	 *
++	 * Latest Elgato Cam Link 4K firmware as of 2021-03-23 needs this fix.
++	 * MCU: 20.02.19, FPGA: 67
++	 */
++	if (usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k) &&
++	    ctrl->bmHint > 255) {
++		u8 corrected_format_index = ctrl->bmHint >> 8;
++
++		uvc_dbg(stream->dev, VIDEO,
++			"Correct USB video probe response from {bmHint: 0x%04x, bFormatIndex: %u} to {bmHint: 0x%04x, bFormatIndex: %u}\n",
++			ctrl->bmHint, ctrl->bFormatIndex,
++			1, corrected_format_index);
++		ctrl->bmHint = 1;
++		ctrl->bFormatIndex = corrected_format_index;
++	}
++
+ 	for (i = 0; i < stream->nformats; ++i) {
+ 		if (stream->format[i].index == ctrl->bFormatIndex) {
+ 			format = &stream->format[i];
+diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
+index 1ef611e083237..538a330046ec9 100644
+--- a/drivers/media/usb/zr364xx/zr364xx.c
++++ b/drivers/media/usb/zr364xx/zr364xx.c
+@@ -1032,6 +1032,7 @@ static int zr364xx_start_readpipe(struct zr364xx_camera *cam)
+ 	DBG("submitting URB %p\n", pipe_info->stream_urb);
+ 	retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
+ 	if (retval) {
++		usb_free_urb(pipe_info->stream_urb);
+ 		printk(KERN_ERR KBUILD_MODNAME ": start read pipe failed\n");
+ 		return retval;
+ 	}
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index 7e8bf4b1ab2e5..efdf6a5475cb3 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -3166,8 +3166,10 @@ static int video_get_user(void __user *arg, void *parg,
+ 		if (copy_from_user(parg, (void __user *)arg, n))
+ 			err = -EFAULT;
+ 	} else if (in_compat_syscall()) {
++		memset(parg, 0, n);
+ 		err = v4l2_compat_get_user(arg, parg, cmd);
+ 	} else {
++		memset(parg, 0, n);
+ #if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
+ 		switch (cmd) {
+ 		case VIDIOC_QUERYBUF_TIME32:
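[Editor's note] The two memset() calls added above implement a common hardening pattern: zero the kernel-side argument buffer before a decoder that may fill only part of it, so the untouched tail never carries stale kernel memory back out. A generic sketch (names are hypothetical):

	static int fetch_user_arg(void __user *uarg, void *karg, size_t n,
				  int (*decode)(void __user *, void *))
	{
		memset(karg, 0, n);	/* unfilled fields read as zero, not stale data */
		return decode(uarg, karg);
	}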
+diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
+index c6f139b2e0c09..765c0210cb528 100644
+--- a/drivers/mfd/syscon.c
++++ b/drivers/mfd/syscon.c
+@@ -108,6 +108,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
+ 	syscon_config.max_register = resource_size(&res) - reg_io_width;
+ 
+ 	regmap = regmap_init_mmio(NULL, base, &syscon_config);
++	kfree(syscon_config.name);
+ 	if (IS_ERR(regmap)) {
+ 		pr_err("regmap init failed\n");
+ 		ret = PTR_ERR(regmap);
+@@ -144,7 +145,6 @@ err_clk:
+ 	regmap_exit(regmap);
+ err_regmap:
+ 	iounmap(base);
+-	kfree(syscon_config.name);
+ err_map:
+ 	kfree(syscon);
+ 	return ERR_PTR(ret);
+diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
+index 110f5a8538e96..8787d1d6774b1 100644
+--- a/drivers/misc/lkdtm/bugs.c
++++ b/drivers/misc/lkdtm/bugs.c
+@@ -144,6 +144,9 @@ void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+ 	if (*p == 0)
+ 		val = 0x87654321;
+ 	*p = val;
++
++	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
++		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
+ }
+ 
+ void lkdtm_SOFTLOCKUP(void)
+diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
+index b2aff4d87c014..94ed7860cfee3 100644
+--- a/drivers/misc/lkdtm/core.c
++++ b/drivers/misc/lkdtm/core.c
+@@ -176,9 +176,7 @@ static const struct crashtype crashtypes[] = {
+ 	CRASHTYPE(STACKLEAK_ERASING),
+ 	CRASHTYPE(CFI_FORWARD_PROTO),
+ 	CRASHTYPE(FORTIFIED_STRSCPY),
+-#ifdef CONFIG_X86_32
+ 	CRASHTYPE(DOUBLE_FAULT),
+-#endif
+ #ifdef CONFIG_PPC_BOOK3S_64
+ 	CRASHTYPE(PPC_SLB_MULTIHIT),
+ #endif
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index e30c4e88e4043..f988bc91e65f3 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -937,11 +937,14 @@ int mmc_execute_tuning(struct mmc_card *card)
+ 
+ 	err = host->ops->execute_tuning(host, opcode);
+ 
+-	if (err)
++	if (err) {
+ 		pr_err("%s: tuning execution failed: %d\n",
+ 			mmc_hostname(host), err);
+-	else
++	} else {
++		host->retune_now = 0;
++		host->need_retune = 0;
+ 		mmc_retune_enable(host);
++	}
+ 
+ 	return err;
+ }
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 2c48d65041013..e277ef4fa0995 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -847,11 +847,13 @@ try_again:
+ 		return err;
+ 
+ 	/*
+-	 * In case CCS and S18A in the response is set, start Signal Voltage
+-	 * Switch procedure. SPI mode doesn't support CMD11.
++	 * In case the S18A bit is set in the response, let's start the signal
++	 * voltage switch procedure. SPI mode doesn't support CMD11.
++	 * Note that, according to the spec, the S18A bit is not valid unless
++	 * the CCS bit is set as well. We deliberately deviate from the spec in
++	 * this regard, which allows UHS-I to be supported for SDSC cards.
+ 	 */
+-	if (!mmc_host_is_spi(host) && rocr &&
+-	   ((*rocr & 0x41000000) == 0x41000000)) {
++	if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
+ 		err = mmc_set_uhs_voltage(host, pocr);
+ 		if (err == -EAGAIN) {
+ 			retries--;
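[Editor's note] For reference, the magic numbers in the old and new checks are OCR response bits; per the SD physical-layer spec, CCS sits at bit 30 and S18A at bit 24. A sketch with the bits named (local defines for illustration; the kernel has its own):

	#define OCR_CCS		0x40000000	/* bit 30: SDHC/SDXC capacity */
	#define OCR_S18A	0x01000000	/* bit 24: 1.8V signalling accepted */

	static bool should_switch_voltage(u32 rocr)
	{
		/* old: (rocr & (OCR_CCS | OCR_S18A)) == (OCR_CCS | OCR_S18A)
		 * new: S18A alone, so SDSC cards (CCS=0) can also switch
		 */
		return rocr & OCR_S18A;
	}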
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index b6574e7fd26bf..9e4358d7a0a66 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -820,6 +820,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
+ 		},
+ 		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ 	},
++	{
++		/*
++		 * The Toshiba WT8-B's microSD slot always reports the card being
++		 * The Toshiba WT8-B's microSD slot always reports the card as
++		 * write-protected.
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA ENCORE 2 WT8-B"),
++		},
++		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
++	},
+ 	{} /* Terminating entry */
+ };
+ 
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index a9e20818ff3a3..76ef837430caa 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1812,6 +1812,10 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
+ 	u16 preset = 0;
+ 
+ 	switch (host->timing) {
++	case MMC_TIMING_MMC_HS:
++	case MMC_TIMING_SD_HS:
++		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
++		break;
+ 	case MMC_TIMING_UHS_SDR12:
+ 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
+ 		break;
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 0770c036e2ff5..960fed78529e1 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -253,6 +253,7 @@
+ 
+ /* 60-FB reserved */
+ 
++#define SDHCI_PRESET_FOR_HIGH_SPEED	0x64
+ #define SDHCI_PRESET_FOR_SDR12 0x66
+ #define SDHCI_PRESET_FOR_SDR25 0x68
+ #define SDHCI_PRESET_FOR_SDR50 0x6A
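[Editor's note] SDHCI preset value registers are 16-bit words in the 0x60 region of register space, one per timing mode; the new 0x64 entry covers the high-speed timings. Per the SDHCI spec the word packs an SDCLK frequency select in bits 9:0, a clock generator select in bit 10, and driver strength in bits 15:14. A hedged decode sketch (mask values per the spec, names illustrative):

	static void decode_preset(u16 preset)
	{
		u16 div       = preset & 0x03ff;	/* bits 9:0   SDCLK freq select */
		bool prog_clk = preset & 0x0400;	/* bit 10     clock generator   */
		u8 drv        = preset >> 14;		/* bits 15:14 driver strength   */

		pr_info("preset: div=%u %s clock drv=%u\n",
			div, prog_clk ? "programmable" : "divided", drv);
	}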
+diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
+index 84f93a874d502..deae923c8b7a8 100644
+--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
++++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
+@@ -1206,6 +1206,11 @@ static int seville_probe(struct platform_device *pdev)
+ 	felix->info = &seville_info_vsc9953;
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!res) {
++		err = -EINVAL;
++		dev_err(&pdev->dev, "Invalid resource\n");
++		goto err_alloc_felix;
++	}
+ 	felix->switch_base = res->start;
+ 
+ 	ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 5335244e4577a..89d16c587bb7d 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -423,6 +423,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
+ 	int id, ret;
+ 
+ 	pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!pres) {
++		dev_err(&pdev->dev, "Invalid resource\n");
++		return -EINVAL;
++	}
+ 	memset(&res, 0, sizeof(res));
+ 	memset(&ppd, 0, sizeof(ppd));
+ 
+diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
+index 0602d5d5d2eee..2e002e4b4b4aa 100644
+--- a/drivers/net/ethernet/freescale/fec.h
++++ b/drivers/net/ethernet/freescale/fec.h
+@@ -467,6 +467,11 @@ struct bufdesc_ex {
+  */
+ #define FEC_QUIRK_NO_HARD_RESET		(1 << 18)
+ 
++/* i.MX6SX ENET IP supports multiple queues (3 queues); use this quirk to
++ * represent this ENET IP.
++ */
++#define FEC_QUIRK_HAS_MULTI_QUEUES	(1 << 19)
++
+ struct bufdesc_prop {
+ 	int qid;
+ 	/* Address of Rx and Tx buffers */
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 89393fbc726f6..6c097ae3bac58 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -75,6 +75,8 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
+ 
+ #define DRIVER_NAME	"fec"
+ 
++static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
++
+ /* Pause frame field and FIFO threshold */
+ #define FEC_ENET_FCE	(1 << 5)
+ #define FEC_ENET_RSEM_V	0x84
+@@ -121,7 +123,7 @@ static const struct fec_devinfo fec_imx6x_info = {
+ 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+-		  FEC_QUIRK_CLEAR_SETUP_MII,
++		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
+ };
+ 
+ static const struct fec_devinfo fec_imx6ul_info = {
+@@ -420,6 +422,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+ 				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+ 			if (skb->ip_summed == CHECKSUM_PARTIAL)
+ 				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
++
+ 			ebdp->cbd_bdu = 0;
+ 			ebdp->cbd_esc = cpu_to_fec32(estatus);
+ 		}
+@@ -953,7 +956,7 @@ fec_restart(struct net_device *ndev)
+ 	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
+ 	 * instead of reset MAC itself.
+ 	 */
+-	if (fep->quirks & FEC_QUIRK_HAS_AVB ||
++	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+ 	    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+ 		writel(0, fep->hwp + FEC_ECNTRL);
+ 	} else {
+@@ -1164,7 +1167,7 @@ fec_stop(struct net_device *ndev)
+ 	 * instead of reset MAC itself.
+ 	 */
+ 	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+-		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
++		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ 			writel(0, fep->hwp + FEC_ECNTRL);
+ 		} else {
+ 			writel(1, fep->hwp + FEC_ECNTRL);
+@@ -2559,7 +2562,7 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
+ 
+ 	writel(tx_itr, fep->hwp + FEC_TXIC0);
+ 	writel(rx_itr, fep->hwp + FEC_RXIC0);
+-	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
++	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ 		writel(tx_itr, fep->hwp + FEC_TXIC1);
+ 		writel(rx_itr, fep->hwp + FEC_RXIC1);
+ 		writel(tx_itr, fep->hwp + FEC_TXIC2);
+@@ -3227,10 +3230,40 @@ static int fec_set_features(struct net_device *netdev,
+ 	return 0;
+ }
+ 
++static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
++{
++	struct vlan_ethhdr *vhdr;
++	unsigned short vlan_TCI = 0;
++
++	if (skb->protocol == htons(ETH_P_ALL)) {
++		vhdr = (struct vlan_ethhdr *)(skb->data);
++		vlan_TCI = ntohs(vhdr->h_vlan_TCI);
++	}
++
++	return vlan_TCI;
++}
++
++static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
++				 struct net_device *sb_dev)
++{
++	struct fec_enet_private *fep = netdev_priv(ndev);
++	u16 vlan_tag;
++
++	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
++		return netdev_pick_tx(ndev, skb, NULL);
++
++	vlan_tag = fec_enet_get_raw_vlan_tci(skb);
++	if (!vlan_tag)
++		return vlan_tag;
++
++	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
++}
++
+ static const struct net_device_ops fec_netdev_ops = {
+ 	.ndo_open		= fec_enet_open,
+ 	.ndo_stop		= fec_enet_close,
+ 	.ndo_start_xmit		= fec_enet_start_xmit,
++	.ndo_select_queue       = fec_enet_select_queue,
+ 	.ndo_set_rx_mode	= set_multicast_list,
+ 	.ndo_validate_addr	= eth_validate_addr,
+ 	.ndo_tx_timeout		= fec_timeout,
+@@ -3356,7 +3389,7 @@ static int fec_enet_init(struct net_device *ndev)
+ 		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ 	}
+ 
+-	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
++	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ 		fep->tx_align = 0;
+ 		fep->rx_align = 0x3f;
+ 	}
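[Editor's note] The new ndo_select_queue hook above reduces to a table lookup on the 3-bit VLAN PCP, which occupies bits 15:13 of the TCI. A worked restatement of the mapping:

	static u16 queue_for_tci(u16 tci)
	{
		/* PCP 0-1 -> queue 0, 2-4 -> queue 1, 5-7 -> queue 2 */
		static const u16 pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};

		return pri_to_queue[tci >> 13];	/* e.g. TCI 0x6005 -> PCP 3 -> queue 1 */
	}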
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 3c77897b3f31f..df10b87ca0f80 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1528,7 +1528,8 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+ 
+ /**
+  * build_hdr_descs_arr - build a header descriptor array
+- * @txbuff: tx buffer
++ * @skb: tx socket buffer
++ * @indir_arr: indirect array
+  * @num_entries: number of descriptors to be sent
+  * @hdr_field: bit field determining which headers will be sent
+  *
+diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
+index f8d78af76d7de..1b0958bd24f6c 100644
+--- a/drivers/net/ethernet/intel/e100.c
++++ b/drivers/net/ethernet/intel/e100.c
+@@ -1395,7 +1395,7 @@ static int e100_phy_check_without_mii(struct nic *nic)
+ 	u8 phy_type;
+ 	int without_mii;
+ 
+-	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
++	phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;
+ 
+ 	switch (phy_type) {
+ 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
+@@ -1515,7 +1515,7 @@ static int e100_phy_init(struct nic *nic)
+ 		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
+ 	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
+ 	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
+-		(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
++	   (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
+ 		/* enable/disable MDI/MDI-X auto-switching. */
+ 		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
+ 				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
+@@ -2269,9 +2269,9 @@ static int e100_asf(struct nic *nic)
+ {
+ 	/* ASF can be enabled from eeprom */
+ 	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
+-	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
+-	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
+-	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
++	   (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
++	   !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
++	   ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
+ }
+ 
+ static int e100_up(struct nic *nic)
+@@ -2926,7 +2926,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	/* Wol magic packet can be enabled from eeprom */
+ 	if ((nic->mac >= mac_82558_D101_A4) &&
+-	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
++	   (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
+ 		nic->flags |= wol_magic;
+ 		device_set_wakeup_enable(&pdev->dev, true);
+ 	}
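[Editor's note] The e100 hunks all make the same correction: the EEPROM word array is little-endian as read from the device, so every flag test must go through le16_to_cpu(); the raw tests examined the wrong byte on big-endian hosts. The pattern, distilled:

	static bool eeprom_flag_set(const __le16 *eeprom, int word, u16 flag)
	{
		return le16_to_cpu(eeprom[word]) & flag;	/* convert, then test */
	}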
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+index 7a879614ca55e..1dad6c93ac9c8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+@@ -11,13 +11,14 @@
+  * operate with the nanosecond field directly without fear of overflow.
+  *
+  * Much like the 82599, the update period is dependent upon the link speed:
+- * At 40Gb link or no link, the period is 1.6ns.
+- * At 10Gb link, the period is multiplied by 2. (3.2ns)
++ * At 40Gb, 25Gb, or no link, the period is 1.6ns.
++ * At 10Gb or 5Gb link, the period is multiplied by 2. (3.2ns)
+  * At 1Gb link, the period is multiplied by 20. (32ns)
+  * 1588 functionality is not supported at 100Mbps.
+  */
+ #define I40E_PTP_40GB_INCVAL		0x0199999999ULL
+ #define I40E_PTP_10GB_INCVAL_MULT	2
++#define I40E_PTP_5GB_INCVAL_MULT	2
+ #define I40E_PTP_1GB_INCVAL_MULT	20
+ 
+ #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+@@ -465,6 +466,9 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
+ 	case I40E_LINK_SPEED_10GB:
+ 		mult = I40E_PTP_10GB_INCVAL_MULT;
+ 		break;
++	case I40E_LINK_SPEED_5GB:
++		mult = I40E_PTP_5GB_INCVAL_MULT;
++		break;
+ 	case I40E_LINK_SPEED_1GB:
+ 		mult = I40E_PTP_1GB_INCVAL_MULT;
+ 		break;
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index f80fff97d8dce..0d136708f9607 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -3492,13 +3492,9 @@ static int
+ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
+ 		    struct ice_ring_container *rc)
+ {
+-	struct ice_pf *pf;
+-
+ 	if (!rc->ring)
+ 		return -EINVAL;
+ 
+-	pf = rc->ring->vsi->back;
+-
+ 	switch (c_type) {
+ 	case ICE_RX_CONTAINER:
+ 		ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
+@@ -3510,7 +3506,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
+ 		ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ 		break;
+ 	default:
+-		dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type);
++		dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+index 4ec24c3e813fe..c0ee0541e53fc 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
++++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+@@ -608,7 +608,7 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
+ 	/* L2 Packet types */
+ 	ICE_PTT_UNUSED_ENTRY(0),
+ 	ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+-	ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
++	ICE_PTT_UNUSED_ENTRY(2),
+ 	ICE_PTT_UNUSED_ENTRY(3),
+ 	ICE_PTT_UNUSED_ENTRY(4),
+ 	ICE_PTT_UNUSED_ENTRY(5),
+@@ -722,7 +722,7 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
+ 	/* Non Tunneled IPv6 */
+ 	ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ 	ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+-	ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
++	ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
+ 	ICE_PTT_UNUSED_ENTRY(91),
+ 	ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
+ 	ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
+index 266036b7a49ab..8a90c47e337df 100644
+--- a/drivers/net/ethernet/intel/ice/ice_type.h
++++ b/drivers/net/ethernet/intel/ice/ice_type.h
+@@ -63,7 +63,7 @@ enum ice_aq_res_ids {
+ /* FW update timeout definitions are in milliseconds */
+ #define ICE_NVM_TIMEOUT			180000
+ #define ICE_CHANGE_LOCK_TIMEOUT		1000
+-#define ICE_GLOBAL_CFG_LOCK_TIMEOUT	3000
++#define ICE_GLOBAL_CFG_LOCK_TIMEOUT	5000
+ 
+ enum ice_aq_res_access_type {
+ 	ICE_RES_READ = 1,
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index caa8929289ae7..434220a342dfb 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -2643,7 +2643,8 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
+ 			}
+ 
+ 			input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
+-			input->filter.vlan_tci = match.key->vlan_priority;
++			input->filter.vlan_tci =
++				(__force __be16)match.key->vlan_priority;
+ 		}
+ 	}
+ 
+@@ -6276,12 +6277,12 @@ int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+ 	cmd_type |= len | IGB_TXD_DCMD;
+ 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ 
+-	olinfo_status = cpu_to_le32(len << E1000_ADVTXD_PAYLEN_SHIFT);
++	olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
+ 	/* 82575 requires a unique index per ring */
+ 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ 		olinfo_status |= tx_ring->reg_idx << 4;
+ 
+-	tx_desc->read.olinfo_status = olinfo_status;
++	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ 
+ 	netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
+ 
+@@ -8593,7 +8594,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
+ 
+ 		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
+ 		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+-			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
++			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
+ 		else
+ 			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+ 
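[Editor's note] The olinfo_status change is an endianness-accumulation fix worth generalizing: build the descriptor word entirely in host byte order and convert exactly once at the end, since OR-ing a host-order value into an already-swapped __le32 corrupts the field on big-endian machines. Roughly:

	u32 olinfo = len << E1000_ADVTXD_PAYLEN_SHIFT;	/* host order */

	if (needs_ctx_idx)				/* hypothetical flag */
		olinfo |= reg_idx << 4;			/* still host order */

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo);	/* single swap */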
+diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
+index fb3fbcb133310..630c1155f1965 100644
+--- a/drivers/net/ethernet/intel/igbvf/netdev.c
++++ b/drivers/net/ethernet/intel/igbvf/netdev.c
+@@ -83,14 +83,14 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
+ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
+ 			      struct net_device *netdev,
+ 			      struct sk_buff *skb,
+-			      u32 status, u16 vlan)
++			      u32 status, __le16 vlan)
+ {
+ 	u16 vid;
+ 
+ 	if (status & E1000_RXD_STAT_VP) {
+ 		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
+ 		    (status & E1000_RXDEXT_STATERR_LB))
+-			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
++			vid = be16_to_cpu((__force __be16)vlan) & E1000_RXD_SPC_VLAN_MASK;
+ 		else
+ 			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+ 		if (test_bit(vid, adapter->active_vlans))
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index bf06f2d785db6..fbee581d02b4b 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -7388,6 +7388,10 @@ static int mvpp2_probe(struct platform_device *pdev)
+ 			return PTR_ERR(priv->lms_base);
+ 	} else {
+ 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++		if (!res) {
++			dev_err(&pdev->dev, "Invalid resource\n");
++			return -EINVAL;
++		}
+ 		if (has_acpi_companion(&pdev->dev)) {
+ 			/* In case the MDIO memory region is declared in
+ 			 * the ACPI, it can already appear as 'in-use'
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 249d8905e644a..e7528fa7e1876 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1308,7 +1308,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+ 	if (rep->vlan && skb_vlan_tag_present(skb))
+ 		skb_vlan_pop(skb);
+ 
+-	if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
++	if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
++		     !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
+ 		dev_kfree_skb_any(skb);
+ 		goto free_wqe;
+ 	}
+@@ -1365,7 +1366,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
+ 
+ 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ 
+-	if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
++	if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
++		     !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
+ 		dev_kfree_skb_any(skb);
+ 		goto mpwrq_cqe_out;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+index 83a05371e2aa1..8d029401ca6cc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+@@ -118,17 +118,24 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
+ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
+ 					   u8 *port1, u8 *port2)
+ {
++	bool p1en;
++	bool p2en;
++
++	p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled &&
++	       tracker->netdev_state[MLX5_LAG_P1].link_up;
++
++	p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
++	       tracker->netdev_state[MLX5_LAG_P2].link_up;
++
+ 	*port1 = 1;
+ 	*port2 = 2;
+-	if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled ||
+-	    !tracker->netdev_state[MLX5_LAG_P1].link_up) {
+-		*port1 = 2;
++	if ((!p1en && !p2en) || (p1en && p2en))
+ 		return;
+-	}
+ 
+-	if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled ||
+-	    !tracker->netdev_state[MLX5_LAG_P2].link_up)
++	if (p1en)
+ 		*port2 = 1;
++	else
++		*port1 = 2;
+ }
+ 
+ void mlx5_modify_lag(struct mlx5_lag *ldev,
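[Editor's note] The rewritten affinity helper is easiest to read as a truth table: the symmetric cases keep the default one-to-one mapping, and a single live port attracts all traffic. A standalone restatement of the logic above:

	/*  p1en p2en | port1 port2
	 *   0    0   |   1     2    (default: nothing usable)
	 *   1    1   |   1     2    (default: both usable)
	 *   1    0   |   1     1    (steer everything to port 1)
	 *   0    1   |   2     2    (steer everything to port 2)
	 */
	static void infer_tx_affinity(bool p1en, bool p2en, u8 *port1, u8 *port2)
	{
		*port1 = 1;
		*port2 = 2;
		if (p1en == p2en)	/* both up or both down: keep defaults */
			return;
		if (p1en)
			*port2 = 1;
		else
			*port1 = 2;
	}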
+diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
+index caa251d0e3815..b27713906d3a6 100644
+--- a/drivers/net/ethernet/micrel/ks8842.c
++++ b/drivers/net/ethernet/micrel/ks8842.c
+@@ -1135,6 +1135,10 @@ static int ks8842_probe(struct platform_device *pdev)
+ 	unsigned i;
+ 
+ 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!iomem) {
++		dev_err(&pdev->dev, "Invalid resource\n");
++		return -EINVAL;
++	}
+ 	if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
+ 		goto err_mem_region;
+ 
+diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
+index 49fd843c4c8a0..a4380c45f6689 100644
+--- a/drivers/net/ethernet/moxa/moxart_ether.c
++++ b/drivers/net/ethernet/moxa/moxart_ether.c
+@@ -481,14 +481,13 @@ static int moxart_mac_probe(struct platform_device *pdev)
+ 	priv->ndev = ndev;
+ 	priv->pdev = pdev;
+ 
+-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-	ndev->base_addr = res->start;
+-	priv->base = devm_ioremap_resource(p_dev, res);
++	priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ 	if (IS_ERR(priv->base)) {
+ 		dev_err(p_dev, "devm_ioremap_resource failed\n");
+ 		ret = PTR_ERR(priv->base);
+ 		goto init_fail;
+ 	}
++	ndev->base_addr = res->start;
+ 
+ 	spin_lock_init(&priv->txlock);
+ 
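[Editor's note] devm_platform_get_and_ioremap_resource(), adopted above, collapses the platform_get_resource() + devm_ioremap_resource() pair and still hands back the struct resource, so values such as base_addr are recorded only after the mapping is known good. Usage sketch (hypothetical probe):

	static int demo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
		if (IS_ERR(base))
			return PTR_ERR(base);	/* covers both missing and unmappable */

		dev_info(&pdev->dev, "mapped %pR\n", res);
		return 0;
	}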
+diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+index 1b32a43f70242..df0501b5fe830 100644
+--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -107,7 +107,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
+ {
+ 	u8 *data = skb->data;
+ 	unsigned int offset;
+-	u16 *hi, *id;
++	u16 hi, id;
+ 	u32 lo;
+ 
+ 	if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
+@@ -118,14 +118,11 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
+ 	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
+ 		return 0;
+ 
+-	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
+-	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
++	hi = get_unaligned_be16(data + offset + OFF_PTP_SOURCE_UUID + 0);
++	lo = get_unaligned_be32(data + offset + OFF_PTP_SOURCE_UUID + 2);
++	id = get_unaligned_be16(data + offset + OFF_PTP_SEQUENCE_ID);
+ 
+-	memcpy(&lo, &hi[1], sizeof(lo));
+-
+-	return (uid_hi == *hi &&
+-		uid_lo == lo &&
+-		seqid  == *id);
++	return (uid_hi == hi && uid_lo == lo && seqid == id);
+ }
+ 
+ static void
+@@ -135,7 +132,6 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
+ 	struct pci_dev *pdev;
+ 	u64 ns;
+ 	u32 hi, lo, val;
+-	u16 uid, seq;
+ 
+ 	if (!adapter->hwts_rx_en)
+ 		return;
+@@ -151,10 +147,7 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
+ 	lo = pch_src_uuid_lo_read(pdev);
+ 	hi = pch_src_uuid_hi_read(pdev);
+ 
+-	uid = hi & 0xffff;
+-	seq = (hi >> 16) & 0xffff;
+-
+-	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
++	if (!pch_ptp_match(skb, hi, lo, hi >> 16))
+ 		goto out;
+ 
+ 	ns = pch_rx_snap_read(pdev);
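[Editor's note] The pch_ptp_match() rewrite replaces pointer casts into the packet with the unaligned byte-order helpers, which fixes two problems at once: PTP header fields are big-endian on the wire, and casting to u16*/u32* at arbitrary offsets faults on strict-alignment architectures. The helpers in one line each ('data'/'off' hypothetical):

	#include <asm/unaligned.h>

	u16 seq = get_unaligned_be16(data + off);	/* no alignment assumed */
	u32 id  = get_unaligned_be32(data + off + 2);	/* byte-swapped as needed */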
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index f7a56e05ec8a4..552164af2dd4b 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -3477,7 +3477,6 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
+ 	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
+ 
+ 	rtl_pcie_state_l2l3_disable(tp);
+-	rtl_hw_aspm_clkreq_enable(tp, true);
+ }
+ 
+ DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
+diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
+index 21fa6c0e88734..84041cd587d78 100644
+--- a/drivers/net/ethernet/sfc/ef10_sriov.c
++++ b/drivers/net/ethernet/sfc/ef10_sriov.c
+@@ -402,12 +402,17 @@ fail1:
+ 	return rc;
+ }
+ 
++/* Disable SRIOV and remove VFs.
++ * If some VFs are attached to a guest (using Xen only), nothing is
++ * done if force=false; if force=true, vports are freed (for the
++ * non-attached ones only), but SRIOV is not disabled and VFs are not
++ * removed in either case.
++ */
+ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
+ {
+ 	struct pci_dev *dev = efx->pci_dev;
+-	unsigned int vfs_assigned = 0;
+-
+-	vfs_assigned = pci_vfs_assigned(dev);
++	unsigned int vfs_assigned = pci_vfs_assigned(dev);
++	int rc = 0;
+ 
+ 	if (vfs_assigned && !force) {
+ 		netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
+@@ -417,10 +422,12 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
+ 
+ 	if (!vfs_assigned)
+ 		pci_disable_sriov(dev);
++	else
++		rc = -EBUSY;
+ 
+ 	efx_ef10_sriov_free_vf_vswitching(efx);
+ 	efx->vf_count = 0;
+-	return 0;
++	return rc;
+ }
+ 
+ int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
+@@ -439,7 +446,6 @@ int efx_ef10_sriov_init(struct efx_nic *efx)
+ void efx_ef10_sriov_fini(struct efx_nic *efx)
+ {
+ 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+-	unsigned int i;
+ 	int rc;
+ 
+ 	if (!nic_data->vf) {
+@@ -449,14 +455,7 @@ void efx_ef10_sriov_fini(struct efx_nic *efx)
+ 		return;
+ 	}
+ 
+-	/* Remove any VFs in the host */
+-	for (i = 0; i < efx->vf_count; ++i) {
+-		struct efx_nic *vf_efx = nic_data->vf[i].efx;
+-
+-		if (vf_efx)
+-			vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
+-	}
+-
++	/* Disable SRIOV and remove any VFs in the host */
+ 	rc = efx_ef10_pci_sriov_disable(efx, true);
+ 	if (rc)
+ 		netif_dbg(efx, drv, efx->net_dev,
+diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
+index 6eef0f45b133b..2b29fd4cbdf44 100644
+--- a/drivers/net/ethernet/sgi/ioc3-eth.c
++++ b/drivers/net/ethernet/sgi/ioc3-eth.c
+@@ -835,6 +835,10 @@ static int ioc3eth_probe(struct platform_device *pdev)
+ 	int err;
+ 
+ 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!regs) {
++		dev_err(&pdev->dev, "Invalid resource\n");
++		return -EINVAL;
++	}
+ 	/* get mac addr from one wire prom */
+ 	if (ioc3eth_get_mac_addr(regs, mac_addr))
+ 		return -EPROBE_DEFER; /* not available yet */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+index d64116e0543ed..a4ba27bf3131e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+@@ -444,6 +444,12 @@ int stmmac_mdio_register(struct net_device *ndev)
+ 		found = 1;
+ 	}
+ 
++	if (!found && !mdio_node) {
++		dev_warn(dev, "No PHY found\n");
++		err = -ENODEV;
++		goto no_phy_found;
++	}
++
+ 	/* Try to probe the XPCS by scanning all addresses. */
+ 	if (priv->hw->xpcs) {
+ 		struct mdio_xpcs_args *xpcs = &priv->hw->xpcs_args;
+@@ -452,6 +458,7 @@ int stmmac_mdio_register(struct net_device *ndev)
+ 
+ 		xpcs->bus = new_bus;
+ 
++		found = 0;
+ 		for (addr = 0; addr < max_addr; addr++) {
+ 			xpcs->addr = addr;
+ 
+@@ -461,13 +468,12 @@ int stmmac_mdio_register(struct net_device *ndev)
+ 				break;
+ 			}
+ 		}
+-	}
+ 
+-	if (!found && !mdio_node) {
+-		dev_warn(dev, "No PHY found\n");
+-		mdiobus_unregister(new_bus);
+-		mdiobus_free(new_bus);
+-		return -ENODEV;
++		if (!found && !mdio_node) {
++			dev_warn(dev, "No XPCS found\n");
++			err = -ENODEV;
++			goto no_xpcs_found;
++		}
+ 	}
+ 
+ bus_register_done:
+@@ -475,6 +481,9 @@ bus_register_done:
+ 
+ 	return 0;
+ 
++no_xpcs_found:
++no_phy_found:
++	mdiobus_unregister(new_bus);
+ bus_register_fail:
+ 	mdiobus_free(new_bus);
+ 	return err;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+index 007840d4a8073..3ffe8d2a1f14b 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+@@ -1193,9 +1193,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
+ 	}
+ 
+ 	dev_info(dev,
+-		 "Xilinx EmacLite at 0x%08lX mapped to 0x%08lX, irq=%d\n",
+-		 (unsigned long __force)ndev->mem_start,
+-		 (unsigned long __force)lp->base_addr, ndev->irq);
++		 "Xilinx EmacLite at 0x%08lX mapped to 0x%p, irq=%d\n",
++		 (unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq);
+ 	return 0;
+ 
+ error:
+diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
+index 466622664424d..e449d94661225 100644
+--- a/drivers/net/fjes/fjes_main.c
++++ b/drivers/net/fjes/fjes_main.c
+@@ -1262,6 +1262,10 @@ static int fjes_probe(struct platform_device *plat_dev)
+ 	adapter->interrupt_watch_enable = false;
+ 
+ 	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
++	if (!res) {
++		err = -EINVAL;
++		goto err_free_control_wq;
++	}
+ 	hw->hw_res.start = res->start;
+ 	hw->hw_res.size = resource_size(res);
+ 	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
+index 97c1b55405cbf..ba27bcde901e3 100644
+--- a/drivers/net/ipa/ipa_main.c
++++ b/drivers/net/ipa/ipa_main.c
+@@ -679,6 +679,7 @@ static int ipa_firmware_load(struct device *dev)
+ 	}
+ 
+ 	ret = of_address_to_resource(node, 0, &res);
++	of_node_put(node);
+ 	if (ret) {
+ 		dev_err(dev, "error %d getting \"memory-region\" resource\n",
+ 			ret);
+diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
+index 1bd18857e1c5e..f0a6bfa61645e 100644
+--- a/drivers/net/mdio/mdio-ipq8064.c
++++ b/drivers/net/mdio/mdio-ipq8064.c
+@@ -10,7 +10,7 @@
+ #include <linux/module.h>
+ #include <linux/regmap.h>
+ #include <linux/of_mdio.h>
+-#include <linux/phy.h>
++#include <linux/of_address.h>
+ #include <linux/platform_device.h>
+ #include <linux/mfd/syscon.h>
+ 
+@@ -96,14 +96,34 @@ ipq8064_mdio_write(struct mii_bus *bus, int phy_addr, int reg_offset, u16 data)
+ 	return ipq8064_mdio_wait_busy(priv);
+ }
+ 
++static const struct regmap_config ipq8064_mdio_regmap_config = {
++	.reg_bits = 32,
++	.reg_stride = 4,
++	.val_bits = 32,
++	.can_multi_write = false,
++	/* the mdio lock is used by any user of this mdio driver */
++	.disable_locking = true,
++
++	.cache_type = REGCACHE_NONE,
++};
++
+ static int
+ ipq8064_mdio_probe(struct platform_device *pdev)
+ {
+ 	struct device_node *np = pdev->dev.of_node;
+ 	struct ipq8064_mdio *priv;
++	struct resource res;
+ 	struct mii_bus *bus;
++	void __iomem *base;
+ 	int ret;
+ 
++	if (of_address_to_resource(np, 0, &res))
++		return -ENOMEM;
++
++	base = ioremap(res.start, resource_size(&res));
++	if (!base)
++		return -ENOMEM;
++
+ 	bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
+ 	if (!bus)
+ 		return -ENOMEM;
+@@ -115,15 +135,10 @@ ipq8064_mdio_probe(struct platform_device *pdev)
+ 	bus->parent = &pdev->dev;
+ 
+ 	priv = bus->priv;
+-	priv->base = device_node_to_regmap(np);
+-	if (IS_ERR(priv->base)) {
+-		if (priv->base == ERR_PTR(-EPROBE_DEFER))
+-			return -EPROBE_DEFER;
+-
+-		dev_err(&pdev->dev, "error getting device regmap, error=%pe\n",
+-			priv->base);
++	priv->base = devm_regmap_init_mmio(&pdev->dev, base,
++					   &ipq8064_mdio_regmap_config);
++	if (IS_ERR(priv->base))
+ 		return PTR_ERR(priv->base);
+-	}
+ 
+ 	ret = of_mdiobus_register(bus, np);
+ 	if (ret)
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index 821e85a973679..7b99a3234c652 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -357,6 +357,19 @@ static int rtl8211f_config_init(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
++static int rtl821x_resume(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = genphy_resume(phydev);
++	if (ret < 0)
++		return ret;
++
++	msleep(20);
++
++	return 0;
++}
++
+ static int rtl8211e_config_init(struct phy_device *phydev)
+ {
+ 	int ret = 0, oldpage;
+@@ -852,7 +865,7 @@ static struct phy_driver realtek_drvs[] = {
+ 		.config_intr	= &rtl8211f_config_intr,
+ 		.handle_interrupt = rtl8211f_handle_interrupt,
+ 		.suspend	= genphy_suspend,
+-		.resume		= genphy_resume,
++		.resume		= rtl821x_resume,
+ 		.read_page	= rtl821x_read_page,
+ 		.write_page	= rtl821x_write_page,
+ 	}, {
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 0824e6999e499..f7ce341bb328f 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -660,6 +660,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
+ 	len -= vi->hdr_len;
+ 	stats->bytes += len;
+ 
++	if (unlikely(len > GOOD_PACKET_LEN)) {
++		pr_debug("%s: rx error: len %u exceeds max size %d\n",
++			 dev->name, len, GOOD_PACKET_LEN);
++		dev->stats.rx_length_errors++;
++		goto err_len;
++	}
+ 	rcu_read_lock();
+ 	xdp_prog = rcu_dereference(rq->xdp_prog);
+ 	if (xdp_prog) {
+@@ -761,6 +767,7 @@ err:
+ err_xdp:
+ 	rcu_read_unlock();
+ 	stats->xdp_drops++;
++err_len:
+ 	stats->drops++;
+ 	put_page(page);
+ xdp_xmit:
+@@ -814,6 +821,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ 	head_skb = NULL;
+ 	stats->bytes += len - vi->hdr_len;
+ 
++	if (unlikely(len > truesize)) {
++		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
++			 dev->name, len, (unsigned long)ctx);
++		dev->stats.rx_length_errors++;
++		goto err_skb;
++	}
+ 	rcu_read_lock();
+ 	xdp_prog = rcu_dereference(rq->xdp_prog);
+ 	if (xdp_prog) {
+@@ -938,13 +951,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ 	}
+ 	rcu_read_unlock();
+ 
+-	if (unlikely(len > truesize)) {
+-		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+-			 dev->name, len, (unsigned long)ctx);
+-		dev->stats.rx_length_errors++;
+-		goto err_skb;
+-	}
+-
+ 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
+ 			       metasize);
+ 	curr_skb = head_skb;
+@@ -1552,7 +1558,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+ 				    virtio_is_little_endian(vi->vdev), false,
+ 				    0))
+-		BUG();
++		return -EPROTO;
+ 
+ 	if (vi->mergeable_rx_bufs)
+ 		hdr->num_buffers = 0;
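[Editor's note] Both virtio_net receive paths now validate the device-supplied length before any XDP or skb processing rather than after, so a buggy or malicious device can no longer have an oversized buffer parsed. The general shape of the early check ('max' stands in for GOOD_PACKET_LEN / truesize):

	if (unlikely(len > max)) {
		dev->stats.rx_length_errors++;
		goto drop;	/* count and discard before touching the data */
	}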
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index baf7404c137db..873d41c21a779 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -3798,6 +3798,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
+ 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ 	struct cfg80211_chan_def chandef;
+ 	struct iwl_mvm_phy_ctxt *phy_ctxt;
++	bool band_change_removal;
+ 	int ret, i;
+ 
+ 	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
+@@ -3878,19 +3879,30 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
+ 	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+ 
+ 	/*
+-	 * Change the PHY context configuration as it is currently referenced
+-	 * only by the P2P Device MAC
++	 * Check if the remain-on-channel is on a different band and that
++	 * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
++	 * so, we'll need to release and then re-configure here, since we
++	 * must not remove a PHY context that's part of a binding.
+ 	 */
+-	if (mvmvif->phy_ctxt->ref == 1) {
++	band_change_removal =
++		fw_has_capa(&mvm->fw->ucode_capa,
++			    IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
++		mvmvif->phy_ctxt->channel->band != chandef.chan->band;
++
++	if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
++		/*
++		 * Change the PHY context configuration as it is currently
++		 * referenced only by the P2P Device MAC (and we can modify it)
++		 */
+ 		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
+ 					       &chandef, 1, 1);
+ 		if (ret)
+ 			goto out_unlock;
+ 	} else {
+ 		/*
+-		 * The PHY context is shared with other MACs. Need to remove the
+-		 * P2P Device from the binding, allocate an new PHY context and
+-		 * create a new binding
++		 * The PHY context is shared with other MACs (or we're trying to
++		 * switch bands), so remove the P2P Device from the binding,
++		 * allocate a new PHY context and create a new binding.
+ 		 */
+ 		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ 		if (!phy_ctxt) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index 0a963d01b8250..f17dfdf2bfaf4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -1828,7 +1828,8 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 				enum iwl_mvm_smps_type_request req_type,
+ 				enum ieee80211_smps_mode smps_request);
+-bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
++bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
++				  struct iwl_mvm_phy_ctxt *ctxt);
+ 
+ /* Low latency */
+ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+index 0fd51f6aa2061..4ed2338027d13 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
++ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+  * Copyright (C) 2017 Intel Deutschland GmbH
+  */
+@@ -76,6 +76,7 @@ static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
+ }
+ 
+ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
++					 struct iwl_mvm_phy_ctxt *ctxt,
+ 					 __le32 *rxchain_info,
+ 					 u8 chains_static,
+ 					 u8 chains_dynamic)
+@@ -93,7 +94,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+ 	 * between the two antennas is sufficiently different to impact
+ 	 * performance.
+ 	 */
+-	if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
++	if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) {
+ 		idle_cnt = 2;
+ 		active_cnt = 2;
+ 	}
+@@ -113,6 +114,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+  * Add the phy configuration to the PHY context command
+  */
+ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
++					 struct iwl_mvm_phy_ctxt *ctxt,
+ 					 struct iwl_phy_context_cmd_v1 *cmd,
+ 					 struct cfg80211_chan_def *chandef,
+ 					 u8 chains_static, u8 chains_dynamic)
+@@ -123,7 +125,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
+ 	/* Set the channel info data */
+ 	iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
+ 
+-	iwl_mvm_phy_ctxt_set_rxchain(mvm, &tail->rxchain_info,
++	iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info,
+ 				     chains_static, chains_dynamic);
+ 
+ 	tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+@@ -133,6 +135,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
+  * Add the phy configuration to the PHY context command
+  */
+ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
++				      struct iwl_mvm_phy_ctxt *ctxt,
+ 				      struct iwl_phy_context_cmd *cmd,
+ 				      struct cfg80211_chan_def *chandef,
+ 				      u8 chains_static, u8 chains_dynamic)
+@@ -143,7 +146,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+ 	/* Set the channel info data */
+ 	iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
+ 
+-	iwl_mvm_phy_ctxt_set_rxchain(mvm, &cmd->rxchain_info,
++	iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
+ 				     chains_static, chains_dynamic);
+ }
+ 
+@@ -170,7 +173,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
+ 		iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action);
+ 
+ 		/* Set the command data */
+-		iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
++		iwl_mvm_phy_ctxt_cmd_data(mvm, ctxt, &cmd, chandef,
+ 					  chains_static,
+ 					  chains_dynamic);
+ 
+@@ -186,7 +189,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
+ 					 action);
+ 
+ 		/* Set the command data */
+-		iwl_mvm_phy_ctxt_cmd_data_v1(mvm, &cmd, chandef,
++		iwl_mvm_phy_ctxt_cmd_data_v1(mvm, ctxt, &cmd, chandef,
+ 					     chains_static,
+ 					     chains_dynamic);
+ 		ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+index 0b012f8c9eb22..9a4a1b363254f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+@@ -289,6 +289,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
+ 			 * and know the dtim period.
+ 			 */
+ 			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
++				!te_data->vif->bss_conf.assoc ?
++				"Not associated and the time event is over already..." :
+ 				"No beacon heard and the time event is over already...");
+ 			break;
+ 		default:
+@@ -787,6 +789,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ 			 * and know the dtim period.
+ 			 */
+ 			iwl_mvm_te_check_disconnect(mvm, vif,
++						    !vif->bss_conf.assoc ?
++						    "Not associated and the session protection is over already..." :
+ 						    "No beacon heard and the session protection is over already...");
+ 			spin_lock_bh(&mvm->time_event_lock);
+ 			iwl_mvm_te_clear_data(mvm, te_data);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+index b6b481ff15185..fc4d8840eca44 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+@@ -683,23 +683,37 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
+ 	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
+ }
+ 
++struct iwl_mvm_diversity_iter_data {
++	struct iwl_mvm_phy_ctxt *ctxt;
++	bool result;
++};
++
+ static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
+ 				   struct ieee80211_vif *vif)
+ {
+ 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+-	bool *result = _data;
++	struct iwl_mvm_diversity_iter_data *data = _data;
+ 	int i;
+ 
++	if (mvmvif->phy_ctxt != data->ctxt)
++		return;
++
+ 	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+ 		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
+-		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
+-			*result = false;
++		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
++			data->result = false;
++			break;
++		}
+ 	}
+ }
+ 
+-bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
++bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
++				  struct iwl_mvm_phy_ctxt *ctxt)
+ {
+-	bool result = true;
++	struct iwl_mvm_diversity_iter_data data = {
++		.ctxt = ctxt,
++		.result = true,
++	};
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+@@ -711,9 +725,9 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
+ 
+ 	ieee80211_iterate_active_interfaces_atomic(
+ 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+-			iwl_mvm_diversity_iter, &result);
++			iwl_mvm_diversity_iter, &data);
+ 
+-	return result;
++	return data.result;
+ }
+ 
+ void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+index cecc32e7dbe8a..2dbc51daa2f8a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+@@ -79,7 +79,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ 	struct iwl_prph_scratch *prph_scratch;
+ 	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+ 	struct iwl_prph_info *prph_info;
+-	void *iml_img;
+ 	u32 control_flags = 0;
+ 	int ret;
+ 	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
+@@ -187,14 +186,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ 	trans_pcie->prph_scratch = prph_scratch;
+ 
+ 	/* Allocate IML */
+-	iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
+-				     &trans_pcie->iml_dma_addr, GFP_KERNEL);
+-	if (!iml_img) {
++	trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
++					     &trans_pcie->iml_dma_addr,
++					     GFP_KERNEL);
++	if (!trans_pcie->iml) {
+ 		ret = -ENOMEM;
+ 		goto err_free_ctxt_info;
+ 	}
+ 
+-	memcpy(iml_img, trans->iml, trans->iml_len);
++	memcpy(trans_pcie->iml, trans->iml, trans->iml_len);
+ 
+ 	iwl_enable_fw_load_int_ctx_info(trans);
+ 
+@@ -243,6 +243,11 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
+ 	trans_pcie->ctxt_info_dma_addr = 0;
+ 	trans_pcie->ctxt_info_gen3 = NULL;
+ 
++	dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
++			  trans_pcie->iml_dma_addr);
++	trans_pcie->iml_dma_addr = 0;
++	trans_pcie->iml = NULL;
++
+ 	iwl_pcie_ctxt_info_free_fw_img(trans);
+ 
+ 	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+index d9688c7bed07c..53af3f29eab8e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+@@ -279,6 +279,8 @@ struct cont_rec {
+  *	Context information addresses will be taken from here.
+  *	This is driver's local copy for keeping track of size and
+  *	count for allocating and freeing the memory.
++ * @iml: image loader image virtual address
++ * @iml_dma_addr: image loader image DMA address
+  * @trans: pointer to the generic transport area
+  * @scd_base_addr: scheduler sram base address in SRAM
+  * @kw: keep warm address
+@@ -329,6 +331,7 @@ struct iwl_trans_pcie {
+ 	};
+ 	struct iwl_prph_info *prph_info;
+ 	struct iwl_prph_scratch *prph_scratch;
++	void *iml;
+ 	dma_addr_t ctxt_info_dma_addr;
+ 	dma_addr_t prph_info_dma_addr;
+ 	dma_addr_t prph_scratch_dma_addr;
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index af9412bd697ee..7996b05a51c2c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -254,7 +254,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+ 	/* now that we got alive we can free the fw image & the context info.
+ 	 * paging memory, however, cannot be freed since FW will still use it
+ 	 */
+-	iwl_pcie_ctxt_info_free(trans);
++	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
++		iwl_pcie_ctxt_info_free(trans);
+ 
+ 	/*
+ 	 * Re-enable all the interrupts, including the RF-Kill one, now that
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 30b39cb4056a3..1005bef16b61a 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -626,6 +626,7 @@ struct mac80211_hwsim_data {
+ 	u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
+ 
+ 	struct mac_address addresses[2];
++	struct ieee80211_chanctx_conf *chanctx;
+ 	int channels, idx;
+ 	bool use_chanctx;
+ 	bool destroy_on_close;
+@@ -1257,7 +1258,8 @@ static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)
+ 
+ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
+ 				       struct sk_buff *my_skb,
+-				       int dst_portid)
++				       int dst_portid,
++				       struct ieee80211_channel *channel)
+ {
+ 	struct sk_buff *skb;
+ 	struct mac80211_hwsim_data *data = hw->priv;
+@@ -1312,7 +1314,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
+ 	if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
+ 		goto nla_put_failure;
+ 
+-	if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq))
++	if (nla_put_u32(skb, HWSIM_ATTR_FREQ, channel->center_freq))
+ 		goto nla_put_failure;
+ 
+ 	/* We get the tx control (rate and retries) info*/
+@@ -1659,7 +1661,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
+ 	_portid = READ_ONCE(data->wmediumd);
+ 
+ 	if (_portid || hwsim_virtio_enabled)
+-		return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
++		return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, channel);
+ 
+ 	/* NO wmediumd detected, perfect medium simulation */
+ 	data->tx_pkts++;
+@@ -1775,7 +1777,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
+ 	mac80211_hwsim_monitor_rx(hw, skb, chan);
+ 
+ 	if (_pid || hwsim_virtio_enabled)
+-		return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
++		return mac80211_hwsim_tx_frame_nl(hw, skb, _pid, chan);
+ 
+ 	mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
+ 	dev_kfree_skb(skb);
+@@ -2514,6 +2516,11 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
+ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
+ 				      struct ieee80211_chanctx_conf *ctx)
+ {
++	struct mac80211_hwsim_data *hwsim = hw->priv;
++
++	mutex_lock(&hwsim->mutex);
++	hwsim->chanctx = ctx;
++	mutex_unlock(&hwsim->mutex);
+ 	hwsim_set_chanctx_magic(ctx);
+ 	wiphy_dbg(hw->wiphy,
+ 		  "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+@@ -2525,6 +2532,11 @@ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
+ static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
+ 					  struct ieee80211_chanctx_conf *ctx)
+ {
++	struct mac80211_hwsim_data *hwsim = hw->priv;
++
++	mutex_lock(&hwsim->mutex);
++	hwsim->chanctx = NULL;
++	mutex_unlock(&hwsim->mutex);
+ 	wiphy_dbg(hw->wiphy,
+ 		  "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ 		  ctx->def.chan->center_freq, ctx->def.width,
+@@ -2537,6 +2549,11 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
+ 					  struct ieee80211_chanctx_conf *ctx,
+ 					  u32 changed)
+ {
++	struct mac80211_hwsim_data *hwsim = hw->priv;
++
++	mutex_lock(&hwsim->mutex);
++	hwsim->chanctx = ctx;
++	mutex_unlock(&hwsim->mutex);
+ 	hwsim_check_chanctx_magic(ctx);
+ 	wiphy_dbg(hw->wiphy,
+ 		  "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+@@ -3129,6 +3146,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
+ 		hw->wiphy->max_remain_on_channel_duration = 1000;
+ 		data->if_combination.radar_detect_widths = 0;
+ 		data->if_combination.num_different_channels = data->channels;
++		data->chanctx = NULL;
+ 	} else {
+ 		data->if_combination.num_different_channels = 1;
+ 		data->if_combination.radar_detect_widths =
+@@ -3638,6 +3656,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
+ 	int frame_data_len;
+ 	void *frame_data;
+ 	struct sk_buff *skb = NULL;
++	struct ieee80211_channel *channel = NULL;
+ 
+ 	if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
+ 	    !info->attrs[HWSIM_ATTR_FRAME] ||
+@@ -3664,6 +3683,17 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
+ 	if (!data2)
+ 		goto out;
+ 
++	if (data2->use_chanctx) {
++		if (data2->tmp_chan)
++			channel = data2->tmp_chan;
++		else if (data2->chanctx)
++			channel = data2->chanctx->def.chan;
++	} else {
++		channel = data2->channel;
++	}
++	if (!channel)
++		goto out;
++
+ 	if (!hwsim_virtio_enabled) {
+ 		if (hwsim_net_get_netgroup(genl_info_net(info)) !=
+ 		    data2->netgroup)
+@@ -3675,7 +3705,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
+ 
+ 	/* check if radio is configured properly */
+ 
+-	if (data2->idle || !data2->started)
++	if ((data2->idle && !data2->tmp_chan) || !data2->started)
+ 		goto out;
+ 
+ 	/* A frame is received from user space */
+@@ -3688,18 +3718,16 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
+ 		mutex_lock(&data2->mutex);
+ 		rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]);
+ 
+-		if (rx_status.freq != data2->channel->center_freq &&
+-		    (!data2->tmp_chan ||
+-		     rx_status.freq != data2->tmp_chan->center_freq)) {
++		if (rx_status.freq != channel->center_freq) {
+ 			mutex_unlock(&data2->mutex);
+ 			goto out;
+ 		}
+ 		mutex_unlock(&data2->mutex);
+ 	} else {
+-		rx_status.freq = data2->channel->center_freq;
++		rx_status.freq = channel->center_freq;
+ 	}
+ 
+-	rx_status.band = data2->channel->band;
++	rx_status.band = channel->band;
+ 	rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
+ 	rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
+index 529dfd8b7ae85..17399d4aa1290 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.c
++++ b/drivers/net/wireless/marvell/mwifiex/main.c
+@@ -1445,11 +1445,18 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
+ 		if (!priv)
+ 			continue;
+ 		rtnl_lock();
+-		wiphy_lock(adapter->wiphy);
+ 		if (priv->netdev &&
+-		    priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
++		    priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) {
++			/*
++			 * Close the netdev now, because if we do it later, the
++			 * netdev notifiers will need to acquire the wiphy lock
++			 * again --> deadlock.
++			 */
++			dev_close(priv->wdev.netdev);
++			wiphy_lock(adapter->wiphy);
+ 			mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
+-		wiphy_unlock(adapter->wiphy);
++			wiphy_unlock(adapter->wiphy);
++		}
+ 		rtnl_unlock();
+ 	}
+ 
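
The hunk above encodes a lock-ordering rule worth stating once in plain C:
dev_close() can fire netdev notifiers that themselves take the wiphy lock,
so it has to complete before wiphy_lock() is acquired. A minimal sketch of
that ordering, using a hypothetical teardown helper (only the close/lock
sequence mirrors the patch):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/cfg80211.h>

static void teardown_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
{
	rtnl_lock();
	/* Must run first: netdev notifiers may take the wiphy lock. */
	dev_close(wdev->netdev);
	wiphy_lock(wiphy);
	/* ... driver-specific virtual interface removal goes here ... */
	wiphy_unlock(wiphy);
	rtnl_unlock();
}
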
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 426787f4b2ae4..ee0acde53a609 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -349,6 +349,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ 		      struct sk_buff *skb, struct mt76_wcid *wcid,
+ 		      struct ieee80211_sta *sta)
+ {
++	struct ieee80211_tx_status status = {
++		.sta = sta,
++	};
+ 	struct mt76_tx_info tx_info = {
+ 		.skb = skb,
+ 	};
+@@ -360,11 +363,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ 	u8 *txwi;
+ 
+ 	t = mt76_get_txwi(dev);
+-	if (!t) {
+-		hw = mt76_tx_status_get_hw(dev, skb);
+-		ieee80211_free_txskb(hw, skb);
+-		return -ENOMEM;
+-	}
++	if (!t)
++		goto free_skb;
++
+ 	txwi = mt76_get_txwi_ptr(dev, t);
+ 
+ 	skb->prev = skb->next = NULL;
+@@ -427,8 +428,13 @@ free:
+ 	}
+ #endif
+ 
+-	dev_kfree_skb(tx_info.skb);
+ 	mt76_put_txwi(dev, t);
++
++free_skb:
++	status.skb = tx_info.skb;
++	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
++	ieee80211_tx_status_ext(hw, &status);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 967fe3e11d94e..1ed63eddfcf2b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -85,6 +85,22 @@ enum mt76_rxq_id {
+ 	__MT_RXQ_MAX
+ };
+ 
++enum mt76_cipher_type {
++	MT_CIPHER_NONE,
++	MT_CIPHER_WEP40,
++	MT_CIPHER_TKIP,
++	MT_CIPHER_TKIP_NO_MIC,
++	MT_CIPHER_AES_CCMP,
++	MT_CIPHER_WEP104,
++	MT_CIPHER_BIP_CMAC_128,
++	MT_CIPHER_WEP128,
++	MT_CIPHER_WAPI,
++	MT_CIPHER_CCMP_CCX,
++	MT_CIPHER_CCMP_256,
++	MT_CIPHER_GCMP,
++	MT_CIPHER_GCMP_256,
++};
++
+ struct mt76_queue_buf {
+ 	dma_addr_t addr;
+ 	u16 len;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index cc4e7bc482940..92a7b45fbc157 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -564,14 +564,27 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
+ 		u8 *data = (u8 *)rxd;
+ 
+ 		if (status->flag & RX_FLAG_DECRYPTED) {
+-			status->iv[0] = data[5];
+-			status->iv[1] = data[4];
+-			status->iv[2] = data[3];
+-			status->iv[3] = data[2];
+-			status->iv[4] = data[1];
+-			status->iv[5] = data[0];
+-
+-			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
++			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
++			case MT_CIPHER_AES_CCMP:
++			case MT_CIPHER_CCMP_CCX:
++			case MT_CIPHER_CCMP_256:
++				insert_ccmp_hdr =
++					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
++				fallthrough;
++			case MT_CIPHER_TKIP:
++			case MT_CIPHER_TKIP_NO_MIC:
++			case MT_CIPHER_GCMP:
++			case MT_CIPHER_GCMP_256:
++				status->iv[0] = data[5];
++				status->iv[1] = data[4];
++				status->iv[2] = data[3];
++				status->iv[3] = data[2];
++				status->iv[4] = data[1];
++				status->iv[5] = data[0];
++				break;
++			default:
++				break;
++			}
+ 		}
+ 
+ 		rxd += 4;
+@@ -828,7 +841,7 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ 	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ }
+ 
+-static enum mt7603_cipher_type
++static enum mt76_cipher_type
+ mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+ {
+ 	memset(key_data, 0, 32);
+@@ -860,7 +873,7 @@ mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+ int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
+ 			struct ieee80211_key_conf *key)
+ {
+-	enum mt7603_cipher_type cipher;
++	enum mt76_cipher_type cipher;
+ 	u32 addr = mt7603_wtbl3_addr(wcid);
+ 	u8 key_data[32];
+ 	int key_len = sizeof(key_data);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+index 6741e69071940..3b901090b29c6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+@@ -765,16 +765,4 @@ enum {
+ #define MT_WTBL1_OR			(MT_WTBL1_BASE + 0x2300)
+ #define MT_WTBL1_OR_PSM_WRITE		BIT(31)
+ 
+-enum mt7603_cipher_type {
+-	MT_CIPHER_NONE,
+-	MT_CIPHER_WEP40,
+-	MT_CIPHER_TKIP,
+-	MT_CIPHER_TKIP_NO_MIC,
+-	MT_CIPHER_AES_CCMP,
+-	MT_CIPHER_WEP104,
+-	MT_CIPHER_BIP_CMAC_128,
+-	MT_CIPHER_WEP128,
+-	MT_CIPHER_WAPI,
+-};
+-
+ #endif
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index d06e61cadc411..1da59c16e8200 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -57,6 +57,33 @@ static const struct mt7615_dfs_radar_spec jp_radar_specs = {
+ 	},
+ };
+ 
++static enum mt76_cipher_type
++mt7615_mac_get_cipher(int cipher)
++{
++	switch (cipher) {
++	case WLAN_CIPHER_SUITE_WEP40:
++		return MT_CIPHER_WEP40;
++	case WLAN_CIPHER_SUITE_WEP104:
++		return MT_CIPHER_WEP104;
++	case WLAN_CIPHER_SUITE_TKIP:
++		return MT_CIPHER_TKIP;
++	case WLAN_CIPHER_SUITE_AES_CMAC:
++		return MT_CIPHER_BIP_CMAC_128;
++	case WLAN_CIPHER_SUITE_CCMP:
++		return MT_CIPHER_AES_CCMP;
++	case WLAN_CIPHER_SUITE_CCMP_256:
++		return MT_CIPHER_CCMP_256;
++	case WLAN_CIPHER_SUITE_GCMP:
++		return MT_CIPHER_GCMP;
++	case WLAN_CIPHER_SUITE_GCMP_256:
++		return MT_CIPHER_GCMP_256;
++	case WLAN_CIPHER_SUITE_SMS4:
++		return MT_CIPHER_WAPI;
++	default:
++		return MT_CIPHER_NONE;
++	}
++}
++
+ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
+ 					    u8 idx, bool unicast)
+ {
+@@ -297,14 +324,27 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
+ 		u8 *data = (u8 *)rxd;
+ 
+ 		if (status->flag & RX_FLAG_DECRYPTED) {
+-			status->iv[0] = data[5];
+-			status->iv[1] = data[4];
+-			status->iv[2] = data[3];
+-			status->iv[3] = data[2];
+-			status->iv[4] = data[1];
+-			status->iv[5] = data[0];
+-
+-			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
++			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
++			case MT_CIPHER_AES_CCMP:
++			case MT_CIPHER_CCMP_CCX:
++			case MT_CIPHER_CCMP_256:
++				insert_ccmp_hdr =
++					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
++				fallthrough;
++			case MT_CIPHER_TKIP:
++			case MT_CIPHER_TKIP_NO_MIC:
++			case MT_CIPHER_GCMP:
++			case MT_CIPHER_GCMP_256:
++				status->iv[0] = data[5];
++				status->iv[1] = data[4];
++				status->iv[2] = data[3];
++				status->iv[3] = data[2];
++				status->iv[4] = data[1];
++				status->iv[5] = data[0];
++				break;
++			default:
++				break;
++			}
+ 		}
+ 		rxd += 4;
+ 		if ((u8 *)rxd - skb->data >= skb->len)
+@@ -1037,7 +1077,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
+ static int
+ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 			   struct ieee80211_key_conf *key,
+-			   enum mt7615_cipher_type cipher, u16 cipher_mask,
++			   enum mt76_cipher_type cipher, u16 cipher_mask,
+ 			   enum set_key_cmd cmd)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
+@@ -1077,7 +1117,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ static int
+ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-			  enum mt7615_cipher_type cipher, u16 cipher_mask,
++			  enum mt76_cipher_type cipher, u16 cipher_mask,
+ 			  int keyidx, enum set_key_cmd cmd)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
+@@ -1116,7 +1156,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ static void
+ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-			      enum mt7615_cipher_type cipher, u16 cipher_mask,
++			      enum mt76_cipher_type cipher, u16 cipher_mask,
+ 			      enum set_key_cmd cmd)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
+@@ -1142,7 +1182,7 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 			      struct ieee80211_key_conf *key,
+ 			      enum set_key_cmd cmd)
+ {
+-	enum mt7615_cipher_type cipher;
++	enum mt76_cipher_type cipher;
+ 	u16 cipher_mask = wcid->cipher;
+ 	int err;
+ 
+@@ -1194,22 +1234,20 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
+ 	int first_idx = 0, last_idx;
+ 	int i, idx, count;
+ 	bool fixed_rate, ack_timeout;
+-	bool probe, ampdu, cck = false;
++	bool ampdu, cck = false;
+ 	bool rs_idx;
+ 	u32 rate_set_tsf;
+ 	u32 final_rate, final_rate_flags, final_nss, txs;
+ 
+-	fixed_rate = info->status.rates[0].count;
+-	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+-
+ 	txs = le32_to_cpu(txs_data[1]);
+-	ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
++	ampdu = txs & MT_TXS1_AMPDU;
+ 
+ 	txs = le32_to_cpu(txs_data[3]);
+ 	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
+ 	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
+ 
+ 	txs = le32_to_cpu(txs_data[0]);
++	fixed_rate = txs & MT_TXS0_FIXED_RATE;
+ 	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+ 	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
+ 
+@@ -1231,7 +1269,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
+ 
+ 	first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);
+ 
+-	if (fixed_rate && !probe) {
++	if (fixed_rate) {
+ 		info->status.rates[0].count = count;
+ 		i = 0;
+ 		goto out;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+index 169f4e17b5b42..1fb07799671bb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+@@ -375,48 +375,6 @@ struct mt7615_dfs_radar_spec {
+ 	struct mt7615_dfs_pattern radar_pattern[16];
+ };
+ 
+-enum mt7615_cipher_type {
+-	MT_CIPHER_NONE,
+-	MT_CIPHER_WEP40,
+-	MT_CIPHER_TKIP,
+-	MT_CIPHER_TKIP_NO_MIC,
+-	MT_CIPHER_AES_CCMP,
+-	MT_CIPHER_WEP104,
+-	MT_CIPHER_BIP_CMAC_128,
+-	MT_CIPHER_WEP128,
+-	MT_CIPHER_WAPI,
+-	MT_CIPHER_CCMP_256 = 10,
+-	MT_CIPHER_GCMP,
+-	MT_CIPHER_GCMP_256,
+-};
+-
+-static inline enum mt7615_cipher_type
+-mt7615_mac_get_cipher(int cipher)
+-{
+-	switch (cipher) {
+-	case WLAN_CIPHER_SUITE_WEP40:
+-		return MT_CIPHER_WEP40;
+-	case WLAN_CIPHER_SUITE_WEP104:
+-		return MT_CIPHER_WEP104;
+-	case WLAN_CIPHER_SUITE_TKIP:
+-		return MT_CIPHER_TKIP;
+-	case WLAN_CIPHER_SUITE_AES_CMAC:
+-		return MT_CIPHER_BIP_CMAC_128;
+-	case WLAN_CIPHER_SUITE_CCMP:
+-		return MT_CIPHER_AES_CCMP;
+-	case WLAN_CIPHER_SUITE_CCMP_256:
+-		return MT_CIPHER_CCMP_256;
+-	case WLAN_CIPHER_SUITE_GCMP:
+-		return MT_CIPHER_GCMP;
+-	case WLAN_CIPHER_SUITE_GCMP_256:
+-		return MT_CIPHER_GCMP_256;
+-	case WLAN_CIPHER_SUITE_SMS4:
+-		return MT_CIPHER_WAPI;
+-	default:
+-		return MT_CIPHER_NONE;
+-	}
+-}
+-
+ static inline struct mt7615_txp_common *
+ mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+ {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+index 771bad60e1bc7..b21edc8e86e11 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+@@ -34,24 +34,24 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+ {
+ 	memset(key_data, 0, 32);
+ 	if (!key)
+-		return MT_CIPHER_NONE;
++		return MT76X02_CIPHER_NONE;
+ 
+ 	if (key->keylen > 32)
+-		return MT_CIPHER_NONE;
++		return MT76X02_CIPHER_NONE;
+ 
+ 	memcpy(key_data, key->key, key->keylen);
+ 
+ 	switch (key->cipher) {
+ 	case WLAN_CIPHER_SUITE_WEP40:
+-		return MT_CIPHER_WEP40;
++		return MT76X02_CIPHER_WEP40;
+ 	case WLAN_CIPHER_SUITE_WEP104:
+-		return MT_CIPHER_WEP104;
++		return MT76X02_CIPHER_WEP104;
+ 	case WLAN_CIPHER_SUITE_TKIP:
+-		return MT_CIPHER_TKIP;
++		return MT76X02_CIPHER_TKIP;
+ 	case WLAN_CIPHER_SUITE_CCMP:
+-		return MT_CIPHER_AES_CCMP;
++		return MT76X02_CIPHER_AES_CCMP;
+ 	default:
+-		return MT_CIPHER_NONE;
++		return MT76X02_CIPHER_NONE;
+ 	}
+ }
+ 
+@@ -63,7 +63,7 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+ 	u32 val;
+ 
+ 	cipher = mt76x02_mac_get_key_info(key, key_data);
+-	if (cipher == MT_CIPHER_NONE && key)
++	if (cipher == MT76X02_CIPHER_NONE && key)
+ 		return -EOPNOTSUPP;
+ 
+ 	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+@@ -91,10 +91,10 @@ void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+ 	eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
+ 
+ 	pn = (u64)eiv << 16;
+-	if (cipher == MT_CIPHER_TKIP) {
++	if (cipher == MT76X02_CIPHER_TKIP) {
+ 		pn |= (iv >> 16) & 0xff;
+ 		pn |= (iv & 0xff) << 8;
+-	} else if (cipher >= MT_CIPHER_AES_CCMP) {
++	} else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
+ 		pn |= iv & 0xffff;
+ 	} else {
+ 		return;
+@@ -112,7 +112,7 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+ 	u64 pn;
+ 
+ 	cipher = mt76x02_mac_get_key_info(key, key_data);
+-	if (cipher == MT_CIPHER_NONE && key)
++	if (cipher == MT76X02_CIPHER_NONE && key)
+ 		return -EOPNOTSUPP;
+ 
+ 	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+@@ -126,16 +126,16 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+ 		pn = atomic64_read(&key->tx_pn);
+ 
+ 		iv_data[3] = key->keyidx << 6;
+-		if (cipher >= MT_CIPHER_TKIP) {
++		if (cipher >= MT76X02_CIPHER_TKIP) {
+ 			iv_data[3] |= 0x20;
+ 			put_unaligned_le32(pn >> 16, &iv_data[4]);
+ 		}
+ 
+-		if (cipher == MT_CIPHER_TKIP) {
++		if (cipher == MT76X02_CIPHER_TKIP) {
+ 			iv_data[0] = (pn >> 8) & 0xff;
+ 			iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
+ 			iv_data[2] = pn & 0xff;
+-		} else if (cipher >= MT_CIPHER_AES_CCMP) {
++		} else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
+ 			put_unaligned_le16((pn & 0xffff), &iv_data[0]);
+ 		}
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+index 3e722276b5c2f..fa7872ac22bf8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+@@ -692,15 +692,15 @@ struct mt76_wcid_key {
+ } __packed __aligned(4);
+ 
+ enum mt76x02_cipher_type {
+-	MT_CIPHER_NONE,
+-	MT_CIPHER_WEP40,
+-	MT_CIPHER_WEP104,
+-	MT_CIPHER_TKIP,
+-	MT_CIPHER_AES_CCMP,
+-	MT_CIPHER_CKIP40,
+-	MT_CIPHER_CKIP104,
+-	MT_CIPHER_CKIP128,
+-	MT_CIPHER_WAPI,
++	MT76X02_CIPHER_NONE,
++	MT76X02_CIPHER_WEP40,
++	MT76X02_CIPHER_WEP104,
++	MT76X02_CIPHER_TKIP,
++	MT76X02_CIPHER_AES_CCMP,
++	MT76X02_CIPHER_CKIP40,
++	MT76X02_CIPHER_CKIP104,
++	MT76X02_CIPHER_CKIP128,
++	MT76X02_CIPHER_WAPI,
+ };
+ 
+ #endif
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+index 3ee8c27bb61b7..40a51d99a7814 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+@@ -116,12 +116,15 @@ static inline bool
+ mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
+ {
+ 	u8 *eep = dev->mt76.eeprom.data;
++	u8 val = eep[MT_EE_WIFI_CONF + 7];
+ 
+-	/* TODO: DBDC */
+-	if (band == NL80211_BAND_5GHZ)
+-		return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_5G;
++	if (band == NL80211_BAND_2GHZ)
++		return val & MT_EE_WIFI_CONF7_TSSI0_2G;
++
++	if (dev->dbdc_support)
++		return val & MT_EE_WIFI_CONF7_TSSI1_5G;
+ 	else
+-		return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_2G;
++		return val & MT_EE_WIFI_CONF7_TSSI0_5G;
+ }
+ 
+ extern const struct sku_group mt7915_sku_groups[];
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index c7d4268d860af..5ab34606c021b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -452,6 +452,9 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
+ 	if (nss < 2)
+ 		return;
+ 
++	/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
++	elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
++
+ 	if (vif != NL80211_IFTYPE_AP)
+ 		return;
+ 
+@@ -465,9 +468,6 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
+ 	c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
+ 	    IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB;
+ 	elem->phy_cap_info[6] |= c;
+-
+-	/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
+-	elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
+ }
+ 
+ static void
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index 819670767521f..be40b07775171 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -404,14 +404,27 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
+ 		u8 *data = (u8 *)rxd;
+ 
+ 		if (status->flag & RX_FLAG_DECRYPTED) {
+-			status->iv[0] = data[5];
+-			status->iv[1] = data[4];
+-			status->iv[2] = data[3];
+-			status->iv[3] = data[2];
+-			status->iv[4] = data[1];
+-			status->iv[5] = data[0];
+-
+-			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
++			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
++			case MT_CIPHER_AES_CCMP:
++			case MT_CIPHER_CCMP_CCX:
++			case MT_CIPHER_CCMP_256:
++				insert_ccmp_hdr =
++					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
++				fallthrough;
++			case MT_CIPHER_TKIP:
++			case MT_CIPHER_TKIP_NO_MIC:
++			case MT_CIPHER_GCMP:
++			case MT_CIPHER_GCMP_256:
++				status->iv[0] = data[5];
++				status->iv[1] = data[4];
++				status->iv[2] = data[3];
++				status->iv[3] = data[2];
++				status->iv[4] = data[1];
++				status->iv[5] = data[0];
++				break;
++			default:
++				break;
++			}
+ 		}
+ 		rxd += 4;
+ 		if ((u8 *)rxd - skb->data >= skb->len)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index f069a5a03e145..22c791b084baf 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -88,28 +88,28 @@ struct mt7915_fw_region {
+ #define HE_PHY(p, c)			u8_get_bits(c, IEEE80211_HE_PHY_##p)
+ #define HE_MAC(m, c)			u8_get_bits(c, IEEE80211_HE_MAC_##m)
+ 
+-static enum mt7915_cipher_type
++static enum mcu_cipher_type
+ mt7915_mcu_get_cipher(int cipher)
+ {
+ 	switch (cipher) {
+ 	case WLAN_CIPHER_SUITE_WEP40:
+-		return MT_CIPHER_WEP40;
++		return MCU_CIPHER_WEP40;
+ 	case WLAN_CIPHER_SUITE_WEP104:
+-		return MT_CIPHER_WEP104;
++		return MCU_CIPHER_WEP104;
+ 	case WLAN_CIPHER_SUITE_TKIP:
+-		return MT_CIPHER_TKIP;
++		return MCU_CIPHER_TKIP;
+ 	case WLAN_CIPHER_SUITE_AES_CMAC:
+-		return MT_CIPHER_BIP_CMAC_128;
++		return MCU_CIPHER_BIP_CMAC_128;
+ 	case WLAN_CIPHER_SUITE_CCMP:
+-		return MT_CIPHER_AES_CCMP;
++		return MCU_CIPHER_AES_CCMP;
+ 	case WLAN_CIPHER_SUITE_CCMP_256:
+-		return MT_CIPHER_CCMP_256;
++		return MCU_CIPHER_CCMP_256;
+ 	case WLAN_CIPHER_SUITE_GCMP:
+-		return MT_CIPHER_GCMP;
++		return MCU_CIPHER_GCMP;
+ 	case WLAN_CIPHER_SUITE_GCMP_256:
+-		return MT_CIPHER_GCMP_256;
++		return MCU_CIPHER_GCMP_256;
+ 	case WLAN_CIPHER_SUITE_SMS4:
+-		return MT_CIPHER_WAPI;
++		return MCU_CIPHER_WAPI;
+ 	default:
+ 		return MT_CIPHER_NONE;
+ 	}
+@@ -1060,14 +1060,14 @@ mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
+ 		sec_key = &sec->key[0];
+ 		sec_key->cipher_len = sizeof(*sec_key);
+ 
+-		if (cipher == MT_CIPHER_BIP_CMAC_128) {
+-			sec_key->cipher_id = MT_CIPHER_AES_CCMP;
++		if (cipher == MCU_CIPHER_BIP_CMAC_128) {
++			sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
+ 			sec_key->key_id = bip->keyidx;
+ 			sec_key->key_len = 16;
+ 			memcpy(sec_key->key, bip->key, 16);
+ 
+ 			sec_key = &sec->key[1];
+-			sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
++			sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
+ 			sec_key->cipher_len = sizeof(*sec_key);
+ 			sec_key->key_len = 16;
+ 			memcpy(sec_key->key, key->key, 16);
+@@ -1079,14 +1079,14 @@ mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
+ 			sec_key->key_len = key->keylen;
+ 			memcpy(sec_key->key, key->key, key->keylen);
+ 
+-			if (cipher == MT_CIPHER_TKIP) {
++			if (cipher == MCU_CIPHER_TKIP) {
+ 				/* Rx/Tx MIC keys are swapped */
+ 				memcpy(sec_key->key + 16, key->key + 24, 8);
+ 				memcpy(sec_key->key + 24, key->key + 16, 8);
+ 			}
+ 
+ 			/* store key_conf for BIP batch update */
+-			if (cipher == MT_CIPHER_AES_CCMP) {
++			if (cipher == MCU_CIPHER_AES_CCMP) {
+ 				memcpy(bip->key, key->key, key->keylen);
+ 				bip->keyidx = key->keyidx;
+ 			}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+index 2d584142c27b0..1a865ef81a43a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+@@ -1023,18 +1023,17 @@ enum {
+ 	STA_REC_MAX_NUM
+ };
+ 
+-enum mt7915_cipher_type {
+-	MT_CIPHER_NONE,
+-	MT_CIPHER_WEP40,
+-	MT_CIPHER_WEP104,
+-	MT_CIPHER_WEP128,
+-	MT_CIPHER_TKIP,
+-	MT_CIPHER_AES_CCMP,
+-	MT_CIPHER_CCMP_256,
+-	MT_CIPHER_GCMP,
+-	MT_CIPHER_GCMP_256,
+-	MT_CIPHER_WAPI,
+-	MT_CIPHER_BIP_CMAC_128,
++enum mcu_cipher_type {
++	MCU_CIPHER_WEP40 = 1,
++	MCU_CIPHER_WEP104,
++	MCU_CIPHER_WEP128,
++	MCU_CIPHER_TKIP,
++	MCU_CIPHER_AES_CCMP,
++	MCU_CIPHER_CCMP_256,
++	MCU_CIPHER_GCMP,
++	MCU_CIPHER_GCMP_256,
++	MCU_CIPHER_WAPI,
++	MCU_CIPHER_BIP_CMAC_128,
+ };
+ 
+ enum {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index c4b144391a8e2..b7af252a00914 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -378,14 +378,27 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+ 		u8 *data = (u8 *)rxd;
+ 
+ 		if (status->flag & RX_FLAG_DECRYPTED) {
+-			status->iv[0] = data[5];
+-			status->iv[1] = data[4];
+-			status->iv[2] = data[3];
+-			status->iv[3] = data[2];
+-			status->iv[4] = data[1];
+-			status->iv[5] = data[0];
+-
+-			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
++			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
++			case MT_CIPHER_AES_CCMP:
++			case MT_CIPHER_CCMP_CCX:
++			case MT_CIPHER_CCMP_256:
++				insert_ccmp_hdr =
++					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
++				fallthrough;
++			case MT_CIPHER_TKIP:
++			case MT_CIPHER_TKIP_NO_MIC:
++			case MT_CIPHER_GCMP:
++			case MT_CIPHER_GCMP_256:
++				status->iv[0] = data[5];
++				status->iv[1] = data[4];
++				status->iv[2] = data[3];
++				status->iv[3] = data[2];
++				status->iv[4] = data[1];
++				status->iv[5] = data[0];
++				break;
++			default:
++				break;
++			}
+ 		}
+ 		rxd += 4;
+ 		if ((u8 *)rxd - skb->data >= skb->len)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index a4f070cd78fda..02da127e91263 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -87,28 +87,28 @@ struct mt7921_fw_region {
+ #define to_wcid_lo(id)			FIELD_GET(GENMASK(7, 0), (u16)id)
+ #define to_wcid_hi(id)			FIELD_GET(GENMASK(9, 8), (u16)id)
+ 
+-static enum mt7921_cipher_type
++static enum mcu_cipher_type
+ mt7921_mcu_get_cipher(int cipher)
+ {
+ 	switch (cipher) {
+ 	case WLAN_CIPHER_SUITE_WEP40:
+-		return MT_CIPHER_WEP40;
++		return MCU_CIPHER_WEP40;
+ 	case WLAN_CIPHER_SUITE_WEP104:
+-		return MT_CIPHER_WEP104;
++		return MCU_CIPHER_WEP104;
+ 	case WLAN_CIPHER_SUITE_TKIP:
+-		return MT_CIPHER_TKIP;
++		return MCU_CIPHER_TKIP;
+ 	case WLAN_CIPHER_SUITE_AES_CMAC:
+-		return MT_CIPHER_BIP_CMAC_128;
++		return MCU_CIPHER_BIP_CMAC_128;
+ 	case WLAN_CIPHER_SUITE_CCMP:
+-		return MT_CIPHER_AES_CCMP;
++		return MCU_CIPHER_AES_CCMP;
+ 	case WLAN_CIPHER_SUITE_CCMP_256:
+-		return MT_CIPHER_CCMP_256;
++		return MCU_CIPHER_CCMP_256;
+ 	case WLAN_CIPHER_SUITE_GCMP:
+-		return MT_CIPHER_GCMP;
++		return MCU_CIPHER_GCMP;
+ 	case WLAN_CIPHER_SUITE_GCMP_256:
+-		return MT_CIPHER_GCMP_256;
++		return MCU_CIPHER_GCMP_256;
+ 	case WLAN_CIPHER_SUITE_SMS4:
+-		return MT_CIPHER_WAPI;
++		return MCU_CIPHER_WAPI;
+ 	default:
+ 		return MT_CIPHER_NONE;
+ 	}
+@@ -579,14 +579,14 @@ mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
+ 		sec_key = &sec->key[0];
+ 		sec_key->cipher_len = sizeof(*sec_key);
+ 
+-		if (cipher == MT_CIPHER_BIP_CMAC_128) {
+-			sec_key->cipher_id = MT_CIPHER_AES_CCMP;
++		if (cipher == MCU_CIPHER_BIP_CMAC_128) {
++			sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
+ 			sec_key->key_id = bip->keyidx;
+ 			sec_key->key_len = 16;
+ 			memcpy(sec_key->key, bip->key, 16);
+ 
+ 			sec_key = &sec->key[1];
+-			sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
++			sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
+ 			sec_key->cipher_len = sizeof(*sec_key);
+ 			sec_key->key_len = 16;
+ 			memcpy(sec_key->key, key->key, 16);
+@@ -598,14 +598,14 @@ mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
+ 			sec_key->key_len = key->keylen;
+ 			memcpy(sec_key->key, key->key, key->keylen);
+ 
+-			if (cipher == MT_CIPHER_TKIP) {
++			if (cipher == MCU_CIPHER_TKIP) {
+ 				/* Rx/Tx MIC keys are swapped */
+ 				memcpy(sec_key->key + 16, key->key + 24, 8);
+ 				memcpy(sec_key->key + 24, key->key + 16, 8);
+ 			}
+ 
+ 			/* store key_conf for BIP batch update */
+-			if (cipher == MT_CIPHER_AES_CCMP) {
++			if (cipher == MCU_CIPHER_AES_CCMP) {
+ 				memcpy(bip->key, key->key, key->keylen);
+ 				bip->keyidx = key->keyidx;
+ 			}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+index 2fdc62367b3fb..4028c667bd688 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+@@ -214,18 +214,17 @@ struct sta_rec_sec {
+ 	struct sec_key key[2];
+ } __packed;
+ 
+-enum mt7921_cipher_type {
+-	MT_CIPHER_NONE,
+-	MT_CIPHER_WEP40,
+-	MT_CIPHER_WEP104,
+-	MT_CIPHER_WEP128,
+-	MT_CIPHER_TKIP,
+-	MT_CIPHER_AES_CCMP,
+-	MT_CIPHER_CCMP_256,
+-	MT_CIPHER_GCMP,
+-	MT_CIPHER_GCMP_256,
+-	MT_CIPHER_WAPI,
+-	MT_CIPHER_BIP_CMAC_128,
++enum mcu_cipher_type {
++	MCU_CIPHER_WEP40 = 1,
++	MCU_CIPHER_WEP104,
++	MCU_CIPHER_WEP128,
++	MCU_CIPHER_TKIP,
++	MCU_CIPHER_AES_CCMP,
++	MCU_CIPHER_CCMP_256,
++	MCU_CIPHER_GCMP,
++	MCU_CIPHER_GCMP_256,
++	MCU_CIPHER_WAPI,
++	MCU_CIPHER_BIP_CMAC_128,
+ };
+ 
+ enum {
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+index d6d1be4169e5f..acb6b0cd36672 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+@@ -853,15 +853,10 @@ struct rtl8192eu_efuse {
+ 	u8 usb_optional_function;
+ 	u8 res9[2];
+ 	u8 mac_addr[ETH_ALEN];		/* 0xd7 */
+-	u8 res10[2];
+-	u8 vendor_name[7];
+-	u8 res11[2];
+-	u8 device_name[0x0b];		/* 0xe8 */
+-	u8 res12[2];
+-	u8 serial[0x0b];		/* 0xf5 */
+-	u8 res13[0x30];
++	u8 device_info[80];
++	u8 res11[3];
+ 	u8 unknown[0x0d];		/* 0x130 */
+-	u8 res14[0xc3];
++	u8 res12[0xc3];
+ };
+ 
+ struct rtl8xxxu_reg8val {
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+index cfe2dfdae928f..b06508d0cdf8f 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+@@ -554,9 +554,43 @@ rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+ 	}
+ }
+ 
++static void rtl8192eu_log_next_device_info(struct rtl8xxxu_priv *priv,
++					   char *record_name,
++					   char *device_info,
++					   unsigned int *record_offset)
++{
++	char *record = device_info + *record_offset;
++
++	/* A record is [ total length | 0x03 | value ] */
++	unsigned char l = record[0];
++
++	/*
++	 * The whole device_info section seems to be 80 bytes; make sure
++	 * we don't read past its end.
++	 */
++	if (*record_offset + l > 80) {
++		dev_warn(&priv->udev->dev,
++			 "invalid record length %d while parsing \"%s\" at offset %u.\n",
++			 l, record_name, *record_offset);
++		return;
++	}
++
++	if (l >= 2) {
++		char value[80];
++
++		memcpy(value, &record[2], l - 2);
++		value[l - 2] = '\0';
++		dev_info(&priv->udev->dev, "%s: %s\n", record_name, value);
++		*record_offset = *record_offset + l;
++	} else {
++		dev_info(&priv->udev->dev, "%s not available.\n", record_name);
++	}
++}
++
+ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
+ {
+ 	struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
++	unsigned int record_offset;
+ 	int i;
+ 
+ 	if (efuse->rtl_id != cpu_to_le16(0x8129))
+@@ -604,12 +638,25 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
+ 	priv->has_xtalk = 1;
+ 	priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
+ 
+-	dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
+-	dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
+-	if (memchr_inv(efuse->serial, 0xff, 11))
+-		dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
+-	else
+-		dev_info(&priv->udev->dev, "Serial not available.\n");
++	/*
++	 * The device_info section seems to be laid out as records of the
++	 * form [ total length | 0x03 | value ], so:
++	 * - vendor length + 2
++	 * - 0x03
++	 * - vendor string (not null terminated)
++	 * - product length + 2
++	 * - 0x03
++	 * - product string (not null terminated)
++	 * Then there are one or two 0x00 bytes on all four devices I own
++	 * or have found dumped online.
++	 * As the previous version of the code handled an optional serial
++	 * string, I now assume there may be a third record if the length
++	 * is not 0.
++	 */
++	record_offset = 0;
++	rtl8192eu_log_next_device_info(priv, "Vendor", efuse->device_info, &record_offset);
++	rtl8192eu_log_next_device_info(priv, "Product", efuse->device_info, &record_offset);
++	rtl8192eu_log_next_device_info(priv, "Serial", efuse->device_info, &record_offset);
+ 
+ 	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ 		unsigned char *raw = priv->efuse_wifi.raw;
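
The two comments above pin down the efuse record framing well enough to
exercise on its own. Below is a minimal user-space sketch of the same
[ total length | 0x03 | value ] walk; the sample bytes are invented and
the error handling is collapsed relative to the driver:

#include <stdio.h>

/* Walk one [ total length | 0x03 | value ] record, bounded to the
 * 80-byte device_info section, in the spirit of the driver's
 * rtl8192eu_log_next_device_info(). */
static void log_next_record(const char *name, const unsigned char *info,
			    unsigned int *offset)
{
	const unsigned char *rec = info + *offset;
	unsigned char len = rec[0];	/* total length; value is len - 2 bytes */

	if (len < 2 || *offset + len > 80) {
		printf("%s not available.\n", name);
		return;
	}
	printf("%s: %.*s\n", name, len - 2, (const char *)(rec + 2));
	*offset += len;
}

int main(void)
{
	/* Made-up section contents; only the framing matches real dumps. */
	unsigned char info[80] = {
		9, 0x03, 'R', 'e', 'a', 'l', 't', 'e', 'k',
		13, 0x03, '8', '0', '2', '.', '1', '1', 'n', ' ', 'N', 'I', 'C',
		0,	/* trailing 0x00, so "Serial" resolves to no record */
	};
	unsigned int offset = 0;

	log_next_record("Vendor", info, &offset);
	log_next_record("Product", info, &offset);
	log_next_record("Serial", info, &offset);
	return 0;
}
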
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
+index 6b5c885798a4b..e5110d2cbc1d5 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.c
++++ b/drivers/net/wireless/realtek/rtw88/pci.c
+@@ -2,6 +2,7 @@
+ /* Copyright(c) 2018-2019  Realtek Corporation
+  */
+ 
++#include <linux/dmi.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include "main.h"
+@@ -1598,6 +1599,36 @@ static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
+ 	netif_napi_del(&rtwpci->napi);
+ }
+ 
++enum rtw88_quirk_dis_pci_caps {
++	QUIRK_DIS_PCI_CAP_MSI,
++	QUIRK_DIS_PCI_CAP_ASPM,
++};
++
++static int disable_pci_caps(const struct dmi_system_id *dmi)
++{
++	uintptr_t dis_caps = (uintptr_t)dmi->driver_data;
++
++	if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_MSI))
++		rtw_disable_msi = true;
++	if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_ASPM))
++		rtw_pci_disable_aspm = true;
++
++	return 1;
++}
++
++static const struct dmi_system_id rtw88_pci_quirks[] = {
++	{
++		.callback = disable_pci_caps,
++		.ident = "Protempo Ltd L116HTN6SPW",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Protempo Ltd"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "L116HTN6SPW"),
++		},
++		.driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
++	},
++	{}
++};
++
+ int rtw_pci_probe(struct pci_dev *pdev,
+ 		  const struct pci_device_id *id)
+ {
+@@ -1648,6 +1679,7 @@ int rtw_pci_probe(struct pci_dev *pdev,
+ 		goto err_destroy_pci;
+ 	}
+ 
++	dmi_check_system(rtw88_pci_quirks);
+ 	rtw_pci_phy_cfg(rtwdev);
+ 
+ 	ret = rtw_register_hw(rtwdev, hw);
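
Adding another machine to rtw88_pci_quirks[] later follows the same shape.
A hypothetical entry (vendor and product strings invented for illustration)
that disables MSI instead of ASPM would slot in before the terminating { }
sentinel:

	{
		.callback = disable_pci_caps,
		.ident = "Example Vendor NB1234",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "NB1234"),
		},
		.driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_MSI),
	},

dmi_check_system() runs the callback for each matching entry and stops once
a callback returns non-zero, which disable_pci_caps() always does.
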
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+index ad5715c65de3e..03f97acbf80f6 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+@@ -16812,53 +16812,53 @@ static const u32 rtw8822c_rf_a[] = {
+ 	0x92000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03F, 0x00010E46,
+ 	0x93000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x93000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x93000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x93000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x93000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x93000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x93000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x93000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x94000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x94000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x94000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x94000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x94000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x94000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x94000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x94000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x95000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x95000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x95000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x95000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x95000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x95000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x95000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0x95000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00030246,
++		0x03F, 0x0003D646,
+ 	0xA0000000,	0x00000000,
+ 		0x03F, 0x00002A46,
+ 	0xB0000000,	0x00000000,
+@@ -18762,53 +18762,53 @@ static const u32 rtw8822c_rf_a[] = {
+ 	0x92000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03F, 0x0000EA46,
+ 	0x93000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0xA0000000,	0x00000000,
+ 		0x03F, 0x00002A46,
+ 	0xB0000000,	0x00000000,
+@@ -18957,53 +18957,53 @@ static const u32 rtw8822c_rf_a[] = {
+ 	0x92000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03F, 0x0000EA46,
+ 	0x93000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0xA0000000,	0x00000000,
+ 		0x03F, 0x00002A46,
+ 	0xB0000000,	0x00000000,
+@@ -19152,53 +19152,53 @@ static const u32 rtw8822c_rf_a[] = {
+ 	0x92000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03F, 0x0000EA46,
+ 	0x93000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0xA0000000,	0x00000000,
+ 		0x03F, 0x00002A46,
+ 	0xB0000000,	0x00000000,
+@@ -19347,53 +19347,53 @@ static const u32 rtw8822c_rf_a[] = {
+ 	0x92000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03F, 0x0000EA46,
+ 	0x93000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x93000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x94000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000001,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000002,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000003,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000004,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000005,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000006,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000015,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0x95000016,	0x00000000,	0x40000000,	0x00000000,
+-		0x03F, 0x00031E46,
++		0x03F, 0x0003D646,
+ 	0xA0000000,	0x00000000,
+ 		0x03F, 0x00002A46,
+ 	0xB0000000,	0x00000000,
+@@ -19610,21 +19610,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19633,21 +19633,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000003,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19656,21 +19656,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000004,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19679,21 +19679,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000005,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19702,21 +19702,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000006,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19725,21 +19725,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000015,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19748,21 +19748,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000016,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19771,21 +19771,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000001,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19794,21 +19794,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19817,21 +19817,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000003,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19840,21 +19840,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000004,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19863,21 +19863,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000005,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19886,21 +19886,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000006,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19909,21 +19909,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000015,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19932,21 +19932,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000016,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19955,21 +19955,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000001,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -19978,21 +19978,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -20001,21 +20001,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000003,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -20024,21 +20024,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000004,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -20047,21 +20047,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000005,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -20070,21 +20070,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000006,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -20093,21 +20093,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000015,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -20116,21 +20116,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000016,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -20139,21 +20139,21 @@ static const u32 rtw8822c_rf_a[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x000008C8,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x000008CB,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x000008CE,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x000008D1,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x000008D4,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000DD1,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0xA0000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000487,
+@@ -38484,21 +38484,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38507,21 +38507,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000003,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38530,21 +38530,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000004,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38553,21 +38553,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000005,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38576,21 +38576,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000006,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38599,21 +38599,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000015,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38622,21 +38622,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x93000016,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38645,21 +38645,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000001,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38668,21 +38668,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38691,21 +38691,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000003,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38714,21 +38714,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000004,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38737,21 +38737,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000005,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38760,21 +38760,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000006,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38783,21 +38783,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000015,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38806,21 +38806,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x94000016,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38829,21 +38829,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000001,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38852,21 +38852,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000002,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38875,21 +38875,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000003,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38898,21 +38898,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000004,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38921,21 +38921,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000005,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38944,21 +38944,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000006,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38967,21 +38967,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000015,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -38990,21 +38990,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0x95000016,	0x00000000,	0x40000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000467,
+@@ -39013,21 +39013,21 @@ static const u32 rtw8822c_rf_b[] = {
+ 		0x033, 0x00000062,
+ 		0x03F, 0x00000908,
+ 		0x033, 0x00000063,
+-		0x03F, 0x00000D09,
++		0x03F, 0x00000CC6,
+ 		0x033, 0x00000064,
+-		0x03F, 0x00000D49,
++		0x03F, 0x00000CC9,
+ 		0x033, 0x00000065,
+-		0x03F, 0x00000D8A,
++		0x03F, 0x00000CCC,
+ 		0x033, 0x00000066,
+-		0x03F, 0x00000DEB,
++		0x03F, 0x00000CCF,
+ 		0x033, 0x00000067,
+-		0x03F, 0x00000DEE,
++		0x03F, 0x00000CD2,
+ 		0x033, 0x00000068,
+-		0x03F, 0x00000DF1,
++		0x03F, 0x00000CD5,
+ 		0x033, 0x00000069,
+-		0x03F, 0x00000DF4,
++		0x03F, 0x00000DD4,
+ 		0x033, 0x0000006A,
+-		0x03F, 0x00000DF7,
++		0x03F, 0x00000DD7,
+ 	0xA0000000,	0x00000000,
+ 		0x033, 0x00000060,
+ 		0x03F, 0x00000487,
+diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+index b65ec14136c7e..4c30b5772ce0f 100644
+--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
++++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+@@ -53,6 +53,7 @@ static const struct sdio_device_id cw1200_sdio_ids[] = {
+ 	{ SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) },
+ 	{ /* end: all zeroes */			},
+ };
++MODULE_DEVICE_TABLE(sdio, cw1200_sdio_ids);
+ 
+ /* hwbus_ops implementation */
+ 
+diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
+index 498c8db2eb48b..d7a869106782f 100644
+--- a/drivers/net/wireless/ti/wl1251/cmd.c
++++ b/drivers/net/wireless/ti/wl1251/cmd.c
+@@ -454,9 +454,12 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ 		cmd->channels[i].channel = channels[i]->hw_value;
+ 	}
+ 
+-	cmd->params.ssid_len = ssid_len;
+-	if (ssid)
+-		memcpy(cmd->params.ssid, ssid, ssid_len);
++	if (ssid) {
++		int len = clamp_val(ssid_len, 0, IEEE80211_MAX_SSID_LEN);
++
++		cmd->params.ssid_len = len;
++		memcpy(cmd->params.ssid, ssid, len);
++	}
+ 
+ 	ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
+ 	if (ret < 0) {
+diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
+index 9d7dbfe7fe0c3..c6da0cfb4afbe 100644
+--- a/drivers/net/wireless/ti/wl12xx/main.c
++++ b/drivers/net/wireless/ti/wl12xx/main.c
+@@ -1503,6 +1503,13 @@ static int wl12xx_get_fuse_mac(struct wl1271 *wl)
+ 	u32 mac1, mac2;
+ 	int ret;
+ 
++	/* Device may be in ELP from the bootloader or kexec */
++	ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
++	if (ret < 0)
++		goto out;
++
++	usleep_range(500000, 700000);
++
+ 	ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+ 	if (ret < 0)
+ 		goto out;
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index a5ab1e0c74cf6..3ffe676ed7f16 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -686,15 +686,17 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+ 			continue;
+ 		if (len < 2 * sizeof(u32)) {
+ 			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
++			of_node_put(child);
+ 			return -EINVAL;
+ 		}
+ 
+ 		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+-		if (!cell)
++		if (!cell) {
++			of_node_put(child);
+ 			return -ENOMEM;
++		}
+ 
+ 		cell->nvmem = nvmem;
+-		cell->np = of_node_get(child);
+ 		cell->offset = be32_to_cpup(addr++);
+ 		cell->bytes = be32_to_cpup(addr);
+ 		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
+@@ -715,11 +717,12 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+ 				cell->name, nvmem->stride);
+ 			/* Cells already added will be freed later. */
+ 			kfree_const(cell->name);
+-			of_node_put(cell->np);
+ 			kfree(cell);
++			of_node_put(child);
+ 			return -EINVAL;
+ 		}
+ 
++		cell->np = of_node_get(child);
+ 		nvmem_cell_add(cell);
+ 	}
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 0e94190ca4e88..8dee6d3f33a70 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -2214,6 +2214,8 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
+ 		goto fail_host_init;
+ 	}
+ 
++	dw_pcie_setup_rc(&pcie->pci.pp);
++
+ 	ret = tegra_pcie_dw_start_link(&pcie->pci);
+ 	if (ret < 0)
+ 		goto fail_host_init;
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index e3f5e7ab76063..c95ebe808f92b 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -57,7 +57,7 @@
+ #define   PIO_COMPLETION_STATUS_UR		1
+ #define   PIO_COMPLETION_STATUS_CRS		2
+ #define   PIO_COMPLETION_STATUS_CA		4
+-#define   PIO_NON_POSTED_REQ			BIT(0)
++#define   PIO_NON_POSTED_REQ			BIT(10)
+ #define PIO_ADDR_LS				(PIO_BASE_ADDR + 0x8)
+ #define PIO_ADDR_MS				(PIO_BASE_ADDR + 0xc)
+ #define PIO_WR_DATA				(PIO_BASE_ADDR + 0x10)
+@@ -125,6 +125,7 @@
+ #define     LTSSM_MASK				0x3f
+ #define     LTSSM_L0				0x10
+ #define     RC_BAR_CONFIG			0x300
++#define VENDOR_ID_REG				(LMI_BASE_ADDR + 0x44)
+ 
+ /* PCIe core controller registers */
+ #define CTRL_CORE_BASE_ADDR			0x18000
+@@ -385,6 +386,16 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+ 	reg |= (IS_RC_MSK << IS_RC_SHIFT);
+ 	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+ 
++	/*
++	 * Replace the incorrect PCI vendor id 0x1b4b with the correct 0x11ab.
++	 * VENDOR_ID_REG holds the vendor id in its low 16 bits and the subsystem
++	 * vendor id in its high 16 bits; updating it changes the readback value
++	 * of the read-only vendor id bits in PCIE_CORE_DEV_ID_REG. Workaround
++	 * for erratum 4.1: "The value of device and vendor ID is incorrect".
++	 */
++	reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
++	advk_writel(pcie, reg, VENDOR_ID_REG);
++
+ 	/* Set Advanced Error Capabilities and Control PF0 register */
+ 	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
+ 		PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 7bf76bca888da..909343ee7e0ac 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -27,6 +27,7 @@
+ #include <linux/nvme.h>
+ #include <linux/platform_data/x86/apple.h>
+ #include <linux/pm_runtime.h>
++#include <linux/suspend.h>
+ #include <linux/switchtec.h>
+ #include <asm/dma.h>	/* isa_dma_bridge_buggy */
+ #include "pci.h"
+@@ -3668,6 +3669,16 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
+ 		return;
+ 	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
+ 		return;
++
++	/*
++	 * SXIO/SXFP/SXLF turns off power to the Thunderbolt controller.
++	 * We don't know how to turn it back on again, but firmware does,
++	 * so we can only use SXIO/SXFP/SXLF if we're suspending via
++	 * firmware.
++	 */
++	if (!pm_suspend_via_firmware())
++		return;
++
+ 	bridge = ACPI_HANDLE(&dev->dev);
+ 	if (!bridge)
+ 		return;
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 2d4acf21117cc..c5950a3b4e4ce 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -991,6 +991,7 @@ static int amd_gpio_remove(struct platform_device *pdev)
+ static const struct acpi_device_id amd_gpio_acpi_match[] = {
+ 	{ "AMD0030", 0 },
+ 	{ "AMDI0030", 0},
++	{ "AMDI0031", 0},
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(acpi, amd_gpio_acpi_match);
+diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
+index 067271b7d35a3..ac1c47f542c11 100644
+--- a/drivers/pinctrl/pinctrl-equilibrium.c
++++ b/drivers/pinctrl/pinctrl-equilibrium.c
+@@ -929,6 +929,7 @@ static const struct of_device_id eqbr_pinctrl_dt_match[] = {
+ 	{ .compatible = "intel,lgm-io" },
+ 	{}
+ };
++MODULE_DEVICE_TABLE(of, eqbr_pinctrl_dt_match);
+ 
+ static struct platform_driver eqbr_pinctrl_driver = {
+ 	.probe	= eqbr_pinctrl_probe,
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index ce2d8014b7e0b..d0259577934e9 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -351,6 +351,11 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
+ 	if (mcp_read(mcp, MCP_INTF, &intf))
+ 		goto unlock;
+ 
++	if (intf == 0) {
++		/* There is no interrupt pending */
++		goto unlock;
++	}
++
+ 	if (mcp_read(mcp, MCP_INTCAP, &intcap))
+ 		goto unlock;
+ 
+@@ -368,11 +373,6 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
+ 	mcp->cached_gpio = gpio;
+ 	mutex_unlock(&mcp->lock);
+ 
+-	if (intf == 0) {
+-		/* There is no interrupt pending */
+-		return IRQ_HANDLED;
+-	}
+-
+ 	dev_dbg(mcp->chip.parent,
+ 		"intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
+ 		intcap, intf, gpio_orig, gpio);
+diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+index 9e6f2a895a234..5b1355fae9b41 100644
+--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+@@ -100,24 +100,27 @@ static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
+ 	if (err)
+ 		return err;
+ 
+-	val = (val >> 24) & 0xff;
++	val = (val >> 24) & 0x3f;
+ 	return sprintf(buf, "%d\n", (int)val);
+ }
+ 
+-static int tcc_offset_update(int tcc)
++static int tcc_offset_update(unsigned int tcc)
+ {
+ 	u64 val;
+ 	int err;
+ 
+-	if (!tcc)
++	if (tcc > 63)
+ 		return -EINVAL;
+ 
+ 	err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+ 	if (err)
+ 		return err;
+ 
+-	val &= ~GENMASK_ULL(31, 24);
+-	val |= (tcc & 0xff) << 24;
++	if (val & BIT(31))
++		return -EPERM;
++
++	val &= ~GENMASK_ULL(29, 24);
++	val |= (tcc & 0x3f) << 24;
+ 
+ 	err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
+ 	if (err)
+@@ -126,14 +129,15 @@ static int tcc_offset_update(int tcc)
+ 	return 0;
+ }
+ 
+-static int tcc_offset_save;
++static unsigned int tcc_offset_save;
+ 
+ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
+ 				struct device_attribute *attr, const char *buf,
+ 				size_t count)
+ {
++	unsigned int tcc;
+ 	u64 val;
+-	int tcc, err;
++	int err;
+ 
+ 	err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+ 	if (err)
+@@ -142,7 +146,7 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
+ 	if (!(val & BIT(30)))
+ 		return -EACCES;
+ 
+-	if (kstrtoint(buf, 0, &tcc))
++	if (kstrtouint(buf, 0, &tcc))
+ 		return -EINVAL;
+ 
+ 	err = tcc_offset_update(tcc);
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 826a56e3bbd28..62210cbea84b8 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1490,6 +1490,7 @@ struct ext4_sb_info {
+ 	struct kobject s_kobj;
+ 	struct completion s_kobj_unregister;
+ 	struct super_block *s_sb;
++	struct buffer_head *s_mmp_bh;
+ 
+ 	/* Journaling */
+ 	struct journal_s *s_journal;
+@@ -3663,6 +3664,9 @@ extern struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end);
+ /* mmp.c */
+ extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
+ 
++/* mmp.c */
++extern void ext4_stop_mmpd(struct ext4_sb_info *sbi);
++
+ /* verity.c */
+ extern const struct fsverity_operations ext4_verityops;
+ 
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 68fbeedd627bc..6cb598b549ca1 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -127,9 +127,9 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
+  */
+ static int kmmpd(void *data)
+ {
+-	struct super_block *sb = ((struct mmpd_data *) data)->sb;
+-	struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
++	struct super_block *sb = (struct super_block *) data;
+ 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
++	struct buffer_head *bh = EXT4_SB(sb)->s_mmp_bh;
+ 	struct mmp_struct *mmp;
+ 	ext4_fsblk_t mmp_block;
+ 	u32 seq = 0;
+@@ -245,12 +245,18 @@ static int kmmpd(void *data)
+ 	retval = write_mmp_block(sb, bh);
+ 
+ exit_thread:
+-	EXT4_SB(sb)->s_mmp_tsk = NULL;
+-	kfree(data);
+-	brelse(bh);
+ 	return retval;
+ }
+ 
++void ext4_stop_mmpd(struct ext4_sb_info *sbi)
++{
++	if (sbi->s_mmp_tsk) {
++		kthread_stop(sbi->s_mmp_tsk);
++		brelse(sbi->s_mmp_bh);
++		sbi->s_mmp_tsk = NULL;
++	}
++}
++
+ /*
+  * Get a random new sequence number but make sure it is not greater than
+  * EXT4_MMP_SEQ_MAX.
+@@ -275,7 +281,6 @@ int ext4_multi_mount_protect(struct super_block *sb,
+ 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ 	struct buffer_head *bh = NULL;
+ 	struct mmp_struct *mmp = NULL;
+-	struct mmpd_data *mmpd_data;
+ 	u32 seq;
+ 	unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
+ 	unsigned int wait_time = 0;
+@@ -364,24 +369,17 @@ skip:
+ 		goto failed;
+ 	}
+ 
+-	mmpd_data = kmalloc(sizeof(*mmpd_data), GFP_KERNEL);
+-	if (!mmpd_data) {
+-		ext4_warning(sb, "not enough memory for mmpd_data");
+-		goto failed;
+-	}
+-	mmpd_data->sb = sb;
+-	mmpd_data->bh = bh;
++	EXT4_SB(sb)->s_mmp_bh = bh;
+ 
+ 	/*
+ 	 * Start a kernel thread to update the MMP block periodically.
+ 	 */
+-	EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
++	EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, sb, "kmmpd-%.*s",
+ 					     (int)sizeof(mmp->mmp_bdevname),
+ 					     bdevname(bh->b_bdev,
+ 						      mmp->mmp_bdevname));
+ 	if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
+ 		EXT4_SB(sb)->s_mmp_tsk = NULL;
+-		kfree(mmpd_data);
+ 		ext4_warning(sb, "Unable to create kmmpd thread for %s.",
+ 			     sb->s_id);
+ 		goto failed;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 4a869bc5271b9..62dd98189e326 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1245,8 +1245,8 @@ static void ext4_put_super(struct super_block *sb)
+ 	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
+ 	sbi->s_ea_block_cache = NULL;
+ 
+-	if (sbi->s_mmp_tsk)
+-		kthread_stop(sbi->s_mmp_tsk);
++	ext4_stop_mmpd(sbi);
++
+ 	brelse(sbi->s_sbh);
+ 	sb->s_fs_info = NULL;
+ 	/*
+@@ -5168,8 +5168,7 @@ failed_mount3a:
+ failed_mount3:
+ 	flush_work(&sbi->s_error_work);
+ 	del_timer_sync(&sbi->s_err_report);
+-	if (sbi->s_mmp_tsk)
+-		kthread_stop(sbi->s_mmp_tsk);
++	ext4_stop_mmpd(sbi);
+ failed_mount2:
+ 	rcu_read_lock();
+ 	group_desc = rcu_dereference(sbi->s_group_desc);
+@@ -5967,8 +5966,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ 				 */
+ 				ext4_mark_recovery_complete(sb, es);
+ 			}
+-			if (sbi->s_mmp_tsk)
+-				kthread_stop(sbi->s_mmp_tsk);
++			ext4_stop_mmpd(sbi);
+ 		} else {
+ 			/* Make sure we can mount this feature set readwrite */
+ 			if (ext4_has_feature_readonly(sb) ||
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index f3fabb1edfe97..148fc1ce52d77 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3560,6 +3560,8 @@ void f2fs_destroy_garbage_collection_cache(void);
+  */
+ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
+ bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
++int __init f2fs_create_recovery_cache(void);
++void f2fs_destroy_recovery_cache(void);
+ 
+ /*
+  * debug.c
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index da75d5d52f0ac..4ae055f39e211 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -787,13 +787,6 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
+ 	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
+ #endif
+ 
+-	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
+-			sizeof(struct fsync_inode_entry));
+-	if (!fsync_entry_slab) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
+-
+ 	INIT_LIST_HEAD(&inode_list);
+ 	INIT_LIST_HEAD(&tmp_inode_list);
+ 	INIT_LIST_HEAD(&dir_list);
+@@ -866,8 +859,6 @@ skip:
+ 		}
+ 	}
+ 
+-	kmem_cache_destroy(fsync_entry_slab);
+-out:
+ #ifdef CONFIG_QUOTA
+ 	/* Turn quotas off */
+ 	if (quota_enabled)
+@@ -877,3 +868,17 @@ out:
+ 
+ 	return ret ? ret: err;
+ }
++
++int __init f2fs_create_recovery_cache(void)
++{
++	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
++					sizeof(struct fsync_inode_entry));
++	if (!fsync_entry_slab)
++		return -ENOMEM;
++	return 0;
++}
++
++void f2fs_destroy_recovery_cache(void)
++{
++	kmem_cache_destroy(fsync_entry_slab);
++}
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index d852d96773a32..1f7bc4b3f6aec 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -4187,9 +4187,12 @@ static int __init init_f2fs_fs(void)
+ 	err = f2fs_create_checkpoint_caches();
+ 	if (err)
+ 		goto free_segment_manager_caches;
+-	err = f2fs_create_extent_cache();
++	err = f2fs_create_recovery_cache();
+ 	if (err)
+ 		goto free_checkpoint_caches;
++	err = f2fs_create_extent_cache();
++	if (err)
++		goto free_recovery_cache;
+ 	err = f2fs_create_garbage_collection_cache();
+ 	if (err)
+ 		goto free_extent_cache;
+@@ -4238,6 +4241,8 @@ free_garbage_collection_cache:
+ 	f2fs_destroy_garbage_collection_cache();
+ free_extent_cache:
+ 	f2fs_destroy_extent_cache();
++free_recovery_cache:
++	f2fs_destroy_recovery_cache();
+ free_checkpoint_caches:
+ 	f2fs_destroy_checkpoint_caches();
+ free_segment_manager_caches:
+@@ -4263,6 +4268,7 @@ static void __exit exit_f2fs_fs(void)
+ 	f2fs_exit_sysfs();
+ 	f2fs_destroy_garbage_collection_cache();
+ 	f2fs_destroy_extent_cache();
++	f2fs_destroy_recovery_cache();
+ 	f2fs_destroy_checkpoint_caches();
+ 	f2fs_destroy_segment_manager_caches();
+ 	f2fs_destroy_node_manager_caches();
+diff --git a/fs/io-wq.c b/fs/io-wq.c
+index 4eba531bea5a9..b836737f96f31 100644
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -243,7 +243,8 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
+ 	 * Most likely an attempt to queue unbounded work on an io_wq that
+ 	 * wasn't setup with any unbounded workers.
+ 	 */
+-	WARN_ON_ONCE(!acct->max_workers);
++	if (unlikely(!acct->max_workers))
++		pr_warn_once("io-wq is not configured for unbound workers");
+ 
+ 	rcu_read_lock();
+ 	ret = io_wqe_activate_free_worker(wqe);
+@@ -991,6 +992,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+ 
+ 	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
+ 		return ERR_PTR(-EINVAL);
++	if (WARN_ON_ONCE(!bounded))
++		return ERR_PTR(-EINVAL);
+ 
+ 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+ 	if (!wq)
+diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
+index 6f65bfa9f18d5..b0eb9c85eea0c 100644
+--- a/fs/jfs/inode.c
++++ b/fs/jfs/inode.c
+@@ -151,7 +151,8 @@ void jfs_evict_inode(struct inode *inode)
+ 			if (test_cflag(COMMIT_Freewmap, inode))
+ 				jfs_free_zero_link(inode);
+ 
+-			diFree(inode);
++			if (JFS_SBI(inode->i_sb)->ipimap)
++				diFree(inode);
+ 
+ 			/*
+ 			 * Free the inode from the quota allocation.
+diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
+index e98f99338f8f8..df5fc12a6ceed 100644
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -2760,6 +2760,20 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
+ 		goto free_and_return;
+ 	}
+ 
++	/*
++	 * Sanity check that the journal's first block is correct.
++	 * An invalid journal first block can cause important
++	 * superblock members to be zeroed.
++	 */
++	if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
++	    SB_ONDISK_JOURNAL_1st_BLOCK(sb) < SB_JOURNAL_1st_RESERVED_BLOCK(sb)) {
++		reiserfs_warning(sb, "journal-1393",
++				 "journal 1st super block is invalid: 1st reserved block %d, but actual 1st block is %d",
++				 SB_JOURNAL_1st_RESERVED_BLOCK(sb),
++				 SB_ONDISK_JOURNAL_1st_BLOCK(sb));
++		goto free_and_return;
++	}
++
+ 	if (journal_init_dev(sb, journal, j_dev_name) != 0) {
+ 		reiserfs_warning(sb, "sh-462",
+ 				 "unable to initialize journal device");
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index ddb2ca636c93d..274e635e00c3d 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -275,6 +275,7 @@ static struct inode *ubifs_alloc_inode(struct super_block *sb)
+ 	memset((void *)ui + sizeof(struct inode), 0,
+ 	       sizeof(struct ubifs_inode) - sizeof(struct inode));
+ 	mutex_init(&ui->ui_mutex);
++	init_rwsem(&ui->xattr_sem);
+ 	spin_lock_init(&ui->ui_lock);
+ 	return &ui->vfs_inode;
+ };
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index 7fdfdbda4b8ae..15847c1474ba1 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -356,6 +356,7 @@ struct ubifs_gced_idx_leb {
+  * @ui_mutex: serializes inode write-back with the rest of VFS operations,
+  *            serializes "clean <-> dirty" state changes, serializes bulk-read,
+  *            protects @dirty, @bulk_read, @ui_size, and @xattr_size
++ * @xattr_sem: serializes write operations (remove|set|create) on xattr
+  * @ui_lock: protects @synced_i_size
+  * @synced_i_size: synchronized size of inode, i.e. the value of inode size
+  *                 currently stored on the flash; used only for regular file
+@@ -409,6 +410,7 @@ struct ubifs_inode {
+ 	unsigned int bulk_read:1;
+ 	unsigned int compr_type:2;
+ 	struct mutex ui_mutex;
++	struct rw_semaphore xattr_sem;
+ 	spinlock_t ui_lock;
+ 	loff_t synced_i_size;
+ 	loff_t ui_size;
+diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
+index 6b1e9830b2745..1fce27e9b7697 100644
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -285,6 +285,7 @@ int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
+ 	if (!xent)
+ 		return -ENOMEM;
+ 
++	down_write(&ubifs_inode(host)->xattr_sem);
+ 	/*
+ 	 * The extended attribute entries are stored in LNC, so multiple
+ 	 * look-ups do not involve reading the flash.
+@@ -319,6 +320,7 @@ int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
+ 	iput(inode);
+ 
+ out_free:
++	up_write(&ubifs_inode(host)->xattr_sem);
+ 	kfree(xent);
+ 	return err;
+ }
+@@ -341,18 +343,19 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
+ 	if (!xent)
+ 		return -ENOMEM;
+ 
++	down_read(&ubifs_inode(host)->xattr_sem);
+ 	xent_key_init(c, &key, host->i_ino, &nm);
+ 	err = ubifs_tnc_lookup_nm(c, &key, xent, &nm);
+ 	if (err) {
+ 		if (err == -ENOENT)
+ 			err = -ENODATA;
+-		goto out_unlock;
++		goto out_cleanup;
+ 	}
+ 
+ 	inode = iget_xattr(c, le64_to_cpu(xent->inum));
+ 	if (IS_ERR(inode)) {
+ 		err = PTR_ERR(inode);
+-		goto out_unlock;
++		goto out_cleanup;
+ 	}
+ 
+ 	ui = ubifs_inode(inode);
+@@ -374,7 +377,8 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
+ out_iput:
+ 	mutex_unlock(&ui->ui_mutex);
+ 	iput(inode);
+-out_unlock:
++out_cleanup:
++	up_read(&ubifs_inode(host)->xattr_sem);
+ 	kfree(xent);
+ 	return err;
+ }
+@@ -406,16 +410,21 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ 	dbg_gen("ino %lu ('%pd'), buffer size %zd", host->i_ino,
+ 		dentry, size);
+ 
++	down_read(&host_ui->xattr_sem);
+ 	len = host_ui->xattr_names + host_ui->xattr_cnt;
+-	if (!buffer)
++	if (!buffer) {
+ 		/*
+ 		 * We should return the minimum buffer size which will fit a
+ 		 * null-terminated list of all the extended attribute names.
+ 		 */
+-		return len;
++		err = len;
++		goto out_err;
++	}
+ 
+-	if (len > size)
+-		return -ERANGE;
++	if (len > size) {
++		err = -ERANGE;
++		goto out_err;
++	}
+ 
+ 	lowest_xent_key(c, &key, host->i_ino);
+ 	while (1) {
+@@ -437,8 +446,9 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ 		pxent = xent;
+ 		key_read(c, &xent->key, &key);
+ 	}
+-
+ 	kfree(pxent);
++	up_read(&host_ui->xattr_sem);
++
+ 	if (err != -ENOENT) {
+ 		ubifs_err(c, "cannot find next direntry, error %d", err);
+ 		return err;
+@@ -446,6 +456,10 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ 
+ 	ubifs_assert(c, written <= size);
+ 	return written;
++
++out_err:
++	up_read(&host_ui->xattr_sem);
++	return err;
+ }
+ 
+ static int remove_xattr(struct ubifs_info *c, struct inode *host,
+@@ -504,6 +518,7 @@ int ubifs_purge_xattrs(struct inode *host)
+ 	ubifs_warn(c, "inode %lu has too many xattrs, doing a non-atomic deletion",
+ 		   host->i_ino);
+ 
++	down_write(&ubifs_inode(host)->xattr_sem);
+ 	lowest_xent_key(c, &key, host->i_ino);
+ 	while (1) {
+ 		xent = ubifs_tnc_next_ent(c, &key, &nm);
+@@ -523,7 +538,7 @@ int ubifs_purge_xattrs(struct inode *host)
+ 			ubifs_ro_mode(c, err);
+ 			kfree(pxent);
+ 			kfree(xent);
+-			return err;
++			goto out_err;
+ 		}
+ 
+ 		ubifs_assert(c, ubifs_inode(xino)->xattr);
+@@ -535,7 +550,7 @@ int ubifs_purge_xattrs(struct inode *host)
+ 			kfree(xent);
+ 			iput(xino);
+ 			ubifs_err(c, "cannot remove xattr, error %d", err);
+-			return err;
++			goto out_err;
+ 		}
+ 
+ 		iput(xino);
+@@ -544,14 +559,19 @@ int ubifs_purge_xattrs(struct inode *host)
+ 		pxent = xent;
+ 		key_read(c, &xent->key, &key);
+ 	}
+-
+ 	kfree(pxent);
++	up_write(&ubifs_inode(host)->xattr_sem);
++
+ 	if (err != -ENOENT) {
+ 		ubifs_err(c, "cannot find next direntry, error %d", err);
+ 		return err;
+ 	}
+ 
+ 	return 0;
++
++out_err:
++	up_write(&ubifs_inode(host)->xattr_sem);
++	return err;
+ }
+ 
+ /**
+@@ -594,6 +614,7 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
+ 	if (!xent)
+ 		return -ENOMEM;
+ 
++	down_write(&ubifs_inode(host)->xattr_sem);
+ 	xent_key_init(c, &key, host->i_ino, &nm);
+ 	err = ubifs_tnc_lookup_nm(c, &key, xent, &nm);
+ 	if (err) {
+@@ -618,6 +639,7 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
+ 	iput(inode);
+ 
+ out_free:
++	up_write(&ubifs_inode(host)->xattr_sem);
+ 	kfree(xent);
+ 	return err;
+ }
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index f146b3089f3da..6c5692ad42b55 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -934,6 +934,10 @@ static int udf_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ 				iinfo->i_location.partitionReferenceNum,
+ 				0);
+ 		epos.bh = udf_tgetblk(sb, block);
++		if (unlikely(!epos.bh)) {
++			err = -ENOMEM;
++			goto out_no_entry;
++		}
+ 		lock_buffer(epos.bh);
+ 		memset(epos.bh->b_data, 0x00, bsize);
+ 		set_buffer_uptodate(epos.bh);
+diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
+index db026b6ec15ab..e5cf12f102a21 100644
+--- a/include/linux/blk_types.h
++++ b/include/linux/blk_types.h
+@@ -304,6 +304,7 @@ enum {
+ 	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
+ 	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
+ 	BIO_REMAPPED,
++	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
+ 	BIO_FLAG_LAST
+ };
+ 
+diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
+index 9b97d284d0ce8..bc3819dc33e12 100644
+--- a/include/linux/mfd/abx500/ux500_chargalg.h
++++ b/include/linux/mfd/abx500/ux500_chargalg.h
+@@ -15,7 +15,7 @@
+  * - POWER_SUPPLY_TYPE_USB,
+  * because only they store a pointer to struct ux500_charger as drv_data.
+  */
+-#define psy_to_ux500_charger(x) power_supply_get_drvdata(psy)
++#define psy_to_ux500_charger(x) power_supply_get_drvdata(x)
+ 
+ /* Forward declaration */
+ struct ux500_charger;
+diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
+index 3de38d6a0aeac..2c6b9e4162254 100644
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -93,7 +93,7 @@ enum {
+ 
+ 	/*
+ 	 * Add your fresh new feature above and remember to update
+-	 * netdev_features_strings[] in net/core/ethtool.c and maybe
++	 * netdev_features_strings[] in net/ethtool/common.c and maybe
+ 	 * some feature mask #defines below. Please also describe it
+ 	 * in Documentation/networking/netdev-features.rst.
+ 	 */
+diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
+index 2b05e7f7c2385..da633d34ab866 100644
+--- a/include/linux/of_mdio.h
++++ b/include/linux/of_mdio.h
+@@ -72,6 +72,13 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *
+ 	return mdiobus_register(mdio);
+ }
+ 
++static inline int devm_of_mdiobus_register(struct device *dev,
++					   struct mii_bus *mdio,
++					   struct device_node *np)
++{
++	return devm_mdiobus_register(dev, mdio);
++}
++
+ static inline struct mdio_device *of_mdio_find_device(struct device_node *np)
+ {
+ 	return NULL;
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index fe10e8570a522..6598ae35e1b5a 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -1136,7 +1136,7 @@ do {										\
+  * Waitqueues which are removed from the waitqueue_head at wakeup time
+  */
+ void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+-void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
++bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+ long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+ long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
+diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
+index d0e9a5bdb08bb..6078dd29f3e77 100644
+--- a/include/media/v4l2-subdev.h
++++ b/include/media/v4l2-subdev.h
+@@ -162,6 +162,9 @@ struct v4l2_subdev_io_pin_config {
+  * @s_gpio: set GPIO pins. Very simple right now, might need to be extended with
+  *	a direction argument if needed.
+  *
++ * @command: called by in-kernel drivers in order to call functions internal
++ *	   to subdev drivers that have a separate callback.
++ *
+  * @ioctl: called at the end of ioctl() syscall handler at the V4L2 core.
+  *	   used to provide support for private ioctls used on the driver.
+  *
+@@ -193,6 +196,7 @@ struct v4l2_subdev_core_ops {
+ 	int (*load_fw)(struct v4l2_subdev *sd);
+ 	int (*reset)(struct v4l2_subdev *sd, u32 val);
+ 	int (*s_gpio)(struct v4l2_subdev *sd, u32 val);
++	long (*command)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
+ 	long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
+ #ifdef CONFIG_COMPAT
+ 	long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd,
+diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
+index e6bd8ebf9ac33..c28ea1f0ec9c0 100644
+--- a/include/net/flow_offload.h
++++ b/include/net/flow_offload.h
+@@ -313,12 +313,14 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
+ 	if (flow_offload_has_one_action(action))
+ 		return true;
+ 
+-	flow_action_for_each(i, action_entry, action) {
+-		if (i && action_entry->hw_stats != last_hw_stats) {
+-			NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+-			return false;
++	if (action) {
++		flow_action_for_each(i, action_entry, action) {
++			if (i && action_entry->hw_stats != last_hw_stats) {
++				NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
++				return false;
++			}
++			last_hw_stats = action_entry->hw_stats;
+ 		}
+-		last_hw_stats = action_entry->hw_stats;
+ 	}
+ 	return true;
+ }
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 1aa585216f34b..d49593c72a555 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -461,7 +461,7 @@ struct sctp_af {
+ 					 int saddr);
+ 	void		(*from_sk)	(union sctp_addr *,
+ 					 struct sock *sk);
+-	void		(*from_addr_param) (union sctp_addr *,
++	bool		(*from_addr_param) (union sctp_addr *,
+ 					    union sctp_addr_param *,
+ 					    __be16 port, int iif);
+ 	int		(*to_addr_param) (const union sctp_addr *,
+diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
+index 4dcd13d097a9e..d588c244ec2fa 100644
+--- a/include/uapi/asm-generic/socket.h
++++ b/include/uapi/asm-generic/socket.h
+@@ -122,6 +122,8 @@
+ #define SO_PREFER_BUSY_POLL	69
+ #define SO_BUSY_POLL_BUDGET	70
+ 
++#define SO_NETNS_COOKIE		71
++
+ #if !defined(__KERNEL__)
+ 
+ #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
+diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
+index 5afea692a3f7c..e36eee9132ecb 100644
+--- a/include/uapi/linux/ethtool.h
++++ b/include/uapi/linux/ethtool.h
+@@ -233,7 +233,7 @@ enum tunable_id {
+ 	ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
+ 	/*
+ 	 * Add your fresh new tunable attribute above and remember to update
+-	 * tunable_strings[] in net/core/ethtool.c
++	 * tunable_strings[] in net/ethtool/common.c
+ 	 */
+ 	__ETHTOOL_TUNABLE_COUNT,
+ };
+@@ -297,7 +297,7 @@ enum phy_tunable_id {
+ 	ETHTOOL_PHY_EDPD,
+ 	/*
+ 	 * Add your fresh new phy tunable attribute above and remember to update
+-	 * phy_tunable_strings[] in net/core/ethtool.c
++	 * phy_tunable_strings[] in net/ethtool/common.c
+ 	 */
+ 	__ETHTOOL_PHY_TUNABLE_COUNT,
+ };
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 75244ecb2389f..952d98beda636 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1399,29 +1399,54 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
+ select_insn:
+ 	goto *jumptable[insn->code];
+ 
+-	/* ALU */
+-#define ALU(OPCODE, OP)			\
+-	ALU64_##OPCODE##_X:		\
+-		DST = DST OP SRC;	\
+-		CONT;			\
+-	ALU_##OPCODE##_X:		\
+-		DST = (u32) DST OP (u32) SRC;	\
+-		CONT;			\
+-	ALU64_##OPCODE##_K:		\
+-		DST = DST OP IMM;		\
+-		CONT;			\
+-	ALU_##OPCODE##_K:		\
+-		DST = (u32) DST OP (u32) IMM;	\
++	/* Explicitly mask the register-based shift amounts with 63 or 31
++	 * to avoid undefined behavior. Normally this won't affect the
++	 * generated code, for example, in case of native 64 bit archs such
++	 * as x86-64 or arm64, the compiler is optimizing the AND away for
++	 * the interpreter. In case of JITs, each of the JIT backends compiles
++	 * the BPF shift operations to machine instructions which produce
++	 * implementation-defined results in such a case; the resulting
++	 * contents of the register may be arbitrary, but program behaviour
++	 * as a whole remains defined. In other words, in case of JIT backends,
++	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
++	 */
++	/* ALU (shifts) */
++#define SHT(OPCODE, OP)					\
++	ALU64_##OPCODE##_X:				\
++		DST = DST OP (SRC & 63);		\
++		CONT;					\
++	ALU_##OPCODE##_X:				\
++		DST = (u32) DST OP ((u32) SRC & 31);	\
++		CONT;					\
++	ALU64_##OPCODE##_K:				\
++		DST = DST OP IMM;			\
++		CONT;					\
++	ALU_##OPCODE##_K:				\
++		DST = (u32) DST OP (u32) IMM;		\
++		CONT;
++	/* ALU (rest) */
++#define ALU(OPCODE, OP)					\
++	ALU64_##OPCODE##_X:				\
++		DST = DST OP SRC;			\
++		CONT;					\
++	ALU_##OPCODE##_X:				\
++		DST = (u32) DST OP (u32) SRC;		\
++		CONT;					\
++	ALU64_##OPCODE##_K:				\
++		DST = DST OP IMM;			\
++		CONT;					\
++	ALU_##OPCODE##_K:				\
++		DST = (u32) DST OP (u32) IMM;		\
+ 		CONT;
+-
+ 	ALU(ADD,  +)
+ 	ALU(SUB,  -)
+ 	ALU(AND,  &)
+ 	ALU(OR,   |)
+-	ALU(LSH, <<)
+-	ALU(RSH, >>)
+ 	ALU(XOR,  ^)
+ 	ALU(MUL,  *)
++	SHT(LSH, <<)
++	SHT(RSH, >>)
++#undef SHT
+ #undef ALU
+ 	ALU_NEG:
+ 		DST = (u32) -DST;
+@@ -1446,13 +1471,13 @@ select_insn:
+ 		insn++;
+ 		CONT;
+ 	ALU_ARSH_X:
+-		DST = (u64) (u32) (((s32) DST) >> SRC);
++		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
+ 		CONT;
+ 	ALU_ARSH_K:
+ 		DST = (u64) (u32) (((s32) DST) >> IMM);
+ 		CONT;
+ 	ALU64_ARSH_X:
+-		(*(s64 *) &DST) >>= SRC;
++		(*(s64 *) &DST) >>= (SRC & 63);
+ 		CONT;
+ 	ALU64_ARSH_K:
+ 		(*(s64 *) &DST) >>= IMM;
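
For context on the SHT()/ARSH changes above: in C, shifting a 32-bit value by 32 or more (or a 64-bit value by 64 or more) is undefined behavior, so the interpreter now masks register-sourced shift amounts with 31 or 63. Immediate shifts are already range-checked by the verifier, and, as the comment notes, JITs must not add the mask because their target ISAs define the out-of-range case themselves. A tiny standalone illustration of the masking rule (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Well-defined shifts for any runtime amount: mask to the type width. */
static uint32_t lsh32(uint32_t v, uint32_t amt)
{
	return v << (amt & 31);	/* without the mask, amt >= 32 is UB */
}

static uint64_t lsh64(uint64_t v, uint32_t amt)
{
	return v << (amt & 63);
}

int main(void)
{
	printf("%u\n", lsh32(1u, 33));			/* acts like 1 << 1 */
	printf("%llu\n", (unsigned long long)lsh64(1, 65));	/* likewise */
	return 0;
}
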
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index 84b3b35fc0d05..9e0c10c6892ad 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -8,6 +8,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/wait.h>
+ #include <linux/poll.h>
++#include <linux/kmemleak.h>
+ #include <uapi/linux/btf.h>
+ 
+ #define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
+@@ -105,6 +106,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
+ 	rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
+ 		  VM_ALLOC | VM_USERMAP, PAGE_KERNEL);
+ 	if (rb) {
++		kmemleak_not_leak(pages);
+ 		rb->pages = pages;
+ 		rb->nr_pages = nr_pages;
+ 		return rb;
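
The kmemleak_not_leak() call above addresses a false positive: after vmap(), the only live reference to the pages array sits inside the mapped ring-buffer area, which kmemleak does not scan, so the allocation would be reported as leaked even though it is freed on teardown. The general shape of the pattern, as a hedged kernel-style sketch (not compilable on its own; pages/nr_pages stand for the surrounding context):

/* Allocation whose only reference ends up somewhere the leak
 * scanner cannot see; tell kmemleak it is intentionally kept. */
pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
	return NULL;
/* ... the sole reference is stashed in a vmap'ed region ... */
kmemleak_not_leak(pages);	/* suppress the false-positive report */
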
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 1b6302ecbabe9..83e65219da567 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -32,6 +32,7 @@
+ #include <linux/relay.h>
+ #include <linux/slab.h>
+ #include <linux/percpu-rwsem.h>
++#include <linux/cpuset.h>
+ 
+ #include <trace/events/power.h>
+ #define CREATE_TRACE_POINTS
+@@ -821,6 +822,52 @@ void __init cpuhp_threads_init(void)
+ 	kthread_unpark(this_cpu_read(cpuhp_state.thread));
+ }
+ 
++/*
++ *
++ * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
++ * protected region.
++ *
++ * The operation is still serialized against concurrent CPU hotplug via
++ * cpu_add_remove_lock, i.e. CPU map protection.  But it is _not_
++ * serialized against other hotplug related activity like adding or
++ * removing of state callbacks and state instances, which invoke either the
++ * startup or the teardown callback of the affected state.
++ *
++ * This is required for subsystems which are unfixable vs. CPU hotplug and
++ * evade lock inversion problems by scheduling work which has to be
++ * completed _before_ cpu_up()/_cpu_down() returns.
++ *
++ * Don't even think about adding anything to this for any new code or even
++ * drivers. Its only purpose is to keep existing lock order trainwrecks
++ * working.
++ *
++ * For cpu_down() there might be valid reasons to finish cleanups which are
++ * not required to be done under cpu_hotplug_lock, but that's a different
++ * story and would be not invoked via this.
++ */
++static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
++{
++	/*
++	 * cpusets delegate hotplug operations to a worker to "solve" the
++	 * lock order problems. Wait for the worker, but only if tasks are
++	 * _not_ frozen (suspend, hibernate) as that would wait forever.
++	 *
++	 * The wait is required because otherwise the hotplug operation
++	 * returns with inconsistent state, which could even be observed in
++	 * user space when a new CPU is brought up. The CPU plug uevent
++	 * would be delivered and user space reacting on it would fail to
++	 * move tasks to the newly plugged CPU up to the point where the
++	 * work has finished because up to that point the newly plugged CPU
++	 * is not assignable in cpusets/cgroups. On unplug that's not
++	 * necessarily a visible issue, but it is still inconsistent state,
++	 * which is the real problem which needs to be "fixed". This can't
++	 * prevent the transient state between scheduling the work and
++	 * returning from waiting for it.
++	 */
++	if (!tasks_frozen)
++		cpuset_wait_for_hotplug();
++}
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ #ifndef arch_clear_mm_cpumask_cpu
+ #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
+@@ -1058,6 +1105,7 @@ out:
+ 	 */
+ 	lockup_detector_cleanup();
+ 	arch_smt_update();
++	cpu_up_down_serialize_trainwrecks(tasks_frozen);
+ 	return ret;
+ }
+ 
+@@ -1254,6 +1302,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
+ out:
+ 	cpus_write_unlock();
+ 	arch_smt_update();
++	cpu_up_down_serialize_trainwrecks(tasks_frozen);
+ 	return ret;
+ }
+ 
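
Note the placement of cpu_up_down_serialize_trainwrecks() in the hunks above: it runs after cpus_write_unlock(), so waiting for the cpuset worker can never deadlock against work that itself needs cpu_hotplug_lock, while cpu_add_remove_lock still serializes whole hotplug operations. A userspace analogue of that ordering rule, assuming pthreads (an illustration of the lock-ordering idea, not the kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void *deferred_work(void *arg)
{
	pthread_mutex_lock(&big_lock);	/* the work needs big_lock too */
	puts("deferred work done");
	pthread_mutex_unlock(&big_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&big_lock);
	pthread_create(&t, NULL, deferred_work, NULL);
	/* Joining here, with big_lock still held, would deadlock. */
	pthread_mutex_unlock(&big_lock);

	pthread_join(t, NULL);	/* safe: the lock was dropped first */
	return 0;
}
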
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 20ac5dff9a0ce..572f312cc8039 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3665,15 +3665,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ 
+ 		r = removed_load;
+ 		sub_positive(&sa->load_avg, r);
+-		sub_positive(&sa->load_sum, r * divider);
++		sa->load_sum = sa->load_avg * divider;
+ 
+ 		r = removed_util;
+ 		sub_positive(&sa->util_avg, r);
+-		sub_positive(&sa->util_sum, r * divider);
++		sa->util_sum = sa->util_avg * divider;
+ 
+ 		r = removed_runnable;
+ 		sub_positive(&sa->runnable_avg, r);
+-		sub_positive(&sa->runnable_sum, r * divider);
++		sa->runnable_sum = sa->runnable_avg * divider;
+ 
+ 		/*
+ 		 * removed_runnable is the unweighted version of removed_load so we
+diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
+index 183cc6ae68a68..76577d1642a5d 100644
+--- a/kernel/sched/wait.c
++++ b/kernel/sched/wait.c
+@@ -264,17 +264,22 @@ prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_ent
+ }
+ EXPORT_SYMBOL(prepare_to_wait);
+ 
+-void
++/* Returns true if we are the first waiter in the queue, false otherwise. */
++bool
+ prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
+ {
+ 	unsigned long flags;
++	bool was_empty = false;
+ 
+ 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
+ 	spin_lock_irqsave(&wq_head->lock, flags);
+-	if (list_empty(&wq_entry->entry))
++	if (list_empty(&wq_entry->entry)) {
++		was_empty = list_empty(&wq_head->head);
+ 		__add_wait_queue_entry_tail(wq_head, wq_entry);
++	}
+ 	set_current_state(state);
+ 	spin_unlock_irqrestore(&wq_head->lock, flags);
++	return was_empty;
+ }
+ EXPORT_SYMBOL(prepare_to_wait_exclusive);
+ 
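
The new bool return of prepare_to_wait_exclusive() tells the caller whether it became the head of a previously empty exclusive queue, without a second acquisition of wq_head->lock. A hedged sketch of how a caller might use it (given a wait_queue_head_t wq initialized elsewhere; arm_wakeup_source() is a hypothetical helper, not a kernel API):

DEFINE_WAIT(wait);

if (prepare_to_wait_exclusive(&wq, &wait, TASK_INTERRUPTIBLE)) {
	/* The queue was empty before us: we are the first waiter,
	 * so we are responsible for arming the wakeup source. */
	arm_wakeup_source();		/* hypothetical */
}
schedule();
finish_wait(&wq, &wait);
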
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 7c8151d74faf0..534bab352fc94 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2184,8 +2184,15 @@ void tracing_reset_all_online_cpus(void)
+ 	}
+ }
+ 
++/*
++ * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
++ * is the tgid last observed corresponding to pid=i.
++ */
+ static int *tgid_map;
+ 
++/* The maximum valid index into tgid_map. */
++static size_t tgid_map_max;
++
+ #define SAVED_CMDLINES_DEFAULT 128
+ #define NO_CMDLINE_MAP UINT_MAX
+ static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+@@ -2458,24 +2465,41 @@ void trace_find_cmdline(int pid, char comm[])
+ 	preempt_enable();
+ }
+ 
++static int *trace_find_tgid_ptr(int pid)
++{
++	/*
++	 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
++	 * if we observe a non-NULL tgid_map then we also observe the correct
++	 * tgid_map_max.
++	 */
++	int *map = smp_load_acquire(&tgid_map);
++
++	if (unlikely(!map || pid > tgid_map_max))
++		return NULL;
++
++	return &map[pid];
++}
++
+ int trace_find_tgid(int pid)
+ {
+-	if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
+-		return 0;
++	int *ptr = trace_find_tgid_ptr(pid);
+ 
+-	return tgid_map[pid];
++	return ptr ? *ptr : 0;
+ }
+ 
+ static int trace_save_tgid(struct task_struct *tsk)
+ {
++	int *ptr;
++
+ 	/* treat recording of idle task as a success */
+ 	if (!tsk->pid)
+ 		return 1;
+ 
+-	if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
++	ptr = trace_find_tgid_ptr(tsk->pid);
++	if (!ptr)
+ 		return 0;
+ 
+-	tgid_map[tsk->pid] = tsk->tgid;
++	*ptr = tsk->tgid;
+ 	return 1;
+ }
+ 
+@@ -4915,6 +4939,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+ 
+ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
+ {
++	int *map;
++
+ 	if ((mask == TRACE_ITER_RECORD_TGID) ||
+ 	    (mask == TRACE_ITER_RECORD_CMD))
+ 		lockdep_assert_held(&event_mutex);
+@@ -4937,10 +4963,19 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
+ 		trace_event_enable_cmd_record(enabled);
+ 
+ 	if (mask == TRACE_ITER_RECORD_TGID) {
+-		if (!tgid_map)
+-			tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
+-					   sizeof(*tgid_map),
+-					   GFP_KERNEL);
++		if (!tgid_map) {
++			tgid_map_max = pid_max;
++			map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
++				       GFP_KERNEL);
++
++			/*
++			 * Pairs with smp_load_acquire() in
++			 * trace_find_tgid_ptr() to ensure that if it observes
++			 * the tgid_map we just allocated then it also observes
++			 * the corresponding tgid_map_max value.
++			 */
++			smp_store_release(&tgid_map, map);
++		}
+ 		if (!tgid_map) {
+ 			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
+ 			return -ENOMEM;
+@@ -5352,37 +5387,16 @@ static const struct file_operations tracing_readme_fops = {
+ 
+ static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
+ {
+-	int *ptr = v;
++	int pid = ++(*pos);
+ 
+-	if (*pos || m->count)
+-		ptr++;
+-
+-	(*pos)++;
+-
+-	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
+-		if (trace_find_tgid(*ptr))
+-			return ptr;
+-	}
+-
+-	return NULL;
++	return trace_find_tgid_ptr(pid);
+ }
+ 
+ static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
+ {
+-	void *v;
+-	loff_t l = 0;
++	int pid = *pos;
+ 
+-	if (!tgid_map)
+-		return NULL;
+-
+-	v = &tgid_map[0];
+-	while (l <= *pos) {
+-		v = saved_tgids_next(m, v, &l);
+-		if (!v)
+-			return NULL;
+-	}
+-
+-	return v;
++	return trace_find_tgid_ptr(pid);
+ }
+ 
+ static void saved_tgids_stop(struct seq_file *m, void *v)
+@@ -5391,9 +5405,14 @@ static void saved_tgids_stop(struct seq_file *m, void *v)
+ 
+ static int saved_tgids_show(struct seq_file *m, void *v)
+ {
+-	int pid = (int *)v - tgid_map;
++	int *entry = (int *)v;
++	int pid = entry - tgid_map;
++	int tgid = *entry;
++
++	if (tgid == 0)
++		return SEQ_SKIP;
+ 
+-	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
++	seq_printf(m, "%d %d\n", pid, tgid);
+ 	return 0;
+ }
+ 
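
The tgid_map rework above is a textbook pointer-publication pattern: the writer fills in tgid_map_max first and publishes the pointer with smp_store_release(), readers use smp_load_acquire(), so observing a non-NULL map guarantees the matching bound is visible. A runnable C11 analogue of the same pattern using <stdatomic.h>:

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

static size_t map_max;		/* plain store, ordered by the release */
static _Atomic(int *) map;	/* the published pointer */

void publish(size_t max)
{
	int *m = calloc(max + 1, sizeof(*m));

	if (!m)
		return;
	map_max = max;		/* must happen before the release store */
	atomic_store_explicit(&map, m, memory_order_release);
}

int *lookup(size_t idx)
{
	int *m = atomic_load_explicit(&map, memory_order_acquire);

	/* A non-NULL m implies the corresponding map_max is visible. */
	if (!m || idx > map_max)
		return NULL;
	return &m[idx];
}

int main(void)
{
	publish(8);
	return lookup(3) ? 0 : 1;
}
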
+diff --git a/lib/seq_buf.c b/lib/seq_buf.c
+index 89c26c393bdba..6dafde8513337 100644
+--- a/lib/seq_buf.c
++++ b/lib/seq_buf.c
+@@ -229,8 +229,10 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
+ 
+ 	WARN_ON(s->size == 0);
+ 
++	BUILD_BUG_ON(MAX_MEMHEX_BYTES * 2 >= HEX_CHARS);
++
+ 	while (len) {
+-		start_len = min(len, HEX_CHARS - 1);
++		start_len = min(len, MAX_MEMHEX_BYTES);
+ #ifdef __BIG_ENDIAN
+ 		for (i = 0, j = 0; i < start_len; i++) {
+ #else
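
The BUILD_BUG_ON added above turns a sizing assumption into a compile failure instead of a silent runtime overflow: the old bound min(len, HEX_CHARS - 1) counted source bytes against a character budget, so one iteration could emit nearly twice the buffer's worth of hex digits. Standard C offers the same compile-time check via static_assert; a small standalone example:

#include <assert.h>	/* static_assert (C11) */

#define MAX_BYTES 8
#define HEX_CHARS (MAX_BYTES * 2 + 1)	/* two digits per byte + NUL */

/* Breaks the build, not the run, if the buffer sizing rule is violated. */
static_assert(MAX_BYTES * 2 < HEX_CHARS, "hex buffer too small");

int main(void) { return 0; }
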
+diff --git a/mm/mremap.c b/mm/mremap.c
+index ec8f840399ed4..7de4a01b0f8d9 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -439,7 +439,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
+ 			if (!new_pud)
+ 				break;
+ 			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
+-					   old_pud, new_pud, need_rmap_locks))
++					   old_pud, new_pud, true))
+ 				continue;
+ 		}
+ 
+@@ -466,7 +466,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
+ 			 * moving at the PMD level if possible.
+ 			 */
+ 			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
+-					   old_pmd, new_pmd, need_rmap_locks))
++					   old_pmd, new_pmd, true))
+ 				continue;
+ 		}
+ 
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 4fad2ca661ed9..f4e8a4ba663f4 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1719,14 +1719,6 @@ int hci_dev_do_close(struct hci_dev *hdev)
+ 
+ 	BT_DBG("%s %p", hdev->name, hdev);
+ 
+-	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
+-	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+-	    test_bit(HCI_UP, &hdev->flags)) {
+-		/* Execute vendor specific shutdown routine */
+-		if (hdev->shutdown)
+-			hdev->shutdown(hdev);
+-	}
+-
+ 	cancel_delayed_work(&hdev->power_off);
+ 
+ 	hci_request_cancel_all(hdev);
+@@ -1802,6 +1794,14 @@ int hci_dev_do_close(struct hci_dev *hdev)
+ 		clear_bit(HCI_INIT, &hdev->flags);
+ 	}
+ 
++	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
++	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
++	    test_bit(HCI_UP, &hdev->flags)) {
++		/* Execute vendor specific shutdown routine */
++		if (hdev->shutdown)
++			hdev->shutdown(hdev);
++	}
++
+ 	/* flush cmd  work */
+ 	flush_work(&hdev->cmd_work);
+ 
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index c6f400b108d94..99c89f02a9747 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -4379,12 +4379,12 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
+ 
+ 	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
+ 
+-	switch (conn->setting & SCO_AIRMODE_MASK) {
+-	case SCO_AIRMODE_CVSD:
++	switch (ev->air_mode) {
++	case 0x02:
+ 		if (hdev->notify)
+ 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
+ 		break;
+-	case SCO_AIRMODE_TRANSP:
++	case 0x03:
+ 		if (hdev->notify)
+ 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
+ 		break;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 53ddbee459b99..b1f4d5505bbac 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -6062,7 +6062,7 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
+ 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
+ 	struct hci_conn *hcon = conn->hcon;
+ 	u16 mtu, mps, credits, result;
+-	struct l2cap_chan *chan;
++	struct l2cap_chan *chan, *tmp;
+ 	int err = 0, sec_level;
+ 	int i = 0;
+ 
+@@ -6081,7 +6081,7 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
+ 
+ 	cmd_len -= sizeof(*rsp);
+ 
+-	list_for_each_entry(chan, &conn->chan_l, list) {
++	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
+ 		u16 dcid;
+ 
+ 		if (chan->ident != cmd->ident ||
+@@ -6244,7 +6244,7 @@ static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
+ 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ 					 u8 *data)
+ {
+-	struct l2cap_chan *chan;
++	struct l2cap_chan *chan, *tmp;
+ 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
+ 	u16 result;
+ 
+@@ -6258,7 +6258,7 @@ static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
+ 	if (!result)
+ 		return 0;
+ 
+-	list_for_each_entry(chan, &conn->chan_l, list) {
++	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
+ 		if (chan->ident != cmd->ident)
+ 			continue;
+ 
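
The list_for_each_entry_safe() conversions above matter because the loop body can tear down a channel, removing it from conn->chan_l and freeing it; the _safe variant caches the next entry before the body runs, so iteration never touches a freed node. The same delete-while-iterating idiom in plain C with a hand-rolled singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

/* Cache 'next' before the current node may be freed. */
static void drop_even(struct node **head)
{
	struct node **pp = head, *n, *next;

	for (n = *head; n; n = next) {
		next = n->next;		/* the "safe" cached successor */
		if (n->v % 2 == 0) {
			*pp = next;
			free(n);	/* n is gone; 'next' is still valid */
		} else {
			pp = &n->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL, **pp = &head;

	for (int i = 0; i < 5; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->v = i;
		n->next = NULL;
		*pp = n;
		pp = &n->next;
	}
	drop_even(&head);
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->v);	/* prints: 1 3 */
	putchar('\n');
	return 0;
}
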
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 71de147f55584..b4f6773b1a5b6 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -250,12 +250,15 @@ static const u8 mgmt_status_table[] = {
+ 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
+ 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
+ 	MGMT_STATUS_FAILED,		/* Transaction Collision */
++	MGMT_STATUS_FAILED,		/* Reserved for future use */
+ 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
+ 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
+ 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
+ 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
+ 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
++	MGMT_STATUS_FAILED,		/* Reserved for future use */
+ 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
++	MGMT_STATUS_FAILED,		/* Reserved for future use */
+ 	MGMT_STATUS_FAILED,		/* Slot Violation */
+ 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
+ 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
+@@ -4053,6 +4056,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
+ 
+ 	hci_dev_lock(hdev);
+ 
++	memset(&rp, 0, sizeof(rp));
++
+ 	if (cp->addr.type == BDADDR_BREDR) {
+ 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
+ 							      &cp->addr.bdaddr,
+diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c
+index 12487f6fe9b48..58254fbfda858 100644
+--- a/net/bridge/br_mrp.c
++++ b/net/bridge/br_mrp.c
+@@ -620,8 +620,7 @@ int br_mrp_set_ring_state(struct net_bridge *br,
+ 	if (!mrp)
+ 		return -EINVAL;
+ 
+-	if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED &&
+-	    state->ring_state != BR_MRP_RING_STATE_CLOSED)
++	if (mrp->ring_state != state->ring_state)
+ 		mrp->ring_transitions++;
+ 
+ 	mrp->ring_state = state->ring_state;
+@@ -708,8 +707,7 @@ int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
+ 	if (!mrp)
+ 		return -EINVAL;
+ 
+-	if (mrp->in_state == BR_MRP_IN_STATE_CLOSED &&
+-	    state->in_state != BR_MRP_IN_STATE_CLOSED)
++	if (mrp->in_state != state->in_state)
+ 		mrp->in_transitions++;
+ 
+ 	mrp->in_state = state->in_state;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 9631944740586..a8c89cad1ca48 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6472,11 +6472,18 @@ EXPORT_SYMBOL(napi_schedule_prep);
+  * __napi_schedule_irqoff - schedule for receive
+  * @n: entry to schedule
+  *
+- * Variant of __napi_schedule() assuming hard irqs are masked
++ * Variant of __napi_schedule() assuming hard irqs are masked.
++ *
++ * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
++ * because the interrupt disabled assumption might not be true
++ * due to force-threaded interrupts and spinlock substitution.
+  */
+ void __napi_schedule_irqoff(struct napi_struct *n)
+ {
+-	____napi_schedule(this_cpu_ptr(&softnet_data), n);
++	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++		____napi_schedule(this_cpu_ptr(&softnet_data), n);
++	else
++		__napi_schedule(n);
+ }
+ EXPORT_SYMBOL(__napi_schedule_irqoff);
+ 
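
The PREEMPT_RT branch in __napi_schedule_irqoff() above exists because on RT kernels interrupt handlers are force-threaded and spinlocks become sleeping locks, so "hard irqs are masked" no longer holds; IS_ENABLED() keeps the check compile-time so non-RT builds keep the raw fast path. A small illustration of the compile-time-config branch (the macro here is a simplified stand-in for the kernel's IS_ENABLED):

#include <stdio.h>

#define CONFIG_PREEMPT_RT 0	/* illustrative: would be 1 on an RT build */

static void schedule_example(void)
{
	/* Both arms are parsed and type-checked, but the constant
	 * condition lets the compiler discard the dead one; that is
	 * the advantage of if (IS_ENABLED(...)) over #ifdef. */
	if (!CONFIG_PREEMPT_RT)
		puts("irqs-off fast path");
	else
		puts("generic path for force-threaded irqs");
}

int main(void) { schedule_example(); return 0; }
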
+diff --git a/net/core/sock.c b/net/core/sock.c
+index a266760cd65ea..60750f9ae32d4 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1622,6 +1622,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+ 		v.val = sk->sk_bound_dev_if;
+ 		break;
+ 
++	case SO_NETNS_COOKIE:
++		lv = sizeof(u64);
++		if (len != lv)
++			return -EINVAL;
++		v.val64 = sock_net(sk)->net_cookie;
++		break;
++
+ 	default:
+ 		/* We implement the SO_SNDLOWAT etc to not be settable
+ 		 * (1003.1g 7).
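
The SO_NETNS_COOKIE option added above returns the owning network namespace's cookie, a u64 that is stable for the life of the namespace; note the handler rejects any optlen other than sizeof(u64). A userspace sketch of querying it (assumes a kernel and headers with the feature; the fallback define mirrors the uapi value added above):

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>

#ifndef SO_NETNS_COOKIE
#define SO_NETNS_COOKIE 71	/* value from the uapi hunk above */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	uint64_t cookie;
	socklen_t len = sizeof(cookie);	/* must be exactly sizeof(u64) */

	if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len)) {
		perror("SO_NETNS_COOKIE");
		return 1;
	}
	printf("netns cookie: %llu\n", (unsigned long long)cookie);
	return 0;
}
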
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index bb1351c38397f..e31949479305e 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -397,7 +397,8 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+ 	 * ensures entries of restarted nodes gets pruned so that they can
+ 	 * re-register and resume communications.
+ 	 */
+-	if (seq_nr_before(sequence_nr, node->seq_out[port->type]))
++	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
++	    seq_nr_before(sequence_nr, node->seq_out[port->type]))
+ 		return;
+ 
+ 	node->time_in[port->type] = jiffies;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 3aab53beb4ea2..3ec3b67c184fb 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1054,7 +1054,7 @@ static int __ip_append_data(struct sock *sk,
+ 			unsigned int datalen;
+ 			unsigned int fraglen;
+ 			unsigned int fraggap;
+-			unsigned int alloclen;
++			unsigned int alloclen, alloc_extra;
+ 			unsigned int pagedlen;
+ 			struct sk_buff *skb_prev;
+ alloc_new_skb:
+@@ -1074,35 +1074,39 @@ alloc_new_skb:
+ 			fraglen = datalen + fragheaderlen;
+ 			pagedlen = 0;
+ 
++			alloc_extra = hh_len + 15;
++			alloc_extra += exthdrlen;
++
++			/* The last fragment gets additional space at tail.
++			 * Note, with MSG_MORE we overallocate on fragments,
++			 * because we have no idea what fragment will be
++			 * the last.
++			 */
++			if (datalen == length + fraggap)
++				alloc_extra += rt->dst.trailer_len;
++
+ 			if ((flags & MSG_MORE) &&
+ 			    !(rt->dst.dev->features&NETIF_F_SG))
+ 				alloclen = mtu;
+-			else if (!paged)
++			else if (!paged &&
++				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
++				  !(rt->dst.dev->features & NETIF_F_SG)))
+ 				alloclen = fraglen;
+ 			else {
+ 				alloclen = min_t(int, fraglen, MAX_HEADER);
+ 				pagedlen = fraglen - alloclen;
+ 			}
+ 
+-			alloclen += exthdrlen;
+-
+-			/* The last fragment gets additional space at tail.
+-			 * Note, with MSG_MORE we overallocate on fragments,
+-			 * because we have no idea what fragment will be
+-			 * the last.
+-			 */
+-			if (datalen == length + fraggap)
+-				alloclen += rt->dst.trailer_len;
++			alloclen += alloc_extra;
+ 
+ 			if (transhdrlen) {
+-				skb = sock_alloc_send_skb(sk,
+-						alloclen + hh_len + 15,
++				skb = sock_alloc_send_skb(sk, alloclen,
+ 						(flags & MSG_DONTWAIT), &err);
+ 			} else {
+ 				skb = NULL;
+ 				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
+ 				    2 * sk->sk_sndbuf)
+-					skb = alloc_skb(alloclen + hh_len + 15,
++					skb = alloc_skb(alloclen,
+ 							sk->sk_allocation);
+ 				if (unlikely(!skb))
+ 					err = -ENOBUFS;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 69a545db80d2e..e567fff1d1a66 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2816,8 +2816,17 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
+ 	*rexmit = REXMIT_LOST;
+ }
+ 
++static bool tcp_force_fast_retransmit(struct sock *sk)
++{
++	struct tcp_sock *tp = tcp_sk(sk);
++
++	return after(tcp_highest_sack_seq(tp),
++		     tp->snd_una + tp->reordering * tp->mss_cache);
++}
++
+ /* Undo during fast recovery after partial ACK. */
+-static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
++static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
++				 bool *do_lost)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 
+@@ -2842,7 +2851,9 @@ static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
+ 		tcp_undo_cwnd_reduction(sk, true);
+ 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
+ 		tcp_try_keep_open(sk);
+-		return true;
++	} else {
++		/* Partial ACK arrived. Force fast retransmit. */
++		*do_lost = tcp_force_fast_retransmit(sk);
+ 	}
+ 	return false;
+ }
+@@ -2866,14 +2877,6 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
+ 	}
+ }
+ 
+-static bool tcp_force_fast_retransmit(struct sock *sk)
+-{
+-	struct tcp_sock *tp = tcp_sk(sk);
+-
+-	return after(tcp_highest_sack_seq(tp),
+-		     tp->snd_una + tp->reordering * tp->mss_cache);
+-}
+-
+ /* Process an event, which can update packets-in-flight not trivially.
+  * Main goal of this function is to calculate new estimate for left_out,
+  * taking into account both packets sitting in receiver's buffer and
+@@ -2943,17 +2946,21 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
+ 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
+ 			if (tcp_is_reno(tp))
+ 				tcp_add_reno_sack(sk, num_dupack, ece_ack);
+-		} else {
+-			if (tcp_try_undo_partial(sk, prior_snd_una))
+-				return;
+-			/* Partial ACK arrived. Force fast retransmit. */
+-			do_lost = tcp_force_fast_retransmit(sk);
+-		}
+-		if (tcp_try_undo_dsack(sk)) {
+-			tcp_try_keep_open(sk);
++		} else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost))
+ 			return;
+-		}
++
++		if (tcp_try_undo_dsack(sk))
++			tcp_try_keep_open(sk);
++
+ 		tcp_identify_packet_loss(sk, ack_flag);
++		if (icsk->icsk_ca_state != TCP_CA_Recovery) {
++			if (!tcp_time_to_recover(sk, flag))
++				return;
++			/* Undo reverts the recovery state. If loss is evident,
++			 * starts a new recovery (e.g. reordering then loss);
++			 */
++			tcp_enter_recovery(sk, ece_ack);
++		}
+ 		break;
+ 	case TCP_CA_Loss:
+ 		tcp_process_loss(sk, flag, num_dupack, rexmit);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index ff4f9ebcf7f65..497974b4372a7 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1555,7 +1555,7 @@ emsgsize:
+ 			unsigned int datalen;
+ 			unsigned int fraglen;
+ 			unsigned int fraggap;
+-			unsigned int alloclen;
++			unsigned int alloclen, alloc_extra;
+ 			unsigned int pagedlen;
+ alloc_new_skb:
+ 			/* There's no room in the current skb */
+@@ -1582,17 +1582,28 @@ alloc_new_skb:
+ 			fraglen = datalen + fragheaderlen;
+ 			pagedlen = 0;
+ 
++			alloc_extra = hh_len;
++			alloc_extra += dst_exthdrlen;
++			alloc_extra += rt->dst.trailer_len;
++
++			/* We just reserve space for fragment header.
++			 * Note: this may be overallocation if the message
++			 * (without MSG_MORE) fits into the MTU.
++			 */
++			alloc_extra += sizeof(struct frag_hdr);
++
+ 			if ((flags & MSG_MORE) &&
+ 			    !(rt->dst.dev->features&NETIF_F_SG))
+ 				alloclen = mtu;
+-			else if (!paged)
++			else if (!paged &&
++				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
++				  !(rt->dst.dev->features & NETIF_F_SG)))
+ 				alloclen = fraglen;
+ 			else {
+ 				alloclen = min_t(int, fraglen, MAX_HEADER);
+ 				pagedlen = fraglen - alloclen;
+ 			}
+-
+-			alloclen += dst_exthdrlen;
++			alloclen += alloc_extra;
+ 
+ 			if (datalen != length + fraggap) {
+ 				/*
+@@ -1602,30 +1613,21 @@ alloc_new_skb:
+ 				datalen += rt->dst.trailer_len;
+ 			}
+ 
+-			alloclen += rt->dst.trailer_len;
+ 			fraglen = datalen + fragheaderlen;
+ 
+-			/*
+-			 * We just reserve space for fragment header.
+-			 * Note: this may be overallocation if the message
+-			 * (without MSG_MORE) fits into the MTU.
+-			 */
+-			alloclen += sizeof(struct frag_hdr);
+-
+ 			copy = datalen - transhdrlen - fraggap - pagedlen;
+ 			if (copy < 0) {
+ 				err = -EINVAL;
+ 				goto error;
+ 			}
+ 			if (transhdrlen) {
+-				skb = sock_alloc_send_skb(sk,
+-						alloclen + hh_len,
++				skb = sock_alloc_send_skb(sk, alloclen,
+ 						(flags & MSG_DONTWAIT), &err);
+ 			} else {
+ 				skb = NULL;
+ 				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
+ 				    2 * sk->sk_sndbuf)
+-					skb = alloc_skb(alloclen + hh_len,
++					skb = alloc_skb(alloclen,
+ 							sk->sk_allocation);
+ 				if (unlikely(!skb))
+ 					err = -ENOBUFS;
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index af36acc1a6448..2880dc7d9a491 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -15,29 +15,11 @@ static u32 __ipv6_select_ident(struct net *net,
+ 			       const struct in6_addr *dst,
+ 			       const struct in6_addr *src)
+ {
+-	const struct {
+-		struct in6_addr dst;
+-		struct in6_addr src;
+-	} __aligned(SIPHASH_ALIGNMENT) combined = {
+-		.dst = *dst,
+-		.src = *src,
+-	};
+-	u32 hash, id;
+-
+-	/* Note the following code is not safe, but this is okay. */
+-	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
+-		get_random_bytes(&net->ipv4.ip_id_key,
+-				 sizeof(net->ipv4.ip_id_key));
+-
+-	hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);
+-
+-	/* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
+-	 * set the high order bit instead, thus minimizing possible future
+-	 * collisions.
+-	 */
+-	id = ip_idents_reserve(hash, 1);
+-	if (unlikely(!id))
+-		id = 1 << 31;
++	u32 id;
++
++	do {
++		id = prandom_u32();
++	} while (!id);
+ 
+ 	return id;
+ }
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 9dd741b68f268..937a024a13e2d 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -257,14 +257,13 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	/* wait for scan work complete */
+ 	flush_workqueue(local->workqueue);
+ 	flush_work(&local->sched_scan_stopped_work);
++	flush_work(&local->radar_detected_work);
++
++	rtnl_lock();
+ 
+ 	WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
+ 	     "%s called with hardware scan in progress\n", __func__);
+ 
+-	flush_work(&local->radar_detected_work);
+-	/* we might do interface manipulations, so need both */
+-	rtnl_lock();
+-	wiphy_lock(local->hw.wiphy);
+ 	list_for_each_entry(sdata, &local->interfaces, list) {
+ 		/*
+ 		 * XXX: there may be more work for other vif types and even
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 13250cadb4202..e18c3855f6161 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2088,10 +2088,9 @@ static struct ieee80211_sta_rx_stats *
+ sta_get_last_rx_stats(struct sta_info *sta)
+ {
+ 	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
+-	struct ieee80211_local *local = sta->local;
+ 	int cpu;
+ 
+-	if (!ieee80211_hw_check(&local->hw, USES_RSS))
++	if (!sta->pcpu_rx_stats)
+ 		return stats;
+ 
+ 	for_each_possible_cpu(cpu) {
+@@ -2191,9 +2190,7 @@ static void sta_set_tidstats(struct sta_info *sta,
+ 	int cpu;
+ 
+ 	if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
+-		if (!ieee80211_hw_check(&local->hw, USES_RSS))
+-			tidstats->rx_msdu +=
+-				sta_get_tidstats_msdu(&sta->rx_stats, tid);
++		tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->rx_stats, tid);
+ 
+ 		if (sta->pcpu_rx_stats) {
+ 			for_each_possible_cpu(cpu) {
+@@ -2272,7 +2269,6 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
+ 		sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;
+ 
+ 	drv_sta_statistics(local, sdata, &sta->sta, sinfo);
+-
+ 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
+ 			 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
+ 			 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
+@@ -2307,8 +2303,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
+ 
+ 	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
+ 			       BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
+-		if (!ieee80211_hw_check(&local->hw, USES_RSS))
+-			sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
++		sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
+ 
+ 		if (sta->pcpu_rx_stats) {
+ 			for_each_possible_cpu(cpu) {
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index f6d5755d669eb..d17a66aab8eea 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -381,7 +381,8 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
+ 	}
+ 	mutex_unlock(&idrinfo->lock);
+ 
+-	if (nla_put_u32(skb, TCA_FCNT, n_i))
++	ret = nla_put_u32(skb, TCA_FCNT, n_i);
++	if (ret)
+ 		goto nla_put_failure;
+ 	nla_nest_end(skb, nest);
+ 
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 94f6942d7ec11..2f82ac7c0f93c 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1531,7 +1531,7 @@ static inline int __tcf_classify(struct sk_buff *skb,
+ 				 u32 *last_executed_chain)
+ {
+ #ifdef CONFIG_NET_CLS_ACT
+-	const int max_reclassify_loop = 4;
++	const int max_reclassify_loop = 16;
+ 	const struct tcf_proto *first_tp;
+ 	int limit = 0;
+ 
+diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
+index 53e5ed79f63f3..59e653b528b1f 100644
+--- a/net/sctp/bind_addr.c
++++ b/net/sctp/bind_addr.c
+@@ -270,22 +270,19 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
+ 		rawaddr = (union sctp_addr_param *)raw_addr_list;
+ 
+ 		af = sctp_get_af_specific(param_type2af(param->type));
+-		if (unlikely(!af)) {
++		if (unlikely(!af) ||
++		    !af->from_addr_param(&addr, rawaddr, htons(port), 0)) {
+ 			retval = -EINVAL;
+-			sctp_bind_addr_clean(bp);
+-			break;
++			goto out_err;
+ 		}
+ 
+-		af->from_addr_param(&addr, rawaddr, htons(port), 0);
+ 		if (sctp_bind_addr_state(bp, &addr) != -1)
+ 			goto next;
+ 		retval = sctp_add_bind_addr(bp, &addr, sizeof(addr),
+ 					    SCTP_ADDR_SRC, gfp);
+-		if (retval) {
++		if (retval)
+ 			/* Can't finish building the list, clean up. */
+-			sctp_bind_addr_clean(bp);
+-			break;
+-		}
++			goto out_err;
+ 
+ next:
+ 		len = ntohs(param->length);
+@@ -294,6 +291,12 @@ next:
+ 	}
+ 
+ 	return retval;
++
++out_err:
++	if (retval)
++		sctp_bind_addr_clean(bp);
++
++	return retval;
+ }
+ 
+ /********************************************************************
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index d508f6f3dd08a..f72bff93745c4 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -1131,7 +1131,8 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
+ 		if (!af)
+ 			continue;
+ 
+-		af->from_addr_param(paddr, params.addr, sh->source, 0);
++		if (!af->from_addr_param(paddr, params.addr, sh->source, 0))
++			continue;
+ 
+ 		asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
+ 		if (asoc)
+@@ -1174,7 +1175,8 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
+ 	if (unlikely(!af))
+ 		return NULL;
+ 
+-	af->from_addr_param(&paddr, param, peer_port, 0);
++	if (af->from_addr_param(&paddr, param, peer_port, 0))
++		return NULL;
+ 
+ 	return __sctp_lookup_association(net, laddr, &paddr, transportp);
+ }
+@@ -1245,7 +1247,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
+ 
+ 		ch = (struct sctp_chunkhdr *)ch_end;
+ 		chunk_num++;
+-	} while (ch_end < skb_tail_pointer(skb));
++	} while (ch_end + sizeof(*ch) < skb_tail_pointer(skb));
+ 
+ 	return asoc;
+ }
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index bd08807c9e447..5c6f5ced9cfa6 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -551,15 +551,20 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
+ }
+ 
+ /* Initialize a sctp_addr from an address parameter. */
+-static void sctp_v6_from_addr_param(union sctp_addr *addr,
++static bool sctp_v6_from_addr_param(union sctp_addr *addr,
+ 				    union sctp_addr_param *param,
+ 				    __be16 port, int iif)
+ {
++	if (ntohs(param->v6.param_hdr.length) < sizeof(struct sctp_ipv6addr_param))
++		return false;
++
+ 	addr->v6.sin6_family = AF_INET6;
+ 	addr->v6.sin6_port = port;
+ 	addr->v6.sin6_flowinfo = 0; /* BUG */
+ 	addr->v6.sin6_addr = param->v6.addr;
+ 	addr->v6.sin6_scope_id = iif;
++
++	return true;
+ }
+ 
+ /* Initialize an address parameter from a sctp_addr and return the length
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 6f2bbfeec3a4c..25192b378e2ec 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -254,14 +254,19 @@ static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
+ }
+ 
+ /* Initialize a sctp_addr from an address parameter. */
+-static void sctp_v4_from_addr_param(union sctp_addr *addr,
++static bool sctp_v4_from_addr_param(union sctp_addr *addr,
+ 				    union sctp_addr_param *param,
+ 				    __be16 port, int iif)
+ {
++	if (ntohs(param->v4.param_hdr.length) < sizeof(struct sctp_ipv4addr_param))
++		return false;
++
+ 	addr->v4.sin_family = AF_INET;
+ 	addr->v4.sin_port = port;
+ 	addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
+ 	memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
++
++	return true;
+ }
+ 
+ /* Initialize an address parameter from a sctp_addr and return the length
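
All of the from_addr_param() conversions above follow one rule: the length field inside an address parameter is attacker-controlled wire data, so it must be validated against the expected structure size before any field is copied out, and every caller now treats a false return as "skip or fail" rather than reading past the buffer. A runnable sketch of the validate-before-copy pattern (the toy format here is illustrative, not SCTP's real TLV layout):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct param_hdr { uint16_t type; uint16_t length; };	/* network order */

/* Trust the self-declared length only after checking it. */
static bool parse_v4_param(const uint8_t *buf, size_t buflen, uint32_t *addr)
{
	struct param_hdr h;
	const size_t need = sizeof(h) + sizeof(*addr);

	if (buflen < need)
		return false;		/* truncated packet */
	memcpy(&h, buf, sizeof(h));
	if (ntohs(h.length) < need)
		return false;		/* header lies about its size */
	memcpy(addr, buf + sizeof(h), sizeof(*addr));
	return true;
}

int main(void)
{
	const uint8_t pkt[8] = { 0, 5, 0, 8, 192, 0, 2, 1 };
	uint32_t a;

	puts(parse_v4_param(pkt, sizeof(pkt), &a) ? "accepted" : "rejected");
	return 0;
}
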
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index da4ce0947c3aa..1fdbde6f07b82 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -2350,11 +2350,13 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
+ 
+ 	/* Process the initialization parameters.  */
+ 	sctp_walk_params(param, peer_init, init_hdr.params) {
+-		if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+-		    param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
++		if (!src_match &&
++		    (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
++		     param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
+ 			af = sctp_get_af_specific(param_type2af(param.p->type));
+-			af->from_addr_param(&addr, param.addr,
+-					    chunk->sctp_hdr->source, 0);
++			if (!af->from_addr_param(&addr, param.addr,
++						 chunk->sctp_hdr->source, 0))
++				continue;
+ 			if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
+ 				src_match = 1;
+ 		}
+@@ -2535,7 +2537,8 @@ static int sctp_process_param(struct sctp_association *asoc,
+ 			break;
+ do_addr_param:
+ 		af = sctp_get_af_specific(param_type2af(param.p->type));
+-		af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
++		if (!af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0))
++			break;
+ 		scope = sctp_scope(peer_addr);
+ 		if (sctp_in_scope(net, &addr, scope))
+ 			if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
+@@ -2636,15 +2639,13 @@ do_addr_param:
+ 		addr_param = param.v + sizeof(struct sctp_addip_param);
+ 
+ 		af = sctp_get_af_specific(param_type2af(addr_param->p.type));
+-		if (af == NULL)
++		if (!af)
+ 			break;
+ 
+-		af->from_addr_param(&addr, addr_param,
+-				    htons(asoc->peer.port), 0);
++		if (!af->from_addr_param(&addr, addr_param,
++					 htons(asoc->peer.port), 0))
++			break;
+ 
+-		/* if the address is invalid, we can't process it.
+-		 * XXX: see spec for what to do.
+-		 */
+ 		if (!af->addr_valid(&addr, NULL, NULL))
+ 			break;
+ 
+@@ -3058,7 +3059,8 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
+ 	if (unlikely(!af))
+ 		return SCTP_ERROR_DNS_FAILED;
+ 
+-	af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0);
++	if (!af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0))
++		return SCTP_ERROR_DNS_FAILED;
+ 
+ 	/* ADDIP 4.2.1  This parameter MUST NOT contain a broadcast
+ 	 * or multicast address.
+@@ -3335,7 +3337,8 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
+ 
+ 	/* We have checked the packet before, so we do not check again.	*/
+ 	af = sctp_get_af_specific(param_type2af(addr_param->p.type));
+-	af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
++	if (!af->from_addr_param(&addr, addr_param, htons(bp->port), 0))
++		return;
+ 
+ 	switch (asconf_param->param_hdr.type) {
+ 	case SCTP_PARAM_ADD_IP:
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index bc7fb9bf3351e..2a82fbf32ccc7 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1369,7 +1369,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
+ 
+ 		if (signal_pending(current)) {
+ 			err = sock_intr_errno(timeout);
+-			sk->sk_state = TCP_CLOSE;
++			sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
+ 			sock->state = SS_UNCONNECTED;
+ 			vsock_transport_cancel_pkt(vsk);
+ 			goto out_wait;
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index a5224da638328..be0f616f85d34 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -4779,11 +4779,10 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
+ 		       sband->ht_cap.mcs.rx_mask,
+ 		       sizeof(mask->control[i].ht_mcs));
+ 
+-		if (!sband->vht_cap.vht_supported)
+-			continue;
+-
+-		vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+-		vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
++		if (sband->vht_cap.vht_supported) {
++			vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
++			vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
++		}
+ 
+ 		he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype);
+ 		if (!he_cap)
+diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c
+index 33bef22e44e95..b379a03716539 100644
+--- a/net/wireless/wext-spy.c
++++ b/net/wireless/wext-spy.c
+@@ -120,8 +120,8 @@ int iw_handler_set_thrspy(struct net_device *	dev,
+ 		return -EOPNOTSUPP;
+ 
+ 	/* Just do it */
+-	memcpy(&(spydata->spy_thr_low), &(threshold->low),
+-	       2 * sizeof(struct iw_quality));
++	spydata->spy_thr_low = threshold->low;
++	spydata->spy_thr_high = threshold->high;
+ 
+ 	/* Clear flag */
+ 	memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under));
+@@ -147,8 +147,8 @@ int iw_handler_get_thrspy(struct net_device *	dev,
+ 		return -EOPNOTSUPP;
+ 
+ 	/* Just do it */
+-	memcpy(&(threshold->low), &(spydata->spy_thr_low),
+-	       2 * sizeof(struct iw_quality));
++	threshold->low = spydata->spy_thr_low;
++	threshold->high = spydata->spy_thr_high;
+ 
+ 	return 0;
+ }
+@@ -173,10 +173,10 @@ static void iw_send_thrspy_event(struct net_device *	dev,
+ 	memcpy(threshold.addr.sa_data, address, ETH_ALEN);
+ 	threshold.addr.sa_family = ARPHRD_ETHER;
+ 	/* Copy stats */
+-	memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality));
++	threshold.qual = *wstats;
+ 	/* Copy also thresholds */
+-	memcpy(&(threshold.low), &(spydata->spy_thr_low),
+-	       2 * sizeof(struct iw_quality));
++	threshold.low = spydata->spy_thr_low;
++	threshold.high = spydata->spy_thr_high;
+ 
+ 	/* Send event to user space */
+ 	wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold);
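
The wext-spy hunks replace memcpy(dst, src, 2 * sizeof(struct iw_quality)) with two plain struct assignments. The memcpy version silently relied on spy_thr_low and spy_thr_high being adjacent with no padding in both structures; assignment copies each member explicitly, is type-checked, and compiles to the same stores. A minimal before/after (toy structures, for illustration):

#include <string.h>

struct qual { unsigned char level, noise, updated; };
struct spydata { struct qual thr_low, thr_high; };

void copy_thresholds(struct spydata *dst, const struct spydata *src)
{
	/* Old style, fragile: assumes the two members are contiguous. */
	memcpy(&dst->thr_low, &src->thr_low, 2 * sizeof(struct qual));

	/* New style, robust: no layout assumption, compiler-checked. */
	dst->thr_low  = src->thr_low;
	dst->thr_high = src->thr_high;
}
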
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 5a0ef4361e436..817e714dedea0 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -580,6 +580,20 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
+ 
+ 	copy_from_user_state(x, p);
+ 
++	if (attrs[XFRMA_ENCAP]) {
++		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
++				   sizeof(*x->encap), GFP_KERNEL);
++		if (x->encap == NULL)
++			goto error;
++	}
++
++	if (attrs[XFRMA_COADDR]) {
++		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
++				    sizeof(*x->coaddr), GFP_KERNEL);
++		if (x->coaddr == NULL)
++			goto error;
++	}
++
+ 	if (attrs[XFRMA_SA_EXTRA_FLAGS])
+ 		x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
+ 
+@@ -600,23 +614,9 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
+ 				   attrs[XFRMA_ALG_COMP])))
+ 		goto error;
+ 
+-	if (attrs[XFRMA_ENCAP]) {
+-		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
+-				   sizeof(*x->encap), GFP_KERNEL);
+-		if (x->encap == NULL)
+-			goto error;
+-	}
+-
+ 	if (attrs[XFRMA_TFCPAD])
+ 		x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
+ 
+-	if (attrs[XFRMA_COADDR]) {
+-		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
+-				    sizeof(*x->coaddr), GFP_KERNEL);
+-		if (x->coaddr == NULL)
+-			goto error;
+-	}
+-
+ 	xfrm_mark_get(attrs, &x->mark);
+ 
+ 	xfrm_smark_init(attrs, &x->props.smark);
+diff --git a/security/selinux/avc.c b/security/selinux/avc.c
+index ad451cf9375e4..a2dc83228daf4 100644
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -297,26 +297,27 @@ static struct avc_xperms_decision_node
+ 	struct avc_xperms_decision_node *xpd_node;
+ 	struct extended_perms_decision *xpd;
+ 
+-	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
++	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
++				     GFP_NOWAIT | __GFP_NOWARN);
+ 	if (!xpd_node)
+ 		return NULL;
+ 
+ 	xpd = &xpd_node->xpd;
+ 	if (which & XPERMS_ALLOWED) {
+ 		xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
+-						GFP_NOWAIT);
++						GFP_NOWAIT | __GFP_NOWARN);
+ 		if (!xpd->allowed)
+ 			goto error;
+ 	}
+ 	if (which & XPERMS_AUDITALLOW) {
+ 		xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
+-						GFP_NOWAIT);
++						GFP_NOWAIT | __GFP_NOWARN);
+ 		if (!xpd->auditallow)
+ 			goto error;
+ 	}
+ 	if (which & XPERMS_DONTAUDIT) {
+ 		xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
+-						GFP_NOWAIT);
++						GFP_NOWAIT | __GFP_NOWARN);
+ 		if (!xpd->dontaudit)
+ 			goto error;
+ 	}
+@@ -344,7 +345,7 @@ static struct avc_xperms_node *avc_xperms_alloc(void)
+ {
+ 	struct avc_xperms_node *xp_node;
+ 
+-	xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
++	xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT | __GFP_NOWARN);
+ 	if (!xp_node)
+ 		return xp_node;
+ 	INIT_LIST_HEAD(&xp_node->xpd_head);
+@@ -500,7 +501,7 @@ static struct avc_node *avc_alloc_node(struct selinux_avc *avc)
+ {
+ 	struct avc_node *node;
+ 
+-	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
++	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN);
+ 	if (!node)
+ 		goto out;
+ 
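
The AVC allocations above are all opportunistic GFP_NOWAIT allocations on hot paths where failure merely means "skip caching this decision" and fall back to the slow path; adding __GFP_NOWARN stops the allocator from dumping a failure warning and stack trace for an expected, fully handled condition. The pattern, as a short kernel-style sketch (node/cachep stand for the surrounding context):

/* Opportunistic cache node: failing is normal under memory
 * pressure, so suppress the allocation-failure warning. */
node = kmem_cache_zalloc(cachep, GFP_NOWAIT | __GFP_NOWARN);
if (!node)
	return NULL;	/* caller falls back to the uncached slow path */
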
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index 22ded2c26089c..1ad7d0d1ea62e 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -855,6 +855,8 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ 	if (format == SMK_FIXED24_FMT &&
+ 	    (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX))
+ 		return -EINVAL;
++	if (count > PAGE_SIZE)
++		return -EINVAL;
+ 
+ 	data = memdup_user_nul(buf, count);
+ 	if (IS_ERR(data))
+diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
+index 0a0efd24e4b0f..81ea6ceba689a 100644
+--- a/sound/soc/tegra/tegra_alc5632.c
++++ b/sound/soc/tegra/tegra_alc5632.c
+@@ -139,6 +139,7 @@ static struct snd_soc_dai_link tegra_alc5632_dai = {
+ 
+ static struct snd_soc_card snd_soc_tegra_alc5632 = {
+ 	.name = "tegra-alc5632",
++	.driver_name = "tegra",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = &tegra_alc5632_dai,
+ 	.num_links = 1,
+diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
+index 00c19704057b6..5a649810c0c87 100644
+--- a/sound/soc/tegra/tegra_max98090.c
++++ b/sound/soc/tegra/tegra_max98090.c
+@@ -182,6 +182,7 @@ static struct snd_soc_dai_link tegra_max98090_dai = {
+ 
+ static struct snd_soc_card snd_soc_tegra_max98090 = {
+ 	.name = "tegra-max98090",
++	.driver_name = "tegra",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = &tegra_max98090_dai,
+ 	.num_links = 1,
+diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
+index 9afba37a3b086..3344f16258bec 100644
+--- a/sound/soc/tegra/tegra_rt5640.c
++++ b/sound/soc/tegra/tegra_rt5640.c
+@@ -132,6 +132,7 @@ static struct snd_soc_dai_link tegra_rt5640_dai = {
+ 
+ static struct snd_soc_card snd_soc_tegra_rt5640 = {
+ 	.name = "tegra-rt5640",
++	.driver_name = "tegra",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = &tegra_rt5640_dai,
+ 	.num_links = 1,
+diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c
+index d30f8b6deda4b..0f03e97d93558 100644
+--- a/sound/soc/tegra/tegra_rt5677.c
++++ b/sound/soc/tegra/tegra_rt5677.c
+@@ -175,6 +175,7 @@ static struct snd_soc_dai_link tegra_rt5677_dai = {
+ 
+ static struct snd_soc_card snd_soc_tegra_rt5677 = {
+ 	.name = "tegra-rt5677",
++	.driver_name = "tegra",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = &tegra_rt5677_dai,
+ 	.num_links = 1,
+diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
+index 885332170c77b..ef6a553e0b7da 100644
+--- a/sound/soc/tegra/tegra_sgtl5000.c
++++ b/sound/soc/tegra/tegra_sgtl5000.c
+@@ -97,6 +97,7 @@ static struct snd_soc_dai_link tegra_sgtl5000_dai = {
+ 
+ static struct snd_soc_card snd_soc_tegra_sgtl5000 = {
+ 	.name = "tegra-sgtl5000",
++	.driver_name = "tegra",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = &tegra_sgtl5000_dai,
+ 	.num_links = 1,
+diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
+index efd7938866895..27089077f2ea4 100644
+--- a/sound/soc/tegra/tegra_wm8753.c
++++ b/sound/soc/tegra/tegra_wm8753.c
+@@ -101,6 +101,7 @@ static struct snd_soc_dai_link tegra_wm8753_dai = {
+ 
+ static struct snd_soc_card snd_soc_tegra_wm8753 = {
+ 	.name = "tegra-wm8753",
++	.driver_name = "tegra",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = &tegra_wm8753_dai,
+ 	.num_links = 1,
+diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
+index e4863fa37b0c6..f219c26d66a31 100644
+--- a/sound/soc/tegra/tegra_wm8903.c
++++ b/sound/soc/tegra/tegra_wm8903.c
+@@ -235,6 +235,7 @@ static struct snd_soc_dai_link tegra_wm8903_dai = {
+ 
+ static struct snd_soc_card snd_soc_tegra_wm8903 = {
+ 	.name = "tegra-wm8903",
++	.driver_name = "tegra",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = &tegra_wm8903_dai,
+ 	.num_links = 1,
+diff --git a/sound/soc/tegra/tegra_wm9712.c b/sound/soc/tegra/tegra_wm9712.c
+index 4f09a178049d3..c66da161c85aa 100644
+--- a/sound/soc/tegra/tegra_wm9712.c
++++ b/sound/soc/tegra/tegra_wm9712.c
+@@ -54,6 +54,7 @@ static struct snd_soc_dai_link tegra_wm9712_dai = {
+ 
+ static struct snd_soc_card snd_soc_tegra_wm9712 = {
+ 	.name = "tegra-wm9712",
++	.driver_name = "tegra",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = &tegra_wm9712_dai,
+ 	.num_links = 1,
+diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
+index 6c1cc3d0ac336..cb4c8f72e4e67 100644
+--- a/sound/soc/tegra/trimslice.c
++++ b/sound/soc/tegra/trimslice.c
+@@ -94,6 +94,7 @@ static struct snd_soc_dai_link trimslice_tlv320aic23_dai = {
+ 
+ static struct snd_soc_card snd_soc_trimslice = {
+ 	.name = "tegra-trimslice",
++	.driver_name = "tegra",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = &trimslice_tlv320aic23_dai,
+ 	.num_links = 1,
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
+index 4029833f7e271..160891dcb4bcb 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
+@@ -109,6 +109,9 @@ router_destroy()
+ 	__addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
+ 
+ 	tc qdisc del dev $rp2 clsact
++
++	ip link set dev $rp2 down
++	ip link set dev $rp1 down
+ }
+ 
+ setup_prepare()
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
+index 1fedfc9da434f..1d157b1bd838a 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
+@@ -111,6 +111,9 @@ router_destroy()
+ 	__addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
+ 
+ 	tc qdisc del dev $rp2 clsact
++
++	ip link set dev $rp2 down
++	ip link set dev $rp1 down
+ }
+ 
+ setup_prepare()
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
+index 5cbff8038f84c..28a570006d4d9 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
+@@ -93,7 +93,9 @@ switch_destroy()
+ 	lldptool -T -i $swp1 -V APP -d $(dscp_map 10) >/dev/null
+ 	lldpad_app_wait_del
+ 
++	ip link set dev $swp2 down
+ 	ip link set dev $swp2 nomaster
++	ip link set dev $swp1 down
+ 	ip link set dev $swp1 nomaster
+ 	ip link del dev br1
+ }
+diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
+index 11ef159be0fd4..a5fce7fd4520d 100644
+--- a/tools/testing/selftests/lkdtm/tests.txt
++++ b/tools/testing/selftests/lkdtm/tests.txt
+@@ -11,7 +11,7 @@ CORRUPT_LIST_ADD list_add corruption
+ CORRUPT_LIST_DEL list_del corruption
+ STACK_GUARD_PAGE_LEADING
+ STACK_GUARD_PAGE_TRAILING
+-UNSET_SMEP CR4 bits went missing
++UNSET_SMEP pinned CR4 bits changed:
+ DOUBLE_FAULT
+ CORRUPT_PAC
+ UNALIGNED_LOAD_STORE_WRITE
+diff --git a/tools/testing/selftests/net/forwarding/pedit_dsfield.sh b/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
+index 55eeacf592411..64fbd211d907b 100755
+--- a/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
++++ b/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
+@@ -75,7 +75,9 @@ switch_destroy()
+ 	tc qdisc del dev $swp2 clsact
+ 	tc qdisc del dev $swp1 clsact
+ 
++	ip link set dev $swp2 down
+ 	ip link set dev $swp2 nomaster
++	ip link set dev $swp1 down
+ 	ip link set dev $swp1 nomaster
+ 	ip link del dev br1
+ }
+diff --git a/tools/testing/selftests/net/forwarding/pedit_l4port.sh b/tools/testing/selftests/net/forwarding/pedit_l4port.sh
+index 5f20d289ee43c..10e594c551175 100755
+--- a/tools/testing/selftests/net/forwarding/pedit_l4port.sh
++++ b/tools/testing/selftests/net/forwarding/pedit_l4port.sh
+@@ -71,7 +71,9 @@ switch_destroy()
+ 	tc qdisc del dev $swp2 clsact
+ 	tc qdisc del dev $swp1 clsact
+ 
++	ip link set dev $swp2 down
+ 	ip link set dev $swp2 nomaster
++	ip link set dev $swp1 down
+ 	ip link set dev $swp1 nomaster
+ 	ip link del dev br1
+ }
+diff --git a/tools/testing/selftests/net/forwarding/skbedit_priority.sh b/tools/testing/selftests/net/forwarding/skbedit_priority.sh
+index e3bd8a6bb8b40..bde11dc27873c 100755
+--- a/tools/testing/selftests/net/forwarding/skbedit_priority.sh
++++ b/tools/testing/selftests/net/forwarding/skbedit_priority.sh
+@@ -72,7 +72,9 @@ switch_destroy()
+ 	tc qdisc del dev $swp2 clsact
+ 	tc qdisc del dev $swp1 clsact
+ 
++	ip link set dev $swp2 down
+ 	ip link set dev $swp2 nomaster
++	ip link set dev $swp1 down
+ 	ip link set dev $swp1 nomaster
+ 	ip link del dev br1
+ }
+diff --git a/tools/testing/selftests/resctrl/README b/tools/testing/selftests/resctrl/README
+index 6e5a0ffa18e8b..20502cb4791fc 100644
+--- a/tools/testing/selftests/resctrl/README
++++ b/tools/testing/selftests/resctrl/README
+@@ -47,7 +47,7 @@ Parameter '-h' shows usage information.
+ 
+ usage: resctrl_tests [-h] [-b "benchmark_cmd [options]"] [-t test list] [-n no_of_bits]
+         -b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CQM default benchmark is builtin fill_buf
+-        -t test list: run tests specified in the test list, e.g. -t mbm, mba, cqm, cat
++        -t test list: run tests specified in the test list, e.g. -t mbm,mba,cqm,cat
+         -n no_of_bits: run cache tests using specified no of bits in cache bit mask
+         -p cpu_no: specify CPU number to run the test. 1 is default
+         -h: help
+diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
+index ac2269610aa9d..bd98746c6f858 100644
+--- a/tools/testing/selftests/resctrl/resctrl_tests.c
++++ b/tools/testing/selftests/resctrl/resctrl_tests.c
+@@ -40,7 +40,7 @@ static void cmd_help(void)
+ 	printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CQM");
+ 	printf("\t default benchmark is builtin fill_buf\n");
+ 	printf("\t-t test list: run tests specified in the test list, ");
+-	printf("e.g. -t mbm, mba, cqm, cat\n");
++	printf("e.g. -t mbm,mba,cqm,cat\n");
+ 	printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n");
+ 	printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n");
+ 	printf("\t-h: help\n");
+@@ -98,7 +98,7 @@ int main(int argc, char **argv)
+ 
+ 					return -1;
+ 				}
+-				token = strtok(NULL, ":\t");
++				token = strtok(NULL, ",");
+ 			}
+ 			break;
+ 		case 'p':
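
The help-text and parser fixes above have to agree: the test list is now split on "," only, matching the "-t mbm,mba,cqm,cat" form the usage text advertises. A minimal standalone sketch of that parsing (a hypothetical demo, not part of the patch):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char arg[] = "mbm,mba,cqm,cat";	/* as passed to -t */
		char *token = strtok(arg, ",");

		while (token) {
			printf("selected test: %s\n", token);
			token = strtok(NULL, ",");	/* same "," delimiter on every call */
		}
		return 0;
	}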


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-07-14 16:18 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-07-14 16:18 UTC (permalink / raw
  To: gentoo-commits

commit:     bf0be5aef037fe660c266831697103692019cbaf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 14 16:17:50 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 14 16:17:50 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bf0be5ae

Linux patch 5.12.17

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1016_linux-5.12.17.patch | 25146 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 25150 insertions(+)

diff --git a/0000_README b/0000_README
index 010c87c..10634ea 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch:  1015_linux-5.12.16.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.16
 
+Patch:  1016_linux-5.12.17.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.17
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1016_linux-5.12.17.patch b/1016_linux-5.12.17.patch
new file mode 100644
index 0000000..f1a8f56
--- /dev/null
+++ b/1016_linux-5.12.17.patch
@@ -0,0 +1,25146 @@
+diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
+index 3c477ba48a312..2243b72e41107 100644
+--- a/Documentation/ABI/testing/evm
++++ b/Documentation/ABI/testing/evm
+@@ -49,8 +49,30 @@ Description:
+ 		modification of EVM-protected metadata and
+ 		disable all further modification of policy
+ 
+-		Note that once a key has been loaded, it will no longer be
+-		possible to enable metadata modification.
++		Echoing a value is additive, the new value is added to the
++		existing initialization flags.
++
++		For example, after::
++
++		  echo 2 ><securityfs>/evm
++
++		another echo can be performed::
++
++		  echo 1 ><securityfs>/evm
++
++		and the resulting value will be 3.
++
++		Note that once an HMAC key has been loaded, it will no longer
++		be possible to enable metadata modification. Signaling that an
++		HMAC key has been loaded will clear the corresponding flag.
++		For example, if the current value is 6 (2 and 4 set)::
++
++		  echo 1 ><securityfs>/evm
++
++		will set the new value to 3 (4 cleared).
++
++		Loading an HMAC key is the only way to disable metadata
++		modification.
+ 
+ 		Until key loading has been signaled EVM can not create
+ 		or validate the 'security.evm' xattr, but returns
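
The flag arithmetic the new EVM text describes is a plain bitwise OR plus one cleared bit. A standalone sketch of the worked example above (flag names follow the kernel's EVM init flags; treat the exact values as illustrative):

	#include <stdio.h>

	#define EVM_INIT_HMAC			0x1	/* HMAC key loaded */
	#define EVM_INIT_X509			0x2	/* x509 key loaded */
	#define EVM_ALLOW_METADATA_WRITES	0x4	/* metadata writes permitted */

	int main(void)
	{
		unsigned int flags = EVM_INIT_X509 | EVM_ALLOW_METADATA_WRITES; /* 6 */

		/* "echo 1 ><securityfs>/evm" is additive, but signaling an HMAC
		 * key also clears the metadata-writes bit. */
		flags |= EVM_INIT_HMAC;
		flags &= ~EVM_ALLOW_METADATA_WRITES;

		printf("resulting value: %u\n", flags);	/* prints 3 */
		return 0;
	}
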
+diff --git a/Documentation/ABI/testing/sysfs-bus-papr-pmem b/Documentation/ABI/testing/sysfs-bus-papr-pmem
+index 8316c33862a04..0aa02bf2bde5c 100644
+--- a/Documentation/ABI/testing/sysfs-bus-papr-pmem
++++ b/Documentation/ABI/testing/sysfs-bus-papr-pmem
+@@ -39,9 +39,11 @@ KernelVersion:	v5.9
+ Contact:	linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
+ Description:
+ 		(RO) Report various performance stats related to papr-scm NVDIMM
+-		device.  Each stat is reported on a new line with each line
+-		composed of a stat-identifier followed by it value. Below are
+-		currently known dimm performance stats which are reported:
++		device. This attribute is only available for NVDIMM devices
++		that support reporting NVDIMM performance stats. Each stat is
++		reported on a new line with each line composed of a
++		stat-identifier followed by its value. Below are currently known
++		dimm performance stats which are reported:
+ 
+ 		* "CtlResCt" : Controller Reset Count
+ 		* "CtlResTm" : Controller Reset Elapsed Time
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 835f810f2f26d..c08e174e6ff43 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -583,6 +583,12 @@
+ 			loops can be debugged more effectively on production
+ 			systems.
+ 
++	clocksource.max_cswd_read_retries= [KNL]
++			Number of clocksource_watchdog() retries due to
++			external delays before the clock will be marked
++			unstable.  Defaults to three retries, that is,
++			four attempts to read the clock under test.
++
+ 	clearcpuid=BITNUM[,BITNUM...] [X86]
+ 			Disable CPUID feature X for the kernel. See
+ 			arch/x86/include/asm/cpufeatures.h for the valid bit
+diff --git a/Documentation/hwmon/max31790.rst b/Documentation/hwmon/max31790.rst
+index f301385d8cef3..7b097c3b9b908 100644
+--- a/Documentation/hwmon/max31790.rst
++++ b/Documentation/hwmon/max31790.rst
+@@ -38,6 +38,7 @@ Sysfs entries
+ fan[1-12]_input    RO  fan tachometer speed in RPM
+ fan[1-12]_fault    RO  fan experienced fault
+ fan[1-6]_target    RW  desired fan speed in RPM
+-pwm[1-6]_enable    RW  regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
+-pwm[1-6]           RW  fan target duty cycle (0-255)
++pwm[1-6]_enable    RW  regulator mode, 0=disabled (duty cycle=0%), 1=manual mode, 2=rpm mode
++pwm[1-6]           RW  read: current pwm duty cycle,
++                       write: target pwm duty cycle (0-255)
+ ================== === =======================================================
+diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
+index 00944e97d6380..09f28ba60e6fc 100644
+--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
++++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
+@@ -3285,7 +3285,7 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
+     :stub-columns: 0
+     :widths:       1 1 2
+ 
+-    * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT``
++    * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED``
+       - 0x00000001
+       -
+     * - ``V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT``
+@@ -3493,6 +3493,9 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
+     * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED``
+       - 0x00000100
+       -
++    * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT``
++      - 0x00000200
++      -
+ 
+ .. c:type:: v4l2_hevc_dpb_entry
+ 
+diff --git a/Makefile b/Makefile
+index bf6accb2328c3..f1d0775925cc6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+@@ -1006,7 +1006,7 @@ LDFLAGS_vmlinux	+= $(call ld-option, -X,)
+ endif
+ 
+ ifeq ($(CONFIG_RELR),y)
+-LDFLAGS_vmlinux	+= --pack-dyn-relocs=relr
++LDFLAGS_vmlinux	+= --pack-dyn-relocs=relr --use-android-relr-tags
+ endif
+ 
+ # We never want expected sections to be placed heuristically by the
+diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
+index f4dd9f3f30010..4b2575f936d46 100644
+--- a/arch/alpha/kernel/smp.c
++++ b/arch/alpha/kernel/smp.c
+@@ -166,7 +166,6 @@ smp_callin(void)
+ 	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
+ 	      cpuid, current, current->active_mm));
+ 
+-	preempt_disable();
+ 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+ }
+ 
+diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
+index 52906d3145371..db0e104d68355 100644
+--- a/arch/arc/kernel/smp.c
++++ b/arch/arc/kernel/smp.c
+@@ -189,7 +189,6 @@ void start_kernel_secondary(void)
+ 	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
+ 
+ 	local_irq_enable();
+-	preempt_disable();
+ 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+ }
+ 
+diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
+index 05c55875835d5..f70a8528b9598 100644
+--- a/arch/arm/boot/dts/sama5d4.dtsi
++++ b/arch/arm/boot/dts/sama5d4.dtsi
+@@ -787,7 +787,7 @@
+ 					0xffffffff 0x3ffcfe7c 0x1c010101	/* pioA */
+ 					0x7fffffff 0xfffccc3a 0x3f00cc3a	/* pioB */
+ 					0xffffffff 0x3ff83fff 0xff00ffff	/* pioC */
+-					0x0003ff00 0x8002a800 0x00000000	/* pioD */
++					0xb003ff00 0x8002a800 0x00000000	/* pioD */
+ 					0xffffffff 0x7fffffff 0x76fff1bf	/* pioE */
+ 					>;
+ 
+diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
+index 83b179692dff7..13d2161929042 100644
+--- a/arch/arm/boot/dts/ste-href.dtsi
++++ b/arch/arm/boot/dts/ste-href.dtsi
+@@ -4,6 +4,7 @@
+  */
+ 
+ #include <dt-bindings/interrupt-controller/irq.h>
++#include <dt-bindings/leds/common.h>
+ #include "ste-href-family-pinctrl.dtsi"
+ 
+ / {
+@@ -64,17 +65,20 @@
+ 					reg = <0>;
+ 					led-cur = /bits/ 8 <0x2f>;
+ 					max-cur = /bits/ 8 <0x5f>;
++					color = <LED_COLOR_ID_BLUE>;
+ 					linux,default-trigger = "heartbeat";
+ 				};
+ 				chan@1 {
+ 					reg = <1>;
+ 					led-cur = /bits/ 8 <0x2f>;
+ 					max-cur = /bits/ 8 <0x5f>;
++					color = <LED_COLOR_ID_BLUE>;
+ 				};
+ 				chan@2 {
+ 					reg = <2>;
+ 					led-cur = /bits/ 8 <0x2f>;
+ 					max-cur = /bits/ 8 <0x5f>;
++					color = <LED_COLOR_ID_BLUE>;
+ 				};
+ 			};
+ 			lp5521@34 {
+@@ -88,16 +92,19 @@
+ 					reg = <0>;
+ 					led-cur = /bits/ 8 <0x2f>;
+ 					max-cur = /bits/ 8 <0x5f>;
++					color = <LED_COLOR_ID_BLUE>;
+ 				};
+ 				chan@1 {
+ 					reg = <1>;
+ 					led-cur = /bits/ 8 <0x2f>;
+ 					max-cur = /bits/ 8 <0x5f>;
++					color = <LED_COLOR_ID_BLUE>;
+ 				};
+ 				chan@2 {
+ 					reg = <2>;
+ 					led-cur = /bits/ 8 <0x2f>;
+ 					max-cur = /bits/ 8 <0x5f>;
++					color = <LED_COLOR_ID_BLUE>;
+ 				};
+ 			};
+ 			bh1780@29 {
+diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
+index 2924d7910b106..eb2190477da10 100644
+--- a/arch/arm/kernel/perf_event_v7.c
++++ b/arch/arm/kernel/perf_event_v7.c
+@@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
+ 		pr_err("CPU%u writing wrong counter %d\n",
+ 			smp_processor_id(), idx);
+ 	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
+-		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
++		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
+ 	} else {
+ 		armv7_pmnc_select_counter(idx);
+-		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
++		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
+ 	}
+ }
+ 
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 74679240a9d8e..c7bb168b0d97c 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -432,7 +432,6 @@ asmlinkage void secondary_start_kernel(void)
+ #endif
+ 	pr_debug("CPU%u: Booted secondary processor\n", cpu);
+ 
+-	preempt_disable();
+ 	trace_hardirqs_off();
+ 
+ 	/*
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index 456dcd4a7793f..6ffbb099fcac7 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -134,7 +134,7 @@
+ 
+ 			uart0: serial@12000 {
+ 				compatible = "marvell,armada-3700-uart";
+-				reg = <0x12000 0x200>;
++				reg = <0x12000 0x18>;
+ 				clocks = <&xtalclk>;
+ 				interrupts =
+ 				<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 858c2fcfc043b..4e4356add46ed 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -46,6 +46,7 @@
+ #define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
+ #define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
+ #define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
++#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
+ 
+ #define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
+ 				     KVM_DIRTY_LOG_INITIALLY_SET)
+diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
+index bd02e99b1a4c5..44dceac442fc5 100644
+--- a/arch/arm64/include/asm/mmu_context.h
++++ b/arch/arm64/include/asm/mmu_context.h
+@@ -177,9 +177,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
+ 		return;
+ 
+ 	if (mm == &init_mm)
+-		ttbr = __pa_symbol(reserved_pg_dir);
++		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
+ 	else
+-		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
++		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
+ 
+ 	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
+ }
+diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
+index 80e946b2abee2..e83f0982b99c1 100644
+--- a/arch/arm64/include/asm/preempt.h
++++ b/arch/arm64/include/asm/preempt.h
+@@ -23,7 +23,7 @@ static inline void preempt_count_set(u64 pc)
+ } while (0)
+ 
+ #define init_idle_preempt_count(p, cpu) do { \
+-	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
++	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+ } while (0)
+ 
+ static inline void set_preempt_need_resched(void)
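
This one-line change is the arm64 half of a pattern repeated throughout this patch: idle threads are now created with their preempt count already at PREEMPT_DISABLED, which is why the explicit preempt_disable() calls in the various arch smp.c bringup paths are dropped elsewhere in this diff. A much-simplified sketch of the invariant (the kernel macros stubbed with illustrative values):

	#include <stdio.h>

	#define PREEMPT_ENABLED		0	/* simplified stand-ins */
	#define PREEMPT_DISABLED	1

	int main(void)
	{
		/* Set once at idle-thread init instead of via preempt_disable()
		 * in every secondary_start_kernel()-style function. */
		int preempt_count = PREEMPT_DISABLED;

		printf("preemptible at bringup: %s\n",
		       preempt_count == PREEMPT_ENABLED ? "yes" : "no");
		return 0;
	}
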
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 4658fcf88c2b4..3baca49fcf6bd 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -312,7 +312,7 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
+ 	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ 	u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
+ 
+-	return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
++	return sysfs_emit(page, "0x%08x\n", slots);
+ }
+ 
+ static DEVICE_ATTR_RO(slots);
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index 61845c0821d9d..68b30e8c22dbf 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -381,7 +381,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
+ 	 * faults in case uaccess_enable() is inadvertently called by the init
+ 	 * thread.
+ 	 */
+-	init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
++	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
+ #endif
+ 
+ 	if (boot_args[1] || boot_args[2] || boot_args[3]) {
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 357590beaabb2..48fd892567390 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -223,7 +223,6 @@ asmlinkage notrace void secondary_start_kernel(void)
+ 		init_gic_priority_masking();
+ 
+ 	rcu_cpu_starting(cpu);
+-	preempt_disable();
+ 	trace_hardirqs_off();
+ 
+ 	/*
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 7730b81aad6d1..8455c5c30116d 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -684,6 +684,10 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
+ 			vgic_v4_load(vcpu);
+ 			preempt_enable();
+ 		}
++
++		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
++			kvm_pmu_handle_pmcr(vcpu,
++					    __vcpu_sys_reg(vcpu, PMCR_EL0));
+ 	}
+ }
+ 
+diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
+index e32c6e139a09d..14957f7c7303a 100644
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -578,6 +578,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
+ 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
+ 
+ 	if (val & ARMV8_PMU_PMCR_P) {
++		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
+ 		for_each_set_bit(i, &mask, 32)
+ 			kvm_pmu_set_counter_value(vcpu, i, 0);
+ 	}
+@@ -850,6 +851,9 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
+ 		   return -EINVAL;
+ 	}
+ 
++	/* One-off reload of the PMU on first run */
++	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
++
+ 	return 0;
+ }
+ 
+diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
+index 0f9f5eef93386..e2993539af8ef 100644
+--- a/arch/csky/kernel/smp.c
++++ b/arch/csky/kernel/smp.c
+@@ -281,7 +281,6 @@ void csky_start_secondary(void)
+ 	pr_info("CPU%u Online: %s...\n", cpu, __func__);
+ 
+ 	local_irq_enable();
+-	preempt_disable();
+ 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+ }
+ 
+diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
+index ffade2f9a4c87..cd847ad62c7ee 100644
+--- a/arch/csky/mm/syscache.c
++++ b/arch/csky/mm/syscache.c
+@@ -12,14 +12,17 @@ SYSCALL_DEFINE3(cacheflush,
+ 		int, cache)
+ {
+ 	switch (cache) {
+-	case ICACHE:
+ 	case BCACHE:
+-		flush_icache_mm_range(current->mm,
+-				(unsigned long)addr,
+-				(unsigned long)addr + bytes);
+ 	case DCACHE:
+ 		dcache_wb_range((unsigned long)addr,
+ 				(unsigned long)addr + bytes);
++		if (cache != BCACHE)
++			break;
++		fallthrough;
++	case ICACHE:
++		flush_icache_mm_range(current->mm,
++				(unsigned long)addr,
++				(unsigned long)addr + bytes);
+ 		break;
+ 	default:
+ 		return -EINVAL;
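
The rewritten switch makes BCACHE (both caches) do the D-cache write-back and then fall through to the I-cache flush, while ICACHE alone no longer touches the D-cache. A standalone sketch of the corrected dispatch (stub functions, same control flow):

	#include <stdio.h>

	enum { ICACHE, DCACHE, BCACHE };

	static void dcache_wb(void)    { puts("dcache write-back"); }
	static void icache_flush(void) { puts("icache flush"); }

	static int cacheflush(int cache)
	{
		switch (cache) {
		case BCACHE:
		case DCACHE:
			dcache_wb();
			if (cache != BCACHE)
				break;
			/* fallthrough: BCACHE also needs the icache flushed */
		case ICACHE:
			icache_flush();
			break;
		default:
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		cacheflush(BCACHE);	/* prints both lines */
		cacheflush(ICACHE);	/* prints only "icache flush" */
		return 0;
	}
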
+diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
+index 36a69b4e61690..5bfc79be4cefe 100644
+--- a/arch/ia64/kernel/mca_drv.c
++++ b/arch/ia64/kernel/mca_drv.c
+@@ -343,7 +343,7 @@ init_record_index_pools(void)
+ 
+ 	/* - 2 - */
+ 	sect_min_size = sal_log_sect_min_sizes[0];
+-	for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++)
++	for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
+ 		if (sect_min_size > sal_log_sect_min_sizes[i])
+ 			sect_min_size = sal_log_sect_min_sizes[i];
+ 
+diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
+index 49b4885809399..d10f780c13b9e 100644
+--- a/arch/ia64/kernel/smpboot.c
++++ b/arch/ia64/kernel/smpboot.c
+@@ -441,7 +441,6 @@ start_secondary (void *unused)
+ #endif
+ 	efi_map_pal_code();
+ 	cpu_init();
+-	preempt_disable();
+ 	smp_callin();
+ 
+ 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
+index 4d59ec2f5b8d6..d964c1f273995 100644
+--- a/arch/m68k/Kconfig.machine
++++ b/arch/m68k/Kconfig.machine
+@@ -25,6 +25,9 @@ config ATARI
+ 	  this kernel on an Atari, say Y here and browse the material
+ 	  available in <file:Documentation/m68k>; otherwise say N.
+ 
++config ATARI_KBD_CORE
++	bool
++
+ config MAC
+ 	bool "Macintosh support"
+ 	depends on MMU
+diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
+index 292d0425717f3..92a3802100178 100644
+--- a/arch/mips/include/asm/highmem.h
++++ b/arch/mips/include/asm/highmem.h
+@@ -36,7 +36,7 @@ extern pte_t *pkmap_page_table;
+  * easily, subsequent pte tables have to be allocated in one physical
+  * chunk of RAM.
+  */
+-#ifdef CONFIG_PHYS_ADDR_T_64BIT
++#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+ #define LAST_PKMAP 512
+ #else
+ #define LAST_PKMAP 1024
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index ef86fbad85460..d542fb7af3ba2 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
+ 	 */
+ 
+ 	calibrate_delay();
+-	preempt_disable();
+ 	cpu = smp_processor_id();
+ 	cpu_data[cpu].udelay_val = loops_per_jiffy;
+ 
+diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
+index 48e1092a64de3..415e209732a3d 100644
+--- a/arch/openrisc/kernel/smp.c
++++ b/arch/openrisc/kernel/smp.c
+@@ -145,8 +145,6 @@ asmlinkage __init void secondary_start_kernel(void)
+ 	set_cpu_online(cpu, true);
+ 
+ 	local_irq_enable();
+-
+-	preempt_disable();
+ 	/*
+ 	 * OK, it's off to the idle thread for us
+ 	 */
+diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
+index 10227f667c8a6..1405b603b91b6 100644
+--- a/arch/parisc/kernel/smp.c
++++ b/arch/parisc/kernel/smp.c
+@@ -302,7 +302,6 @@ void __init smp_callin(unsigned long pdce_proc)
+ #endif
+ 
+ 	smp_cpu_init(slave_id);
+-	preempt_disable();
+ 
+ 	flush_cache_all_local(); /* start with known state */
+ 	flush_tlb_all_local(NULL);
+diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
+index 98c8bd155bf9d..b167186aaee4a 100644
+--- a/arch/powerpc/include/asm/cputhreads.h
++++ b/arch/powerpc/include/asm/cputhreads.h
+@@ -98,6 +98,36 @@ static inline int cpu_last_thread_sibling(int cpu)
+ 	return cpu | (threads_per_core - 1);
+ }
+ 
++/*
++ * tlb_thread_siblings are siblings which share a TLB. This is not
++ * architected, is not something a hypervisor could emulate and a future
++ * CPU may change behaviour even in compat mode, so this should only be
++ * used on PowerNV, and only with care.
++ */
++static inline int cpu_first_tlb_thread_sibling(int cpu)
++{
++	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
++		return cpu & ~0x6;	/* Big Core */
++	else
++		return cpu_first_thread_sibling(cpu);
++}
++
++static inline int cpu_last_tlb_thread_sibling(int cpu)
++{
++	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
++		return cpu | 0x6;	/* Big Core */
++	else
++		return cpu_last_thread_sibling(cpu);
++}
++
++static inline int cpu_tlb_thread_sibling_step(void)
++{
++	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
++		return 2;		/* Big Core */
++	else
++		return 1;
++}
++
+ static inline u32 get_tensr(void)
+ {
+ #ifdef	CONFIG_BOOKE
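
Per the masks in the helpers above, a POWER9 big core (threads_per_core == 8) shares a TLB between every second thread, so the walk covers 0,2,4,6 or 1,3,5,7 rather than all eight threads. A standalone sketch of that walk for the big-core case (masks inlined from the helpers):

	#include <stdio.h>

	int main(void)
	{
		int cpu = 5;			/* any thread of the big core */
		int first = cpu & ~0x6;		/* cpu_first_tlb_thread_sibling */
		int last  = cpu | 0x6;		/* cpu_last_tlb_thread_sibling */
		int i;

		for (i = first; i <= last; i += 2)	/* step of 2 on big cores */
			printf("TLB sibling: %d\n", i);	/* prints 1, 3, 5, 7 */
		return 0;
	}
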
+diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
+index 31ed5356590a2..6d15728f06808 100644
+--- a/arch/powerpc/include/asm/interrupt.h
++++ b/arch/powerpc/include/asm/interrupt.h
+@@ -120,6 +120,7 @@ struct interrupt_nmi_state {
+ 	u8 irq_happened;
+ #endif
+ 	u8 ftrace_enabled;
++	u64 softe;
+ #endif
+ };
+ 
+@@ -129,6 +130,7 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
+ #ifdef CONFIG_PPC_BOOK3S_64
+ 	state->irq_soft_mask = local_paca->irq_soft_mask;
+ 	state->irq_happened = local_paca->irq_happened;
++	state->softe = regs->softe;
+ 
+ 	/*
+ 	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
+@@ -178,6 +180,7 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
+ #ifdef CONFIG_PPC_BOOK3S_64
+ 	/* Check we didn't change the pending interrupt mask. */
+ 	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
++	regs->softe = state->softe;
+ 	local_paca->irq_happened = state->irq_happened;
+ 	local_paca->irq_soft_mask = state->irq_soft_mask;
+ #endif
+diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h
+index 2fca299f7e192..c63105d2c9e7c 100644
+--- a/arch/powerpc/include/asm/kvm_guest.h
++++ b/arch/powerpc/include/asm/kvm_guest.h
+@@ -16,10 +16,10 @@ static inline bool is_kvm_guest(void)
+ 	return static_branch_unlikely(&kvm_guest);
+ }
+ 
+-bool check_kvm_guest(void);
++int check_kvm_guest(void);
+ #else
+ static inline bool is_kvm_guest(void) { return false; }
+-static inline bool check_kvm_guest(void) { return false; }
++static inline int check_kvm_guest(void) { return 0; }
+ #endif
+ 
+ #endif /* _ASM_POWERPC_KVM_GUEST_H_ */
+diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
+index c9e2819b095ab..c7022c41cc314 100644
+--- a/arch/powerpc/kernel/firmware.c
++++ b/arch/powerpc/kernel/firmware.c
+@@ -23,18 +23,20 @@ EXPORT_SYMBOL_GPL(powerpc_firmware_features);
+ 
+ #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
+ DEFINE_STATIC_KEY_FALSE(kvm_guest);
+-bool check_kvm_guest(void)
++int __init check_kvm_guest(void)
+ {
+ 	struct device_node *hyper_node;
+ 
+ 	hyper_node = of_find_node_by_path("/hypervisor");
+ 	if (!hyper_node)
+-		return false;
++		return 0;
+ 
+ 	if (!of_device_is_compatible(hyper_node, "linux,kvm"))
+-		return false;
++		return 0;
+ 
+ 	static_branch_enable(&kvm_guest);
+-	return true;
++
++	return 0;
+ }
++core_initcall(check_kvm_guest); // before kvm_guest_init()
+ #endif
+diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
+index 667104d4c4550..2fff886c549d0 100644
+--- a/arch/powerpc/kernel/mce_power.c
++++ b/arch/powerpc/kernel/mce_power.c
+@@ -481,12 +481,11 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
+ 	return -1;
+ }
+ 
+-static int mce_handle_ierror(struct pt_regs *regs,
++static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
+ 		const struct mce_ierror_table table[],
+ 		struct mce_error_info *mce_err, uint64_t *addr,
+ 		uint64_t *phys_addr)
+ {
+-	uint64_t srr1 = regs->msr;
+ 	int handled = 0;
+ 	int i;
+ 
+@@ -695,19 +694,19 @@ static long mce_handle_ue_error(struct pt_regs *regs,
+ }
+ 
+ static long mce_handle_error(struct pt_regs *regs,
++		unsigned long srr1,
+ 		const struct mce_derror_table dtable[],
+ 		const struct mce_ierror_table itable[])
+ {
+ 	struct mce_error_info mce_err = { 0 };
+ 	uint64_t addr, phys_addr = ULONG_MAX;
+-	uint64_t srr1 = regs->msr;
+ 	long handled;
+ 
+ 	if (SRR1_MC_LOADSTORE(srr1))
+ 		handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
+ 				&phys_addr);
+ 	else
+-		handled = mce_handle_ierror(regs, itable, &mce_err, &addr,
++		handled = mce_handle_ierror(regs, srr1, itable, &mce_err, &addr,
+ 				&phys_addr);
+ 
+ 	if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
+@@ -723,16 +722,20 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
+ 	/* P7 DD1 leaves top bits of DSISR undefined */
+ 	regs->dsisr &= 0x0000ffff;
+ 
+-	return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
++	return mce_handle_error(regs, regs->msr,
++			mce_p7_derror_table, mce_p7_ierror_table);
+ }
+ 
+ long __machine_check_early_realmode_p8(struct pt_regs *regs)
+ {
+-	return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
++	return mce_handle_error(regs, regs->msr,
++			mce_p8_derror_table, mce_p8_ierror_table);
+ }
+ 
+ long __machine_check_early_realmode_p9(struct pt_regs *regs)
+ {
++	unsigned long srr1 = regs->msr;
++
+ 	/*
+ 	 * On POWER9 DD2.1 and below, it's possible to get a machine check
+ 	 * caused by a paste instruction where only DSISR bit 25 is set. This
+@@ -746,10 +749,39 @@ long __machine_check_early_realmode_p9(struct pt_regs *regs)
+ 	if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
+ 		return 1;
+ 
+-	return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
++	/*
++	 * Async machine check due to bad real address from store or foreign
++	 * link time out comes with the load/store bit (PPC bit 42) set in
++	 * SRR1, but the cause comes in SRR1 not DSISR. Clear bit 42 so we're
++	 * directed to the ierror table so it will find the cause (which
++	 * describes it correctly as a store error).
++	 */
++	if (SRR1_MC_LOADSTORE(srr1) &&
++			((srr1 & 0x081c0000) == 0x08140000 ||
++			 (srr1 & 0x081c0000) == 0x08180000)) {
++		srr1 &= ~PPC_BIT(42);
++	}
++
++	return mce_handle_error(regs, srr1,
++			mce_p9_derror_table, mce_p9_ierror_table);
+ }
+ 
+ long __machine_check_early_realmode_p10(struct pt_regs *regs)
+ {
+-	return mce_handle_error(regs, mce_p10_derror_table, mce_p10_ierror_table);
++	unsigned long srr1 = regs->msr;
++
++	/*
++	 * Async machine check due to bad real address from store comes with
++	 * the load/store bit (PPC bit 42) set in SRR1, but the cause comes in
++	 * SRR1 not DSISR. Clear bit 42 so we're directed to the ierror table
++	 * so it will find the cause (which describes it correctly as a store
++	 * error).
++	 */
++	if (SRR1_MC_LOADSTORE(srr1) &&
++			(srr1 & 0x081c0000) == 0x08140000) {
++		srr1 &= ~PPC_BIT(42);
++	}
++
++	return mce_handle_error(regs, srr1,
++			mce_p10_derror_table, mce_p10_ierror_table);
+ }
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 3231c2df9e261..03d7261e14923 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1212,6 +1212,19 @@ struct task_struct *__switch_to(struct task_struct *prev,
+ 			__flush_tlb_pending(batch);
+ 		batch->active = 0;
+ 	}
++
++	/*
++	 * On POWER9 the copy-paste buffer can only paste into
++	 * foreign real addresses, so unprivileged processes can not
++	 * see the data or use it in any way unless they have
++	 * foreign real mappings. If the new process has the foreign
++	 * real address mappings, we must issue a cp_abort to clear
++	 * any state and prevent snooping, corruption or a covert
++	 * channel. ISA v3.1 supports paste into local memory.
++	 */
++	if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
++			atomic_read(&new->mm->context.vas_windows)))
++		asm volatile(PPC_CP_ABORT);
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+ 
+ #ifdef CONFIG_PPC_ADV_DEBUG_REGS
+@@ -1257,30 +1270,33 @@ struct task_struct *__switch_to(struct task_struct *prev,
+ 
+ 	last = _switch(old_thread, new_thread);
+ 
++	/*
++	 * Nothing after _switch will be run for newly created tasks,
++	 * because they switch directly to ret_from_fork/ret_from_kernel_thread
++	 * etc. Code added here should have a comment explaining why that is
++	 * okay.
++	 */
++
+ #ifdef CONFIG_PPC_BOOK3S_64
++	/*
++	 * This applies to a process that was context switched while inside
++	 * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
++	 * deactivated above, before _switch(). This will never be the case
++	 * for new tasks.
++	 */
+ 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
+ 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
+ 		batch = this_cpu_ptr(&ppc64_tlb_batch);
+ 		batch->active = 1;
+ 	}
+ 
+-	if (current->thread.regs) {
++	/*
++	 * Math facilities are masked out of the child MSR in copy_thread.
++	 * A new task does not need to restore_math because it will
++	 * demand fault them.
++	 */
++	if (current->thread.regs)
+ 		restore_math(current->thread.regs);
+-
+-		/*
+-		 * On POWER9 the copy-paste buffer can only paste into
+-		 * foreign real addresses, so unprivileged processes can not
+-		 * see the data or use it in any way unless they have
+-		 * foreign real mappings. If the new process has the foreign
+-		 * real address mappings, we must issue a cp_abort to clear
+-		 * any state and prevent snooping, corruption or a covert
+-		 * channel. ISA v3.1 supports paste into local memory.
+-		 */
+-		if (current->mm &&
+-			(cpu_has_feature(CPU_FTR_ARCH_31) ||
+-			atomic_read(&current->mm->context.vas_windows)))
+-			asm volatile(PPC_CP_ABORT);
+-	}
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+ 
+ 	return last;
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index c2473e20f5f5b..216919de87d7e 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -619,6 +619,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
+ 	/*
+ 	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
+ 	 */
++	set_cpu_online(smp_processor_id(), false);
++
+ 	spin_begin();
+ 	while (1)
+ 		spin_cpu_relax();
+@@ -634,6 +636,15 @@ void smp_send_stop(void)
+ static void stop_this_cpu(void *dummy)
+ {
+ 	hard_irq_disable();
++
++	/*
++	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
++	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
++	 * to know other CPUs are offline before it breaks locks to flush
++	 * printk buffers, in case we panic()ed while holding the lock.
++	 */
++	set_cpu_online(smp_processor_id(), false);
++
+ 	spin_begin();
+ 	while (1)
+ 		spin_cpu_relax();
+@@ -1530,7 +1541,6 @@ void start_secondary(void *unused)
+ 	smp_store_cpu_info(cpu);
+ 	set_dec(tb_ticks_per_jiffy);
+ 	rcu_cpu_starting(cpu);
+-	preempt_disable();
+ 	cpu_callin_map[cpu] = 1;
+ 
+ 	if (smp_ops->setup_cpu)
+diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
+index b6440657ef92d..b60c00e98dc01 100644
+--- a/arch/powerpc/kernel/stacktrace.c
++++ b/arch/powerpc/kernel/stacktrace.c
+@@ -230,17 +230,31 @@ static void handle_backtrace_ipi(struct pt_regs *regs)
+ 
+ static void raise_backtrace_ipi(cpumask_t *mask)
+ {
++	struct paca_struct *p;
+ 	unsigned int cpu;
++	u64 delay_us;
+ 
+ 	for_each_cpu(cpu, mask) {
+-		if (cpu == smp_processor_id())
++		if (cpu == smp_processor_id()) {
+ 			handle_backtrace_ipi(NULL);
+-		else
+-			smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
+-	}
++			continue;
++		}
+ 
+-	for_each_cpu(cpu, mask) {
+-		struct paca_struct *p = paca_ptrs[cpu];
++		delay_us = 5 * USEC_PER_SEC;
++
++		if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
++			// Now wait up to 5s for the other CPU to do its backtrace
++			while (cpumask_test_cpu(cpu, mask) && delay_us) {
++				udelay(1);
++				delay_us--;
++			}
++
++			// Other CPU cleared itself from the mask
++			if (delay_us)
++				continue;
++		}
++
++		p = paca_ptrs[cpu];
+ 
+ 		cpumask_clear_cpu(cpu, mask);
+ 
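The reworked loop above only falls back to dumping a target CPU's saved registers after giving it a bounded window to print its own backtrace. A standalone sketch of that bounded wait (stubs in place of the cpumask test and udelay):

	#include <stdio.h>
	#include <stdbool.h>

	#define USEC_PER_SEC 1000000ULL

	static bool cpu_still_pending(void) { return false; }	/* stub cpumask_test_cpu() */
	static void udelay(unsigned int us) { (void)us; }	/* stub busy-wait */

	int main(void)
	{
		unsigned long long delay_us = 5 * USEC_PER_SEC;

		while (cpu_still_pending() && delay_us) {
			udelay(1);
			delay_us--;
		}

		if (delay_us)
			puts("target CPU printed its own backtrace");
		else
			puts("timed out: dump the target's saved regs instead");
		return 0;
	}
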
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 60c5bc0c130cf..1c6e0a52fb532 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2619,7 +2619,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
+ 	cpumask_t *cpu_in_guest;
+ 	int i;
+ 
+-	cpu = cpu_first_thread_sibling(cpu);
++	cpu = cpu_first_tlb_thread_sibling(cpu);
+ 	if (nested) {
+ 		cpumask_set_cpu(cpu, &nested->need_tlb_flush);
+ 		cpu_in_guest = &nested->cpu_in_guest;
+@@ -2633,9 +2633,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
+ 	 * the other side is the first smp_mb() in kvmppc_run_core().
+ 	 */
+ 	smp_mb();
+-	for (i = 0; i < threads_per_core; ++i)
+-		if (cpumask_test_cpu(cpu + i, cpu_in_guest))
+-			smp_call_function_single(cpu + i, do_nothing, NULL, 1);
++	for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
++					i += cpu_tlb_thread_sibling_step())
++		if (cpumask_test_cpu(i, cpu_in_guest))
++			smp_call_function_single(i, do_nothing, NULL, 1);
+ }
+ 
+ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
+@@ -2666,8 +2667,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
+ 	 */
+ 	if (prev_cpu != pcpu) {
+ 		if (prev_cpu >= 0 &&
+-		    cpu_first_thread_sibling(prev_cpu) !=
+-		    cpu_first_thread_sibling(pcpu))
++		    cpu_first_tlb_thread_sibling(prev_cpu) !=
++		    cpu_first_tlb_thread_sibling(pcpu))
+ 			radix_flush_cpu(kvm, prev_cpu, vcpu);
+ 		if (nested)
+ 			nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
+diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
+index 158d309b42a38..b5e5d07cb40f9 100644
+--- a/arch/powerpc/kvm/book3s_hv_builtin.c
++++ b/arch/powerpc/kvm/book3s_hv_builtin.c
+@@ -797,7 +797,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
+ 	 * Thus we make all 4 threads use the same bit.
+ 	 */
+ 	if (cpu_has_feature(CPU_FTR_ARCH_300))
+-		pcpu = cpu_first_thread_sibling(pcpu);
++		pcpu = cpu_first_tlb_thread_sibling(pcpu);
+ 
+ 	if (nested)
+ 		need_tlb_flush = &nested->need_tlb_flush;
+diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
+index 0cd0e7aad588b..bf892e134727c 100644
+--- a/arch/powerpc/kvm/book3s_hv_nested.c
++++ b/arch/powerpc/kvm/book3s_hv_nested.c
+@@ -53,7 +53,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+ 	hr->dawrx1 = vcpu->arch.dawrx1;
+ }
+ 
+-static void byteswap_pt_regs(struct pt_regs *regs)
++/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
++static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
+ {
+ 	unsigned long *addr = (unsigned long *) regs;
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+index 88da2764c1bb9..3ddc83d2e8493 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+@@ -67,7 +67,7 @@ static int global_invalidates(struct kvm *kvm)
+ 		 * so use the bit for the first thread to represent the core.
+ 		 */
+ 		if (cpu_has_feature(CPU_FTR_ARCH_300))
+-			cpu = cpu_first_thread_sibling(cpu);
++			cpu = cpu_first_tlb_thread_sibling(cpu);
+ 		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
+ 	}
+ 
+diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
+index c855a0aeb49cc..d7ab868aab54a 100644
+--- a/arch/powerpc/platforms/cell/smp.c
++++ b/arch/powerpc/platforms/cell/smp.c
+@@ -78,9 +78,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
+ 
+ 	pcpu = get_hard_smp_processor_id(lcpu);
+ 
+-	/* Fixup atomic count: it exited inside IRQ handler. */
+-	task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count	= 0;
+-
+ 	/*
+ 	 * If the RTAS start-cpu token does not exist then presume the
+ 	 * cpu is already spinning.
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index 835163f54244a..057acbb9116dd 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -18,6 +18,7 @@
+ #include <asm/plpar_wrappers.h>
+ #include <asm/papr_pdsm.h>
+ #include <asm/mce.h>
++#include <asm/unaligned.h>
+ 
+ #define BIND_ANY_ADDR (~0ul)
+ 
+@@ -867,6 +868,20 @@ static ssize_t flags_show(struct device *dev,
+ }
+ DEVICE_ATTR_RO(flags);
+ 
++static umode_t papr_nd_attribute_visible(struct kobject *kobj,
++					 struct attribute *attr, int n)
++{
++	struct device *dev = kobj_to_dev(kobj);
++	struct nvdimm *nvdimm = to_nvdimm(dev);
++	struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);
++
++	/* If perf-stats are not available, hide the perf_stats sysfs attribute */
++	if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
++		return 0;
++
++	return attr->mode;
++}
++
+ /* papr_scm specific dimm attributes */
+ static struct attribute *papr_nd_attributes[] = {
+ 	&dev_attr_flags.attr,
+@@ -876,6 +891,7 @@ static struct attribute *papr_nd_attributes[] = {
+ 
+ static struct attribute_group papr_nd_attribute_group = {
+ 	.name = "papr",
++	.is_visible = papr_nd_attribute_visible,
+ 	.attrs = papr_nd_attributes,
+ };
+ 
+@@ -891,7 +907,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+ 	struct nd_region_desc ndr_desc;
+ 	unsigned long dimm_flags;
+ 	int target_nid, online_nid;
+-	ssize_t stat_size;
+ 
+ 	p->bus_desc.ndctl = papr_scm_ndctl;
+ 	p->bus_desc.module = THIS_MODULE;
+@@ -962,16 +977,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+ 	list_add_tail(&p->region_list, &papr_nd_regions);
+ 	mutex_unlock(&papr_ndr_lock);
+ 
+-	/* Try retriving the stat buffer and see if its supported */
+-	stat_size = drc_pmem_query_stats(p, NULL, 0);
+-	if (stat_size > 0) {
+-		p->stat_buffer_len = stat_size;
+-		dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
+-			p->stat_buffer_len);
+-	} else {
+-		dev_info(&p->pdev->dev, "Dimm performance stats unavailable\n");
+-	}
+-
+ 	return 0;
+ 
+ err:	nvdimm_bus_unregister(p->bus);
+@@ -1047,8 +1052,10 @@ static int papr_scm_probe(struct platform_device *pdev)
+ 	u32 drc_index, metadata_size;
+ 	u64 blocks, block_size;
+ 	struct papr_scm_priv *p;
++	u8 uuid_raw[UUID_SIZE];
+ 	const char *uuid_str;
+-	u64 uuid[2];
++	ssize_t stat_size;
++	uuid_t uuid;
+ 	int rc;
+ 
+ 	/* check we have all the required DT properties */
+@@ -1090,16 +1097,23 @@ static int papr_scm_probe(struct platform_device *pdev)
+ 	p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
+ 
+ 	/* We just need to ensure that set cookies are unique across */
+-	uuid_parse(uuid_str, (uuid_t *) uuid);
++	uuid_parse(uuid_str, &uuid);
++
+ 	/*
+-	 * cookie1 and cookie2 are not really little endian
+-	 * we store a little endian representation of the
+-	 * uuid str so that we can compare this with the label
+-	 * area cookie irrespective of the endian config with which
+-	 * the kernel is built.
++	 * The cookie1 and cookie2 are not really little endian.
++	 * We store a raw buffer representation of the
++	 * uuid string so that we can compare this with the label
++	 * area cookie irrespective of the endian configuration
++	 * with which the kernel is built.
++	 *
++	 * Historically we stored the cookie in the below format.
++	 * for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
++	 *	cookie1 was 0xfd423b0b671b5172
++	 *	cookie2 was 0xaabce8cae35b1d8d
+ 	 */
+-	p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
+-	p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
++	export_uuid(uuid_raw, &uuid);
++	p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
++	p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);
+ 
+ 	/* might be zero */
+ 	p->metadata_size = metadata_size;
+@@ -1124,6 +1138,14 @@ static int papr_scm_probe(struct platform_device *pdev)
+ 	p->res.name  = pdev->name;
+ 	p->res.flags = IORESOURCE_MEM;
+ 
++	/* Try retrieving the stat buffer and see if it's supported */
++	stat_size = drc_pmem_query_stats(p, NULL, 0);
++	if (stat_size > 0) {
++		p->stat_buffer_len = stat_size;
++		dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
++			p->stat_buffer_len);
++	}
++
+ 	rc = papr_scm_nvdimm_init(p);
+ 	if (rc)
+ 		goto err2;
+diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
+index c70b4be9f0a54..f47429323eee9 100644
+--- a/arch/powerpc/platforms/pseries/smp.c
++++ b/arch/powerpc/platforms/pseries/smp.c
+@@ -105,9 +105,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
+ 		return 1;
+ 	}
+ 
+-	/* Fixup atomic count: it exited inside IRQ handler. */
+-	task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count	= 0;
+-
+ 	/* 
+ 	 * If the RTAS start-cpu token does not exist then presume the
+ 	 * cpu is already spinning.
+@@ -211,7 +208,9 @@ static __init void pSeries_smp_probe(void)
+ 	if (!cpu_has_feature(CPU_FTR_SMT))
+ 		return;
+ 
+-	if (check_kvm_guest()) {
++	check_kvm_guest();
++
++	if (is_kvm_guest()) {
+ 		/*
+ 		 * KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp
+ 		 * faults to the hypervisor which then reads the instruction
+diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
+index 5e276c25646fc..1941a6ce86a15 100644
+--- a/arch/riscv/kernel/smpboot.c
++++ b/arch/riscv/kernel/smpboot.c
+@@ -176,7 +176,6 @@ asmlinkage __visible void smp_callin(void)
+ 	 * Disable preemption before enabling interrupts, so we don't try to
+ 	 * schedule a CPU that hasn't actually started yet.
+ 	 */
+-	preempt_disable();
+ 	local_irq_enable();
+ 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+ }
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index c1ff874e6c2e6..4fcd460f496ec 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -160,6 +160,7 @@ config S390
+ 	select HAVE_FUTEX_CMPXCHG if FUTEX
+ 	select HAVE_GCC_PLUGINS
+ 	select HAVE_GENERIC_VDSO
++	select HAVE_IOREMAP_PROT if PCI
+ 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ 	select HAVE_KERNEL_BZIP2
+ 	select HAVE_KERNEL_GZIP
+@@ -858,7 +859,7 @@ config CMM_IUCV
+ config APPLDATA_BASE
+ 	def_bool n
+ 	prompt "Linux - VM Monitor Stream, base infrastructure"
+-	depends on PROC_FS
++	depends on PROC_SYSCTL
+ 	help
+ 	  This provides a kernel interface for creating and updating z/VM APPLDATA
+ 	  monitor records. The monitor records are updated at certain time
+diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
+index 87641dd65ccf9..b3501ea5039e4 100644
+--- a/arch/s390/boot/uv.c
++++ b/arch/s390/boot/uv.c
+@@ -36,6 +36,7 @@ void uv_query_info(void)
+ 		uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
+ 		uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
+ 		uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
++		uv_info.uv_feature_indications = uvcb.uv_feature_indications;
+ 	}
+ 
+ #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 29c7ecd5ad1d5..adea53f69bfd3 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -344,8 +344,6 @@ static inline int is_module_addr(void *addr)
+ #define PTRS_PER_P4D	_CRST_ENTRIES
+ #define PTRS_PER_PGD	_CRST_ENTRIES
+ 
+-#define MAX_PTRS_PER_P4D	PTRS_PER_P4D
+-
+ /*
+  * Segment table and region3 table entry encoding
+  * (R = read-only, I = invalid, y = young bit):
+@@ -865,6 +863,25 @@ static inline int pte_unused(pte_t pte)
+ 	return pte_val(pte) & _PAGE_UNUSED;
+ }
+ 
++/*
++ * Extract the pgprot value from the given pte while at the same time making it
++ * usable for kernel address space mappings where fault driven dirty and
++ * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
++ * must not be set.
++ */
++static inline pgprot_t pte_pgprot(pte_t pte)
++{
++	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
++
++	if (pte_write(pte))
++		pte_flags |= pgprot_val(PAGE_KERNEL);
++	else
++		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
++	pte_flags |= pte_val(pte) & mio_wb_bit_mask;
++
++	return __pgprot(pte_flags);
++}
++
+ /*
+  * pgd/pmd/pte modification functions
+  */
+diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
+index b49e0492842cc..d9d5350cc3ec3 100644
+--- a/arch/s390/include/asm/preempt.h
++++ b/arch/s390/include/asm/preempt.h
+@@ -29,12 +29,6 @@ static inline void preempt_count_set(int pc)
+ 				  old, new) != old);
+ }
+ 
+-#define init_task_preempt_count(p)	do { } while (0)
+-
+-#define init_idle_preempt_count(p, cpu)	do { \
+-	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+-} while (0)
+-
+ static inline void set_preempt_need_resched(void)
+ {
+ 	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+@@ -88,12 +82,6 @@ static inline void preempt_count_set(int pc)
+ 	S390_lowcore.preempt_count = pc;
+ }
+ 
+-#define init_task_preempt_count(p)	do { } while (0)
+-
+-#define init_idle_preempt_count(p, cpu)	do { \
+-	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+-} while (0)
+-
+ static inline void set_preempt_need_resched(void)
+ {
+ }
+@@ -130,6 +118,10 @@ static inline bool should_resched(int preempt_offset)
+ 
+ #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+ 
++#define init_task_preempt_count(p)	do { } while (0)
++/* Deferred to CPU bringup time */
++#define init_idle_preempt_count(p, cpu)	do { } while (0)
++
+ #ifdef CONFIG_PREEMPTION
+ extern void preempt_schedule(void);
+ #define __preempt_schedule() preempt_schedule()
+diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
+index 7b98d4caee779..12c5f006c1364 100644
+--- a/arch/s390/include/asm/uv.h
++++ b/arch/s390/include/asm/uv.h
+@@ -73,6 +73,10 @@ enum uv_cmds_inst {
+ 	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
+ };
+ 
++enum uv_feat_ind {
++	BIT_UV_FEAT_MISC = 0,
++};
++
+ struct uv_cb_header {
+ 	u16 len;
+ 	u16 cmd;	/* Command Code */
+@@ -97,7 +101,8 @@ struct uv_cb_qui {
+ 	u64 max_guest_stor_addr;
+ 	u8  reserved88[158 - 136];
+ 	u16 max_guest_cpu_id;
+-	u8  reserveda0[200 - 160];
++	u64 uv_feature_indications;
++	u8  reserveda0[200 - 168];
+ } __packed __aligned(8);
+ 
+ /* Initialize Ultravisor */
+@@ -274,6 +279,7 @@ struct uv_info {
+ 	unsigned long max_sec_stor_addr;
+ 	unsigned int max_num_sec_conf;
+ 	unsigned short max_guest_cpu_id;
++	unsigned long uv_feature_indications;
+ };
+ 
+ extern struct uv_info uv_info;
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 5aab59ad56881..382d73da134cf 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -466,6 +466,7 @@ static void __init setup_lowcore_dat_off(void)
+ 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
+ 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+ 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
++	lc->preempt_count = PREEMPT_DISABLED;
+ 
+ 	set_prefix((u32)(unsigned long) lc);
+ 	lowcore_ptr[0] = lc;
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 58c8afa3da65d..4b9960da3fc9c 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -219,6 +219,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
+ 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
+ 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+ 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
++	lc->preempt_count = PREEMPT_DISABLED;
+ 	if (nmi_alloc_per_cpu(lc))
+ 		goto out_stack;
+ 	lowcore_ptr[cpu] = lc;
+@@ -877,7 +878,6 @@ static void smp_init_secondary(void)
+ 	restore_access_regs(S390_lowcore.access_regs_save_area);
+ 	cpu_init();
+ 	rcu_cpu_starting(cpu);
+-	preempt_disable();
+ 	init_cpu_timer();
+ 	vtime_init();
+ 	vdso_getcpu_init();
+diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
+index b2d2ad1530676..c811b2313100b 100644
+--- a/arch/s390/kernel/uv.c
++++ b/arch/s390/kernel/uv.c
+@@ -364,6 +364,15 @@ static ssize_t uv_query_facilities(struct kobject *kobj,
+ static struct kobj_attribute uv_query_facilities_attr =
+ 	__ATTR(facilities, 0444, uv_query_facilities, NULL);
+ 
++static ssize_t uv_query_feature_indications(struct kobject *kobj,
++					    struct kobj_attribute *attr, char *buf)
++{
++	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
++}
++
++static struct kobj_attribute uv_query_feature_indications_attr =
++	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
++
+ static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
+ 				       struct kobj_attribute *attr, char *page)
+ {
+@@ -396,6 +405,7 @@ static struct kobj_attribute uv_query_max_guest_addr_attr =
+ 
+ static struct attribute *uv_query_attrs[] = {
+ 	&uv_query_facilities_attr.attr,
++	&uv_query_feature_indications_attr.attr,
+ 	&uv_query_max_guest_cpus_attr.attr,
+ 	&uv_query_max_guest_vms_attr.attr,
+ 	&uv_query_max_guest_addr_attr.attr,
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 24ad447e648c1..dd7136b3ed9a8 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -323,31 +323,31 @@ static void allow_cpu_feat(unsigned long nr)
+ 
+ static inline int plo_test_bit(unsigned char nr)
+ {
+-	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
++	unsigned long function = (unsigned long)nr | 0x100;
+ 	int cc;
+ 
+ 	asm volatile(
++		"	lgr	0,%[function]\n"
+ 		/* Parameter registers are ignored for "test bit" */
+ 		"	plo	0,0,0,0(0)\n"
+ 		"	ipm	%0\n"
+ 		"	srl	%0,28\n"
+ 		: "=d" (cc)
+-		: "d" (r0)
+-		: "cc");
++		: [function] "d" (function)
++		: "cc", "0");
+ 	return cc == 0;
+ }
+ 
+ static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
+ {
+-	register unsigned long r0 asm("0") = 0;	/* query function */
+-	register unsigned long r1 asm("1") = (unsigned long) query;
+-
+ 	asm volatile(
+-		/* Parameter regs are ignored */
++		"	lghi	0,0\n"
++		"	lgr	1,%[query]\n"
++		/* Parameter registers are ignored */
+ 		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
+ 		:
+-		: "d" (r0), "a" (r1), [opc] "i" (opcode)
+-		: "cc", "memory");
++		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
++		: "cc", "memory", "0", "1");
+ }
+ 
+ #define INSN_SORTL 0xb938
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index e30c7c781172c..62e62cb88c845 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -791,6 +791,32 @@ void do_secure_storage_access(struct pt_regs *regs)
+ 	struct page *page;
+ 	int rc;
+ 
++	/*
++	 * bit 61 tells us if the address is valid, if it's not we
++	 * have a major problem and should stop the kernel or send a
++	 * SIGSEGV to the process. Unfortunately bit 61 is not
++	 * reliable without the misc UV feature so we need to check
++	 * for that as well.
++	 */
++	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
++	    !test_bit_inv(61, &regs->int_parm_long)) {
++		/*
++		 * When this happens, userspace did something that it
++		 * was not supposed to do, e.g. branching into secure
++		 * memory. Trigger a segmentation fault.
++		 */
++		if (user_mode(regs)) {
++			send_sig(SIGSEGV, current, 0);
++			return;
++		}
++
++		/*
++		 * The kernel should never run into this case and we
++		 * have no way out of this situation.
++		 */
++		panic("Unexpected PGM 0x3d with TEID bit 61=0");
++	}
++
+ 	switch (get_fault_type(regs)) {
+ 	case USER_FAULT:
+ 		mm = current->mm;
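
test_bit_inv() uses s390's inverted (MSB-first) bit numbering, so "bit 61" of the 64-bit TEID is bit 63 - 61 = 2 in the usual LSB-first numbering. A standalone sketch of that lookup (simplified single-word version):

	#include <stdio.h>

	/* MSB-first bit test: bit 0 is the most significant bit. */
	static int test_bit_inv(unsigned int nr, unsigned long word)
	{
		return (word >> (63 - nr)) & 1;
	}

	int main(void)
	{
		unsigned long teid = 1UL << 2;	/* TEID with bit 61 (inverted) set */

		printf("address valid (bit 61): %d\n", test_bit_inv(61, teid));
		return 0;
	}
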
+diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
+index 372acdc9033eb..65924d9ec2459 100644
+--- a/arch/sh/kernel/smp.c
++++ b/arch/sh/kernel/smp.c
+@@ -186,8 +186,6 @@ asmlinkage void start_secondary(void)
+ 
+ 	per_cpu_trap_init();
+ 
+-	preempt_disable();
+-
+ 	notify_cpu_starting(cpu);
+ 
+ 	local_irq_enable();
+diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
+index 50c127ab46d5b..22b148e5a5f88 100644
+--- a/arch/sparc/kernel/smp_32.c
++++ b/arch/sparc/kernel/smp_32.c
+@@ -348,7 +348,6 @@ static void sparc_start_secondary(void *arg)
+ 	 */
+ 	arch_cpu_pre_starting(arg);
+ 
+-	preempt_disable();
+ 	cpu = smp_processor_id();
+ 
+ 	notify_cpu_starting(cpu);
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index e38d8bf454e86..ae5faa1d989d2 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -138,9 +138,6 @@ void smp_callin(void)
+ 
+ 	set_cpu_online(cpuid, true);
+ 
+-	/* idle thread is expected to have preempt disabled */
+-	preempt_disable();
+-
+ 	local_irq_enable();
+ 
+ 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
+index 5af8021b98cea..11b4c83c715e3 100644
+--- a/arch/x86/crypto/curve25519-x86_64.c
++++ b/arch/x86/crypto/curve25519-x86_64.c
+@@ -1500,7 +1500,7 @@ static int __init curve25519_mod_init(void)
+ static void __exit curve25519_mod_exit(void)
+ {
+ 	if (IS_REACHABLE(CONFIG_CRYPTO_KPP) &&
+-	    (boot_cpu_has(X86_FEATURE_BMI2) || boot_cpu_has(X86_FEATURE_ADX)))
++	    static_branch_likely(&curve25519_use_bmi2_adx))
+ 		crypto_unregister_kpp(&curve25519_alg);
+ }
+ 
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 400908dff42eb..7aa1be30b6473 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -506,7 +506,7 @@ SYM_CODE_START(\asmsym)
+ 
+ 	movq	%rsp, %rdi		/* pt_regs pointer */
+ 
+-	call	\cfunc
++	call	kernel_\cfunc
+ 
+ 	/*
+ 	 * No need to switch back to the IST stack. The current stack is either
+@@ -517,7 +517,7 @@ SYM_CODE_START(\asmsym)
+ 
+ 	/* Switch to the regular task stack */
+ .Lfrom_usermode_switch_stack_\@:
+-	idtentry_body safe_stack_\cfunc, has_error_code=1
++	idtentry_body user_\cfunc, has_error_code=1
+ 
+ _ASM_NOKPROBE(\asmsym)
+ SYM_CODE_END(\asmsym)
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 77fe4fece6798..ea7a9319e9181 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -280,6 +280,8 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
+ 	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+ 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ 	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
++	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
++	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
+ 	EVENT_EXTRA_END
+ };
+ 
+@@ -3973,8 +3975,10 @@ spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ 	 * The :ppp indicates the Precise Distribution (PDist) facility, which
+ 	 * is only supported on the GP counter 0. If a :ppp event which is not
+ 	 * available on the GP counter 0, error out.
++	 * Exception: Instruction PDIR is only available on the fixed counter 0.
+ 	 */
+-	if (event->attr.precise_ip == 3) {
++	if ((event->attr.precise_ip == 3) &&
++	    !constraint_match(&fixed0_constraint, event->hw.config)) {
+ 		if (c->idxmsk64 & BIT_ULL(0))
+ 			return &counter0_constraint;
+ 
+diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
+index 06b0789d61b91..0f1d17bb9d2f4 100644
+--- a/arch/x86/include/asm/idtentry.h
++++ b/arch/x86/include/asm/idtentry.h
+@@ -312,8 +312,8 @@ static __always_inline void __##func(struct pt_regs *regs)
+  */
+ #define DECLARE_IDTENTRY_VC(vector, func)				\
+ 	DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func);			\
+-	__visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code);	\
+-	__visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code)
++	__visible noinstr void kernel_##func(struct pt_regs *regs, unsigned long error_code);	\
++	__visible noinstr void   user_##func(struct pt_regs *regs, unsigned long error_code)
+ 
+ /**
+  * DEFINE_IDTENTRY_IST - Emit code for IST entry points
+@@ -355,33 +355,24 @@ static __always_inline void __##func(struct pt_regs *regs)
+ 	DEFINE_IDTENTRY_RAW_ERRORCODE(func)
+ 
+ /**
+- * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler
+-				   which runs on a safe stack.
++ * DEFINE_IDTENTRY_VC_KERNEL - Emit code for VMM communication handler
++			       when raised from kernel mode
+  * @func:	Function name of the entry point
+  *
+  * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+  */
+-#define DEFINE_IDTENTRY_VC_SAFE_STACK(func)				\
+-	DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func)
++#define DEFINE_IDTENTRY_VC_KERNEL(func)				\
++	DEFINE_IDTENTRY_RAW_ERRORCODE(kernel_##func)
+ 
+ /**
+- * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler
+-			    which runs on the VC fall-back stack
++ * DEFINE_IDTENTRY_VC_USER - Emit code for VMM communication handler
++			     when raised from user mode
+  * @func:	Function name of the entry point
+  *
+  * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+  */
+-#define DEFINE_IDTENTRY_VC_IST(func)				\
+-	DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func)
+-
+-/**
+- * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler
+- * @func:	Function name of the entry point
+- *
+- * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+- */
+-#define DEFINE_IDTENTRY_VC(func)					\
+-	DEFINE_IDTENTRY_RAW_ERRORCODE(func)
++#define DEFINE_IDTENTRY_VC_USER(func)				\
++	DEFINE_IDTENTRY_RAW_ERRORCODE(user_##func)
+ 
+ #else	/* CONFIG_X86_64 */
+ 
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index ac7c786fa09f9..0758ff3008c6d 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -85,7 +85,7 @@
+ #define KVM_REQ_APICV_UPDATE \
+ 	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+ #define KVM_REQ_TLB_FLUSH_CURRENT	KVM_ARCH_REQ(26)
+-#define KVM_REQ_HV_TLB_FLUSH \
++#define KVM_REQ_TLB_FLUSH_GUEST \
+ 	KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
+ #define KVM_REQ_APF_READY		KVM_ARCH_REQ(28)
+ #define KVM_REQ_MSR_FILTER_CHANGED	KVM_ARCH_REQ(29)
+@@ -1433,6 +1433,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+ 		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
+ 		u64 acc_track_mask, u64 me_mask);
+ 
++void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
+ void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
+ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
+ 				      struct kvm_memory_slot *memslot,
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index f8cb8af4de5ce..fe5efbcba8240 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -44,7 +44,7 @@ static __always_inline void preempt_count_set(int pc)
+ #define init_task_preempt_count(p) do { } while (0)
+ 
+ #define init_idle_preempt_count(p, cpu) do { \
+-	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
++	per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
+ } while (0)
+ 
+ /*
+diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
+index 5fdfcb47000f9..054604aba9f00 100644
+--- a/arch/x86/include/uapi/asm/hwcap2.h
++++ b/arch/x86/include/uapi/asm/hwcap2.h
+@@ -2,10 +2,12 @@
+ #ifndef _ASM_X86_HWCAP2_H
+ #define _ASM_X86_HWCAP2_H
+ 
++#include <linux/const.h>
++
+ /* MONITOR/MWAIT enabled in Ring 3 */
+-#define HWCAP2_RING3MWAIT		(1 << 0)
++#define HWCAP2_RING3MWAIT		_BITUL(0)
+ 
+ /* Kernel allows FSGSBASE instructions available in Ring 3 */
+-#define HWCAP2_FSGSBASE			BIT(1)
++#define HWCAP2_FSGSBASE			_BITUL(1)
+ 
+ #endif
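
The hunk above swaps BIT() for _BITUL() because this is a UAPI header: BIT() is a kernel-internal macro that userspace never sees, while _BITUL() from <linux/const.h> works from both C and assembly and yields an unsigned long. A minimal userspace sketch of consuming these flags (not part of the patch; it assumes glibc's getauxval() and the exported x86 <asm/hwcap2.h> header):

/* build: gcc -O2 probe_hwcap2.c */
#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), AT_HWCAP2 */
#include <asm/hwcap2.h>		/* HWCAP2_RING3MWAIT, HWCAP2_FSGSBASE */

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("ring3 mwait: %s\n", (hwcap2 & HWCAP2_RING3MWAIT) ? "yes" : "no");
	printf("fsgsbase:    %s\n", (hwcap2 & HWCAP2_FSGSBASE) ? "yes" : "no");
	return 0;
}
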
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index e88bc296afca0..a803fc423cb7f 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -245,7 +245,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
+ 	for_each_present_cpu(i) {
+ 		if (i == 0)
+ 			continue;
+-		ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
++		ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
+ 		BUG_ON(ret);
+ 	}
+ 
+diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
+index a4b5af03dcc1b..534cc3f78c6be 100644
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -549,6 +549,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
+ 	INTEL_CNL_IDS(&gen9_early_ops),
+ 	INTEL_ICL_11_IDS(&gen11_early_ops),
+ 	INTEL_EHL_IDS(&gen11_early_ops),
++	INTEL_JSL_IDS(&gen11_early_ops),
+ 	INTEL_TGL_12_IDS(&gen11_early_ops),
+ 	INTEL_RKL_IDS(&gen11_early_ops),
+ };
+diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
+index e0cdab7cb632b..f3202b2e3c157 100644
+--- a/arch/x86/kernel/sev-es.c
++++ b/arch/x86/kernel/sev-es.c
+@@ -12,7 +12,6 @@
+ #include <linux/sched/debug.h>	/* For show_regs() */
+ #include <linux/percpu-defs.h>
+ #include <linux/mem_encrypt.h>
+-#include <linux/lockdep.h>
+ #include <linux/printk.h>
+ #include <linux/mm_types.h>
+ #include <linux/set_memory.h>
+@@ -180,11 +179,19 @@ void noinstr __sev_es_ist_exit(void)
+ 	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
+ }
+ 
+-static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
++/*
++ * Nothing shall interrupt this code path while holding the per-CPU
++ * GHCB. The backup GHCB is only for NMIs interrupting this path.
++ *
++ * Callers must disable local interrupts around it.
++ */
++static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
+ {
+ 	struct sev_es_runtime_data *data;
+ 	struct ghcb *ghcb;
+ 
++	WARN_ON(!irqs_disabled());
++
+ 	data = this_cpu_read(runtime_data);
+ 	ghcb = &data->ghcb_page;
+ 
+@@ -201,7 +208,9 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+ 			data->ghcb_active        = false;
+ 			data->backup_ghcb_active = false;
+ 
++			instrumentation_begin();
+ 			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
++			instrumentation_end();
+ 		}
+ 
+ 		/* Mark backup_ghcb active before writing to it */
+@@ -452,11 +461,13 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
+ /* Include code shared with pre-decompression boot stage */
+ #include "sev-es-shared.c"
+ 
+-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
++static noinstr void __sev_put_ghcb(struct ghcb_state *state)
+ {
+ 	struct sev_es_runtime_data *data;
+ 	struct ghcb *ghcb;
+ 
++	WARN_ON(!irqs_disabled());
++
+ 	data = this_cpu_read(runtime_data);
+ 	ghcb = &data->ghcb_page;
+ 
+@@ -480,7 +491,7 @@ void noinstr __sev_es_nmi_complete(void)
+ 	struct ghcb_state state;
+ 	struct ghcb *ghcb;
+ 
+-	ghcb = sev_es_get_ghcb(&state);
++	ghcb = __sev_get_ghcb(&state);
+ 
+ 	vc_ghcb_invalidate(ghcb);
+ 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
+@@ -490,7 +501,7 @@ void noinstr __sev_es_nmi_complete(void)
+ 	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
+ 	VMGEXIT();
+ 
+-	sev_es_put_ghcb(&state);
++	__sev_put_ghcb(&state);
+ }
+ 
+ static u64 get_jump_table_addr(void)
+@@ -502,7 +513,7 @@ static u64 get_jump_table_addr(void)
+ 
+ 	local_irq_save(flags);
+ 
+-	ghcb = sev_es_get_ghcb(&state);
++	ghcb = __sev_get_ghcb(&state);
+ 
+ 	vc_ghcb_invalidate(ghcb);
+ 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
+@@ -516,7 +527,7 @@ static u64 get_jump_table_addr(void)
+ 	    ghcb_sw_exit_info_2_is_valid(ghcb))
+ 		ret = ghcb->save.sw_exit_info_2;
+ 
+-	sev_es_put_ghcb(&state);
++	__sev_put_ghcb(&state);
+ 
+ 	local_irq_restore(flags);
+ 
+@@ -641,7 +652,7 @@ static void sev_es_ap_hlt_loop(void)
+ 	struct ghcb_state state;
+ 	struct ghcb *ghcb;
+ 
+-	ghcb = sev_es_get_ghcb(&state);
++	ghcb = __sev_get_ghcb(&state);
+ 
+ 	while (true) {
+ 		vc_ghcb_invalidate(ghcb);
+@@ -658,7 +669,7 @@ static void sev_es_ap_hlt_loop(void)
+ 			break;
+ 	}
+ 
+-	sev_es_put_ghcb(&state);
++	__sev_put_ghcb(&state);
+ }
+ 
+ /*
+@@ -748,7 +759,7 @@ void __init sev_es_init_vc_handling(void)
+ 	sev_es_setup_play_dead();
+ 
+ 	/* Secondary CPUs use the runtime #VC handler */
+-	initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
++	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
+ }
+ 
+ static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
+@@ -1186,14 +1197,6 @@ static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
+ 	return ES_EXCEPTION;
+ }
+ 
+-static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
+-{
+-	if (user_mode(regs))
+-		noist_exc_debug(regs);
+-	else
+-		exc_debug(regs);
+-}
+-
+ static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
+ 					 struct ghcb *ghcb,
+ 					 unsigned long exit_code)
+@@ -1289,44 +1292,15 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
+ 	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
+ }
+ 
+-/*
+- * Main #VC exception handler. It is called when the entry code was able to
+- * switch off the IST to a safe kernel stack.
+- *
+- * With the current implementation it is always possible to switch to a safe
+- * stack because #VC exceptions only happen at known places, like intercepted
+- * instructions or accesses to MMIO areas/IO ports. They can also happen with
+- * code instrumentation when the hypervisor intercepts #DB, but the critical
+- * paths are forbidden to be instrumented, so #DB exceptions currently also
+- * only happen in safe places.
+- */
+-DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
++static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
+ {
+-	irqentry_state_t irq_state;
+ 	struct ghcb_state state;
+ 	struct es_em_ctxt ctxt;
+ 	enum es_result result;
+ 	struct ghcb *ghcb;
++	bool ret = true;
+ 
+-	/*
+-	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+-	 */
+-	if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
+-		vc_handle_trap_db(regs);
+-		return;
+-	}
+-
+-	irq_state = irqentry_nmi_enter(regs);
+-	lockdep_assert_irqs_disabled();
+-	instrumentation_begin();
+-
+-	/*
+-	 * This is invoked through an interrupt gate, so IRQs are disabled. The
+-	 * code below might walk page-tables for user or kernel addresses, so
+-	 * keep the IRQs disabled to protect us against concurrent TLB flushes.
+-	 */
+-
+-	ghcb = sev_es_get_ghcb(&state);
++	ghcb = __sev_get_ghcb(&state);
+ 
+ 	vc_ghcb_invalidate(ghcb);
+ 	result = vc_init_em_ctxt(&ctxt, regs, error_code);
+@@ -1334,7 +1308,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+ 	if (result == ES_OK)
+ 		result = vc_handle_exitcode(&ctxt, ghcb, error_code);
+ 
+-	sev_es_put_ghcb(&state);
++	__sev_put_ghcb(&state);
+ 
+ 	/* Done - now check the result */
+ 	switch (result) {
+@@ -1344,15 +1318,18 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+ 	case ES_UNSUPPORTED:
+ 		pr_err_ratelimited("Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
+ 				   error_code, regs->ip);
+-		goto fail;
++		ret = false;
++		break;
+ 	case ES_VMM_ERROR:
+ 		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
+ 				   error_code, regs->ip);
+-		goto fail;
++		ret = false;
++		break;
+ 	case ES_DECODE_FAILED:
+ 		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
+ 				   error_code, regs->ip);
+-		goto fail;
++		ret = false;
++		break;
+ 	case ES_EXCEPTION:
+ 		vc_forward_exception(&ctxt);
+ 		break;
+@@ -1368,24 +1345,52 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+ 		BUG();
+ 	}
+ 
+-out:
+-	instrumentation_end();
+-	irqentry_nmi_exit(regs, irq_state);
++	return ret;
++}
+ 
+-	return;
++static __always_inline bool vc_is_db(unsigned long error_code)
++{
++	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
++}
+ 
+-fail:
+-	if (user_mode(regs)) {
+-		/*
+-		 * Do not kill the machine if user-space triggered the
+-		 * exception. Send SIGBUS instead and let user-space deal with
+-		 * it.
+-		 */
+-		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
+-	} else {
+-		pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
+-			 result);
++/*
++ * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
++ * and will panic when an error happens.
++ */
++DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
++{
++	irqentry_state_t irq_state;
+ 
++	/*
++	 * With the current implementation it is always possible to switch to a
++	 * safe stack because #VC exceptions only happen at known places, like
++	 * intercepted instructions or accesses to MMIO areas/IO ports. They can
++	 * also happen with code instrumentation when the hypervisor intercepts
++	 * #DB, but the critical paths are forbidden to be instrumented, so #DB
++	 * exceptions currently also only happen in safe places.
++	 *
++	 * But keep this here in case the noinstr annotations are violated due
++	 * to a bug elsewhere.
++	 */
++	if (unlikely(on_vc_fallback_stack(regs))) {
++		instrumentation_begin();
++		panic("Can't handle #VC exception from unsupported context\n");
++		instrumentation_end();
++	}
++
++	/*
++	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
++	 */
++	if (vc_is_db(error_code)) {
++		exc_debug(regs);
++		return;
++	}
++
++	irq_state = irqentry_nmi_enter(regs);
++
++	instrumentation_begin();
++
++	if (!vc_raw_handle_exception(regs, error_code)) {
+ 		/* Show some debug info */
+ 		show_regs(regs);
+ 
+@@ -1396,23 +1401,38 @@ fail:
+ 		panic("Returned from Terminate-Request to Hypervisor\n");
+ 	}
+ 
+-	goto out;
++	instrumentation_end();
++	irqentry_nmi_exit(regs, irq_state);
+ }
+ 
+-/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
+-DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
++/*
++ * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
++ * and will kill the current task with SIGBUS when an error happens.
++ */
++DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
+ {
++	/*
++	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
++	 */
++	if (vc_is_db(error_code)) {
++		noist_exc_debug(regs);
++		return;
++	}
++
++	irqentry_enter_from_user_mode(regs);
+ 	instrumentation_begin();
+-	panic("Can't handle #VC exception from unsupported context\n");
+-	instrumentation_end();
+-}
+ 
+-DEFINE_IDTENTRY_VC(exc_vmm_communication)
+-{
+-	if (likely(!on_vc_fallback_stack(regs)))
+-		safe_stack_exc_vmm_communication(regs, error_code);
+-	else
+-		ist_exc_vmm_communication(regs, error_code);
++	if (!vc_raw_handle_exception(regs, error_code)) {
++		/*
++		 * Do not kill the machine if user-space triggered the
++		 * exception. Send SIGBUS instead and let user-space deal with
++		 * it.
++		 */
++		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
++	}
++
++	instrumentation_end();
++	irqentry_exit_to_user_mode(regs);
+ }
+ 
+ bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 363b36bbd791a..ebc4b13b74a47 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -236,7 +236,6 @@ static void notrace start_secondary(void *unused)
+ 	cpu_init();
+ 	rcu_cpu_starting(raw_smp_processor_id());
+ 	x86_cpuinit.early_percpu_clock_init();
+-	preempt_disable();
+ 	smp_callin();
+ 
+ 	enable_start_cpu0 = 0;
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index f70dffc2771f5..56289170753c5 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1151,7 +1151,8 @@ static struct clocksource clocksource_tsc = {
+ 	.mask			= CLOCKSOURCE_MASK(64),
+ 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
+ 				  CLOCK_SOURCE_VALID_FOR_HRES |
+-				  CLOCK_SOURCE_MUST_VERIFY,
++				  CLOCK_SOURCE_MUST_VERIFY |
++				  CLOCK_SOURCE_VERIFY_PERCPU,
+ 	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
+ 	.enable			= tsc_cs_enable,
+ 	.resume			= tsc_resume,
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 62f795352c024..0ed116b8c211d 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -185,10 +185,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ 	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
+ 
+ 	/*
+-	 * Except for the MMU, which needs to be reset after any vendor
+-	 * specific adjustments to the reserved GPA bits.
++	 * Except for the MMU, which needs to do its thing after any vendor
++	 * specific adjustments to the reserved GPA bits.
+ 	 */
+-	kvm_mmu_reset_context(vcpu);
++	kvm_mmu_after_set_cpuid(vcpu);
+ }
+ 
+ static int is_efer_nx(void)
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index f00830e5202fe..fdd1eca717fd6 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1704,7 +1704,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
+ 	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
+ 	 * analyze it here, flush TLB regardless of the specified address space.
+ 	 */
+-	kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
++	kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
+ 				    NULL, vcpu_mask, &hv_vcpu->tlb_flush);
+ 
+ ret_success:
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index fb2231cf19b5d..7ffeeb6880d93 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4155,7 +4155,15 @@ static inline u64 reserved_hpa_bits(void)
+ void
+ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+ {
+-	bool uses_nx = context->nx ||
++	/*
++	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
++	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
++	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
++	 * The iTLB multi-hit workaround can be toggled at any time, so assume
++	 * NX can be used by any non-nested shadow MMU to avoid having to reset
++	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
++	 */
++	bool uses_nx = context->nx || !tdp_enabled ||
+ 		context->mmu_role.base.smep_andnot_wp;
+ 	struct rsvd_bits_validate *shadow_zero_check;
+ 	int i;
+@@ -4838,6 +4846,18 @@ kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
+ 	return role.base;
+ }
+ 
++void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
++{
++	/*
++	 * Invalidate all MMU roles to force them to reinitialize as CPUID
++	 * information is factored into reserved bit calculations.
++	 */
++	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
++	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
++	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
++	kvm_mmu_reset_context(vcpu);
++}
++
+ void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
+ {
+ 	kvm_mmu_unload(vcpu);
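
kvm_mmu_after_set_cpuid() above clears the valid bit of each cached mmu_role, so the next MMU context load recomputes everything that was derived from the stale CPUID. A standalone sketch of that validity-bit caching pattern (hypothetical names, not KVM's structures):

#include <stdbool.h>
#include <stdio.h>

struct cached_role {
	bool valid;			/* cleared to force recomputation */
	unsigned long rsvd_bits;	/* state derived from the config */
};

/* stand-in for the real reserved-bit derivation */
static unsigned long compute_rsvd_bits(unsigned int maxphyaddr)
{
	return ~((1UL << maxphyaddr) - 1);
}

static unsigned long get_rsvd_bits(struct cached_role *role, unsigned int maxphyaddr)
{
	if (!role->valid) {		/* lazily rebuild the cached state */
		role->rsvd_bits = compute_rsvd_bits(maxphyaddr);
		role->valid = true;
	}
	return role->rsvd_bits;
}

int main(void)
{
	struct cached_role role = { 0 };

	printf("%#lx\n", get_rsvd_bits(&role, 48));
	role.valid = false;		/* like kvm_mmu_after_set_cpuid() */
	printf("%#lx\n", get_rsvd_bits(&role, 52));
	return 0;
}
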
+diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
+index a2becf9c2553e..de6407610b19a 100644
+--- a/arch/x86/kvm/mmu/paging_tmpl.h
++++ b/arch/x86/kvm/mmu/paging_tmpl.h
+@@ -471,8 +471,7 @@ retry_walk:
+ 
+ error:
+ 	errcode |= write_fault | user_fault;
+-	if (fetch_fault && (mmu->nx ||
+-			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
++	if (fetch_fault && (mmu->nx || mmu->mmu_role.ext.cr4_smep))
+ 		errcode |= PFERR_FETCH_MASK;
+ 
+ 	walker->fault.vector = PF_VECTOR;
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 018d82e73e311..5c83b912becc1 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -745,7 +745,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
+ 					  kvm_pfn_t pfn, bool prefault)
+ {
+ 	u64 new_spte;
+-	int ret = 0;
++	int ret = RET_PF_FIXED;
+ 	int make_spte_ret = 0;
+ 
+ 	if (unlikely(is_noslot_pfn(pfn)))
+@@ -777,13 +777,16 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
+ 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
+ 				     new_spte);
+ 		ret = RET_PF_EMULATE;
+-	} else
++	} else {
+ 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
+ 				       rcu_dereference(iter->sptep));
++	}
+ 
+-	trace_kvm_mmu_set_spte(iter->level, iter->gfn,
+-			       rcu_dereference(iter->sptep));
+-	if (!prefault)
++	/*
++	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
++	 * consistent with legacy MMU behavior.
++	 */
++	if (ret != RET_PF_SPURIOUS)
+ 		vcpu->stat.pf_fixed++;
+ 
+ 	return ret;
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 4ba2a43e188b5..618dcf11d6885 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -1132,12 +1132,19 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
+ 
+ 	/*
+ 	 * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
+-	 * flushes are handled by nested_vmx_transition_tlb_flush().  See
+-	 * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
++	 * flushes are handled by nested_vmx_transition_tlb_flush().
+ 	 */
+-	if (!nested_ept)
+-		kvm_mmu_new_pgd(vcpu, cr3, true,
+-				!nested_vmx_transition_mmu_sync(vcpu));
++	if (!nested_ept) {
++		kvm_mmu_new_pgd(vcpu, cr3, true, true);
++
++		/*
++		 * A TLB flush on VM-Enter/VM-Exit flushes all linear mappings
++		 * across all PCIDs, i.e. all PGDs need to be synchronized.
++		 * See nested_vmx_transition_mmu_sync() for more details.
++		 */
++		if (nested_vmx_transition_mmu_sync(vcpu))
++			kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
++	}
+ 
+ 	vcpu->arch.cr3 = cr3;
+ 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
+@@ -5465,8 +5472,6 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
+ {
+ 	u32 index = kvm_rcx_read(vcpu);
+ 	u64 new_eptp;
+-	bool accessed_dirty;
+-	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+ 
+ 	if (!nested_cpu_has_eptp_switching(vmcs12) ||
+ 	    !nested_cpu_has_ept(vmcs12))
+@@ -5475,13 +5480,10 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
+ 	if (index >= VMFUNC_EPTP_ENTRIES)
+ 		return 1;
+ 
+-
+ 	if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
+ 				     &new_eptp, index * 8, 8))
+ 		return 1;
+ 
+-	accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
+-
+ 	/*
+ 	 * If the (L2) guest does a vmfunc to the currently
+ 	 * active ept pointer, we don't have to do anything else
+@@ -5490,8 +5492,6 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
+ 		if (!nested_vmx_check_eptp(vcpu, new_eptp))
+ 			return 1;
+ 
+-		mmu->ept_ad = accessed_dirty;
+-		mmu->mmu_role.base.ad_disabled = !accessed_dirty;
+ 		vmcs12->ept_pointer = new_eptp;
+ 
+ 		kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+@@ -5517,7 +5517,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
+ 	}
+ 
+ 	vmcs12 = get_vmcs12(vcpu);
+-	if ((vmcs12->vm_function_control & (1 << function)) == 0)
++	if (!(vmcs12->vm_function_control & BIT_ULL(function)))
+ 		goto fail;
+ 
+ 	switch (function) {
+@@ -5775,6 +5775,9 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
+ 		else if (is_breakpoint(intr_info) &&
+ 			 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ 			return true;
++		else if (is_alignment_check(intr_info) &&
++			 !vmx_guest_inject_ac(vcpu))
++			return true;
+ 		return false;
+ 	case EXIT_REASON_EXTERNAL_INTERRUPT:
+ 		return true;
+diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
+index 1472c6c376f74..571d9ad80a59e 100644
+--- a/arch/x86/kvm/vmx/vmcs.h
++++ b/arch/x86/kvm/vmx/vmcs.h
+@@ -117,6 +117,11 @@ static inline bool is_gp_fault(u32 intr_info)
+ 	return is_exception_n(intr_info, GP_VECTOR);
+ }
+ 
++static inline bool is_alignment_check(u32 intr_info)
++{
++	return is_exception_n(intr_info, AC_VECTOR);
++}
++
+ static inline bool is_machine_check(u32 intr_info)
+ {
+ 	return is_exception_n(intr_info, MC_VECTOR);
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index ae63d59be38c7..cd2ca9093e8dc 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -4796,7 +4796,7 @@ static int handle_machine_check(struct kvm_vcpu *vcpu)
+  *  - Guest has #AC detection enabled in CR0
+  *  - Guest EFLAGS has AC bit set
+  */
+-static inline bool guest_inject_ac(struct kvm_vcpu *vcpu)
++bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
+ {
+ 	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+ 		return true;
+@@ -4905,7 +4905,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
+ 		kvm_run->debug.arch.exception = ex_no;
+ 		break;
+ 	case AC_VECTOR:
+-		if (guest_inject_ac(vcpu)) {
++		if (vmx_guest_inject_ac(vcpu)) {
+ 			kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
+ 			return 1;
+ 		}
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index 89da5e1251f18..723782cd0511e 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -379,6 +379,7 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
+ 		   int root_level);
+ 
++bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
+ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
+ void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
+ bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a6ca7e657af27..615dd236e8429 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9022,7 +9022,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		}
+ 		if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+ 			kvm_vcpu_flush_tlb_current(vcpu);
+-		if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
++		if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
+ 			kvm_vcpu_flush_tlb_guest(vcpu);
+ 
+ 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
+@@ -10302,6 +10302,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ 
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ {
++	unsigned long old_cr0 = kvm_read_cr0(vcpu);
++
+ 	kvm_lapic_reset(vcpu, init_event);
+ 
+ 	vcpu->arch.hflags = 0;
+@@ -10370,6 +10372,17 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ 	vcpu->arch.ia32_xss = 0;
+ 
+ 	static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
++
++	/*
++	 * Reset the MMU context if paging was enabled prior to INIT (which is
++	 * implied if CR0.PG=1 as CR0 will be '0' prior to RESET).  Unlike the
++	 * standard CR0/CR4/EFER modification paths, only CR0.PG needs to be
++	 * checked because it is unconditionally cleared on INIT and all other
++	 * paging related bits are ignored if paging is disabled, i.e. CR0.WP,
++	 * CR4, and EFER changes are all irrelevant if CR0.PG was '0'.
++	 */
++	if (old_cr0 & X86_CR0_PG)
++		kvm_mmu_reset_context(vcpu);
+ }
+ 
+ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 7f1b3a862e141..1fb0c37e48cb1 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1297,7 +1297,7 @@ st:			if (is_imm8(insn->off))
+ 			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
+ 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
+ 				struct exception_table_entry *ex;
+-				u8 *_insn = image + proglen;
++				u8 *_insn = image + proglen + (start_of_ldx - temp);
+ 				s64 delta;
+ 
+ 				/* populate jmp_offset for JMP above */
+diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
+index cd85a7a2722ba..1254da07ead1f 100644
+--- a/arch/xtensa/kernel/smp.c
++++ b/arch/xtensa/kernel/smp.c
+@@ -145,7 +145,6 @@ void secondary_start_kernel(void)
+ 	cpumask_set_cpu(cpu, mm_cpumask(mm));
+ 	enter_lazy_tlb(mm, current);
+ 
+-	preempt_disable();
+ 	trace_hardirqs_off();
+ 
+ 	calibrate_delay();
+diff --git a/block/bio.c b/block/bio.c
+index 50e579088aca4..b00c5a88a7437 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1412,8 +1412,7 @@ static inline bool bio_remaining_done(struct bio *bio)
+  *
+  *   bio_endio() can be called several times on a bio that has been chained
+  *   using bio_chain().  The ->bi_end_io() function will only be called the
+- *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
+- *   generated if BIO_TRACE_COMPLETION is set.
++ *   last time.
+  **/
+ void bio_endio(struct bio *bio)
+ {
+@@ -1426,6 +1425,11 @@ again:
+ 	if (bio->bi_bdev)
+ 		rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
+ 
++	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
++		trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
++		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
++	}
++
+ 	/*
+ 	 * Need to have a real endio function for chained bios, otherwise
+ 	 * various corner cases will break (like stacking block devices that
+@@ -1439,11 +1443,6 @@ again:
+ 		goto again;
+ 	}
+ 
+-	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+-		trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
+-		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
+-	}
+-
+ 	blk_throtl_bio_endio(bio);
+ 	/* release cgroup info */
+ 	bio_uninit(bio);
+diff --git a/block/blk-flush.c b/block/blk-flush.c
+index 7942ca6ed3211..1002f6c581816 100644
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -219,8 +219,6 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
+ 	unsigned long flags = 0;
+ 	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
+ 
+-	blk_account_io_flush(flush_rq);
+-
+ 	/* release the tag's ownership to the req cloned from */
+ 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
+ 
+@@ -230,6 +228,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
+ 		return;
+ 	}
+ 
++	blk_account_io_flush(flush_rq);
+ 	/*
+ 	 * Flush request has to be marked as IDLE when it is really ended
+ 	 * because its .end_io() is called from timeout code path too for
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 4d97fb6dd2267..bcdff1879c346 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -559,10 +559,14 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
+ static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
+ 		unsigned int nr_phys_segs)
+ {
+-	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
++	if (blk_integrity_merge_bio(req->q, req, bio) == false)
+ 		goto no_merge;
+ 
+-	if (blk_integrity_merge_bio(req->q, req, bio) == false)
++	/* a discard request merge won't add a new segment */
++	if (req_op(req) == REQ_OP_DISCARD)
++		return 1;
++
++	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
+ 		goto no_merge;
+ 
+ 	/*
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 9c92053e704dc..c4f2f6c123aed 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -199,6 +199,20 @@ struct bt_iter_data {
+ 	bool reserved;
+ };
+ 
++static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
++		unsigned int bitnr)
++{
++	struct request *rq;
++	unsigned long flags;
++
++	spin_lock_irqsave(&tags->lock, flags);
++	rq = tags->rqs[bitnr];
++	if (!rq || !refcount_inc_not_zero(&rq->ref))
++		rq = NULL;
++	spin_unlock_irqrestore(&tags->lock, flags);
++	return rq;
++}
++
+ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
+ {
+ 	struct bt_iter_data *iter_data = data;
+@@ -206,18 +220,22 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
+ 	struct blk_mq_tags *tags = hctx->tags;
+ 	bool reserved = iter_data->reserved;
+ 	struct request *rq;
++	bool ret = true;
+ 
+ 	if (!reserved)
+ 		bitnr += tags->nr_reserved_tags;
+-	rq = tags->rqs[bitnr];
+-
+ 	/*
+ 	 * We can hit rq == NULL here, because the tagging functions
+ 	 * test and set the bit before assigning ->rqs[].
+ 	 */
+-	if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx)
+-		return iter_data->fn(hctx, rq, iter_data->data, reserved);
+-	return true;
++	rq = blk_mq_find_and_get_req(tags, bitnr);
++	if (!rq)
++		return true;
++
++	if (rq->q == hctx->queue && rq->mq_hctx == hctx)
++		ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
++	blk_mq_put_rq_ref(rq);
++	return ret;
+ }
+ 
+ /**
+@@ -264,6 +282,8 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
+ 	struct blk_mq_tags *tags = iter_data->tags;
+ 	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
+ 	struct request *rq;
++	bool ret = true;
++	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);
+ 
+ 	if (!reserved)
+ 		bitnr += tags->nr_reserved_tags;
+@@ -272,16 +292,19 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
+ 	 * We can hit rq == NULL here, because the tagging functions
+ 	 * test and set the bit before assigning ->rqs[].
+ 	 */
+-	if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
++	if (iter_static_rqs)
+ 		rq = tags->static_rqs[bitnr];
+ 	else
+-		rq = tags->rqs[bitnr];
++		rq = blk_mq_find_and_get_req(tags, bitnr);
+ 	if (!rq)
+ 		return true;
+-	if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
+-	    !blk_mq_request_started(rq))
+-		return true;
+-	return iter_data->fn(rq, iter_data->data, reserved);
++
++	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
++	    blk_mq_request_started(rq))
++		ret = iter_data->fn(rq, iter_data->data, reserved);
++	if (!iter_static_rqs)
++		blk_mq_put_rq_ref(rq);
++	return ret;
+ }
+ 
+ /**
+@@ -348,6 +371,9 @@ void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
+  *		indicates whether or not @rq is a reserved request. Return
+  *		true to continue iterating tags, false to stop.
+  * @priv:	Will be passed as second argument to @fn.
++ *
++ * We grab one request reference before calling @fn and release it after
++ * @fn returns.
+  */
+ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+ 		busy_tag_iter_fn *fn, void *priv)
+@@ -516,6 +542,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
+ 
+ 	tags->nr_tags = total_tags;
+ 	tags->nr_reserved_tags = reserved_tags;
++	spin_lock_init(&tags->lock);
+ 
+ 	if (flags & BLK_MQ_F_TAG_HCTX_SHARED)
+ 		return tags;
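
The core of the change above is blk_mq_find_and_get_req(): under tags->lock, an iterator may only use a request if it can raise the request's reference count from a non-zero value; a count that has already hit zero means the request is being freed and must be skipped. A standalone C11 sketch of that refcount_inc_not_zero() idiom (illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
	atomic_uint refs;	/* 0 means the object is being torn down */
};

static bool get_ref_not_zero(struct obj *o)
{
	unsigned int old = atomic_load(&o->refs);

	while (old != 0) {
		/* raise the count only if it is still non-zero */
		if (atomic_compare_exchange_weak(&o->refs, &old, old + 1))
			return true;
	}
	return false;		/* too late: a free is in progress */
}

/* in the kernel this reads tags->rqs[bitnr] under tags->lock */
static struct obj *lookup_and_get(struct obj *_Atomic *slot)
{
	struct obj *o = atomic_load(slot);

	return (o && get_ref_not_zero(o)) ? o : NULL;
}
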
+diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
+index 7d3e6b333a4a9..f887988e5ef60 100644
+--- a/block/blk-mq-tag.h
++++ b/block/blk-mq-tag.h
+@@ -20,6 +20,12 @@ struct blk_mq_tags {
+ 	struct request **rqs;
+ 	struct request **static_rqs;
+ 	struct list_head page_list;
++
++	/*
++	 * Used to clear the request references in rqs[] before freeing a
++	 * request pool.
++	 */
++	spinlock_t lock;
+ };
+ 
+ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 0e120547ccb72..6a982a277176c 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -908,6 +908,14 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
+ 	return false;
+ }
+ 
++void blk_mq_put_rq_ref(struct request *rq)
++{
++	if (is_flush_rq(rq, rq->mq_hctx))
++		rq->end_io(rq, 0);
++	else if (refcount_dec_and_test(&rq->ref))
++		__blk_mq_free_request(rq);
++}
++
+ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+ 		struct request *rq, void *priv, bool reserved)
+ {
+@@ -941,11 +949,7 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+ 	if (blk_mq_req_expired(rq, next))
+ 		blk_mq_rq_timed_out(rq, reserved);
+ 
+-	if (is_flush_rq(rq, hctx))
+-		rq->end_io(rq, 0);
+-	else if (refcount_dec_and_test(&rq->ref))
+-		__blk_mq_free_request(rq);
+-
++	blk_mq_put_rq_ref(rq);
+ 	return true;
+ }
+ 
+@@ -1219,9 +1223,6 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
+ {
+ 	unsigned int ewma;
+ 
+-	if (hctx->queue->elevator)
+-		return;
+-
+ 	ewma = hctx->dispatch_busy;
+ 
+ 	if (!ewma && !busy)
+@@ -2287,6 +2288,45 @@ queue_exit:
+ 	return BLK_QC_T_NONE;
+ }
+ 
++static size_t order_to_size(unsigned int order)
++{
++	return (size_t)PAGE_SIZE << order;
++}
++
++/* called before freeing request pool in @tags */
++static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
++		struct blk_mq_tags *tags, unsigned int hctx_idx)
++{
++	struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
++	struct page *page;
++	unsigned long flags;
++
++	list_for_each_entry(page, &tags->page_list, lru) {
++		unsigned long start = (unsigned long)page_address(page);
++		unsigned long end = start + order_to_size(page->private);
++		int i;
++
++		for (i = 0; i < set->queue_depth; i++) {
++			struct request *rq = drv_tags->rqs[i];
++			unsigned long rq_addr = (unsigned long)rq;
++
++			if (rq_addr >= start && rq_addr < end) {
++				WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
++				cmpxchg(&drv_tags->rqs[i], rq, NULL);
++			}
++		}
++	}
++
++	/*
++	 * Wait until all pending iterations are done.
++	 *
++	 * The request references are cleared and are guaranteed to be
++	 * observed after the ->lock is released.
++	 */
++	spin_lock_irqsave(&drv_tags->lock, flags);
++	spin_unlock_irqrestore(&drv_tags->lock, flags);
++}
++
+ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+ 		     unsigned int hctx_idx)
+ {
+@@ -2305,6 +2345,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+ 		}
+ 	}
+ 
++	blk_mq_clear_rq_mapping(set, tags, hctx_idx);
++
+ 	while (!list_empty(&tags->page_list)) {
+ 		page = list_first_entry(&tags->page_list, struct page, lru);
+ 		list_del_init(&page->lru);
+@@ -2364,11 +2406,6 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
+ 	return tags;
+ }
+ 
+-static size_t order_to_size(unsigned int order)
+-{
+-	return (size_t)PAGE_SIZE << order;
+-}
+-
+ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ 			       unsigned int hctx_idx, int node)
+ {
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 3616453ca28c8..143afe42c63a0 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -47,6 +47,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
+ struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
+ 					struct blk_mq_ctx *start);
++void blk_mq_put_rq_ref(struct request *rq);
+ 
+ /*
+  * Internal helpers for allocating/freeing the request map
+diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
+index 2bc43e94f4c40..2bcb3495e376b 100644
+--- a/block/blk-rq-qos.h
++++ b/block/blk-rq-qos.h
+@@ -7,6 +7,7 @@
+ #include <linux/blk_types.h>
+ #include <linux/atomic.h>
+ #include <linux/wait.h>
++#include <linux/blk-mq.h>
+ 
+ #include "blk-mq-debugfs.h"
+ 
+@@ -99,8 +100,21 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
+ 
+ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
+ {
++	/*
++	 * No IO can be in flight when adding rqos, so freeze the queue; this
++	 * is fine since rq_qos is only supported for blk-mq queues.
++	 *
++	 * Reuse ->queue_lock to protect against other concurrent
++	 * rq_qos additions and deletions.
++	 */
++	blk_mq_freeze_queue(q);
++
++	spin_lock_irq(&q->queue_lock);
+ 	rqos->next = q->rq_qos;
+ 	q->rq_qos = rqos;
++	spin_unlock_irq(&q->queue_lock);
++
++	blk_mq_unfreeze_queue(q);
+ 
+ 	if (rqos->ops->debugfs_attrs)
+ 		blk_mq_debugfs_register_rqos(rqos);
+@@ -110,12 +124,22 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
+ {
+ 	struct rq_qos **cur;
+ 
++	/*
++	 * See comment in rq_qos_add() about freezing queue & using
++	 * ->queue_lock.
++	 */
++	blk_mq_freeze_queue(q);
++
++	spin_lock_irq(&q->queue_lock);
+ 	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+ 		if (*cur == rqos) {
+ 			*cur = rqos->next;
+ 			break;
+ 		}
+ 	}
++	spin_unlock_irq(&q->queue_lock);
++
++	blk_mq_unfreeze_queue(q);
+ 
+ 	blk_mq_debugfs_unregister_rqos(rqos);
+ }
+diff --git a/block/blk-wbt.c b/block/blk-wbt.c
+index 42aed0160f86a..f5e5ac915bf7c 100644
+--- a/block/blk-wbt.c
++++ b/block/blk-wbt.c
+@@ -77,7 +77,8 @@ enum {
+ 
+ static inline bool rwb_enabled(struct rq_wb *rwb)
+ {
+-	return rwb && rwb->wb_normal != 0;
++	return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
++		      rwb->wb_normal != 0;
+ }
+ 
+ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
+@@ -636,9 +637,13 @@ void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
+ void wbt_enable_default(struct request_queue *q)
+ {
+ 	struct rq_qos *rqos = wbt_rq_qos(q);
++
+ 	/* Throttling already enabled? */
+-	if (rqos)
++	if (rqos) {
++		if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
++			RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
+ 		return;
++	}
+ 
+ 	/* Queue not registered? Maybe shutting down... */
+ 	if (!blk_queue_registered(q))
+@@ -702,7 +707,7 @@ void wbt_disable_default(struct request_queue *q)
+ 	rwb = RQWB(rqos);
+ 	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
+ 		blk_stat_deactivate(rwb->cb);
+-		rwb->wb_normal = 0;
++		rwb->enable_state = WBT_STATE_OFF_DEFAULT;
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(wbt_disable_default);
+diff --git a/block/blk-wbt.h b/block/blk-wbt.h
+index 16bdc85b8df92..2eb01becde8c4 100644
+--- a/block/blk-wbt.h
++++ b/block/blk-wbt.h
+@@ -34,6 +34,7 @@ enum {
+ enum {
+ 	WBT_STATE_ON_DEFAULT	= 1,
+ 	WBT_STATE_ON_MANUAL	= 2,
++	WBT_STATE_OFF_DEFAULT
+ };
+ 
+ struct rq_wb {
+diff --git a/crypto/shash.c b/crypto/shash.c
+index 2e3433ad97629..0a0a50cb694f0 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -20,12 +20,24 @@
+ 
+ static const struct crypto_type crypto_shash_type;
+ 
+-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+-		    unsigned int keylen)
++static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
++			   unsigned int keylen)
+ {
+ 	return -ENOSYS;
+ }
+-EXPORT_SYMBOL_GPL(shash_no_setkey);
++
++/*
++ * Check whether an shash algorithm has a setkey function.
++ *
++ * For CFI compatibility, this must not be an inline function.  This is because
++ * when CFI is enabled, modules won't get the same address for shash_no_setkey
++ * (if it were exported, which inlining would require) as the core kernel will.
++ */
++bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
++{
++	return alg->setkey != shash_no_setkey;
++}
++EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey);
+ 
+ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
+ 				  unsigned int keylen)
+diff --git a/crypto/sm2.c b/crypto/sm2.c
+index b21addc3ac06a..db8a4a265669d 100644
+--- a/crypto/sm2.c
++++ b/crypto/sm2.c
+@@ -79,10 +79,17 @@ static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
+ 		goto free;
+ 
+ 	rc = -ENOMEM;
++
++	ec->Q = mpi_point_new(0);
++	if (!ec->Q)
++		goto free;
++
+ 	/* mpi_ec_setup_elliptic_curve */
+ 	ec->G = mpi_point_new(0);
+-	if (!ec->G)
++	if (!ec->G) {
++		mpi_point_release(ec->Q);
+ 		goto free;
++	}
+ 
+ 	mpi_set(ec->G->x, x);
+ 	mpi_set(ec->G->y, y);
+@@ -91,6 +98,7 @@ static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
+ 	rc = -EINVAL;
+ 	ec->n = mpi_scanval(ecp->n);
+ 	if (!ec->n) {
++		mpi_point_release(ec->Q);
+ 		mpi_point_release(ec->G);
+ 		goto free;
+ 	}
+@@ -386,27 +394,15 @@ static int sm2_set_pub_key(struct crypto_akcipher *tfm,
+ 	MPI a;
+ 	int rc;
+ 
+-	ec->Q = mpi_point_new(0);
+-	if (!ec->Q)
+-		return -ENOMEM;
+-
+ 	/* include the uncompressed flag '0x04' */
+-	rc = -ENOMEM;
+ 	a = mpi_read_raw_data(key, keylen);
+ 	if (!a)
+-		goto error;
++		return -ENOMEM;
+ 
+ 	mpi_normalize(a);
+ 	rc = sm2_ecc_os2ec(ec->Q, a);
+ 	mpi_free(a);
+-	if (rc)
+-		goto error;
+-
+-	return 0;
+ 
+-error:
+-	mpi_point_release(ec->Q);
+-	ec->Q = NULL;
+ 	return rc;
+ }
+ 
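
The sm2 fix above allocates ec->Q in sm2_ec_ctx_init() alongside the other context objects, so a single init function owns every resource and its error path releases them all. A standalone sketch of that goto-unwind pattern (hypothetical names):

#include <stdlib.h>

struct ctx {
	void *q;
	void *g;
	void *n;
};

static int ctx_init(struct ctx *c)
{
	c->q = malloc(32);
	if (!c->q)
		goto err;

	c->g = malloc(32);
	if (!c->g)
		goto err_q;

	c->n = malloc(32);
	if (!c->n)
		goto err_g;

	return 0;

	/* unwind in reverse order of acquisition */
err_g:
	free(c->g);
err_q:
	free(c->q);
err:
	return -1;
}
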
+diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
+index 700b41adf2db6..9aa82d5272720 100644
+--- a/drivers/acpi/Makefile
++++ b/drivers/acpi/Makefile
+@@ -8,6 +8,11 @@ ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
+ #
+ # ACPI Boot-Time Table Parsing
+ #
++ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y)
++tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ;
++
++endif
++
+ obj-$(CONFIG_ACPI)		+= tables.o
+ obj-$(CONFIG_X86)		+= blacklist.o
+ 
+diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
+index a89a806a7a2a9..4ee2ad234e3d6 100644
+--- a/drivers/acpi/acpi_fpdt.c
++++ b/drivers/acpi/acpi_fpdt.c
+@@ -240,8 +240,10 @@ static int __init acpi_init_fpdt(void)
+ 		return 0;
+ 
+ 	fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
+-	if (!fpdt_kobj)
++	if (!fpdt_kobj) {
++		acpi_put_table(header);
+ 		return -ENOMEM;
++	}
+ 
+ 	while (offset < header->length) {
+ 		subtable = (void *)header + offset;
+diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
+index 14b71b41e8453..38e10ab976e67 100644
+--- a/drivers/acpi/acpica/nsrepair2.c
++++ b/drivers/acpi/acpica/nsrepair2.c
+@@ -379,6 +379,13 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
+ 
+ 			(*element_ptr)->common.reference_count =
+ 			    original_ref_count;
++
++			/*
++			 * The original_element holds a reference from the package object
++			 * that represents _HID. Since a new element was created by _HID,
++			 * remove the reference from the _CID package.
++			 */
++			acpi_ut_remove_reference(original_element);
+ 		}
+ 
+ 		element_ptr++;
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index fce7ade2aba92..0c8330ed1ffd5 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -441,28 +441,35 @@ static void ghes_kick_task_work(struct callback_head *head)
+ 	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
+ }
+ 
+-static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+-				       int sev)
++static bool ghes_do_memory_failure(u64 physical_addr, int flags)
+ {
+ 	unsigned long pfn;
+-	int flags = -1;
+-	int sec_sev = ghes_severity(gdata->error_severity);
+-	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
+ 
+ 	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
+ 		return false;
+ 
+-	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
+-		return false;
+-
+-	pfn = mem_err->physical_addr >> PAGE_SHIFT;
++	pfn = PHYS_PFN(physical_addr);
+ 	if (!pfn_valid(pfn)) {
+ 		pr_warn_ratelimited(FW_WARN GHES_PFX
+ 		"Invalid address in generic error data: %#llx\n",
+-		mem_err->physical_addr);
++		physical_addr);
+ 		return false;
+ 	}
+ 
++	memory_failure_queue(pfn, flags);
++	return true;
++}
++
++static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
++				       int sev)
++{
++	int flags = -1;
++	int sec_sev = ghes_severity(gdata->error_severity);
++	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
++
++	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
++		return false;
++
+ 	/* iff following two events can be handled properly by now */
+ 	if (sec_sev == GHES_SEV_CORRECTED &&
+ 	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
+@@ -470,14 +477,56 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+ 	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
+ 		flags = 0;
+ 
+-	if (flags != -1) {
+-		memory_failure_queue(pfn, flags);
+-		return true;
+-	}
++	if (flags != -1)
++		return ghes_do_memory_failure(mem_err->physical_addr, flags);
+ 
+ 	return false;
+ }
+ 
++static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
++{
++	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
++	bool queued = false;
++	int sec_sev, i;
++	char *p;
++
++	log_arm_hw_error(err);
++
++	sec_sev = ghes_severity(gdata->error_severity);
++	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
++		return false;
++
++	p = (char *)(err + 1);
++	for (i = 0; i < err->err_info_num; i++) {
++		struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
++		bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
++		bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
++		const char *error_type = "unknown error";
++
++		/*
++		 * The field (err_info->error_info & BIT(26)) is always set to
++		 * 1 in some old firmware of HiSilicon Kunpeng920. We assume that
++		 * firmware won't mix corrected errors in an uncorrected section,
++		 * and don't filter out 'corrected' error here.
++		 */
++		if (is_cache && has_pa) {
++			queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
++			p += err_info->length;
++			continue;
++		}
++
++		if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
++			error_type = cper_proc_error_type_strs[err_info->type];
++
++		pr_warn_ratelimited(FW_WARN GHES_PFX
++				    "Unhandled processor error type: %s\n",
++				    error_type);
++		p += err_info->length;
++	}
++
++	return queued;
++}
++
+ /*
+  * PCIe AER errors need to be sent to the AER driver for reporting and
+  * recovery. The GHES severities map to the following AER severities and
+@@ -605,9 +654,7 @@ static bool ghes_do_proc(struct ghes *ghes,
+ 			ghes_handle_aer(gdata);
+ 		}
+ 		else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
+-			struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
+-
+-			log_arm_hw_error(err);
++			queued = ghes_handle_arm_hw_error(gdata, sev);
+ 		} else {
+ 			void *err = acpi_hest_get_payload(gdata);
+ 
+diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
+index 19bb7f870204c..e0d14017706ea 100644
+--- a/drivers/acpi/bgrt.c
++++ b/drivers/acpi/bgrt.c
+@@ -15,40 +15,19 @@
+ static void *bgrt_image;
+ static struct kobject *bgrt_kobj;
+ 
+-static ssize_t version_show(struct device *dev,
+-			    struct device_attribute *attr, char *buf)
+-{
+-	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
+-}
+-static DEVICE_ATTR_RO(version);
+-
+-static ssize_t status_show(struct device *dev,
+-			   struct device_attribute *attr, char *buf)
+-{
+-	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
+-}
+-static DEVICE_ATTR_RO(status);
+-
+-static ssize_t type_show(struct device *dev,
+-			 struct device_attribute *attr, char *buf)
+-{
+-	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
+-}
+-static DEVICE_ATTR_RO(type);
+-
+-static ssize_t xoffset_show(struct device *dev,
+-			    struct device_attribute *attr, char *buf)
+-{
+-	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
+-}
+-static DEVICE_ATTR_RO(xoffset);
+-
+-static ssize_t yoffset_show(struct device *dev,
+-			    struct device_attribute *attr, char *buf)
+-{
+-	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
+-}
+-static DEVICE_ATTR_RO(yoffset);
++#define BGRT_SHOW(_name, _member) \
++	static ssize_t _name##_show(struct kobject *kobj,			\
++				    struct kobj_attribute *attr, char *buf)	\
++	{									\
++		return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab._member);	\
++	}									\
++	struct kobj_attribute bgrt_attr_##_name = __ATTR_RO(_name)
++
++BGRT_SHOW(version, version);
++BGRT_SHOW(status, status);
++BGRT_SHOW(type, image_type);
++BGRT_SHOW(xoffset, image_offset_x);
++BGRT_SHOW(yoffset, image_offset_y);
+ 
+ static ssize_t image_read(struct file *file, struct kobject *kobj,
+ 	       struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+@@ -60,11 +39,11 @@ static ssize_t image_read(struct file *file, struct kobject *kobj,
+ static BIN_ATTR_RO(image, 0);	/* size gets filled in later */
+ 
+ static struct attribute *bgrt_attributes[] = {
+-	&dev_attr_version.attr,
+-	&dev_attr_status.attr,
+-	&dev_attr_type.attr,
+-	&dev_attr_xoffset.attr,
+-	&dev_attr_yoffset.attr,
++	&bgrt_attr_version.attr,
++	&bgrt_attr_status.attr,
++	&bgrt_attr_type.attr,
++	&bgrt_attr_xoffset.attr,
++	&bgrt_attr_yoffset.attr,
+ 	NULL,
+ };
+ 
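
BGRT_SHOW() above uses ## token pasting so that one macro emits both the show() callback and its kobj_attribute, replacing five near-identical function/attribute pairs and giving the callbacks the kobject-based prototype matching how they are registered. A standalone sketch of the same token-pasting technique (illustrative structures, not the sysfs API):

#include <stdio.h>

struct table { int version; int status; };
static struct table tab = { .version = 1, .status = 0 };

/* one macro emits an accessor named after the field it reads */
#define DEFINE_SHOW(_name, _member)				\
	static int _name##_show(char *buf, size_t len)		\
	{							\
		return snprintf(buf, len, "%d\n", tab._member);	\
	}

DEFINE_SHOW(version, version)
DEFINE_SHOW(status, status)

int main(void)
{
	char buf[16];

	version_show(buf, sizeof(buf));
	fputs(buf, stdout);
	status_show(buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}
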
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index a4bd673934c0a..44b4f02e2c6d7 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -1321,6 +1321,7 @@ static int __init acpi_init(void)
+ 
+ 	result = acpi_bus_init();
+ 	if (result) {
++		kobject_put(acpi_kobj);
+ 		disable_acpi();
+ 		return result;
+ 	}
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index 58876248b1921..a63dd10d9aa94 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -20,6 +20,7 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/suspend.h>
+ 
++#include "fan.h"
+ #include "internal.h"
+ 
+ /**
+@@ -1307,10 +1308,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
+ 	 * with the generic ACPI PM domain.
+ 	 */
+ 	static const struct acpi_device_id special_pm_ids[] = {
+-		{"PNP0C0B", }, /* Generic ACPI fan */
+-		{"INT3404", }, /* Fan */
+-		{"INTC1044", }, /* Fan for Tiger Lake generation */
+-		{"INTC1048", }, /* Fan for Alder Lake generation */
++		ACPI_FAN_DEVICE_IDS,
+ 		{}
+ 	};
+ 	struct acpi_device *adev = ACPI_COMPANION(dev);
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
+index da4ff2a8b06a0..fe8c7e79f4726 100644
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -446,7 +446,7 @@ static ssize_t description_show(struct device *dev,
+ 		(wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
+ 		acpi_dev->pnp.str_obj->buffer.length,
+ 		UTF16_LITTLE_ENDIAN, buf,
+-		PAGE_SIZE);
++		PAGE_SIZE - 1);
+ 
+ 	buf[result++] = '\n';
+ 
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 13565629ce0a8..87c3b4a099b94 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -183,6 +183,7 @@ static struct workqueue_struct *ec_query_wq;
+ 
+ static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
+ static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
++static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
+ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
+ 
+ /* --------------------------------------------------------------------------
+@@ -1593,7 +1594,8 @@ static int acpi_ec_add(struct acpi_device *device)
+ 		}
+ 
+ 		if (boot_ec && ec->command_addr == boot_ec->command_addr &&
+-		    ec->data_addr == boot_ec->data_addr) {
++		    ec->data_addr == boot_ec->data_addr &&
++		    !EC_FLAGS_TRUST_DSDT_GPE) {
+ 			/*
+ 			 * Trust PNP0C09 namespace location rather than
+ 			 * ECDT ID. But trust ECDT GPE rather than _GPE
+@@ -1816,6 +1818,18 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
+ 	return 0;
+ }
+ 
++/*
++ * Some ECDTs contain a wrong GPE setting but share the same port addresses
++ * with the DSDT EC; don't duplicate the DSDT EC with the ECDT EC in this case.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=209989
++ */
++static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
++{
++	pr_debug("Detected system needing DSDT GPE setting.\n");
++	EC_FLAGS_TRUST_DSDT_GPE = 1;
++	return 0;
++}
++
+ /*
+  * Some DSDTs contain wrong GPE setting.
+  * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
+@@ -1846,6 +1860,22 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 	DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
+ 	{
++	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", {
++	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++	DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL},
++	{
++	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", {
++	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++	DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL},
++	{
++	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", {
++	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++	DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL},
++	{
++	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", {
++	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++	DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL},
++	{
+ 	ec_honor_ecdt_gpe, "ASUS X550VXK", {
+ 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 	DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
+@@ -1854,6 +1884,11 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 	DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
+ 	{
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */
++	ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", {
++	DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++	DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),}, NULL},
++	{
+ 	ec_clear_on_resume, "Samsung hardware", {
+ 	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
+ 	{},
+diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
+index 66c3983f0ccca..5cd0ceb50bc8a 100644
+--- a/drivers/acpi/fan.c
++++ b/drivers/acpi/fan.c
+@@ -16,6 +16,8 @@
+ #include <linux/platform_device.h>
+ #include <linux/sort.h>
+ 
++#include "fan.h"
++
+ MODULE_AUTHOR("Paul Diefenbaugh");
+ MODULE_DESCRIPTION("ACPI Fan Driver");
+ MODULE_LICENSE("GPL");
+@@ -24,10 +26,7 @@ static int acpi_fan_probe(struct platform_device *pdev);
+ static int acpi_fan_remove(struct platform_device *pdev);
+ 
+ static const struct acpi_device_id fan_device_ids[] = {
+-	{"PNP0C0B", 0},
+-	{"INT3404", 0},
+-	{"INTC1044", 0},
+-	{"INTC1048", 0},
++	ACPI_FAN_DEVICE_IDS,
+ 	{"", 0},
+ };
+ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
+diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
+new file mode 100644
+index 0000000000000..dc9a6efa514b0
+--- /dev/null
++++ b/drivers/acpi/fan.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++/*
++ * ACPI fan device IDs are shared between the fan driver and the device power
++ * management code.
++ *
++ * Add new device IDs before the generic ACPI fan one.
++ */
++#define ACPI_FAN_DEVICE_IDS	\
++	{"INT3404", }, /* Fan */ \
++	{"INTC1044", }, /* Fan for Tiger Lake generation */ \
++	{"INTC1048", }, /* Fan for Alder Lake generation */ \
++	{"PNP0C0B", } /* Generic ACPI fan */
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 4e2d76b8b697e..6790df5a2462a 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -16,6 +16,7 @@
+ #include <linux/acpi.h>
+ #include <linux/dmi.h>
+ #include <linux/sched.h>       /* need_resched() */
++#include <linux/sort.h>
+ #include <linux/tick.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpu.h>
+@@ -388,10 +389,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
+ 	return;
+ }
+ 
++static int acpi_cst_latency_cmp(const void *a, const void *b)
++{
++	const struct acpi_processor_cx *x = a, *y = b;
++
++	if (!(x->valid && y->valid))
++		return 0;
++	if (x->latency > y->latency)
++		return 1;
++	if (x->latency < y->latency)
++		return -1;
++	return 0;
++}
++static void acpi_cst_latency_swap(void *a, void *b, int n)
++{
++	struct acpi_processor_cx *x = a, *y = b;
++	u32 tmp;
++
++	if (!(x->valid && y->valid))
++		return;
++	tmp = x->latency;
++	x->latency = y->latency;
++	y->latency = tmp;
++}
++
+ static int acpi_processor_power_verify(struct acpi_processor *pr)
+ {
+ 	unsigned int i;
+ 	unsigned int working = 0;
++	unsigned int last_latency = 0;
++	unsigned int last_type = 0;
++	bool buggy_latency = false;
+ 
+ 	pr->power.timer_broadcast_on_state = INT_MAX;
+ 
+@@ -415,12 +443,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
+ 		}
+ 		if (!cx->valid)
+ 			continue;
++		if (cx->type >= last_type && cx->latency < last_latency)
++			buggy_latency = true;
++		last_latency = cx->latency;
++		last_type = cx->type;
+ 
+ 		lapic_timer_check_state(i, pr, cx);
+ 		tsc_check_state(cx->type);
+ 		working++;
+ 	}
+ 
++	if (buggy_latency) {
++		pr_notice("FW issue: working around C-state latencies out of order\n");
++		sort(&pr->power.states[1], max_cstate,
++		     sizeof(struct acpi_processor_cx),
++		     acpi_cst_latency_cmp,
++		     acpi_cst_latency_swap);
++	}
++
+ 	lapic_timer_propagate_broadcast(pr);
+ 
+ 	return (working);
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 20a7892c6d3fd..f0b2c37912531 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -423,6 +423,13 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
+ 	}
+ }
+ 
++static bool irq_is_legacy(struct acpi_resource_irq *irq)
++{
++	return irq->triggering == ACPI_EDGE_SENSITIVE &&
++		irq->polarity == ACPI_ACTIVE_HIGH &&
++		irq->shareable == ACPI_EXCLUSIVE;
++}
++
+ /**
+  * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
+  * @ares: Input ACPI resource object.
+@@ -461,7 +468,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
+ 		}
+ 		acpi_dev_get_irqresource(res, irq->interrupts[index],
+ 					 irq->triggering, irq->polarity,
+-					 irq->shareable, true);
++					 irq->shareable, irq_is_legacy(irq));
+ 		break;
+ 	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ 		ext_irq = &ares->data.extended_irq;
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 83cd4c95faf0d..33474fd969913 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -385,6 +385,30 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"),
+ 		},
+ 	},
++	{
++	.callback = video_detect_force_native,
++	.ident = "ASUSTeK COMPUTER INC. GA401",
++	.matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "GA401"),
++		},
++	},
++	{
++	.callback = video_detect_force_native,
++	.ident = "ASUSTeK COMPUTER INC. GA502",
++	.matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "GA502"),
++		},
++	},
++	{
++	.callback = video_detect_force_native,
++	.ident = "ASUSTeK COMPUTER INC. GA503",
++	.matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
++		},
++	},
+ 
+ 	/*
+ 	 * Desktops which falsely report a backlight and which our heuristics
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index 2b69536cdccba..2d7ddb8a8cb65 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -42,6 +42,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
+ 
+ /* AMD */
+ #define ACPI_LPS0_DSM_UUID_AMD      "e3f32452-febc-43ce-9039-932122d37721"
++#define ACPI_LPS0_ENTRY_AMD         2
++#define ACPI_LPS0_EXIT_AMD          3
+ #define ACPI_LPS0_SCREEN_OFF_AMD    4
+ #define ACPI_LPS0_SCREEN_ON_AMD     5
+ 
+@@ -408,6 +410,7 @@ int acpi_s2idle_prepare_late(void)
+ 
+ 	if (acpi_s2idle_vendor_amd()) {
+ 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
++		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD);
+ 	} else {
+ 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
+ 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
+@@ -422,6 +425,7 @@ void acpi_s2idle_restore_early(void)
+ 		return;
+ 
+ 	if (acpi_s2idle_vendor_amd()) {
++		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD);
+ 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
+ 	} else {
+ 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
+diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
+index badab67088935..46208ececbb6a 100644
+--- a/drivers/ata/pata_ep93xx.c
++++ b/drivers/ata/pata_ep93xx.c
+@@ -928,7 +928,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
+ 	/* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0) {
+-		err = -ENXIO;
++		err = irq;
+ 		goto err_rel_gpio;
+ 	}
+ 
+diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
+index bd87476ab4813..b5a3f710d76de 100644
+--- a/drivers/ata/pata_octeon_cf.c
++++ b/drivers/ata/pata_octeon_cf.c
+@@ -898,10 +898,11 @@ static int octeon_cf_probe(struct platform_device *pdev)
+ 					return -EINVAL;
+ 				}
+ 
+-				irq_handler = octeon_cf_interrupt;
+ 				i = platform_get_irq(dma_dev, 0);
+-				if (i > 0)
++				if (i > 0) {
+ 					irq = i;
++					irq_handler = octeon_cf_interrupt;
++				}
+ 			}
+ 			of_node_put(dma_node);
+ 		}
+diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
+index 479c4b29b8562..303f8c375b3af 100644
+--- a/drivers/ata/pata_rb532_cf.c
++++ b/drivers/ata/pata_rb532_cf.c
+@@ -115,10 +115,12 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq <= 0) {
++	if (irq < 0) {
+ 		dev_err(&pdev->dev, "no IRQ resource found\n");
+-		return -ENOENT;
++		return irq;
+ 	}
++	if (!irq)
++		return -EINVAL;
+ 
+ 	gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
+ 	if (IS_ERR(gpiod)) {
+diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
+index 64b2ef15ec191..8440203e835ed 100644
+--- a/drivers/ata/sata_highbank.c
++++ b/drivers/ata/sata_highbank.c
+@@ -469,10 +469,12 @@ static int ahci_highbank_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq <= 0) {
++	if (irq < 0) {
+ 		dev_err(dev, "no irq\n");
+-		return -EINVAL;
++		return irq;
+ 	}
++	if (!irq)
++		return -EINVAL;
+ 
+ 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ 	if (!hpriv) {
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index a370cde3ddd49..a9f9794892c4e 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1153,6 +1153,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
+ 	blk_queue_physical_block_size(lo->lo_queue, bsize);
+ 	blk_queue_io_min(lo->lo_queue, bsize);
+ 
++	loop_config_discard(lo);
+ 	loop_update_rotational(lo);
+ 	loop_update_dio(lo);
+ 	loop_sysfs_init(lo);
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index 25114f0d13199..bd71dfc9c9748 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -183,7 +183,7 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+ EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+ 
+ static void qca_tlv_check_data(struct qca_fw_config *config,
+-		const struct firmware *fw, enum qca_btsoc_type soc_type)
++		u8 *fw_data, enum qca_btsoc_type soc_type)
+ {
+ 	const u8 *data;
+ 	u32 type_len;
+@@ -194,7 +194,7 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
+ 	struct tlv_type_nvm *tlv_nvm;
+ 	uint8_t nvm_baud_rate = config->user_baud_rate;
+ 
+-	tlv = (struct tlv_type_hdr *)fw->data;
++	tlv = (struct tlv_type_hdr *)fw_data;
+ 
+ 	type_len = le32_to_cpu(tlv->type_len);
+ 	length = (type_len >> 8) & 0x00ffffff;
+@@ -390,8 +390,9 @@ static int qca_download_firmware(struct hci_dev *hdev,
+ 				 enum qca_btsoc_type soc_type)
+ {
+ 	const struct firmware *fw;
++	u8 *data;
+ 	const u8 *segment;
+-	int ret, remain, i = 0;
++	int ret, size, remain, i = 0;
+ 
+ 	bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
+ 
+@@ -402,10 +403,22 @@ static int qca_download_firmware(struct hci_dev *hdev,
+ 		return ret;
+ 	}
+ 
+-	qca_tlv_check_data(config, fw, soc_type);
++	size = fw->size;
++	data = vmalloc(fw->size);
++	if (!data) {
++		bt_dev_err(hdev, "QCA Failed to allocate memory for file: %s",
++			   config->fwname);
++		release_firmware(fw);
++		return -ENOMEM;
++	}
++
++	memcpy(data, fw->data, size);
++	release_firmware(fw);
++
++	qca_tlv_check_data(config, data, soc_type);
+ 
+-	segment = fw->data;
+-	remain = fw->size;
++	segment = data;
++	remain = size;
+ 	while (remain > 0) {
+ 		int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain);
+ 
+@@ -435,7 +448,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
+ 		ret = qca_inject_cmd_complete_event(hdev);
+ 
+ out:
+-	release_firmware(fw);
++	vfree(data);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index de36af63e1825..9589ef6c0c264 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -1820,8 +1820,6 @@ static void qca_power_shutdown(struct hci_uart *hu)
+ 	unsigned long flags;
+ 	enum qca_btsoc_type soc_type = qca_soc_type(hu);
+ 
+-	qcadev = serdev_device_get_drvdata(hu->serdev);
+-
+ 	/* From this point we go into power off state. But serial port is
+ 	 * still open, stop queueing the IBS data and flush all the buffered
+ 	 * data in skb's.
+@@ -1837,6 +1835,8 @@ static void qca_power_shutdown(struct hci_uart *hu)
+ 	if (!hu->serdev)
+ 		return;
+ 
++	qcadev = serdev_device_get_drvdata(hu->serdev);
++
+ 	if (qca_is_wcn399x(soc_type)) {
+ 		host_set_baudrate(hu, 2400);
+ 		qca_send_power_pulse(hu, false);
+diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
+index 87d3b73bcaded..481b3f87ed739 100644
+--- a/drivers/bus/mhi/core/pm.c
++++ b/drivers/bus/mhi/core/pm.c
+@@ -911,6 +911,7 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+ 
+ 	ret = wait_event_timeout(mhi_cntrl->state_event,
+ 				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
++				 mhi_cntrl->dev_state == MHI_STATE_M2 ||
+ 				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ 				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ 
+diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
+index 3e50a9fc4d822..31c64eb8a201d 100644
+--- a/drivers/bus/mhi/pci_generic.c
++++ b/drivers/bus/mhi/pci_generic.c
+@@ -470,7 +470,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+ 	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
+ 	if (err)
+-		return err;
++		goto err_disable_reporting;
+ 
+ 	/* MHI bus does not power up the controller by default */
+ 	err = mhi_prepare_for_power_up(mhi_cntrl);
+@@ -496,6 +496,8 @@ err_unprepare:
+ 	mhi_unprepare_after_power_down(mhi_cntrl);
+ err_unregister:
+ 	mhi_unregister_controller(mhi_cntrl);
++err_disable_reporting:
++	pci_disable_pcie_error_reporting(pdev);
+ 
+ 	return err;
+ }
+@@ -514,6 +516,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
+ 	}
+ 
+ 	mhi_unregister_controller(mhi_cntrl);
++	pci_disable_pcie_error_reporting(pdev);
+ }
+ 
+ static void mhi_pci_shutdown(struct pci_dev *pdev)
+diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
+index 8e1fe3f8dd2df..c8db62bc5ff72 100644
+--- a/drivers/char/hw_random/exynos-trng.c
++++ b/drivers/char/hw_random/exynos-trng.c
+@@ -132,7 +132,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
+ 		return PTR_ERR(trng->mem);
+ 
+ 	pm_runtime_enable(&pdev->dev);
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "Could not get runtime PM.\n");
+ 		goto err_pm_get;
+@@ -165,7 +165,7 @@ err_register:
+ 	clk_disable_unprepare(trng->clk);
+ 
+ err_clock:
+-	pm_runtime_put_sync(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
+ 
+ err_pm_get:
+ 	pm_runtime_disable(&pdev->dev);
+diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
+index 89681f07bc787..9468e9520cee0 100644
+--- a/drivers/char/pcmcia/cm4000_cs.c
++++ b/drivers/char/pcmcia/cm4000_cs.c
+@@ -544,6 +544,10 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
+ 		io_read_num_rec_bytes(iobase, &num_bytes_read);
+ 		if (num_bytes_read >= 4) {
+ 			DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
++			if (num_bytes_read > 4) {
++				rc = -EIO;
++				goto exit_setprotocol;
++			}
+ 			break;
+ 		}
+ 		usleep_range(10000, 11000);
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 55b9d3965ae1b..69579efb247b3 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -196,13 +196,24 @@ static u8 tpm_tis_status(struct tpm_chip *chip)
+ 		return 0;
+ 
+ 	if (unlikely((status & TPM_STS_READ_ZERO) != 0)) {
+-		/*
+-		 * If this trips, the chances are the read is
+-		 * returning 0xff because the locality hasn't been
+-		 * acquired.  Usually because tpm_try_get_ops() hasn't
+-		 * been called before doing a TPM operation.
+-		 */
+-		WARN_ONCE(1, "TPM returned invalid status\n");
++		if  (!test_and_set_bit(TPM_TIS_INVALID_STATUS, &priv->flags)) {
++			/*
++			 * If this trips, the chances are the read is
++			 * returning 0xff because the locality hasn't been
++			 * acquired.  Usually because tpm_try_get_ops() hasn't
++			 * been called before doing a TPM operation.
++			 */
++			dev_err(&chip->dev, "invalid TPM_STS.x 0x%02x, dumping stack for forensics\n",
++				status);
++
++			/*
++			 * Dump stack for forensics, as an invalid TPM_STS.x could
++			 * potentially be triggered by an impaired tpm_try_get_ops() or
++			 * tpm_find_get_ops().
++			 */
++			dump_stack();
++		}
++
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
+index 9b2d32a59f670..b2a3c6c72882d 100644
+--- a/drivers/char/tpm/tpm_tis_core.h
++++ b/drivers/char/tpm/tpm_tis_core.h
+@@ -83,6 +83,7 @@ enum tis_defaults {
+ 
+ enum tpm_tis_flags {
+ 	TPM_TIS_ITPM_WORKAROUND		= BIT(0),
++	TPM_TIS_INVALID_STATUS		= BIT(1),
+ };
+ 
+ struct tpm_tis_data {
+@@ -90,7 +91,7 @@ struct tpm_tis_data {
+ 	int locality;
+ 	int irq;
+ 	bool irq_tested;
+-	unsigned int flags;
++	unsigned long flags;
+ 	void __iomem *ilb_base_addr;
+ 	u16 clkrun_enabled;
+ 	wait_queue_head_t int_queue;
+diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
+index 3856f6ebcb34f..de4209003a448 100644
+--- a/drivers/char/tpm/tpm_tis_spi_main.c
++++ b/drivers/char/tpm/tpm_tis_spi_main.c
+@@ -260,6 +260,8 @@ static int tpm_tis_spi_remove(struct spi_device *dev)
+ }
+ 
+ static const struct spi_device_id tpm_tis_spi_id[] = {
++	{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
++	{ "slb9670", (unsigned long)tpm_tis_spi_probe },
+ 	{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
+ 	{ "cr50", (unsigned long)cr50_spi_probe },
+ 	{}
+diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c
+index 61bb224f63309..cbeb51c804eb5 100644
+--- a/drivers/clk/actions/owl-s500.c
++++ b/drivers/clk/actions/owl-s500.c
+@@ -127,8 +127,7 @@ static struct clk_factor_table sd_factor_table[] = {
+ 	{ 12, 1, 13 }, { 13, 1, 14 }, { 14, 1, 15 }, { 15, 1, 16 },
+ 	{ 16, 1, 17 }, { 17, 1, 18 }, { 18, 1, 19 }, { 19, 1, 20 },
+ 	{ 20, 1, 21 }, { 21, 1, 22 }, { 22, 1, 23 }, { 23, 1, 24 },
+-	{ 24, 1, 25 }, { 25, 1, 26 }, { 26, 1, 27 }, { 27, 1, 28 },
+-	{ 28, 1, 29 }, { 29, 1, 30 }, { 30, 1, 31 }, { 31, 1, 32 },
++	{ 24, 1, 25 },
+ 
+ 	/* bit8: /128 */
+ 	{ 256, 1, 1 * 128 }, { 257, 1, 2 * 128 }, { 258, 1, 3 * 128 }, { 259, 1, 4 * 128 },
+@@ -137,19 +136,20 @@ static struct clk_factor_table sd_factor_table[] = {
+ 	{ 268, 1, 13 * 128 }, { 269, 1, 14 * 128 }, { 270, 1, 15 * 128 }, { 271, 1, 16 * 128 },
+ 	{ 272, 1, 17 * 128 }, { 273, 1, 18 * 128 }, { 274, 1, 19 * 128 }, { 275, 1, 20 * 128 },
+ 	{ 276, 1, 21 * 128 }, { 277, 1, 22 * 128 }, { 278, 1, 23 * 128 }, { 279, 1, 24 * 128 },
+-	{ 280, 1, 25 * 128 }, { 281, 1, 26 * 128 }, { 282, 1, 27 * 128 }, { 283, 1, 28 * 128 },
+-	{ 284, 1, 29 * 128 }, { 285, 1, 30 * 128 }, { 286, 1, 31 * 128 }, { 287, 1, 32 * 128 },
++	{ 280, 1, 25 * 128 },
+ 	{ 0, 0, 0 },
+ };
+ 
+-static struct clk_factor_table bisp_factor_table[] = {
+-	{ 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
+-	{ 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
++static struct clk_factor_table de_factor_table[] = {
++	{ 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
++	{ 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
++	{ 8, 1, 12 },
+ 	{ 0, 0, 0 },
+ };
+ 
+-static struct clk_factor_table ahb_factor_table[] = {
+-	{ 1, 1, 2 }, { 2, 1, 3 },
++static struct clk_factor_table hde_factor_table[] = {
++	{ 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
++	{ 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
+ 	{ 0, 0, 0 },
+ };
+ 
+@@ -158,6 +158,13 @@ static struct clk_div_table rmii_ref_div_table[] = {
+ 	{ 0, 0 },
+ };
+ 
++static struct clk_div_table std12rate_div_table[] = {
++	{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
++	{ 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
++	{ 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
++	{ 0, 0 },
++};
++
+ static struct clk_div_table i2s_div_table[] = {
+ 	{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ 	{ 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
+@@ -174,7 +181,6 @@ static struct clk_div_table nand_div_table[] = {
+ 
+ /* mux clock */
+ static OWL_MUX(dev_clk, "dev_clk", dev_clk_mux_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT);
+-static OWL_MUX(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
+ 
+ /* gate clocks */
+ static OWL_GATE(gpio_clk, "gpio_clk", "apb_clk", CMU_DEVCLKEN0, 18, 0, 0);
+@@ -187,45 +193,54 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
+ static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
+ 
+ /* divider clocks */
+-static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
++static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 2, 2, NULL, 0, 0);
+ static OWL_DIVIDER(apb_clk, "apb_clk", "ahb_clk", CMU_BUSCLK1, 14, 2, NULL, 0, 0);
+ static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
+ 
+ /* factor clocks */
+-static OWL_FACTOR(ahb_clk, "ahb_clk", "h_clk", CMU_BUSCLK1, 2, 2, ahb_factor_table, 0, 0);
+-static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 3, bisp_factor_table, 0, 0);
+-static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 3, bisp_factor_table, 0, 0);
++static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 4, de_factor_table, 0, 0);
++static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 4, de_factor_table, 0, 0);
+ 
+ /* composite clocks */
++static OWL_COMP_DIV(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p,
++			OWL_MUX_HW(CMU_BUSCLK1, 8, 3),
++			{ 0 },
++			OWL_DIVIDER_HW(CMU_BUSCLK1, 12, 2, 0, NULL),
++			CLK_SET_RATE_PARENT);
++
++static OWL_COMP_FIXED_FACTOR(ahb_clk, "ahb_clk", "h_clk",
++			{ 0 },
++			1, 1, 0);
++
+ static OWL_COMP_FACTOR(vce_clk, "vce_clk", hde_clk_mux_p,
+ 			OWL_MUX_HW(CMU_VCECLK, 4, 2),
+ 			OWL_GATE_HW(CMU_DEVCLKEN0, 26, 0),
+-			OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, bisp_factor_table),
++			OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, hde_factor_table),
+ 			0);
+ 
+ static OWL_COMP_FACTOR(vde_clk, "vde_clk", hde_clk_mux_p,
+ 			OWL_MUX_HW(CMU_VDECLK, 4, 2),
+ 			OWL_GATE_HW(CMU_DEVCLKEN0, 25, 0),
+-			OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, bisp_factor_table),
++			OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, hde_factor_table),
+ 			0);
+ 
+-static OWL_COMP_FACTOR(bisp_clk, "bisp_clk", bisp_clk_mux_p,
++static OWL_COMP_DIV(bisp_clk, "bisp_clk", bisp_clk_mux_p,
+ 			OWL_MUX_HW(CMU_BISPCLK, 4, 1),
+ 			OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+-			OWL_FACTOR_HW(CMU_BISPCLK, 0, 3, 0, bisp_factor_table),
++			OWL_DIVIDER_HW(CMU_BISPCLK, 0, 4, 0, std12rate_div_table),
+ 			0);
+ 
+-static OWL_COMP_FACTOR(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
++static OWL_COMP_DIV(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
+ 			OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
+ 			OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+-			OWL_FACTOR_HW(CMU_SENSORCLK, 0, 3, 0, bisp_factor_table),
+-			CLK_IGNORE_UNUSED);
++			OWL_DIVIDER_HW(CMU_SENSORCLK, 0, 4, 0, std12rate_div_table),
++			0);
+ 
+-static OWL_COMP_FACTOR(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
++static OWL_COMP_DIV(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
+ 			OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
+ 			OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+-			OWL_FACTOR_HW(CMU_SENSORCLK, 8, 3, 0, bisp_factor_table),
+-			CLK_IGNORE_UNUSED);
++			OWL_DIVIDER_HW(CMU_SENSORCLK, 8, 4, 0, std12rate_div_table),
++			0);
+ 
+ static OWL_COMP_FACTOR(sd0_clk, "sd0_clk", sd_clk_mux_p,
+ 			OWL_MUX_HW(CMU_SD0CLK, 9, 1),
+@@ -305,7 +320,7 @@ static OWL_COMP_FIXED_FACTOR(i2c3_clk, "i2c3_clk", "ethernet_pll_clk",
+ static OWL_COMP_DIV(uart0_clk, "uart0_clk", uart_clk_mux_p,
+ 			OWL_MUX_HW(CMU_UART0CLK, 16, 1),
+ 			OWL_GATE_HW(CMU_DEVCLKEN1, 6, 0),
+-			OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
++			OWL_DIVIDER_HW(CMU_UART0CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 			CLK_IGNORE_UNUSED);
+ 
+ static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
+@@ -317,31 +332,31 @@ static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
+ static OWL_COMP_DIV(uart2_clk, "uart2_clk", uart_clk_mux_p,
+ 			OWL_MUX_HW(CMU_UART2CLK, 16, 1),
+ 			OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
+-			OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
++			OWL_DIVIDER_HW(CMU_UART2CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 			CLK_IGNORE_UNUSED);
+ 
+ static OWL_COMP_DIV(uart3_clk, "uart3_clk", uart_clk_mux_p,
+ 			OWL_MUX_HW(CMU_UART3CLK, 16, 1),
+ 			OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
+-			OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
++			OWL_DIVIDER_HW(CMU_UART3CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 			CLK_IGNORE_UNUSED);
+ 
+ static OWL_COMP_DIV(uart4_clk, "uart4_clk", uart_clk_mux_p,
+ 			OWL_MUX_HW(CMU_UART4CLK, 16, 1),
+ 			OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
+-			OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
++			OWL_DIVIDER_HW(CMU_UART4CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 			CLK_IGNORE_UNUSED);
+ 
+ static OWL_COMP_DIV(uart5_clk, "uart5_clk", uart_clk_mux_p,
+ 			OWL_MUX_HW(CMU_UART5CLK, 16, 1),
+ 			OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
+-			OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
++			OWL_DIVIDER_HW(CMU_UART5CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 			CLK_IGNORE_UNUSED);
+ 
+ static OWL_COMP_DIV(uart6_clk, "uart6_clk", uart_clk_mux_p,
+ 			OWL_MUX_HW(CMU_UART6CLK, 16, 1),
+ 			OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
+-			OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
++			OWL_DIVIDER_HW(CMU_UART6CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 			CLK_IGNORE_UNUSED);
+ 
+ static OWL_COMP_DIV(i2srx_clk, "i2srx_clk", i2s_clk_mux_p,
+diff --git a/drivers/clk/clk-k210.c b/drivers/clk/clk-k210.c
+index 6c84abf5b2e36..67a7cb3503c36 100644
+--- a/drivers/clk/clk-k210.c
++++ b/drivers/clk/clk-k210.c
+@@ -722,6 +722,7 @@ static int k210_clk_set_parent(struct clk_hw *hw, u8 index)
+ 		reg |= BIT(cfg->mux_bit);
+ 	else
+ 		reg &= ~BIT(cfg->mux_bit);
++	writel(reg, ksc->regs + cfg->mux_reg);
+ 	spin_unlock_irqrestore(&ksc->clk_lock, flags);
+ 
+ 	return 0;
+diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
+index e0446e66fa645..eb22f4fdbc6b4 100644
+--- a/drivers/clk/clk-si5341.c
++++ b/drivers/clk/clk-si5341.c
+@@ -92,12 +92,22 @@ struct clk_si5341_output_config {
+ #define SI5341_PN_BASE		0x0002
+ #define SI5341_DEVICE_REV	0x0005
+ #define SI5341_STATUS		0x000C
++#define SI5341_LOS		0x000D
++#define SI5341_STATUS_STICKY	0x0011
++#define SI5341_LOS_STICKY	0x0012
+ #define SI5341_SOFT_RST		0x001C
+ #define SI5341_IN_SEL		0x0021
++#define SI5341_DEVICE_READY	0x00FE
+ #define SI5341_XAXB_CFG		0x090E
+ #define SI5341_IN_EN		0x0949
+ #define SI5341_INX_TO_PFD_EN	0x094A
+ 
++/* Status bits */
++#define SI5341_STATUS_SYSINCAL	BIT(0)
++#define SI5341_STATUS_LOSXAXB	BIT(1)
++#define SI5341_STATUS_LOSREF	BIT(2)
++#define SI5341_STATUS_LOL	BIT(3)
++
+ /* Input selection */
+ #define SI5341_IN_SEL_MASK	0x06
+ #define SI5341_IN_SEL_SHIFT	1
+@@ -340,6 +350,8 @@ static const struct si5341_reg_default si5341_reg_defaults[] = {
+ 	{ 0x094A, 0x00 }, /* INx_TO_PFD_EN (disabled) */
+ 	{ 0x0A02, 0x00 }, /* Not in datasheet */
+ 	{ 0x0B44, 0x0F }, /* PDIV_ENB (datasheet does not mention what it is) */
++	{ 0x0B57, 0x10 }, /* VCO_RESET_CALCODE (not described in datasheet) */
++	{ 0x0B58, 0x05 }, /* VCO_RESET_CALCODE (not described in datasheet) */
+ };
+ 
+ /* Read and interpret a 44-bit followed by a 32-bit value in the regmap */
+@@ -623,6 +635,9 @@ static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
+ 			SI5341_SYNTH_N_NUM(synth->index), &n_num, &n_den);
+ 	if (err < 0)
+ 		return err;
++	/* Check for bogus/uninitialized settings */
++	if (!n_num || !n_den)
++		return 0;
+ 
+ 	/*
+ 	 * n_num and n_den are shifted left as much as possible, so to prevent
+@@ -806,6 +821,9 @@ static long si5341_output_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ 	unsigned long r;
+ 
++	if (!rate)
++		return 0;
++
+ 	r = *parent_rate >> 1;
+ 
+ 	/* If rate is an even divisor, no changes to parent required */
+@@ -834,11 +852,16 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ 		unsigned long parent_rate)
+ {
+ 	struct clk_si5341_output *output = to_clk_si5341_output(hw);
+-	/* Frequency divider is (r_div + 1) * 2 */
+-	u32 r_div = (parent_rate / rate) >> 1;
++	u32 r_div;
+ 	int err;
+ 	u8 r[3];
+ 
++	if (!rate)
++		return -EINVAL;
++
++	/* Frequency divider is (r_div + 1) * 2 */
++	r_div = (parent_rate / rate) >> 1;
++
+ 	if (r_div <= 1)
+ 		r_div = 0;
+ 	else if (r_div >= BIT(24))
+@@ -1083,7 +1106,7 @@ static const struct si5341_reg_default si5341_preamble[] = {
+ 	{ 0x0B25, 0x00 },
+ 	{ 0x0502, 0x01 },
+ 	{ 0x0505, 0x03 },
+-	{ 0x0957, 0x1F },
++	{ 0x0957, 0x17 },
+ 	{ 0x0B4E, 0x1A },
+ };
+ 
+@@ -1189,6 +1212,32 @@ static const struct regmap_range_cfg si5341_regmap_ranges[] = {
+ 	},
+ };
+ 
++static int si5341_wait_device_ready(struct i2c_client *client)
++{
++	int count;
++
++	/* Datasheet warns: Any attempt to read or write any register other
++	 * than DEVICE_READY before DEVICE_READY reads as 0x0F may corrupt the
++	 * NVM programming and may corrupt the register contents, as they are
++	 * read from NVM. Note that this includes accesses to the PAGE register.
++	 * Also: DEVICE_READY is available on every register page, so no page
++	 * change is needed to read it.
++	 * Do this outside regmap to avoid automatic PAGE register access.
++	 * May take up to 300ms to complete.
++	 */
++	for (count = 0; count < 15; ++count) {
++		s32 result = i2c_smbus_read_byte_data(client,
++						      SI5341_DEVICE_READY);
++		if (result < 0)
++			return result;
++		if (result == 0x0F)
++			return 0;
++		msleep(20);
++	}
++	dev_err(&client->dev, "timeout waiting for DEVICE_READY\n");
++	return -EIO;
++}
++
+ static const struct regmap_config si5341_regmap_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+@@ -1378,6 +1427,7 @@ static int si5341_probe(struct i2c_client *client,
+ 	unsigned int i;
+ 	struct clk_si5341_output_config config[SI5341_MAX_NUM_OUTPUTS];
+ 	bool initialization_required;
++	u32 status;
+ 
+ 	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ 	if (!data)
+@@ -1385,6 +1435,11 @@ static int si5341_probe(struct i2c_client *client,
+ 
+ 	data->i2c_client = client;
+ 
++	/* Must be done before otherwise touching hardware */
++	err = si5341_wait_device_ready(client);
++	if (err)
++		return err;
++
+ 	for (i = 0; i < SI5341_NUM_INPUTS; ++i) {
+ 		input = devm_clk_get(&client->dev, si5341_input_clock_names[i]);
+ 		if (IS_ERR(input)) {
+@@ -1540,6 +1595,22 @@ static int si5341_probe(struct i2c_client *client,
+ 			return err;
+ 	}
+ 
++	/* wait for device to report input clock present and PLL lock */
++	err = regmap_read_poll_timeout(data->regmap, SI5341_STATUS, status,
++		!(status & (SI5341_STATUS_LOSREF | SI5341_STATUS_LOL)),
++	       10000, 250000);
++	if (err) {
++		dev_err(&client->dev, "Error waiting for input clock or PLL lock\n");
++		return err;
++	}
++
++	/* clear sticky alarm bits from initialization */
++	err = regmap_write(data->regmap, SI5341_STATUS_STICKY, 0);
++	if (err) {
++		dev_err(&client->dev, "unable to clear sticky status\n");
++		return err;
++	}
++
+ 	/* Free the names, clk framework makes copies */
+ 	for (i = 0; i < data->num_synth; ++i)
+ 		 devm_kfree(&client->dev, (void *)synth_clock_names[i]);
+diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
+index 344cd6c611887..3c737742c2a92 100644
+--- a/drivers/clk/clk-versaclock5.c
++++ b/drivers/clk/clk-versaclock5.c
+@@ -69,7 +69,10 @@
+ #define VC5_FEEDBACK_FRAC_DIV(n)		(0x19 + (n))
+ #define VC5_RC_CONTROL0				0x1e
+ #define VC5_RC_CONTROL1				0x1f
+-/* Register 0x20 is factory reserved */
++
++/* These registers are named "Unused Factory Reserved Registers" */
++#define VC5_RESERVED_X0(idx)		(0x20 + ((idx) * 0x10))
++#define VC5_RESERVED_X0_BYPASS_SYNC	BIT(7) /* bypass_sync<idx> bit */
+ 
+ /* Output divider control for divider 1,2,3,4 */
+ #define VC5_OUT_DIV_CONTROL(idx)	(0x21 + ((idx) * 0x10))
+@@ -87,7 +90,6 @@
+ #define VC5_OUT_DIV_SKEW_INT(idx, n)	(0x2b + ((idx) * 0x10) + (n))
+ #define VC5_OUT_DIV_INT(idx, n)		(0x2d + ((idx) * 0x10) + (n))
+ #define VC5_OUT_DIV_SKEW_FRAC(idx)	(0x2f + ((idx) * 0x10))
+-/* Registers 0x30, 0x40, 0x50 are factory reserved */
+ 
+ /* Clock control register for clock 1,2 */
+ #define VC5_CLK_OUTPUT_CFG(idx, n)	(0x60 + ((idx) * 0x2) + (n))
+@@ -140,6 +142,8 @@
+ #define VC5_HAS_INTERNAL_XTAL	BIT(0)
+ /* chip has PFD frequency doubler */
+ #define VC5_HAS_PFD_FREQ_DBL	BIT(1)
++/* chip has bits to disable FOD sync */
++#define VC5_HAS_BYPASS_SYNC_BIT	BIT(2)
+ 
+ /* Supported IDT VC5 models. */
+ enum vc5_model {
+@@ -581,6 +585,23 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
+ 	unsigned int src;
+ 	int ret;
+ 
++	/*
++	 * When enabling a FOD, all currently enabled FODs are briefly
++	 * stopped in order to synchronize all of them. This causes a clock
++	 * disruption to any unrelated chips that might already be using
++	 * other clock outputs. Bypass the sync feature to avoid the issue,
++	 * which is possible on the VersaClock 6E family via reserved
++	 * registers.
++	 */
++	if (vc5->chip_info->flags & VC5_HAS_BYPASS_SYNC_BIT) {
++		ret = regmap_update_bits(vc5->regmap,
++					 VC5_RESERVED_X0(hwdata->num),
++					 VC5_RESERVED_X0_BYPASS_SYNC,
++					 VC5_RESERVED_X0_BYPASS_SYNC);
++		if (ret)
++			return ret;
++	}
++
+ 	/*
+ 	 * If the input mux is disabled, enable it first and
+ 	 * select source from matching FOD.
+@@ -1166,7 +1187,7 @@ static const struct vc5_chip_info idt_5p49v6965_info = {
+ 	.model = IDT_VC6_5P49V6965,
+ 	.clk_fod_cnt = 4,
+ 	.clk_out_cnt = 5,
+-	.flags = 0,
++	.flags = VC5_HAS_BYPASS_SYNC_BIT,
+ };
+ 
+ static const struct i2c_device_id vc5_id[] = {
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index 3e1a10d3f55cc..b9bad6d92671d 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -358,46 +358,26 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ 	hws[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_hw_sscg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
+ 
+ 	/* SYS PLL1 fixed output */
+-	hws[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_hw_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
+-	hws[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_hw_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
+-	hws[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_hw_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
+-	hws[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_hw_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
+-	hws[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_hw_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
+-	hws[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_hw_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
+-	hws[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_hw_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
+-	hws[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_hw_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
+-	hws[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_hw_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
+-
+-	hws[IMX8MQ_SYS1_PLL_40M] = imx_clk_hw_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
+-	hws[IMX8MQ_SYS1_PLL_80M] = imx_clk_hw_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
+-	hws[IMX8MQ_SYS1_PLL_100M] = imx_clk_hw_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
+-	hws[IMX8MQ_SYS1_PLL_133M] = imx_clk_hw_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
+-	hws[IMX8MQ_SYS1_PLL_160M] = imx_clk_hw_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
+-	hws[IMX8MQ_SYS1_PLL_200M] = imx_clk_hw_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
+-	hws[IMX8MQ_SYS1_PLL_266M] = imx_clk_hw_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
+-	hws[IMX8MQ_SYS1_PLL_400M] = imx_clk_hw_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
+-	hws[IMX8MQ_SYS1_PLL_800M] = imx_clk_hw_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
++	hws[IMX8MQ_SYS1_PLL_40M] = imx_clk_hw_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20);
++	hws[IMX8MQ_SYS1_PLL_80M] = imx_clk_hw_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10);
++	hws[IMX8MQ_SYS1_PLL_100M] = imx_clk_hw_fixed_factor("sys1_pll_100m", "sys1_pll_out", 1, 8);
++	hws[IMX8MQ_SYS1_PLL_133M] = imx_clk_hw_fixed_factor("sys1_pll_133m", "sys1_pll_out", 1, 6);
++	hws[IMX8MQ_SYS1_PLL_160M] = imx_clk_hw_fixed_factor("sys1_pll_160m", "sys1_pll_out", 1, 5);
++	hws[IMX8MQ_SYS1_PLL_200M] = imx_clk_hw_fixed_factor("sys1_pll_200m", "sys1_pll_out", 1, 4);
++	hws[IMX8MQ_SYS1_PLL_266M] = imx_clk_hw_fixed_factor("sys1_pll_266m", "sys1_pll_out", 1, 3);
++	hws[IMX8MQ_SYS1_PLL_400M] = imx_clk_hw_fixed_factor("sys1_pll_400m", "sys1_pll_out", 1, 2);
++	hws[IMX8MQ_SYS1_PLL_800M] = imx_clk_hw_fixed_factor("sys1_pll_800m", "sys1_pll_out", 1, 1);
+ 
+ 	/* SYS PLL2 fixed output */
+-	hws[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_hw_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
+-	hws[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_hw_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
+-	hws[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_hw_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
+-	hws[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_hw_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
+-	hws[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_hw_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
+-	hws[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_hw_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
+-	hws[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_hw_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
+-	hws[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_hw_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
+-	hws[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_hw_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
+-
+-	hws[IMX8MQ_SYS2_PLL_50M] = imx_clk_hw_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
+-	hws[IMX8MQ_SYS2_PLL_100M] = imx_clk_hw_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
+-	hws[IMX8MQ_SYS2_PLL_125M] = imx_clk_hw_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
+-	hws[IMX8MQ_SYS2_PLL_166M] = imx_clk_hw_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
+-	hws[IMX8MQ_SYS2_PLL_200M] = imx_clk_hw_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
+-	hws[IMX8MQ_SYS2_PLL_250M] = imx_clk_hw_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
+-	hws[IMX8MQ_SYS2_PLL_333M] = imx_clk_hw_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
+-	hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
+-	hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
++	hws[IMX8MQ_SYS2_PLL_50M] = imx_clk_hw_fixed_factor("sys2_pll_50m", "sys2_pll_out", 1, 20);
++	hws[IMX8MQ_SYS2_PLL_100M] = imx_clk_hw_fixed_factor("sys2_pll_100m", "sys2_pll_out", 1, 10);
++	hws[IMX8MQ_SYS2_PLL_125M] = imx_clk_hw_fixed_factor("sys2_pll_125m", "sys2_pll_out", 1, 8);
++	hws[IMX8MQ_SYS2_PLL_166M] = imx_clk_hw_fixed_factor("sys2_pll_166m", "sys2_pll_out", 1, 6);
++	hws[IMX8MQ_SYS2_PLL_200M] = imx_clk_hw_fixed_factor("sys2_pll_200m", "sys2_pll_out", 1, 5);
++	hws[IMX8MQ_SYS2_PLL_250M] = imx_clk_hw_fixed_factor("sys2_pll_250m", "sys2_pll_out", 1, 4);
++	hws[IMX8MQ_SYS2_PLL_333M] = imx_clk_hw_fixed_factor("sys2_pll_333m", "sys2_pll_out", 1, 3);
++	hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_out", 1, 2);
++	hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_out", 1, 1);
+ 
+ 	hws[IMX8MQ_CLK_MON_AUDIO_PLL1_DIV] = imx_clk_hw_divider("audio_pll1_out_monitor", "audio_pll1_bypass", base + 0x78, 0, 3);
+ 	hws[IMX8MQ_CLK_MON_AUDIO_PLL2_DIV] = imx_clk_hw_divider("audio_pll2_out_monitor", "audio_pll2_bypass", base + 0x78, 4, 3);
+diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
+index b080359b4645e..a805bac93c113 100644
+--- a/drivers/clk/meson/g12a.c
++++ b/drivers/clk/meson/g12a.c
+@@ -1603,7 +1603,7 @@ static struct clk_regmap g12b_cpub_clk_trace = {
+ };
+ 
+ static const struct pll_mult_range g12a_gp0_pll_mult_range = {
+-	.min = 55,
++	.min = 125,
+ 	.max = 255,
+ };
+ 
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index c6eb99169ddc7..6f8f0bbc5ab5b 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -1234,7 +1234,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
+ 		return ret;
+ 
+ 	/* Setup PLL for calibration frequency */
+-	regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), cal_l);
++	regmap_write(pll->clkr.regmap, PLL_CAL_L_VAL(pll), cal_l);
+ 
+ 	/* Bringup the PLL at calibration frequency */
+ 	ret = clk_alpha_pll_enable(hw);
+diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
+index 22736c16ed160..2db1b07c70447 100644
+--- a/drivers/clk/qcom/gcc-sc7280.c
++++ b/drivers/clk/qcom/gcc-sc7280.c
+@@ -716,6 +716,7 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = {
+ 	F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ 	F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ 	F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
++	F(52174000, P_GCC_GPLL0_OUT_MAIN, 1, 2, 23),
+ 	F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ 	F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ 	F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
+index 7689bdd0a914b..d3e01c5e93765 100644
+--- a/drivers/clk/socfpga/clk-agilex.c
++++ b/drivers/clk/socfpga/clk-agilex.c
+@@ -186,6 +186,41 @@ static const struct clk_parent_data noc_mux[] = {
+ 	  .name = "boot_clk", },
+ };
+ 
++static const struct clk_parent_data sdmmc_mux[] = {
++	{ .fw_name = "sdmmc_free_clk",
++	  .name = "sdmmc_free_clk", },
++	{ .fw_name = "boot_clk",
++	  .name = "boot_clk", },
++};
++
++static const struct clk_parent_data s2f_user1_mux[] = {
++	{ .fw_name = "s2f_user1_free_clk",
++	  .name = "s2f_user1_free_clk", },
++	{ .fw_name = "boot_clk",
++	  .name = "boot_clk", },
++};
++
++static const struct clk_parent_data psi_mux[] = {
++	{ .fw_name = "psi_ref_free_clk",
++	  .name = "psi_ref_free_clk", },
++	{ .fw_name = "boot_clk",
++	  .name = "boot_clk", },
++};
++
++static const struct clk_parent_data gpio_db_mux[] = {
++	{ .fw_name = "gpio_db_free_clk",
++	  .name = "gpio_db_free_clk", },
++	{ .fw_name = "boot_clk",
++	  .name = "boot_clk", },
++};
++
++static const struct clk_parent_data emac_ptp_mux[] = {
++	{ .fw_name = "emac_ptp_free_clk",
++	  .name = "emac_ptp_free_clk", },
++	{ .fw_name = "boot_clk",
++	  .name = "boot_clk", },
++};
++
+ /* clocks in AO (always on) controller */
+ static const struct stratix10_pll_clock agilex_pll_clks[] = {
+ 	{ AGILEX_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
+@@ -222,11 +257,9 @@ static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
+ 	{ AGILEX_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
+ 	   0, 0x3C, 0, 0, 0},
+ 	{ AGILEX_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
+-	  0, 0x40, 0, 0, 1},
+-	{ AGILEX_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0,
+-	  0, 4, 0, 0},
+-	{ AGILEX_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
+-	  0, 0, 0, 0x30, 1},
++	  0, 0x40, 0, 0, 0},
++	{ AGILEX_L4_SYS_FREE_CLK, "l4_sys_free_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0,
++	  0, 4, 0x30, 1},
+ 	{ AGILEX_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
+ 	  0, 0xD4, 0, 0x88, 0},
+ 	{ AGILEX_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
+@@ -236,7 +269,7 @@ static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
+ 	{ AGILEX_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
+ 	  ARRAY_SIZE(gpio_db_free_mux), 0, 0xE0, 0, 0x88, 3},
+ 	{ AGILEX_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
+-	  ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0x88, 4},
++	  ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0, 0},
+ 	{ AGILEX_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, s2f_usr0_free_mux,
+ 	  ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0, 0},
+ 	{ AGILEX_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
+@@ -252,24 +285,24 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
+ 	  0, 0, 0, 0, 0, 0, 4},
+ 	{ AGILEX_MPU_CCU_CLK, "mpu_ccu_clk", "mpu_clk", NULL, 1, 0, 0x24,
+ 	  0, 0, 0, 0, 0, 0, 2},
+-	{ AGILEX_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x24,
+-	  1, 0x44, 0, 2, 0, 0, 0},
+-	{ AGILEX_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x24,
+-	  2, 0x44, 8, 2, 0, 0, 0},
++	{ AGILEX_L4_MAIN_CLK, "l4_main_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
++	  1, 0x44, 0, 2, 0x30, 1, 0},
++	{ AGILEX_L4_MP_CLK, "l4_mp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
++	  2, 0x44, 8, 2, 0x30, 1, 0},
+ 	/*
+ 	 * The l4_sp_clk feeds a 100 MHz clock to various peripherals, one of them
+ 	 * being the SP timers, so it cannot be gated.
+ 	 */
+-	{ AGILEX_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x24,
+-	  3, 0x44, 16, 2, 0, 0, 0},
+-	{ AGILEX_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x24,
+-	  4, 0x44, 24, 2, 0, 0, 0},
+-	{ AGILEX_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x24,
+-	  4, 0x44, 26, 2, 0, 0, 0},
++	{ AGILEX_L4_SP_CLK, "l4_sp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), CLK_IS_CRITICAL, 0x24,
++	  3, 0x44, 16, 2, 0x30, 1, 0},
++	{ AGILEX_CS_AT_CLK, "cs_at_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
++	  4, 0x44, 24, 2, 0x30, 1, 0},
++	{ AGILEX_CS_TRACE_CLK, "cs_trace_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
++	  4, 0x44, 26, 2, 0x30, 1, 0},
+ 	{ AGILEX_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x24,
+ 	  4, 0x44, 28, 1, 0, 0, 0},
+-	{ AGILEX_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x24,
+-	  5, 0, 0, 0, 0, 0, 0},
++	{ AGILEX_CS_TIMER_CLK, "cs_timer_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
++	  5, 0, 0, 0, 0x30, 1, 0},
+ 	{ AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x24,
+ 	  6, 0, 0, 0, 0, 0, 0},
+ 	{ AGILEX_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
+@@ -278,16 +311,16 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
+ 	  1, 0, 0, 0, 0x94, 27, 0},
+ 	{ AGILEX_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
+ 	  2, 0, 0, 0, 0x94, 28, 0},
+-	{ AGILEX_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0x7C,
+-	  3, 0, 0, 0, 0, 0, 0},
+-	{ AGILEX_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0x7C,
+-	  4, 0x98, 0, 16, 0, 0, 0},
+-	{ AGILEX_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0x7C,
+-	  5, 0, 0, 0, 0, 0, 4},
+-	{ AGILEX_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0x7C,
+-	  6, 0, 0, 0, 0, 0, 0},
+-	{ AGILEX_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0x7C,
+-	  7, 0, 0, 0, 0, 0, 0},
++	{ AGILEX_EMAC_PTP_CLK, "emac_ptp_clk", NULL, emac_ptp_mux, ARRAY_SIZE(emac_ptp_mux), 0, 0x7C,
++	  3, 0, 0, 0, 0x88, 2, 0},
++	{ AGILEX_GPIO_DB_CLK, "gpio_db_clk", NULL, gpio_db_mux, ARRAY_SIZE(gpio_db_mux), 0, 0x7C,
++	  4, 0x98, 0, 16, 0x88, 3, 0},
++	{ AGILEX_SDMMC_CLK, "sdmmc_clk", NULL, sdmmc_mux, ARRAY_SIZE(sdmmc_mux), 0, 0x7C,
++	  5, 0, 0, 0, 0x88, 4, 4},
++	{ AGILEX_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, ARRAY_SIZE(s2f_user1_mux), 0, 0x7C,
++	  6, 0, 0, 0, 0x88, 5, 0},
++	{ AGILEX_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, ARRAY_SIZE(psi_mux), 0, 0x7C,
++	  7, 0, 0, 0, 0x88, 6, 0},
+ 	{ AGILEX_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0x7C,
+ 	  8, 0, 0, 0, 0, 0, 0},
+ 	{ AGILEX_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0x7C,
+diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
+index 0ff2b9d240353..44f3aebe1182e 100644
+--- a/drivers/clk/socfpga/clk-periph-s10.c
++++ b/drivers/clk/socfpga/clk-periph-s10.c
+@@ -64,16 +64,21 @@ static u8 clk_periclk_get_parent(struct clk_hw *hwclk)
+ {
+ 	struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
+ 	u32 clk_src, mask;
+-	u8 parent;
++	u8 parent = 0;
+ 
++	/* handle the bypass first */
+ 	if (socfpgaclk->bypass_reg) {
+ 		mask = (0x1 << socfpgaclk->bypass_shift);
+ 		parent = ((readl(socfpgaclk->bypass_reg) & mask) >>
+ 			   socfpgaclk->bypass_shift);
+-	} else {
++		if (parent)
++			return parent;
++	}
++
++	if (socfpgaclk->hw.reg) {
+ 		clk_src = readl(socfpgaclk->hw.reg);
+ 		parent = (clk_src >> CLK_MGR_FREE_SHIFT) &
+-			CLK_MGR_FREE_MASK;
++			  CLK_MGR_FREE_MASK;
+ 	}
+ 	return parent;
+ }
+diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
+index 661a8e9bfb9bd..aaf69058b1dca 100644
+--- a/drivers/clk/socfpga/clk-s10.c
++++ b/drivers/clk/socfpga/clk-s10.c
+@@ -144,6 +144,41 @@ static const struct clk_parent_data mpu_free_mux[] = {
+ 	  .name = "f2s-free-clk", },
+ };
+ 
++static const struct clk_parent_data sdmmc_mux[] = {
++	{ .fw_name = "sdmmc_free_clk",
++	  .name = "sdmmc_free_clk", },
++	{ .fw_name = "boot_clk",
++	  .name = "boot_clk", },
++};
++
++static const struct clk_parent_data s2f_user1_mux[] = {
++	{ .fw_name = "s2f_user1_free_clk",
++	  .name = "s2f_user1_free_clk", },
++	{ .fw_name = "boot_clk",
++	  .name = "boot_clk", },
++};
++
++static const struct clk_parent_data psi_mux[] = {
++	{ .fw_name = "psi_ref_free_clk",
++	  .name = "psi_ref_free_clk", },
++	{ .fw_name = "boot_clk",
++	  .name = "boot_clk", },
++};
++
++static const struct clk_parent_data gpio_db_mux[] = {
++	{ .fw_name = "gpio_db_free_clk",
++	  .name = "gpio_db_free_clk", },
++	{ .fw_name = "boot_clk",
++	  .name = "boot_clk", },
++};
++
++static const struct clk_parent_data emac_ptp_mux[] = {
++	{ .fw_name = "emac_ptp_free_clk",
++	  .name = "emac_ptp_free_clk", },
++	{ .fw_name = "boot_clk",
++	  .name = "boot_clk", },
++};
++
+ /* clocks in AO (always on) controller */
+ static const struct stratix10_pll_clock s10_pll_clks[] = {
+ 	{ STRATIX10_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
+@@ -167,7 +202,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
+ 	{ STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
+ 	   0, 0x48, 0, 0, 0},
+ 	{ STRATIX10_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
+-	  0, 0x4C, 0, 0, 0},
++	  0, 0x4C, 0, 0x3C, 1},
+ 	{ STRATIX10_MAIN_EMACA_CLK, "main_emaca_clk", "main_noc_base_clk", NULL, 1, 0,
+ 	  0x50, 0, 0, 0},
+ 	{ STRATIX10_MAIN_EMACB_CLK, "main_emacb_clk", "main_noc_base_clk", NULL, 1, 0,
+@@ -200,10 +235,8 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
+ 	  0, 0xD4, 0, 0, 0},
+ 	{ STRATIX10_PERI_PSI_REF_CLK, "peri_psi_ref_clk", "peri_noc_base_clk", NULL, 1, 0,
+ 	  0xD8, 0, 0, 0},
+-	{ STRATIX10_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0,
+-	  0, 4, 0, 0},
+-	{ STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
+-	  0, 0, 0, 0x3C, 1},
++	{ STRATIX10_L4_SYS_FREE_CLK, "l4_sys_free_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0,
++	  0, 4, 0x3C, 1},
+ 	{ STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
+ 	  0, 0, 2, 0xB0, 0},
+ 	{ STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
+@@ -227,20 +260,20 @@ static const struct stratix10_gate_clock s10_gate_clks[] = {
+ 	  0, 0, 0, 0, 0, 0, 4},
+ 	{ STRATIX10_MPU_L2RAM_CLK, "mpu_l2ram_clk", "mpu_clk", NULL, 1, 0, 0x30,
+ 	  0, 0, 0, 0, 0, 0, 2},
+-	{ STRATIX10_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x30,
+-	  1, 0x70, 0, 2, 0, 0, 0},
+-	{ STRATIX10_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x30,
+-	  2, 0x70, 8, 2, 0, 0, 0},
+-	{ STRATIX10_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x30,
+-	  3, 0x70, 16, 2, 0, 0, 0},
+-	{ STRATIX10_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x30,
+-	  4, 0x70, 24, 2, 0, 0, 0},
+-	{ STRATIX10_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x30,
+-	  4, 0x70, 26, 2, 0, 0, 0},
++	{ STRATIX10_L4_MAIN_CLK, "l4_main_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
++	  1, 0x70, 0, 2, 0x3C, 1, 0},
++	{ STRATIX10_L4_MP_CLK, "l4_mp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
++	  2, 0x70, 8, 2, 0x3C, 1, 0},
++	{ STRATIX10_L4_SP_CLK, "l4_sp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), CLK_IS_CRITICAL, 0x30,
++	  3, 0x70, 16, 2, 0x3C, 1, 0},
++	{ STRATIX10_CS_AT_CLK, "cs_at_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
++	  4, 0x70, 24, 2, 0x3C, 1, 0},
++	{ STRATIX10_CS_TRACE_CLK, "cs_trace_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
++	  4, 0x70, 26, 2, 0x3C, 1, 0},
+ 	{ STRATIX10_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x30,
+ 	  4, 0x70, 28, 1, 0, 0, 0},
+-	{ STRATIX10_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x30,
+-	  5, 0, 0, 0, 0, 0, 0},
++	{ STRATIX10_CS_TIMER_CLK, "cs_timer_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
++	  5, 0, 0, 0, 0x3C, 1, 0},
+ 	{ STRATIX10_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x30,
+ 	  6, 0, 0, 0, 0, 0, 0},
+ 	{ STRATIX10_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
+@@ -249,16 +282,16 @@ static const struct stratix10_gate_clock s10_gate_clks[] = {
+ 	  1, 0, 0, 0, 0xDC, 27, 0},
+ 	{ STRATIX10_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
+ 	  2, 0, 0, 0, 0xDC, 28, 0},
+-	{ STRATIX10_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0xA4,
+-	  3, 0, 0, 0, 0, 0, 0},
+-	{ STRATIX10_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0xA4,
+-	  4, 0xE0, 0, 16, 0, 0, 0},
+-	{ STRATIX10_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0xA4,
+-	  5, 0, 0, 0, 0, 0, 4},
+-	{ STRATIX10_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0xA4,
+-	  6, 0, 0, 0, 0, 0, 0},
+-	{ STRATIX10_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0xA4,
+-	  7, 0, 0, 0, 0, 0, 0},
++	{ STRATIX10_EMAC_PTP_CLK, "emac_ptp_clk", NULL, emac_ptp_mux, ARRAY_SIZE(emac_ptp_mux), 0, 0xA4,
++	  3, 0, 0, 0, 0xB0, 2, 0},
++	{ STRATIX10_GPIO_DB_CLK, "gpio_db_clk", NULL, gpio_db_mux, ARRAY_SIZE(gpio_db_mux), 0, 0xA4,
++	  4, 0xE0, 0, 16, 0xB0, 3, 0},
++	{ STRATIX10_SDMMC_CLK, "sdmmc_clk", NULL, sdmmc_mux, ARRAY_SIZE(sdmmc_mux), 0, 0xA4,
++	  5, 0, 0, 0, 0xB0, 4, 4},
++	{ STRATIX10_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, ARRAY_SIZE(s2f_user1_mux), 0, 0xA4,
++	  6, 0, 0, 0, 0xB0, 5, 0},
++	{ STRATIX10_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, ARRAY_SIZE(psi_mux), 0, 0xA4,
++	  7, 0, 0, 0, 0xB0, 6, 0},
+ 	{ STRATIX10_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0xA4,
+ 	  8, 0, 0, 0, 0, 0, 0},
+ 	{ STRATIX10_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0xA4,
+diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
+index 16dbf83d2f62a..a33688b2359e5 100644
+--- a/drivers/clk/tegra/clk-tegra30.c
++++ b/drivers/clk/tegra/clk-tegra30.c
+@@ -1245,7 +1245,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
+ 	{ TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0 },
+ 	{ TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
+ 	{ TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
+-	{ TEGRA30_CLK_VDE, TEGRA30_CLK_PLL_C, 600000000, 0 },
++	{ TEGRA30_CLK_VDE, TEGRA30_CLK_PLL_C, 300000000, 0 },
+ 	{ TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ 	{ TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ 	{ TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
+index 33eeabf9c3d12..e5c631f1b5cbe 100644
+--- a/drivers/clocksource/timer-ti-dm.c
++++ b/drivers/clocksource/timer-ti-dm.c
+@@ -78,6 +78,9 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
+ 
+ static void omap_timer_restore_context(struct omap_dm_timer *timer)
+ {
++	__omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET,
++			      timer->context.ocp_cfg, 0);
++
+ 	omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
+ 				timer->context.twer);
+ 	omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
+@@ -95,6 +98,9 @@ static void omap_timer_restore_context(struct omap_dm_timer *timer)
+ 
+ static void omap_timer_save_context(struct omap_dm_timer *timer)
+ {
++	timer->context.ocp_cfg =
++		__omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
++
+ 	timer->context.tclr =
+ 			omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ 	timer->context.twer =
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 1d1b563cea4b8..1bc1293deae9b 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1370,9 +1370,14 @@ static int cpufreq_online(unsigned int cpu)
+ 			goto out_free_policy;
+ 		}
+ 
++		/*
++		 * The initialization has succeeded and the policy is online.
++		 * If there is a problem with its frequency table, take it
++		 * offline and drop it.
++		 */
+ 		ret = cpufreq_table_validate_and_sort(policy);
+ 		if (ret)
+-			goto out_exit_policy;
++			goto out_offline_policy;
+ 
+ 		/* related_cpus should at least include policy->cpus. */
+ 		cpumask_copy(policy->related_cpus, policy->cpus);
+@@ -1518,6 +1523,10 @@ out_destroy_policy:
+ 
+ 	up_write(&policy->rwsem);
+ 
++out_offline_policy:
++	if (cpufreq_driver->offline)
++		cpufreq_driver->offline(policy);
++
+ out_exit_policy:
+ 	if (cpufreq_driver->exit)
+ 		cpufreq_driver->exit(policy);
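
The cpufreq hunk above is an instance of the kernel's stacked-label unwind idiom: teardown labels appear in reverse order of setup, so jumping to the newly added out_offline_policy label also runs the exit and free steps below it. A minimal self-contained sketch of the idiom, with illustrative names rather than the cpufreq ones:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the setup and teardown steps. */
static int do_init(void)      { return 0; }
static int do_online(void)    { return 0; }
static int do_validate(void)  { return -EINVAL; /* simulate failure */ }
static void undo_online(void) { puts("offline"); }
static void undo_init(void)   { puts("exit"); }
static void undo_alloc(void)  { puts("free"); }

static int setup(void)
{
	int ret;

	ret = do_init();
	if (ret)
		goto out_free;

	ret = do_online();
	if (ret)
		goto out_exit;

	ret = do_validate();
	if (ret)
		goto out_offline;

	return 0;

	/*
	 * Teardown labels are stacked in reverse order of setup, so a goto
	 * to any label also runs every teardown step below it.
	 */
out_offline:
	undo_online();
out_exit:
	undo_init();
out_free:
	undo_alloc();
	return ret;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}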
+diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
+index 99b053094f5af..b16689b48f5a5 100644
+--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
+@@ -307,6 +307,10 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
+ 	 * Entry 192: NPS_CORE_INT_ACTIVE
+ 	 */
+ 	nr_vecs = pci_msix_vec_count(pdev);
++	if (nr_vecs < 0) {
++		dev_err(DEV(ndev), "failed to get MSI-X vector count: %d\n", nr_vecs);
++		return nr_vecs;
++	}
+ 
+ 	/* Enable MSI-X */
+ 	ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index 3e0d1d6922bab..6546d3e90d954 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -42,6 +42,10 @@ static int psp_probe_timeout = 5;
+ module_param(psp_probe_timeout, int, 0644);
+ MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+ 
++MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
++MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
++MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
++
+ static bool psp_dead;
+ static int psp_timeout;
+ 
+diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
+index f471dbaef1fbc..7d346d842a39e 100644
+--- a/drivers/crypto/ccp/sp-pci.c
++++ b/drivers/crypto/ccp/sp-pci.c
+@@ -222,7 +222,7 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		if (ret) {
+ 			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
+ 				ret);
+-			goto e_err;
++			goto free_irqs;
+ 		}
+ 	}
+ 
+@@ -230,10 +230,12 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+ 	ret = sp_init(sp);
+ 	if (ret)
+-		goto e_err;
++		goto free_irqs;
+ 
+ 	return 0;
+ 
++free_irqs:
++	sp_free_irqs(sp);
+ e_err:
+ 	dev_notice(dev, "initialization failed\n");
+ 	return ret;
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index 8adcbb3271267..c86b01abd0a61 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -1513,11 +1513,11 @@ static struct skcipher_alg sec_skciphers[] = {
+ 			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+ 
+ 	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
+-			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
++			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ 			 DES3_EDE_BLOCK_SIZE, 0)
+ 
+ 	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
+-			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
++			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ 			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
+ 
+ 	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index 8b0f17fc09fb5..7567456a21a05 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -149,6 +149,8 @@ struct crypt_ctl {
+ struct ablk_ctx {
+ 	struct buffer_desc *src;
+ 	struct buffer_desc *dst;
++	u8 iv[MAX_IVLEN];
++	bool encrypt;
+ };
+ 
+ struct aead_ctx {
+@@ -330,7 +332,7 @@ static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
+ 
+ 		buf1 = buf->next;
+ 		phys1 = buf->phys_next;
+-		dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
++		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
+ 		dma_pool_free(buffer_pool, buf, phys);
+ 		buf = buf1;
+ 		phys = phys1;
+@@ -381,6 +383,20 @@ static void one_packet(dma_addr_t phys)
+ 	case CTL_FLAG_PERFORM_ABLK: {
+ 		struct skcipher_request *req = crypt->data.ablk_req;
+ 		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
++		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
++		unsigned int ivsize = crypto_skcipher_ivsize(tfm);
++		unsigned int offset;
++
++		if (ivsize > 0) {
++			offset = req->cryptlen - ivsize;
++			if (req_ctx->encrypt) {
++				scatterwalk_map_and_copy(req->iv, req->dst,
++							 offset, ivsize, 0);
++			} else {
++				memcpy(req->iv, req_ctx->iv, ivsize);
++				memzero_explicit(req_ctx->iv, ivsize);
++			}
++		}
+ 
+ 		if (req_ctx->dst) {
+ 			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+@@ -876,6 +892,7 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
+ 	struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
+ 	struct buffer_desc src_hook;
+ 	struct device *dev = &pdev->dev;
++	unsigned int offset;
+ 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ 				GFP_KERNEL : GFP_ATOMIC;
+ 
+@@ -885,6 +902,7 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
+ 		return -EAGAIN;
+ 
+ 	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
++	req_ctx->encrypt = encrypt;
+ 
+ 	crypt = get_crypt_desc();
+ 	if (!crypt)
+@@ -900,6 +918,10 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
+ 
+ 	BUG_ON(ivsize && !req->iv);
+ 	memcpy(crypt->iv, req->iv, ivsize);
++	if (ivsize > 0 && !encrypt) {
++		offset = req->cryptlen - ivsize;
++		scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
++	}
+ 	if (req->src != req->dst) {
+ 		struct buffer_desc dst_hook;
+ 		crypt->mode |= NPE_OP_NOT_IN_PLACE;
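
The ixp4xx changes above implement the skcipher contract that req->iv holds the chaining value once the request completes: for CBC that is the last ciphertext block, which an in-place decryption would otherwise destroy, hence the snapshot into req_ctx->iv before the engine runs. A toy, runnable sketch of that ordering (the XOR "cipher" and all names are illustrative, not the driver's API):

#include <stdio.h>
#include <string.h>

#define BLK 16

/*
 * Toy block "cipher" (XOR with a constant) wrapped in real CBC chaining.
 * It stands in for the NPE: a one-shot, in-place operation that consumes
 * the IV but does not report the chaining value back.
 */
static void hw_cbc(int encrypt, unsigned char *buf, size_t len,
		   const unsigned char *iv)
{
	unsigned char prev[BLK], ct[BLK];

	memcpy(prev, iv, BLK);
	for (size_t off = 0; off < len; off += BLK) {
		if (encrypt) {
			for (int i = 0; i < BLK; i++)
				buf[off + i] = (buf[off + i] ^ prev[i]) ^ 0x5a;
			memcpy(prev, buf + off, BLK);
		} else {
			memcpy(ct, buf + off, BLK);
			for (int i = 0; i < BLK; i++)
				buf[off + i] = (buf[off + i] ^ 0x5a) ^ prev[i];
			memcpy(prev, ct, BLK);
		}
	}
}

/* Driver-level rule: the caller's iv must hold the chaining value after. */
static void cbc_request(int encrypt, unsigned char *buf, size_t len,
			unsigned char *iv)
{
	unsigned char last_ct[BLK];

	if (!encrypt)
		/* In-place decryption destroys the ciphertext, so snapshot
		 * the final block (the next IV) before the engine runs. */
		memcpy(last_ct, buf + len - BLK, BLK);

	hw_cbc(encrypt, buf, len, iv);

	memcpy(iv, encrypt ? buf + len - BLK : last_ct, BLK);
}

int main(void)
{
	unsigned char iv[BLK] = { 1 }, buf[2 * BLK] = "two blocks of text";

	cbc_request(1, buf, sizeof(buf), iv);
	printf("next IV starts 0x%02x\n", iv[0]);
	return 0;
}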
+diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
+index cc8dd3072b8b7..9b2417ebc95a0 100644
+--- a/drivers/crypto/nx/nx-842-pseries.c
++++ b/drivers/crypto/nx/nx-842-pseries.c
+@@ -538,13 +538,15 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
+  * The status field indicates if the device is enabled when the status
+  * is 'okay'.  Otherwise the device driver will be disabled.
+  *
+- * @prop - struct property point containing the maxsyncop for the update
++ * @devdata: struct nx842_devdata to use for dev_info
++ * @prop: struct property pointer containing the maxsyncop for the update
+  *
+  * Returns:
+  *  0 - Device is available
+  *  -ENODEV - Device is not available
+  */
+-static int nx842_OF_upd_status(struct property *prop)
++static int nx842_OF_upd_status(struct nx842_devdata *devdata,
++			       struct property *prop)
+ {
+ 	const char *status = (const char *)prop->value;
+ 
+@@ -758,7 +760,7 @@ static int nx842_OF_upd(struct property *new_prop)
+ 		goto out;
+ 
+ 	/* Perform property updates */
+-	ret = nx842_OF_upd_status(status);
++	ret = nx842_OF_upd_status(new_devdata, status);
+ 	if (ret)
+ 		goto error_out;
+ 
+@@ -1069,6 +1071,7 @@ static const struct vio_device_id nx842_vio_driver_ids[] = {
+ 	{"ibm,compression-v1", "ibm,compression"},
+ 	{"", ""},
+ };
++MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);
+ 
+ static struct vio_driver nx842_vio_driver = {
+ 	.name = KBUILD_MODNAME,
+diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
+index 6d5ce1a66f1ee..02ad26012c665 100644
+--- a/drivers/crypto/nx/nx-aes-ctr.c
++++ b/drivers/crypto/nx/nx-aes-ctr.c
+@@ -118,7 +118,7 @@ static int ctr3686_aes_nx_crypt(struct skcipher_request *req)
+ 	struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
+ 	u8 iv[16];
+ 
+-	memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
++	memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_NONCE_SIZE);
+ 	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
+ 	iv[12] = iv[13] = iv[14] = 0;
+ 	iv[15] = 1;
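
The one-byte constant swap above matters because the RFC 3686 counter block concatenates three differently sized fields, and copying CTR_RFC3686_IV_SIZE (8) bytes of a 4-byte nonce read past it. A small runnable sketch of the layout the fixed code assembles (field sizes per RFC 3686):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTR_RFC3686_NONCE_SIZE	4
#define CTR_RFC3686_IV_SIZE	8

/*
 * RFC 3686 counter block: nonce (4 bytes, fixed per key) || per-request
 * IV (8 bytes) || 32-bit big-endian block counter starting at 1.
 */
static void rfc3686_ctrblk(uint8_t blk[16], const uint8_t *nonce,
			   const uint8_t *iv)
{
	memcpy(blk, nonce, CTR_RFC3686_NONCE_SIZE);	/* 4 bytes, not 8 */
	memcpy(blk + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
	blk[12] = blk[13] = blk[14] = 0;
	blk[15] = 1;
}

int main(void)
{
	uint8_t nonce[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t blk[16];

	rfc3686_ctrblk(blk, nonce, iv);
	for (int i = 0; i < 16; i++)
		printf("%02x", blk[i]);
	printf("\n");
	return 0;
}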
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
+index ae0d320d3c60d..dd53ad9987b0d 100644
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -372,7 +372,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
+ {
+ 	int err;
+ 
+-	err = pm_runtime_get_sync(dd->dev);
++	err = pm_runtime_resume_and_get(dd->dev);
+ 	if (err < 0) {
+ 		dev_err(dd->dev, "failed to get sync: %d\n", err);
+ 		return err;
+@@ -2244,7 +2244,7 @@ static int omap_sham_suspend(struct device *dev)
+ 
+ static int omap_sham_resume(struct device *dev)
+ {
+-	int err = pm_runtime_get_sync(dev);
++	int err = pm_runtime_resume_and_get(dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to get sync: %d\n", err);
+ 		return err;
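
Both omap-sham hunks replace pm_runtime_get_sync() with pm_runtime_resume_and_get(): the older helper leaves the usage counter raised even when the resume fails, so every error path needed a matching pm_runtime_put_noidle(), while the newer one drops the reference itself on failure. A hedged kernel-style sketch (hw_init is a hypothetical caller; this only compiles inside a kernel tree):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int hw_init(struct device *dev)
{
	int err;

	/*
	 * pm_runtime_resume_and_get() drops the usage count itself when
	 * the resume fails, so a bare return is already balanced here.
	 */
	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		return err;

	/* ... program registers ... */

	pm_runtime_put_sync(dev);	/* balance the successful get */
	return 0;
}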
+diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
+index bd3028126cbe6..069f51621f0e8 100644
+--- a/drivers/crypto/qat/qat_common/qat_hal.c
++++ b/drivers/crypto/qat/qat_common/qat_hal.c
+@@ -1417,7 +1417,11 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+ 		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
+ 		return -EINVAL;
+ 	}
+-	qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
++	status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
++	if (status) {
++		pr_err("QAT: failed to read register\n");
++		return status;
++	}
+ 	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
+ 	data16low = 0xffff & data;
+ 	data16hi = 0xffff & (data >> 0x10);
+diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
+index 1fb5fc852f6b8..6d95160e451e5 100644
+--- a/drivers/crypto/qat/qat_common/qat_uclo.c
++++ b/drivers/crypto/qat/qat_common/qat_uclo.c
+@@ -342,7 +342,6 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
+ 	return 0;
+ }
+ 
+-#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
+ static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
+ 				   struct icp_qat_uof_initmem *init_mem)
+ {
+diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
+index a2d3da0ad95f3..d8053789c8828 100644
+--- a/drivers/crypto/qce/skcipher.c
++++ b/drivers/crypto/qce/skcipher.c
+@@ -71,7 +71,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
+ 	struct scatterlist *sg;
+ 	bool diff_dst;
+ 	gfp_t gfp;
+-	int ret;
++	int dst_nents, src_nents, ret;
+ 
+ 	rctx->iv = req->iv;
+ 	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+@@ -122,21 +122,26 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
+ 	sg_mark_end(sg);
+ 	rctx->dst_sg = rctx->dst_tbl.sgl;
+ 
+-	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+-	if (ret < 0)
++	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
++	if (dst_nents < 0) {
++		ret = dst_nents;
+ 		goto error_free;
++	}
+ 
+ 	if (diff_dst) {
+-		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+-		if (ret < 0)
++		src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
++		if (src_nents < 0) {
++			ret = src_nents;
+ 			goto error_unmap_dst;
++		}
+ 		rctx->src_sg = req->src;
+ 	} else {
+ 		rctx->src_sg = rctx->dst_sg;
++		src_nents = dst_nents - 1;
+ 	}
+ 
+-	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
+-			       rctx->dst_sg, rctx->dst_nents,
++	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
++			       rctx->dst_sg, dst_nents,
+ 			       qce_skcipher_done, async_req);
+ 	if (ret)
+ 		goto error_unmap_src;
+diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
+index b0f0502a5bb0f..ba116670ef8c8 100644
+--- a/drivers/crypto/sa2ul.c
++++ b/drivers/crypto/sa2ul.c
+@@ -2275,9 +2275,9 @@ static int sa_dma_init(struct sa_crypto_data *dd)
+ 
+ 	dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
+ 	if (IS_ERR(dd->dma_rx2)) {
+-		dma_release_channel(dd->dma_rx1);
+-		return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
+-				     "Unable to request rx2 DMA channel\n");
++		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
++				    "Unable to request rx2 DMA channel\n");
++		goto err_dma_rx2;
+ 	}
+ 
+ 	dd->dma_tx = dma_request_chan(dd->dev, "tx");
+@@ -2298,28 +2298,31 @@ static int sa_dma_init(struct sa_crypto_data *dd)
+ 	if (ret) {
+ 		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+ 			ret);
+-		return ret;
++		goto err_dma_config;
+ 	}
+ 
+ 	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
+ 	if (ret) {
+ 		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+ 			ret);
+-		return ret;
++		goto err_dma_config;
+ 	}
+ 
+ 	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
+ 	if (ret) {
+ 		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ 			ret);
+-		return ret;
++		goto err_dma_config;
+ 	}
+ 
+ 	return 0;
+ 
++err_dma_config:
++	dma_release_channel(dd->dma_tx);
+ err_dma_tx:
+-	dma_release_channel(dd->dma_rx1);
+ 	dma_release_channel(dd->dma_rx2);
++err_dma_rx2:
++	dma_release_channel(dd->dma_rx1);
+ 
+ 	return ret;
+ }
+@@ -2358,13 +2361,14 @@ static int sa_ul_probe(struct platform_device *pdev)
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
+ 			ret);
++		pm_runtime_disable(dev);
+ 		return ret;
+ 	}
+ 
+ 	sa_init_mem(dev_data);
+ 	ret = sa_dma_init(dev_data);
+ 	if (ret)
+-		goto disable_pm_runtime;
++		goto destroy_dma_pool;
+ 
+ 	spin_lock_init(&dev_data->scid_lock);
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+@@ -2394,9 +2398,9 @@ release_dma:
+ 	dma_release_channel(dev_data->dma_rx1);
+ 	dma_release_channel(dev_data->dma_tx);
+ 
++destroy_dma_pool:
+ 	dma_pool_destroy(dev_data->sc_pool);
+ 
+-disable_pm_runtime:
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 
+diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
+index da284b0ea1b26..243515df609bd 100644
+--- a/drivers/crypto/ux500/hash/hash_core.c
++++ b/drivers/crypto/ux500/hash/hash_core.c
+@@ -1010,6 +1010,7 @@ static int hash_hw_final(struct ahash_request *req)
+ 			goto out;
+ 		}
+ 	} else if (req->nbytes == 0 && ctx->keylen > 0) {
++		ret = -EPERM;
+ 		dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
+ 			__func__);
+ 		goto out;
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 59ba59bea0f54..db1bc8cf92766 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -822,6 +822,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 	if (devfreq->profile->timer < 0
+ 		|| devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
+ 		mutex_unlock(&devfreq->lock);
++		err = -EINVAL;
+ 		goto err_dev;
+ 	}
+ 
+diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
+index b094132bd20b3..fc09324a03e03 100644
+--- a/drivers/devfreq/governor_passive.c
++++ b/drivers/devfreq/governor_passive.c
+@@ -65,7 +65,7 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+ 		dev_pm_opp_put(p_opp);
+ 
+ 		if (IS_ERR(opp))
+-			return PTR_ERR(opp);
++			goto no_required_opp;
+ 
+ 		*freq = dev_pm_opp_get_freq(opp);
+ 		dev_pm_opp_put(opp);
+@@ -73,6 +73,7 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+ 		return 0;
+ 	}
+ 
++no_required_opp:
+ 	/*
+ 	 * Get the OPP table's index of decided frequency by governor
+ 	 * of parent device.
+diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
+index 27d0c4cdc58d5..9b21e45debc22 100644
+--- a/drivers/edac/Kconfig
++++ b/drivers/edac/Kconfig
+@@ -270,7 +270,8 @@ config EDAC_PND2
+ 
+ config EDAC_IGEN6
+ 	tristate "Intel client SoC Integrated MC"
+-	depends on PCI && X86_64 && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG
++	depends on PCI && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG
++	depends on X86_64 && X86_MCE_INTEL
+ 	help
+ 	  Support for error detection and correction on the Intel
+ 	  client SoC Integrated Memory Controller using In-Band ECC IP.
+diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
+index a46da56d6d544..6bd5f88159193 100644
+--- a/drivers/edac/aspeed_edac.c
++++ b/drivers/edac/aspeed_edac.c
+@@ -254,8 +254,8 @@ static int init_csrows(struct mem_ctl_info *mci)
+ 		return rc;
+ 	}
+ 
+-	dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
+-		r.start, resource_size(&r), PAGE_SHIFT);
++	dev_dbg(mci->pdev, "dt: /memory node resources: first page %pR, PAGE_SHIFT macro=0x%x\n",
++		&r, PAGE_SHIFT);
+ 
+ 	csrow->first_page = r.start >> PAGE_SHIFT;
+ 	nr_pages = resource_size(&r) >> PAGE_SHIFT;
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
+index 238a4ad1e526e..37b4e875420e4 100644
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -278,6 +278,9 @@ static int __init i10nm_init(void)
+ 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ 		return -EBUSY;
+ 
++	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
++		return -ENODEV;
++
+ 	id = x86_match_cpu(i10nm_cpuids);
+ 	if (!id)
+ 		return -ENODEV;
+diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
+index 928f63a374c78..c94ca1f790c43 100644
+--- a/drivers/edac/pnd2_edac.c
++++ b/drivers/edac/pnd2_edac.c
+@@ -1554,6 +1554,9 @@ static int __init pnd2_init(void)
+ 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ 		return -EBUSY;
+ 
++	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
++		return -ENODEV;
++
+ 	id = x86_match_cpu(pnd2_cpuids);
+ 	if (!id)
+ 		return -ENODEV;
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 93daa4297f2e0..4c626fcd4dcbb 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -3510,6 +3510,9 @@ static int __init sbridge_init(void)
+ 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ 		return -EBUSY;
+ 
++	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
++		return -ENODEV;
++
+ 	id = x86_match_cpu(sbridge_cpuids);
+ 	if (!id)
+ 		return -ENODEV;
+diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
+index 6a4f0b27c6545..4dbd46575bfb4 100644
+--- a/drivers/edac/skx_base.c
++++ b/drivers/edac/skx_base.c
+@@ -656,6 +656,9 @@ static int __init skx_init(void)
+ 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ 		return -EBUSY;
+ 
++	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
++		return -ENODEV;
++
+ 	id = x86_match_cpu(skx_cpuids);
+ 	if (!id)
+ 		return -ENODEV;
+diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
+index e7eae20f83d1d..169f96e51c293 100644
+--- a/drivers/edac/ti_edac.c
++++ b/drivers/edac/ti_edac.c
+@@ -197,6 +197,7 @@ static const struct of_device_id ti_edac_of_match[] = {
+ 	{ .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, ti_edac_of_match);
+ 
+ static int _emif_get_id(struct device_node *node)
+ {
+diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
+index 337b0eea4e629..64008808675ef 100644
+--- a/drivers/extcon/extcon-max8997.c
++++ b/drivers/extcon/extcon-max8997.c
+@@ -729,7 +729,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
+ 				2, info->status);
+ 	if (ret) {
+ 		dev_err(info->dev, "failed to read MUIC register\n");
+-		return ret;
++		goto err_irq;
+ 	}
+ 	cable_type = max8997_muic_get_cable_type(info,
+ 					   MAX8997_CABLE_GROUP_ADC, &attached);
+@@ -784,3 +784,4 @@ module_platform_driver(max8997_muic_driver);
+ MODULE_DESCRIPTION("Maxim MAX8997 Extcon driver");
+ MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:max8997-muic");
+diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
+index 106d4da647bd9..5e0718dee03bc 100644
+--- a/drivers/extcon/extcon-sm5502.c
++++ b/drivers/extcon/extcon-sm5502.c
+@@ -88,7 +88,6 @@ static struct reg_data sm5502_reg_data[] = {
+ 			| SM5502_REG_INTM2_MHL_MASK,
+ 		.invert = true,
+ 	},
+-	{ }
+ };
+ 
+ /* List of detectable cables */
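
Removing the { } terminator above is safe only because every user of sm5502_reg_data iterates with ARRAY_SIZE(); under an ARRAY_SIZE()-bounded loop a sentinel is not inert padding but an extra all-zero element that gets processed, here as a spurious write to register 0. A short self-contained illustration:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct reg_data { unsigned int reg, val; };

static const struct reg_data regs[] = {
	{ 0x02, 0x1f },
	{ 0x0a, 0x03 },
	/*
	 * No { } terminator: the loop below is bounded by ARRAY_SIZE(),
	 * so a sentinel would be visited as a bogus write to register 0.
	 */
};

int main(void)
{
	for (size_t i = 0; i < ARRAY_SIZE(regs); i++)
		printf("write 0x%02x = 0x%02x\n", regs[i].reg, regs[i].val);
	return 0;
}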
+diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
+index 3aa489dba30a7..2a7687911c097 100644
+--- a/drivers/firmware/stratix10-svc.c
++++ b/drivers/firmware/stratix10-svc.c
+@@ -1034,24 +1034,32 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 
+ 	/* add svc client device(s) */
+ 	svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
+-	if (!svc)
+-		return -ENOMEM;
++	if (!svc) {
++		ret = -ENOMEM;
++		goto err_free_kfifo;
++	}
+ 
+ 	svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
+ 	if (!svc->stratix10_svc_rsu) {
+ 		dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto err_free_kfifo;
+ 	}
+ 
+ 	ret = platform_device_add(svc->stratix10_svc_rsu);
+-	if (ret) {
+-		platform_device_put(svc->stratix10_svc_rsu);
+-		return ret;
+-	}
++	if (ret)
++		goto err_put_device;
++
+ 	dev_set_drvdata(dev, svc);
+ 
+ 	pr_info("Intel Service Layer Driver Initialized\n");
+ 
++	return 0;
++
++err_put_device:
++	platform_device_put(svc->stratix10_svc_rsu);
++err_free_kfifo:
++	kfifo_free(&controller->svc_fifo);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
+index 4e60e84cd17a5..59ddc9fd5bca4 100644
+--- a/drivers/fsi/fsi-core.c
++++ b/drivers/fsi/fsi-core.c
+@@ -724,7 +724,7 @@ static ssize_t cfam_read(struct file *filep, char __user *buf, size_t count,
+ 	rc = count;
+  fail:
+ 	*offset = off;
+-	return count;
++	return rc;
+ }
+ 
+ static ssize_t cfam_write(struct file *filep, const char __user *buf,
+@@ -761,7 +761,7 @@ static ssize_t cfam_write(struct file *filep, const char __user *buf,
+ 	rc = count;
+  fail:
+ 	*offset = off;
+-	return count;
++	return rc;
+ }
+ 
+ static loff_t cfam_llseek(struct file *file, loff_t offset, int whence)
+diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
+index 10ca2e290655b..cb05b6dacc9d5 100644
+--- a/drivers/fsi/fsi-occ.c
++++ b/drivers/fsi/fsi-occ.c
+@@ -495,6 +495,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
+ 			goto done;
+ 
+ 		if (resp->return_status == OCC_RESP_CMD_IN_PRG ||
++		    resp->return_status == OCC_RESP_CRIT_INIT ||
+ 		    resp->seq_no != seq_no) {
+ 			rc = -ETIMEDOUT;
+ 
+diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
+index bfd5e5da80209..84cb965bfed5c 100644
+--- a/drivers/fsi/fsi-sbefifo.c
++++ b/drivers/fsi/fsi-sbefifo.c
+@@ -325,7 +325,8 @@ static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
+ static int sbefifo_request_reset(struct sbefifo *sbefifo)
+ {
+ 	struct device *dev = &sbefifo->fsi_dev->dev;
+-	u32 status, timeout;
++	unsigned long end_time;
++	u32 status;
+ 	int rc;
+ 
+ 	dev_dbg(dev, "Requesting FIFO reset\n");
+@@ -341,7 +342,8 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
+ 	}
+ 
+ 	/* Wait for it to complete */
+-	for (timeout = 0; timeout < SBEFIFO_RESET_TIMEOUT; timeout++) {
++	end_time = jiffies + msecs_to_jiffies(SBEFIFO_RESET_TIMEOUT);
++	while (!time_after(jiffies, end_time)) {
+ 		rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
+ 		if (rc) {
+ 			dev_err(dev, "Failed to read UP fifo status during reset"
+@@ -355,7 +357,7 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
+ 			return 0;
+ 		}
+ 
+-		msleep(1);
++		cond_resched();
+ 	}
+ 	dev_err(dev, "FIFO reset timed out\n");
+ 
+@@ -400,7 +402,7 @@ static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
+ 	/* The FIFO already contains a reset request from the SBE ? */
+ 	if (down_status & SBEFIFO_STS_RESET_REQ) {
+ 		dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
+-		rc = sbefifo_regw(sbefifo, SBEFIFO_UP, SBEFIFO_PERFORM_RESET);
++		rc = sbefifo_regw(sbefifo, SBEFIFO_DOWN, SBEFIFO_PERFORM_RESET);
+ 		if (rc) {
+ 			sbefifo->broken = true;
+ 			dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
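
The sbefifo reset wait above is converted from counting iterations of msleep(1) to a jiffies deadline: an iteration count scales with how long each pass happens to take, whereas jiffies plus msecs_to_jiffies() bounds wall-clock time directly. A kernel-style sketch of the idiom (hw_reset_done() is a hypothetical poll; not compilable outside a kernel tree):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

#define RESET_TIMEOUT_MS	10000

static int wait_for_reset(void)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(RESET_TIMEOUT_MS);

	/* Bound by wall-clock time, not by a count of loop iterations. */
	while (!time_after(jiffies, end_time)) {
		if (hw_reset_done())	/* hypothetical status poll */
			return 0;
		cond_resched();		/* yield instead of a fixed msleep() */
	}
	return -ETIMEDOUT;
}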
+diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
+index b45bfab7b7f55..75d1389e2626d 100644
+--- a/drivers/fsi/fsi-scom.c
++++ b/drivers/fsi/fsi-scom.c
+@@ -38,9 +38,10 @@
+ #define SCOM_STATUS_PIB_RESP_MASK	0x00007000
+ #define SCOM_STATUS_PIB_RESP_SHIFT	12
+ 
+-#define SCOM_STATUS_ANY_ERR		(SCOM_STATUS_PROTECTION | \
+-					 SCOM_STATUS_PARITY |	  \
+-					 SCOM_STATUS_PIB_ABORT | \
++#define SCOM_STATUS_FSI2PIB_ERROR	(SCOM_STATUS_PROTECTION |	\
++					 SCOM_STATUS_PARITY |		\
++					 SCOM_STATUS_PIB_ABORT)
++#define SCOM_STATUS_ANY_ERR		(SCOM_STATUS_FSI2PIB_ERROR |	\
+ 					 SCOM_STATUS_PIB_RESP_MASK)
+ /* SCOM address encodings */
+ #define XSCOM_ADDR_IND_FLAG		BIT_ULL(63)
+@@ -240,13 +241,14 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
+ {
+ 	uint32_t dummy = -1;
+ 
+-	if (status & SCOM_STATUS_PROTECTION)
+-		return -EPERM;
+-	if (status & SCOM_STATUS_PARITY) {
++	if (status & SCOM_STATUS_FSI2PIB_ERROR)
+ 		fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ 				 sizeof(uint32_t));
++
++	if (status & SCOM_STATUS_PROTECTION)
++		return -EPERM;
++	if (status & SCOM_STATUS_PARITY)
+ 		return -EIO;
+-	}
+ 	/* Return -EBUSY on PIB abort to force a retry */
+ 	if (status & SCOM_STATUS_PIB_ABORT)
+ 		return -EBUSY;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index eed4946305834..0858e0c7b7a1d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -28,6 +28,7 @@
+ 
+ #include "dm_services_types.h"
+ #include "dc.h"
++#include "dc_link_dp.h"
+ #include "dc/inc/core_types.h"
+ #include "dal_asic_id.h"
+ #include "dmub/dmub_srv.h"
+@@ -2598,6 +2599,7 @@ static void handle_hpd_rx_irq(void *param)
+ 	enum dc_connection_type new_connection_type = dc_connection_none;
+ 	struct amdgpu_device *adev = drm_to_adev(dev);
+ 	union hpd_irq_data hpd_irq_data;
++	bool lock_flag = false;
+ 
+ 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+ 
+@@ -2624,13 +2626,28 @@ static void handle_hpd_rx_irq(void *param)
+ 		}
+ 	}
+ 
+-	mutex_lock(&adev->dm.dc_lock);
++	/*
++	 * TODO: We need the lock to avoid touching DC state while it's being
++	 * modified during automated compliance testing, or when link loss
++	 * happens. While this should be split into subhandlers and proper
++	 * interfaces to avoid having to conditionally lock like this in the
++	 * outer layer, we need this workaround temporarily to allow MST
++	 * lightup in some scenarios to avoid timeout.
++	 */
++	if (!amdgpu_in_reset(adev) &&
++	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
++	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
++		mutex_lock(&adev->dm.dc_lock);
++		lock_flag = 1;
++	}
++
+ #ifdef CONFIG_DRM_AMD_DC_HDCP
+ 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
+ #else
+ 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
+ #endif
+-	mutex_unlock(&adev->dm.dc_lock);
++	if (!amdgpu_in_reset(adev) && lock_flag)
++		mutex_unlock(&adev->dm.dc_lock);
+ 
+ out:
+ 	if (result && !is_mst_root_connector) {
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 41b09ab22233a..b478129a74774 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -270,6 +270,9 @@ dm_dp_mst_detect(struct drm_connector *connector,
+ 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ 	struct amdgpu_dm_connector *master = aconnector->mst_port;
+ 
++	if (drm_connector_is_unregistered(connector))
++		return connector_status_disconnected;
++
+ 	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ 				      aconnector->port);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index c1391bfb7a9bc..b85f67341a9a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1918,7 +1918,7 @@ enum dc_status read_hpd_rx_irq_data(
+ 	return retval;
+ }
+ 
+-static bool hpd_rx_irq_check_link_loss_status(
++bool hpd_rx_irq_check_link_loss_status(
+ 	struct dc_link *link,
+ 	union hpd_irq_data *hpd_irq_dpcd_data)
+ {
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+index b970a32177aff..28abd30e90a58 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+@@ -63,6 +63,10 @@ bool perform_link_training_with_retries(
+ 	struct pipe_ctx *pipe_ctx,
+ 	enum signal_type signal);
+ 
++bool hpd_rx_irq_check_link_loss_status(
++	struct dc_link *link,
++	union hpd_irq_data *hpd_irq_dpcd_data);
++
+ bool is_mst_supported(struct dc_link *link);
+ 
+ bool detect_dp_sink_caps(struct dc_link *link);
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index 0ac3c2039c4b1..c29cc7f19863a 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -413,7 +413,7 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
+ 
+ 	pci_set_drvdata(pdev, dev);
+ 
+-	ast->regs = pci_iomap(pdev, 1, 0);
++	ast->regs = pcim_iomap(pdev, 1, 0);
+ 	if (!ast->regs)
+ 		return ERR_PTR(-EIO);
+ 
+@@ -429,7 +429,7 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
+ 
+ 	/* "map" IO regs if the above hasn't done so already */
+ 	if (!ast->ioregs) {
+-		ast->ioregs = pci_iomap(pdev, 2, 0);
++		ast->ioregs = pcim_iomap(pdev, 2, 0);
+ 		if (!ast->ioregs)
+ 			return ERR_PTR(-EIO);
+ 	}
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index bc60fc4728d70..8d5bae9e745b2 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -143,7 +143,7 @@ config DRM_SIL_SII8620
+ 	tristate "Silicon Image SII8620 HDMI/MHL bridge"
+ 	depends on OF
+ 	select DRM_KMS_HELPER
+-	imply EXTCON
++	select EXTCON
+ 	depends on RC_CORE || !RC_CORE
+ 	help
+ 	  Silicon Image SII8620 HDMI/MHL bridge chip driver.
+diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
+index 64f0effb52ac1..044acd07c1538 100644
+--- a/drivers/gpu/drm/drm_bridge.c
++++ b/drivers/gpu/drm/drm_bridge.c
+@@ -522,6 +522,9 @@ void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
+ 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ 		if (iter->funcs->pre_enable)
+ 			iter->funcs->pre_enable(iter);
++
++		if (iter == bridge)
++			break;
+ 	}
+ }
+ EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
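
The drm_bridge fix bounds the reverse walk of the encoder's bridge chain: pre_enable now runs from the display end of the chain back to the given bridge, inclusive, instead of unconditionally covering every bridge. A tiny runnable model of that bounded reverse iteration (plain indices stand in for the kernel's linked list):

#include <stdio.h>

#define CHAIN_LEN 4

static void pre_enable(int idx) { printf("pre_enable bridge %d\n", idx); }

/*
 * Walk from the display end of the chain back toward the requested
 * bridge, inclusive, then stop; bridges closer to the encoder than
 * `from` are someone else's responsibility.
 */
static void chain_pre_enable(int from)
{
	for (int i = CHAIN_LEN - 1; i >= 0; i--) {
		pre_enable(i);
		if (i == from)
			break;
	}
}

int main(void)
{
	chain_pre_enable(2);	/* enables bridges 3 and 2 only */
	return 0;
}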
+diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
+index 075508051b5fb..8c08c8b36074e 100644
+--- a/drivers/gpu/drm/imx/ipuv3-plane.c
++++ b/drivers/gpu/drm/imx/ipuv3-plane.c
+@@ -35,7 +35,7 @@ static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
+ 	return container_of(p, struct ipu_plane, base);
+ }
+ 
+-static const uint32_t ipu_plane_formats[] = {
++static const uint32_t ipu_plane_all_formats[] = {
+ 	DRM_FORMAT_ARGB1555,
+ 	DRM_FORMAT_XRGB1555,
+ 	DRM_FORMAT_ABGR1555,
+@@ -72,6 +72,31 @@ static const uint32_t ipu_plane_formats[] = {
+ 	DRM_FORMAT_BGRX8888_A8,
+ };
+ 
++static const uint32_t ipu_plane_rgb_formats[] = {
++	DRM_FORMAT_ARGB1555,
++	DRM_FORMAT_XRGB1555,
++	DRM_FORMAT_ABGR1555,
++	DRM_FORMAT_XBGR1555,
++	DRM_FORMAT_RGBA5551,
++	DRM_FORMAT_BGRA5551,
++	DRM_FORMAT_ARGB4444,
++	DRM_FORMAT_ARGB8888,
++	DRM_FORMAT_XRGB8888,
++	DRM_FORMAT_ABGR8888,
++	DRM_FORMAT_XBGR8888,
++	DRM_FORMAT_RGBA8888,
++	DRM_FORMAT_RGBX8888,
++	DRM_FORMAT_BGRA8888,
++	DRM_FORMAT_BGRX8888,
++	DRM_FORMAT_RGB565,
++	DRM_FORMAT_RGB565_A8,
++	DRM_FORMAT_BGR565_A8,
++	DRM_FORMAT_RGB888_A8,
++	DRM_FORMAT_BGR888_A8,
++	DRM_FORMAT_RGBX8888_A8,
++	DRM_FORMAT_BGRX8888_A8,
++};
++
+ static const uint64_t ipu_format_modifiers[] = {
+ 	DRM_FORMAT_MOD_LINEAR,
+ 	DRM_FORMAT_MOD_INVALID
+@@ -320,10 +345,11 @@ static bool ipu_plane_format_mod_supported(struct drm_plane *plane,
+ 	if (modifier == DRM_FORMAT_MOD_LINEAR)
+ 		return true;
+ 
+-	/* without a PRG there are no supported modifiers */
+-	if (!ipu_prg_present(ipu))
+-		return false;
+-
++	/*
++	 * Without a PRG the possible modifiers list only includes the linear
++	 * modifier, so we always take the early return from this function and
++	 * only end up here if the PRG is present.
++	 */
+ 	return ipu_prg_format_supported(ipu, format, modifier);
+ }
+ 
+@@ -822,16 +848,28 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
+ 	struct ipu_plane *ipu_plane;
+ 	const uint64_t *modifiers = ipu_format_modifiers;
+ 	unsigned int zpos = (type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
++	unsigned int format_count;
++	const uint32_t *formats;
+ 	int ret;
+ 
+ 	DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
+ 		      dma, dp, possible_crtcs);
+ 
++	if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG) {
++		formats = ipu_plane_all_formats;
++		format_count = ARRAY_SIZE(ipu_plane_all_formats);
++	} else {
++		formats = ipu_plane_rgb_formats;
++		format_count = ARRAY_SIZE(ipu_plane_rgb_formats);
++	}
++
++	if (ipu_prg_present(ipu))
++		modifiers = pre_format_modifiers;
++
+ 	ipu_plane = drmm_universal_plane_alloc(dev, struct ipu_plane, base,
+ 					       possible_crtcs, &ipu_plane_funcs,
+-					       ipu_plane_formats,
+-					       ARRAY_SIZE(ipu_plane_formats),
+-					       modifiers, type, NULL);
++					       formats, format_count, modifiers,
++					       type, NULL);
+ 	if (IS_ERR(ipu_plane)) {
+ 		DRM_ERROR("failed to allocate and initialize %s plane\n",
+ 			  zpos ? "overlay" : "primary");
+@@ -842,9 +880,6 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
+ 	ipu_plane->dma = dma;
+ 	ipu_plane->dp_flow = dp;
+ 
+-	if (ipu_prg_present(ipu))
+-		modifiers = pre_format_modifiers;
+-
+ 	drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+ 
+ 	if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+index 3416e9617ee9a..96f3908e4c5b9 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+@@ -222,7 +222,7 @@ int dpu_mdss_init(struct drm_device *dev)
+ 	struct msm_drm_private *priv = dev->dev_private;
+ 	struct dpu_mdss *dpu_mdss;
+ 	struct dss_module_power *mp;
+-	int ret = 0;
++	int ret;
+ 	int irq;
+ 
+ 	dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+@@ -250,8 +250,10 @@ int dpu_mdss_init(struct drm_device *dev)
+ 		goto irq_domain_error;
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq < 0)
++	if (irq < 0) {
++		ret = irq;
+ 		goto irq_error;
++	}
+ 
+ 	irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
+ 					 dpu_mdss);
+@@ -260,7 +262,7 @@ int dpu_mdss_init(struct drm_device *dev)
+ 
+ 	pm_runtime_enable(dev->dev);
+ 
+-	return ret;
++	return 0;
+ 
+ irq_error:
+ 	_dpu_mdss_irq_domain_fini(dpu_mdss);
+diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
+index b1a9b1b98f5f6..f4f53f23e331e 100644
+--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
++++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
+@@ -582,10 +582,9 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
+ 
+ 	u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+ 
+-	/* enable HPD interrupts */
++	/* enable HPD plug and unplug interrupts */
+ 	dp_catalog_hpd_config_intr(dp_catalog,
+-		DP_DP_HPD_PLUG_INT_MASK | DP_DP_IRQ_HPD_INT_MASK
+-		| DP_DP_HPD_UNPLUG_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
++		DP_DP_HPD_PLUG_INT_MASK | DP_DP_HPD_UNPLUG_INT_MASK, true);
+ 
+ 	/* Configure REFTIMER and enable it */
+ 	reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index 1390f3547fde4..2a8955ca70d1a 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -1809,6 +1809,61 @@ end:
+ 	return ret;
+ }
+ 
++int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
++{
++	struct dp_ctrl_private *ctrl;
++	struct dp_io *dp_io;
++	struct phy *phy;
++	int ret;
++
++	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
++	dp_io = &ctrl->parser->io;
++	phy = dp_io->phy;
++
++	/* set dongle to D3 (power off) mode */
++	dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
++
++	dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
++
++	ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
++	if (ret) {
++		DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret);
++		return ret;
++	}
++
++	ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
++	if (ret) {
++		DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
++		return ret;
++	}
++
++	phy_power_off(phy);
++
++	/* aux channel down, reinit phy */
++	phy_exit(phy);
++	phy_init(phy);
++
++	DRM_DEBUG_DP("DP off link/stream done\n");
++	return ret;
++}
++
++void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl)
++{
++	struct dp_ctrl_private *ctrl;
++	struct dp_io *dp_io;
++	struct phy *phy;
++
++	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
++	dp_io = &ctrl->parser->io;
++	phy = dp_io->phy;
++
++	dp_catalog_ctrl_reset(ctrl->catalog);
++
++	phy_exit(phy);
++
++	DRM_DEBUG_DP("DP off phy done\n");
++}
++
+ int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+ {
+ 	struct dp_ctrl_private *ctrl;
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
+index a836bd358447c..25e4f75122522 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
+@@ -23,6 +23,8 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
+ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
+ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
+ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
++int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
++void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl);
+ int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
+ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
+ void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 1784e119269b7..cdec0a367a2cb 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -346,6 +346,12 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
+ 	dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
+ 	dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
+ 
++	/*
++	 * set sink to normal operation mode -- D0
++	 * before dpcd read
++	 */
++	dp_link_psm_config(dp->link, &dp->panel->link_info, false);
++
+ 	dp_link_reset_phy_params_vx_px(dp->link);
+ 	rc = dp_ctrl_on_link(dp->ctrl);
+ 	if (rc) {
+@@ -414,11 +420,6 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
+ 
+ 	dp_display_host_init(dp, false);
+ 
+-	/*
+-	 * set sink to normal operation mode -- D0
+-	 * before dpcd read
+-	 */
+-	dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+ 	rc = dp_display_process_hpd_high(dp);
+ end:
+ 	return rc;
+@@ -579,6 +580,10 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+ 		dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
+ 	}
+ 
++	/* enable HPD irq_hpd/replug interrupts */
++	dp_catalog_hpd_config_intr(dp->catalog,
++		DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
++
+ 	mutex_unlock(&dp->event_mutex);
+ 
+ 	/* uevent will complete connection part */
+@@ -628,7 +633,26 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ 	mutex_lock(&dp->event_mutex);
+ 
+ 	state = dp->hpd_state;
+-	if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) {
++
++	/* disable irq_hpd/replug interrupts */
++	dp_catalog_hpd_config_intr(dp->catalog,
++		DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, false);
++
++	/* unplugged, no more irq_hpd handle */
++	dp_del_event(dp, EV_IRQ_HPD_INT);
++
++	if (state == ST_DISCONNECTED) {
++		/* triggered by irq_hpd with sink_count = 0 */
++		if (dp->link->sink_count == 0) {
++			dp_ctrl_off_phy(dp->ctrl);
++			hpd->hpd_high = 0;
++			dp->core_initialized = false;
++		}
++		mutex_unlock(&dp->event_mutex);
++		return 0;
++	}
++
++	if (state == ST_DISCONNECT_PENDING) {
+ 		mutex_unlock(&dp->event_mutex);
+ 		return 0;
+ 	}
+@@ -642,9 +666,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ 
+ 	dp->hpd_state = ST_DISCONNECT_PENDING;
+ 
+-	/* disable HPD plug interrupt until disconnect is done */
+-	dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK
+-				| DP_DP_IRQ_HPD_INT_MASK, false);
++	/* disable HPD plug interrupts */
++	dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
+ 
+ 	hpd->hpd_high = 0;
+ 
+@@ -660,8 +683,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ 	/* signal the disconnect event early to ensure proper teardown */
+ 	dp_display_handle_plugged_change(g_dp_display, false);
+ 
+-	dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
+-					DP_DP_IRQ_HPD_INT_MASK, true);
++	/* enable HPD plug interrupt to prepare for the next plug-in */
++	dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
+ 
+ 	/* uevent will complete disconnection part */
+ 	mutex_unlock(&dp->event_mutex);
+@@ -692,7 +715,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
+ 
+ 	/* irq_hpd can happen at either connected or disconnected state */
+ 	state =  dp->hpd_state;
+-	if (state == ST_DISPLAY_OFF) {
++	if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+ 		mutex_unlock(&dp->event_mutex);
+ 		return 0;
+ 	}
+@@ -910,9 +933,13 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
+ 
+ 	dp_display->audio_enabled = false;
+ 
+-	dp_ctrl_off(dp->ctrl);
+-
+-	dp->core_initialized = false;
++	/* triggered by irq_hpd with sink_count = 0 */
++	if (dp->link->sink_count == 0) {
++		dp_ctrl_off_link_stream(dp->ctrl);
++	} else {
++		dp_ctrl_off(dp->ctrl);
++		dp->core_initialized = false;
++	}
+ 
+ 	dp_display->power_on = false;
+ 
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 18ea1c66de718..f206c53c27e0e 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -521,6 +521,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 		priv->event_thread[i].worker = kthread_create_worker(0,
+ 			"crtc_event:%d", priv->event_thread[i].crtc_id);
+ 		if (IS_ERR(priv->event_thread[i].worker)) {
++			ret = PTR_ERR(priv->event_thread[i].worker);
+ 			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+ 			goto err_msm_uninit;
+ 		}
+diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
+index 80f6748055e36..3aae387a96af2 100644
+--- a/drivers/gpu/drm/pl111/Kconfig
++++ b/drivers/gpu/drm/pl111/Kconfig
+@@ -3,6 +3,7 @@ config DRM_PL111
+ 	tristate "DRM Support for PL111 CLCD Controller"
+ 	depends on DRM
+ 	depends on ARM || ARM64 || COMPILE_TEST
++	depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
+ 	depends on COMMON_CLK
+ 	select DRM_KMS_HELPER
+ 	select DRM_KMS_CMA_HELPER
+diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
+index c04cd5a2553ce..e377bdbff90dd 100644
+--- a/drivers/gpu/drm/qxl/qxl_dumb.c
++++ b/drivers/gpu/drm/qxl/qxl_dumb.c
+@@ -58,6 +58,8 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
+ 	surf.height = args->height;
+ 	surf.stride = pitch;
+ 	surf.format = format;
++	surf.data = 0;
++
+ 	r = qxl_gem_object_create_with_handle(qdev, file_priv,
+ 					      QXL_GEM_DOMAIN_SURFACE,
+ 					      args->size, &surf, &qobj,
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index a4a45daf93f2b..6802d9b65f828 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -73,6 +73,7 @@ static int cdn_dp_grf_write(struct cdn_dp_device *dp,
+ 	ret = regmap_write(dp->grf, reg, val);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
++		clk_disable_unprepare(dp->grf_clk);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+index 9d2163ef4d6e2..33fb4d05c5065 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+@@ -658,7 +658,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
+ 	 */
+ 	do {
+ 		tu_size_reg += 2;
+-		symbol = tu_size_reg * mode->clock * bit_per_pix;
++		symbol = (u64)tu_size_reg * mode->clock * bit_per_pix;
+ 		do_div(symbol, dp->max_lanes * link_rate * 8);
+ 		rem = do_div(symbol, 1000);
+ 		if (tu_size_reg > 64) {
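
The cdn-dp cast above is a classic integer-promotion fix: with three 32-bit operands the product is computed in 32 bits and only then widened for do_div(), so high bits are silently lost for large pixel clocks; casting one operand to u64 forces the whole multiply into 64 bits. A runnable demonstration with values in the overflowing range:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 8K-class pixel clock in kHz, 10-bit colour: product > 2^32 */
	uint32_t tu_size_reg = 66, clock = 2376000, bit_per_pix = 30;

	uint64_t wrong = tu_size_reg * clock * bit_per_pix;
	uint64_t right = (uint64_t)tu_size_reg * clock * bit_per_pix;

	/* wrong wraps at 32 bits before the widening assignment */
	printf("wrong=%" PRIu64 " right=%" PRIu64 "\n", wrong, right);
	return 0;
}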
+diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+index 24a71091759cc..d8c47ee3cad37 100644
+--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
++++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+@@ -692,13 +692,8 @@ static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
+ 	.get_timing = dw_mipi_dsi_phy_get_timing,
+ };
+ 
+-static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
+-					int mux)
++static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi)
+ {
+-	if (dsi->cdata->lcdsel_grf_reg)
+-		regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
+-			mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
+-
+ 	if (dsi->cdata->lanecfg1_grf_reg)
+ 		regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
+ 					      dsi->cdata->lanecfg1);
+@@ -712,6 +707,13 @@ static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
+ 					      dsi->cdata->enable);
+ }
+ 
++static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi,
++					    int mux)
++{
++	regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
++		mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
++}
++
+ static int
+ dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
+ 				 struct drm_crtc_state *crtc_state,
+@@ -767,9 +769,9 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
+ 		return;
+ 	}
+ 
+-	dw_mipi_dsi_rockchip_config(dsi, mux);
++	dw_mipi_dsi_rockchip_set_lcdsel(dsi, mux);
+ 	if (dsi->slave)
+-		dw_mipi_dsi_rockchip_config(dsi->slave, mux);
++		dw_mipi_dsi_rockchip_set_lcdsel(dsi->slave, mux);
+ 
+ 	clk_disable_unprepare(dsi->grf_clk);
+ }
+@@ -923,6 +925,24 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
+ 		return ret;
+ 	}
+ 
++	/*
++	 * With the GRF clock running, write lane and dual-mode configurations
++	 * that won't change immediately. If we waited until enable() to do
++	 * this, things like panel preparation would not be able to send
++	 * commands over DSI.
++	 */
++	ret = clk_prepare_enable(dsi->grf_clk);
++	if (ret) {
++		DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
++		return ret;
++	}
++
++	dw_mipi_dsi_rockchip_config(dsi);
++	if (dsi->slave)
++		dw_mipi_dsi_rockchip_config(dsi->slave);
++
++	clk_disable_unprepare(dsi->grf_clk);
++
+ 	ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index 8d15cabdcb02a..2d10198044c29 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -1013,6 +1013,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
+ 		VOP_WIN_SET(vop, win, alpha_en, 1);
+ 	} else {
+ 		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
++		VOP_WIN_SET(vop, win, alpha_en, 0);
+ 	}
+ 
+ 	VOP_WIN_SET(vop, win, enable, 1);
+diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+index 654bc52d9ff39..1a7f24c1ce495 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+@@ -499,11 +499,11 @@ static int px30_lvds_probe(struct platform_device *pdev,
+ 	if (IS_ERR(lvds->dphy))
+ 		return PTR_ERR(lvds->dphy);
+ 
+-	phy_init(lvds->dphy);
++	ret = phy_init(lvds->dphy);
+ 	if (ret)
+ 		return ret;
+ 
+-	phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
++	ret = phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index 76657dcdf9b00..1f36b67cd6ce9 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -279,14 +279,22 @@ static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc,
+  * allows drivers to push pixels to more than one encoder from the
+  * same CRTC.
+  */
+-static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
++static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
++						struct drm_atomic_state *state,
++						struct drm_connector_state *(*get_state)(struct drm_atomic_state *state,
++											 struct drm_connector *connector))
+ {
+ 	struct drm_connector *connector;
+ 	struct drm_connector_list_iter conn_iter;
+ 
+ 	drm_connector_list_iter_begin(crtc->dev, &conn_iter);
+ 	drm_for_each_connector_iter(connector, &conn_iter) {
+-		if (connector->state->crtc == crtc) {
++		struct drm_connector_state *conn_state = get_state(state, connector);
++
++		if (!conn_state)
++			continue;
++
++		if (conn_state->crtc == crtc) {
+ 			drm_connector_list_iter_end(&conn_iter);
+ 			return connector->encoder;
+ 		}
+@@ -305,16 +313,17 @@ static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc)
+ 	CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR);
+ }
+ 
+-static void vc4_crtc_config_pv(struct drm_crtc *crtc)
++static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_atomic_state *state)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+-	struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
++	struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
++							   drm_atomic_get_new_connector_state);
+ 	struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ 	const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
+-	struct drm_crtc_state *state = crtc->state;
+-	struct drm_display_mode *mode = &state->adjusted_mode;
++	struct drm_crtc_state *crtc_state = crtc->state;
++	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ 	bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ 	u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
+ 	bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
+@@ -421,10 +430,10 @@ static void require_hvs_enabled(struct drm_device *dev)
+ }
+ 
+ static int vc4_crtc_disable(struct drm_crtc *crtc,
++			    struct drm_encoder *encoder,
+ 			    struct drm_atomic_state *state,
+ 			    unsigned int channel)
+ {
+-	struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
+ 	struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ 	struct drm_device *dev = crtc->dev;
+@@ -465,10 +474,29 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
+ 	return 0;
+ }
+ 
++static struct drm_encoder *vc4_crtc_get_encoder_by_type(struct drm_crtc *crtc,
++							enum vc4_encoder_type type)
++{
++	struct drm_encoder *encoder;
++
++	drm_for_each_encoder(encoder, crtc->dev) {
++		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
++
++		if (vc4_encoder->type == type)
++			return encoder;
++	}
++
++	return NULL;
++}
++
+ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
+ {
+ 	struct drm_device *drm = crtc->dev;
+ 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
++	enum vc4_encoder_type encoder_type;
++	const struct vc4_pv_data *pv_data;
++	struct drm_encoder *encoder;
++	unsigned int encoder_sel;
+ 	int channel;
+ 
+ 	if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
+@@ -487,7 +515,17 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
+ 	if (channel < 0)
+ 		return 0;
+ 
+-	return vc4_crtc_disable(crtc, NULL, channel);
++	encoder_sel = VC4_GET_FIELD(CRTC_READ(PV_CONTROL), PV_CONTROL_CLK_SELECT);
++	if (WARN_ON(encoder_sel != 0))
++		return 0;
++
++	pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
++	encoder_type = pv_data->encoder_types[encoder_sel];
++	encoder = vc4_crtc_get_encoder_by_type(crtc, encoder_type);
++	if (WARN_ON(!encoder))
++		return 0;
++
++	return vc4_crtc_disable(crtc, encoder, NULL, channel);
+ }
+ 
+ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
+@@ -496,6 +534,8 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
+ 	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ 									 crtc);
+ 	struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
++	struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
++							   drm_atomic_get_old_connector_state);
+ 	struct drm_device *dev = crtc->dev;
+ 
+ 	require_hvs_enabled(dev);
+@@ -503,7 +543,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
+ 	/* Disable vblank irq handling before crtc is disabled. */
+ 	drm_crtc_vblank_off(crtc);
+ 
+-	vc4_crtc_disable(crtc, state, old_vc4_state->assigned_channel);
++	vc4_crtc_disable(crtc, encoder, state, old_vc4_state->assigned_channel);
+ 
+ 	/*
+ 	 * Make sure we issue a vblank event after disabling the CRTC if
+@@ -524,7 +564,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+-	struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
++	struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
++							   drm_atomic_get_new_connector_state);
+ 	struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ 
+ 	require_hvs_enabled(dev);
+@@ -539,7 +580,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
+ 	if (vc4_encoder->pre_crtc_configure)
+ 		vc4_encoder->pre_crtc_configure(encoder, state);
+ 
+-	vc4_crtc_config_pv(crtc);
++	vc4_crtc_config_pv(crtc, state);
+ 
+ 	CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
+ 
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 8106b5634fe10..e94730beb15b7 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -2000,7 +2000,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+ 							     &hpd_gpio_flags);
+ 		if (vc4_hdmi->hpd_gpio < 0) {
+ 			ret = vc4_hdmi->hpd_gpio;
+-			goto err_unprepare_hsm;
++			goto err_put_ddc;
+ 		}
+ 
+ 		vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
+@@ -2041,8 +2041,8 @@ err_destroy_conn:
+ 	vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
+ err_destroy_encoder:
+ 	drm_encoder_cleanup(encoder);
+-err_unprepare_hsm:
+ 	pm_runtime_disable(dev);
++err_put_ddc:
+ 	put_device(&vc4_hdmi->ddc->dev);
+ 
+ 	return ret;
+diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+index 4db25bd9fa22d..127eaf0a0a580 100644
+--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
++++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+@@ -1467,6 +1467,7 @@ struct svga3dsurface_cache {
+ 
+ /**
+  * struct svga3dsurface_loc - Surface location
++ * @sheet: The multisample sheet.
+  * @sub_resource: Surface subresource. Defined as layer * num_mip_levels +
+  * mip_level.
+  * @x: X coordinate.
+@@ -1474,6 +1475,7 @@ struct svga3dsurface_cache {
+  * @z: Z coordinate.
+  */
+ struct svga3dsurface_loc {
++	u32 sheet;
+ 	u32 sub_resource;
+ 	u32 x, y, z;
+ };
+@@ -1566,8 +1568,8 @@ svga3dsurface_get_loc(const struct svga3dsurface_cache *cache,
+ 	u32 layer;
+ 	int i;
+ 
+-	if (offset >= cache->sheet_bytes)
+-		offset %= cache->sheet_bytes;
++	loc->sheet = offset / cache->sheet_bytes;
++	offset -= loc->sheet * cache->sheet_bytes;
+ 
+ 	layer = offset / cache->mip_chain_bytes;
+ 	offset -= layer * cache->mip_chain_bytes;
+@@ -1631,6 +1633,7 @@ svga3dsurface_min_loc(const struct svga3dsurface_cache *cache,
+ 		      u32 sub_resource,
+ 		      struct svga3dsurface_loc *loc)
+ {
++	loc->sheet = 0;
+ 	loc->sub_resource = sub_resource;
+ 	loc->x = loc->y = loc->z = 0;
+ }
+@@ -1652,6 +1655,7 @@ svga3dsurface_max_loc(const struct svga3dsurface_cache *cache,
+ 	const struct drm_vmw_size *size;
+ 	u32 mip;
+ 
++	loc->sheet = 0;
+ 	loc->sub_resource = sub_resource + 1;
+ 	mip = sub_resource % cache->num_mip_levels;
+ 	size = &cache->mip[mip].size;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 462f173207085..0996c3282ebd6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -2759,12 +2759,24 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
+ {
+ 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
+ 		container_of(header, typeof(*cmd), header);
+-	struct vmw_resource *ret;
++	struct vmw_resource *view;
++	struct vmw_res_cache_entry *rcache;
+ 
+-	ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
+-				  cmd->body.shaderResourceViewId);
++	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
++				   cmd->body.shaderResourceViewId);
++	if (IS_ERR(view))
++		return PTR_ERR(view);
+ 
+-	return PTR_ERR_OR_ZERO(ret);
++	/*
++	 * Normally the shader-resource view is not gpu-dirtying, but for
++	 * this particular command it is...
++	 * So mark the last looked-up surface, which is the surface
++	 * the view points to, gpu-dirty.
++	 */
++	rcache = &sw_context->res_cache[vmw_res_surface];
++	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
++				     VMW_RES_DIRTY_SET);
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index f6cab77075a04..9905232172788 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -1801,6 +1801,19 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
+ 	svga3dsurface_get_loc(cache, &loc2, end - 1);
+ 	svga3dsurface_inc_loc(cache, &loc2);
+ 
++	if (loc1.sheet != loc2.sheet) {
++		u32 sub_res;
++
++		/*
++		 * The dirty range spans multiple multisample sheets. An
++		 * optimized implementation would compute the dirty region
++		 * per sheet and take the union, but since this is not a
++		 * common case, just dirty the whole surface instead.
++		 */
++		for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
++			vmw_subres_dirty_full(dirty, sub_res);
++		return;
++	}
+ 	if (loc1.sub_resource + 1 == loc2.sub_resource) {
+ 		/* Dirty range covers a single sub-resource */
+ 		vmw_subres_dirty_add(dirty, &loc1, &loc2);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 0f69f35f2957e..5550c943f9855 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2306,12 +2306,8 @@ static int hid_device_remove(struct device *dev)
+ {
+ 	struct hid_device *hdev = to_hid_device(dev);
+ 	struct hid_driver *hdrv;
+-	int ret = 0;
+ 
+-	if (down_interruptible(&hdev->driver_input_lock)) {
+-		ret = -EINTR;
+-		goto end;
+-	}
++	down(&hdev->driver_input_lock);
+ 	hdev->io_started = false;
+ 
+ 	hdrv = hdev->driver;
+@@ -2326,8 +2322,8 @@ static int hid_device_remove(struct device *dev)
+ 
+ 	if (!hdev->io_started)
+ 		up(&hdev->driver_input_lock);
+-end:
+-	return ret;
++
++	return 0;
+ }
+ 
+ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
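The switch from down_interruptible() to a plain down() encodes a rule about removal paths: hid_device_remove() is called by the driver core, which has no meaningful way to handle failure, so aborting on a signal would leave the device half-unbound while the core proceeds as if the remove had succeeded. A minimal kernel-style sketch of the shape (example_dev and its fields are illustrative):

#include <linux/semaphore.h>

struct example_dev {
	struct semaphore lock;	/* mirrors hdev->driver_input_lock */
	bool bound;
};

static int example_remove(struct example_dev *edev)
{
	/*
	 * Sleep uninterruptibly: remove must run to completion, so a
	 * pending signal must not be able to abort the lock acquisition.
	 */
	down(&edev->lock);
	edev->bound = false;
	up(&edev->lock);
	return 0;	/* the return value is effectively ignored anyway */
}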
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 03978111d9448..06168f4857225 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -397,6 +397,7 @@
+ #define USB_DEVICE_ID_HP_X2_10_COVER	0x0755
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_15	0x2817
+ #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN	0x2706
++#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN	0x261A
+ 
+ #define USB_VENDOR_ID_ELECOM		0x056e
+ #define USB_DEVICE_ID_ELECOM_BM084	0x0061
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index e982d8173c9c7..bf5e728258c16 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -326,6 +326,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{}
+ };
+ 
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index 8319b0ce385a5..b3722c51ec78a 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -597,9 +597,8 @@ struct sony_sc {
+ 	/* DS4 calibration data */
+ 	struct ds4_calibration_data ds4_calib_data[6];
+ 	/* GH Live */
++	struct urb *ghl_urb;
+ 	struct timer_list ghl_poke_timer;
+-	struct usb_ctrlrequest *ghl_cr;
+-	u8 *ghl_databuf;
+ };
+ 
+ static void sony_set_leds(struct sony_sc *sc);
+@@ -625,66 +624,54 @@ static inline void sony_schedule_work(struct sony_sc *sc,
+ 
+ static void ghl_magic_poke_cb(struct urb *urb)
+ {
+-	if (urb) {
+-		/* Free sc->ghl_cr and sc->ghl_databuf allocated in
+-		 * ghl_magic_poke()
+-		 */
+-		kfree(urb->setup_packet);
+-		kfree(urb->transfer_buffer);
+-	}
++	struct sony_sc *sc = urb->context;
++
++	if (urb->status < 0)
++		hid_err(sc->hdev, "URB transfer failed: %d\n", urb->status);
++
++	mod_timer(&sc->ghl_poke_timer, jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
+ }
+ 
+ static void ghl_magic_poke(struct timer_list *t)
+ {
++	int ret;
+ 	struct sony_sc *sc = from_timer(sc, t, ghl_poke_timer);
+ 
+-	int ret;
++	ret = usb_submit_urb(sc->ghl_urb, GFP_ATOMIC);
++	if (ret < 0)
++		hid_err(sc->hdev, "usb_submit_urb failed: %d", ret);
++}
++
++static int ghl_init_urb(struct sony_sc *sc, struct usb_device *usbdev)
++{
++	struct usb_ctrlrequest *cr;
++	u16 poke_size;
++	u8 *databuf;
+ 	unsigned int pipe;
+-	struct urb *urb;
+-	struct usb_device *usbdev = to_usb_device(sc->hdev->dev.parent->parent);
+-	const u16 poke_size =
+-		ARRAY_SIZE(ghl_ps3wiiu_magic_data);
+ 
++	poke_size = ARRAY_SIZE(ghl_ps3wiiu_magic_data);
+ 	pipe = usb_sndctrlpipe(usbdev, 0);
+ 
+-	if (!sc->ghl_cr) {
+-		sc->ghl_cr = kzalloc(sizeof(*sc->ghl_cr), GFP_ATOMIC);
+-		if (!sc->ghl_cr)
+-			goto resched;
+-	}
+-
+-	if (!sc->ghl_databuf) {
+-		sc->ghl_databuf = kzalloc(poke_size, GFP_ATOMIC);
+-		if (!sc->ghl_databuf)
+-			goto resched;
+-	}
++	cr = devm_kzalloc(&sc->hdev->dev, sizeof(*cr), GFP_ATOMIC);
++	if (cr == NULL)
++		return -ENOMEM;
+ 
+-	urb = usb_alloc_urb(0, GFP_ATOMIC);
+-	if (!urb)
+-		goto resched;
++	databuf = devm_kzalloc(&sc->hdev->dev, poke_size, GFP_ATOMIC);
++	if (databuf == NULL)
++		return -ENOMEM;
+ 
+-	sc->ghl_cr->bRequestType =
++	cr->bRequestType =
+ 		USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT;
+-	sc->ghl_cr->bRequest = USB_REQ_SET_CONFIGURATION;
+-	sc->ghl_cr->wValue = cpu_to_le16(ghl_ps3wiiu_magic_value);
+-	sc->ghl_cr->wIndex = 0;
+-	sc->ghl_cr->wLength = cpu_to_le16(poke_size);
+-	memcpy(sc->ghl_databuf, ghl_ps3wiiu_magic_data, poke_size);
+-
++	cr->bRequest = USB_REQ_SET_CONFIGURATION;
++	cr->wValue = cpu_to_le16(ghl_ps3wiiu_magic_value);
++	cr->wIndex = 0;
++	cr->wLength = cpu_to_le16(poke_size);
++	memcpy(databuf, ghl_ps3wiiu_magic_data, poke_size);
+ 	usb_fill_control_urb(
+-		urb, usbdev, pipe,
+-		(unsigned char *) sc->ghl_cr, sc->ghl_databuf,
+-		poke_size, ghl_magic_poke_cb, NULL);
+-	ret = usb_submit_urb(urb, GFP_ATOMIC);
+-	if (ret < 0) {
+-		kfree(sc->ghl_databuf);
+-		kfree(sc->ghl_cr);
+-	}
+-	usb_free_urb(urb);
+-
+-resched:
+-	/* Reschedule for next time */
+-	mod_timer(&sc->ghl_poke_timer, jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
++		sc->ghl_urb, usbdev, pipe,
++		(unsigned char *) cr, databuf, poke_size,
++		ghl_magic_poke_cb, sc);
++	return 0;
+ }
+ 
+ static int guitar_mapping(struct hid_device *hdev, struct hid_input *hi,
+@@ -2981,6 +2968,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	int ret;
+ 	unsigned long quirks = id->driver_data;
+ 	struct sony_sc *sc;
++	struct usb_device *usbdev;
+ 	unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ 
+ 	if (!strcmp(hdev->name, "FutureMax Dance Mat"))
+@@ -3000,6 +2988,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	sc->quirks = quirks;
+ 	hid_set_drvdata(hdev, sc);
+ 	sc->hdev = hdev;
++	usbdev = to_usb_device(sc->hdev->dev.parent->parent);
+ 
+ 	ret = hid_parse(hdev);
+ 	if (ret) {
+@@ -3042,6 +3031,15 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	}
+ 
+ 	if (sc->quirks & GHL_GUITAR_PS3WIIU) {
++		sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
++		if (!sc->ghl_urb)
++			return -ENOMEM;
++		ret = ghl_init_urb(sc, usbdev);
++		if (ret) {
++			hid_err(hdev, "error preparing URB\n");
++			return ret;
++		}
++
+ 		timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
+ 		mod_timer(&sc->ghl_poke_timer,
+ 			  jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
+@@ -3054,8 +3052,10 @@ static void sony_remove(struct hid_device *hdev)
+ {
+ 	struct sony_sc *sc = hid_get_drvdata(hdev);
+ 
+-	if (sc->quirks & GHL_GUITAR_PS3WIIU)
++	if (sc->quirks & GHL_GUITAR_PS3WIIU) {
+ 		del_timer_sync(&sc->ghl_poke_timer);
++		usb_free_urb(sc->ghl_urb);
++	}
+ 
+ 	hid_hw_close(hdev);
+ 
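The restructuring above is the common pattern for a periodic USB control transfer: allocate the URB once at probe, let a timer submit it, and let the completion callback re-arm the timer, instead of allocating and freeing the setup packet and payload on every expiry in atomic context. A condensed sketch of the lifetime pairing (the names mirror the driver, the glue is illustrative):

static int ghl_setup(struct sony_sc *sc)
{
	/* probe runs in process context, so GFP_KERNEL would also do */
	sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!sc->ghl_urb)
		return -ENOMEM;

	/*
	 * Setup packet and payload are devm-allocated in ghl_init_urb(),
	 * so the driver core frees them together with the device.
	 */
	timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
	mod_timer(&sc->ghl_poke_timer, jiffies + GHL_GUITAR_POKE_INTERVAL * HZ);
	return 0;
}

static void ghl_teardown(struct sony_sc *sc)
{
	del_timer_sync(&sc->ghl_poke_timer);	/* stop further submissions */
	usb_free_urb(sc->ghl_urb);		/* drops our reference; the USB
						 * core holds its own while a
						 * transfer is still in flight */
}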
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index 195910dd2154e..e3835407e8d23 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -122,7 +122,7 @@
+ #define WACOM_HID_WD_TOUCHONOFF         (WACOM_HID_UP_WACOMDIGITIZER | 0x0454)
+ #define WACOM_HID_WD_BATTERY_LEVEL      (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
+ #define WACOM_HID_WD_EXPRESSKEY00       (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
+-#define WACOM_HID_WD_EXPRESSKEYCAP00    (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
++#define WACOM_HID_WD_EXPRESSKEYCAP00    (WACOM_HID_UP_WACOMDIGITIZER | 0x0940)
+ #define WACOM_HID_WD_MODE_CHANGE        (WACOM_HID_UP_WACOMDIGITIZER | 0x0980)
+ #define WACOM_HID_WD_MUTE_DEVICE        (WACOM_HID_UP_WACOMDIGITIZER | 0x0981)
+ #define WACOM_HID_WD_CONTROLPANEL       (WACOM_HID_UP_WACOMDIGITIZER | 0x0982)
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index c83612cddb995..425bf85ed1a03 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -229,8 +229,10 @@ int vmbus_connect(void)
+ 	 */
+ 
+ 	for (i = 0; ; i++) {
+-		if (i == ARRAY_SIZE(vmbus_versions))
++		if (i == ARRAY_SIZE(vmbus_versions)) {
++			ret = -EDOM;
+ 			goto cleanup;
++		}
+ 
+ 		version = vmbus_versions[i];
+ 		if (version > max_version)
+diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
+index e4aefeb330daf..136576cba26f5 100644
+--- a/drivers/hv/hv_util.c
++++ b/drivers/hv/hv_util.c
+@@ -750,8 +750,8 @@ static int hv_timesync_init(struct hv_util_service *srv)
+ 	 */
+ 	hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
+ 	if (IS_ERR_OR_NULL(hv_ptp_clock)) {
+-		pr_err("cannot register PTP clock: %ld\n",
+-		       PTR_ERR(hv_ptp_clock));
++		pr_err("cannot register PTP clock: %d\n",
++		       PTR_ERR_OR_ZERO(hv_ptp_clock));
+ 		hv_ptp_clock = NULL;
+ 	}
+ 
+diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
+index 40eab3349904b..6b884ea009877 100644
+--- a/drivers/hwmon/lm70.c
++++ b/drivers/hwmon/lm70.c
+@@ -22,10 +22,10 @@
+ #include <linux/hwmon.h>
+ #include <linux/mutex.h>
+ #include <linux/mod_devicetable.h>
++#include <linux/of.h>
+ #include <linux/property.h>
+ #include <linux/spi/spi.h>
+ #include <linux/slab.h>
+-#include <linux/acpi.h>
+ 
+ #define DRVNAME		"lm70"
+ 
+@@ -148,29 +148,6 @@ static const struct of_device_id lm70_of_ids[] = {
+ MODULE_DEVICE_TABLE(of, lm70_of_ids);
+ #endif
+ 
+-#ifdef CONFIG_ACPI
+-static const struct acpi_device_id lm70_acpi_ids[] = {
+-	{
+-		.id = "LM000070",
+-		.driver_data = LM70_CHIP_LM70,
+-	},
+-	{
+-		.id = "TMP00121",
+-		.driver_data = LM70_CHIP_TMP121,
+-	},
+-	{
+-		.id = "LM000071",
+-		.driver_data = LM70_CHIP_LM71,
+-	},
+-	{
+-		.id = "LM000074",
+-		.driver_data = LM70_CHIP_LM74,
+-	},
+-	{},
+-};
+-MODULE_DEVICE_TABLE(acpi, lm70_acpi_ids);
+-#endif
+-
+ static int lm70_probe(struct spi_device *spi)
+ {
+ 	struct device *hwmon_dev;
+@@ -217,7 +194,6 @@ static struct spi_driver lm70_driver = {
+ 	.driver = {
+ 		.name	= "lm70",
+ 		.of_match_table	= of_match_ptr(lm70_of_ids),
+-		.acpi_match_table = ACPI_PTR(lm70_acpi_ids),
+ 	},
+ 	.id_table = lm70_ids,
+ 	.probe	= lm70_probe,
+diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
+index 062eceb7be0db..613338cbcb170 100644
+--- a/drivers/hwmon/max31722.c
++++ b/drivers/hwmon/max31722.c
+@@ -6,7 +6,6 @@
+  * Copyright (c) 2016, Intel Corporation.
+  */
+ 
+-#include <linux/acpi.h>
+ #include <linux/hwmon.h>
+ #include <linux/hwmon-sysfs.h>
+ #include <linux/kernel.h>
+@@ -133,20 +132,12 @@ static const struct spi_device_id max31722_spi_id[] = {
+ 	{"max31723", 0},
+ 	{}
+ };
+-
+-static const struct acpi_device_id __maybe_unused max31722_acpi_id[] = {
+-	{"MAX31722", 0},
+-	{"MAX31723", 0},
+-	{}
+-};
+-
+ MODULE_DEVICE_TABLE(spi, max31722_spi_id);
+ 
+ static struct spi_driver max31722_driver = {
+ 	.driver = {
+ 		.name = "max31722",
+ 		.pm = &max31722_pm_ops,
+-		.acpi_match_table = ACPI_PTR(max31722_acpi_id),
+ 	},
+ 	.probe =            max31722_probe,
+ 	.remove =           max31722_remove,
+diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
+index 86e6c71db685c..67677c4377687 100644
+--- a/drivers/hwmon/max31790.c
++++ b/drivers/hwmon/max31790.c
+@@ -27,6 +27,7 @@
+ 
+ /* Fan Config register bits */
+ #define MAX31790_FAN_CFG_RPM_MODE	0x80
++#define MAX31790_FAN_CFG_CTRL_MON	0x10
+ #define MAX31790_FAN_CFG_TACH_INPUT_EN	0x08
+ #define MAX31790_FAN_CFG_TACH_INPUT	0x01
+ 
+@@ -104,7 +105,7 @@ static struct max31790_data *max31790_update_device(struct device *dev)
+ 				data->tach[NR_CHANNEL + i] = rv;
+ 			} else {
+ 				rv = i2c_smbus_read_word_swapped(client,
+-						MAX31790_REG_PWMOUT(i));
++						MAX31790_REG_PWM_DUTY_CYCLE(i));
+ 				if (rv < 0)
+ 					goto abort;
+ 				data->pwm[i] = rv;
+@@ -170,7 +171,7 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
+ 
+ 	switch (attr) {
+ 	case hwmon_fan_input:
+-		sr = get_tach_period(data->fan_dynamics[channel]);
++		sr = get_tach_period(data->fan_dynamics[channel % NR_CHANNEL]);
+ 		rpm = RPM_FROM_REG(data->tach[channel], sr);
+ 		*val = rpm;
+ 		return 0;
+@@ -271,12 +272,12 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
+ 		*val = data->pwm[channel] >> 8;
+ 		return 0;
+ 	case hwmon_pwm_enable:
+-		if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
++		if (fan_config & MAX31790_FAN_CFG_CTRL_MON)
++			*val = 0;
++		else if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
+ 			*val = 2;
+-		else if (fan_config & MAX31790_FAN_CFG_TACH_INPUT_EN)
+-			*val = 1;
+ 		else
+-			*val = 0;
++			*val = 1;
+ 		return 0;
+ 	default:
+ 		return -EOPNOTSUPP;
+@@ -299,31 +300,41 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
+ 			err = -EINVAL;
+ 			break;
+ 		}
+-		data->pwm[channel] = val << 8;
++		data->valid = false;
+ 		err = i2c_smbus_write_word_swapped(client,
+ 						   MAX31790_REG_PWMOUT(channel),
+-						   data->pwm[channel]);
++						   val << 8);
+ 		break;
+ 	case hwmon_pwm_enable:
+ 		fan_config = data->fan_config[channel];
+ 		if (val == 0) {
+-			fan_config &= ~(MAX31790_FAN_CFG_TACH_INPUT_EN |
+-					MAX31790_FAN_CFG_RPM_MODE);
++			fan_config |= MAX31790_FAN_CFG_CTRL_MON;
++			/*
++			 * Disable RPM mode; otherwise disabling fan speed
++			 * monitoring is not possible.
++			 */
++			fan_config &= ~MAX31790_FAN_CFG_RPM_MODE;
+ 		} else if (val == 1) {
+-			fan_config = (fan_config |
+-				      MAX31790_FAN_CFG_TACH_INPUT_EN) &
+-				     ~MAX31790_FAN_CFG_RPM_MODE;
++			fan_config &= ~(MAX31790_FAN_CFG_CTRL_MON | MAX31790_FAN_CFG_RPM_MODE);
+ 		} else if (val == 2) {
+-			fan_config |= MAX31790_FAN_CFG_TACH_INPUT_EN |
+-				      MAX31790_FAN_CFG_RPM_MODE;
++			fan_config &= ~MAX31790_FAN_CFG_CTRL_MON;
++			/*
++			 * The chip sets MAX31790_FAN_CFG_TACH_INPUT_EN on its
++			 * own if MAX31790_FAN_CFG_RPM_MODE is set.
++			 * Do it here as well to reflect the actual register
++			 * value in the cache.
++			 */
++			fan_config |= (MAX31790_FAN_CFG_RPM_MODE | MAX31790_FAN_CFG_TACH_INPUT_EN);
+ 		} else {
+ 			err = -EINVAL;
+ 			break;
+ 		}
+-		data->fan_config[channel] = fan_config;
+-		err = i2c_smbus_write_byte_data(client,
+-					MAX31790_REG_FAN_CONFIG(channel),
+-					fan_config);
++		if (fan_config != data->fan_config[channel]) {
++			err = i2c_smbus_write_byte_data(client, MAX31790_REG_FAN_CONFIG(channel),
++							fan_config);
++			if (!err)
++				data->fan_config[channel] = fan_config;
++		}
+ 		break;
+ 	default:
+ 		err = -EOPNOTSUPP;
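The rewritten hwmon_pwm_enable case maps the three sysfs values onto two configuration bits: 0 turns the fan controller off but keeps monitoring possible (CTRL_MON set, RPM mode cleared, since the chip cannot disable speed monitoring while RPM mode is on), 1 selects manual PWM duty-cycle control (both bits clear), and 2 selects closed-loop RPM control. Writing the register only when the value actually changes keeps the cached fan_config in sync with the hardware. A self-contained sketch of the mapping:

#include <stdio.h>

#define CFG_RPM_MODE      0x80
#define CFG_CTRL_MON      0x10
#define CFG_TACH_INPUT_EN 0x08

/* Mirror of the rewritten switch; returns the new config or -1. */
static int apply_pwm_enable(int cfg, int val)
{
	switch (val) {
	case 0:	/* controller off, monitoring only */
		return (cfg | CFG_CTRL_MON) & ~CFG_RPM_MODE;
	case 1:	/* manual PWM duty cycle */
		return cfg & ~(CFG_CTRL_MON | CFG_RPM_MODE);
	case 2:	/* RPM mode; the chip sets TACH_INPUT_EN on its own,
		 * so set it in the cached value too */
		return (cfg & ~CFG_CTRL_MON) | CFG_RPM_MODE | CFG_TACH_INPUT_EN;
	default:
		return -1;
	}
}

int main(void)
{
	int v;

	for (v = 0; v <= 2; v++)
		printf("pwm_enable=%d -> fan_config=0x%02x\n",
		       v, apply_pwm_enable(0, v));
	return 0;
}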
+diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
+index 0062c89356530..237a8c0d6c244 100644
+--- a/drivers/hwtracing/coresight/coresight-core.c
++++ b/drivers/hwtracing/coresight/coresight-core.c
+@@ -595,7 +595,7 @@ static struct coresight_device *
+ coresight_find_enabled_sink(struct coresight_device *csdev)
+ {
+ 	int i;
+-	struct coresight_device *sink;
++	struct coresight_device *sink = NULL;
+ 
+ 	if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ 	     csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
+index 71f85a3e525b2..9b0018874eec6 100644
+--- a/drivers/iio/accel/bma180.c
++++ b/drivers/iio/accel/bma180.c
+@@ -55,7 +55,7 @@ struct bma180_part_info {
+ 
+ 	u8 int_reset_reg, int_reset_mask;
+ 	u8 sleep_reg, sleep_mask;
+-	u8 bw_reg, bw_mask;
++	u8 bw_reg, bw_mask, bw_offset;
+ 	u8 scale_reg, scale_mask;
+ 	u8 power_reg, power_mask, lowpower_val;
+ 	u8 int_enable_reg, int_enable_mask;
+@@ -127,6 +127,7 @@ struct bma180_part_info {
+ 
+ #define BMA250_RANGE_MASK	GENMASK(3, 0) /* Range of accel values */
+ #define BMA250_BW_MASK		GENMASK(4, 0) /* Accel bandwidth */
++#define BMA250_BW_OFFSET	8
+ #define BMA250_SUSPEND_MASK	BIT(7) /* chip will sleep */
+ #define BMA250_LOWPOWER_MASK	BIT(6)
+ #define BMA250_DATA_INTEN_MASK	BIT(4)
+@@ -143,6 +144,7 @@ struct bma180_part_info {
+ 
+ #define BMA254_RANGE_MASK	GENMASK(3, 0) /* Range of accel values */
+ #define BMA254_BW_MASK		GENMASK(4, 0) /* Accel bandwidth */
++#define BMA254_BW_OFFSET	8
+ #define BMA254_SUSPEND_MASK	BIT(7) /* chip will sleep */
+ #define BMA254_LOWPOWER_MASK	BIT(6)
+ #define BMA254_DATA_INTEN_MASK	BIT(4)
+@@ -162,7 +164,11 @@ struct bma180_data {
+ 	int scale;
+ 	int bw;
+ 	bool pmode;
+-	u8 buff[16]; /* 3x 16-bit + 8-bit + padding + timestamp */
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		s16 chan[4];
++		s64 timestamp __aligned(8);
++	} scan;
+ };
+ 
+ enum bma180_chan {
+@@ -283,7 +289,8 @@ static int bma180_set_bw(struct bma180_data *data, int val)
+ 	for (i = 0; i < data->part_info->num_bw; ++i) {
+ 		if (data->part_info->bw_table[i] == val) {
+ 			ret = bma180_set_bits(data, data->part_info->bw_reg,
+-				data->part_info->bw_mask, i);
++				data->part_info->bw_mask,
++				i + data->part_info->bw_offset);
+ 			if (ret) {
+ 				dev_err(&data->client->dev,
+ 					"failed to set bandwidth\n");
+@@ -876,6 +883,7 @@ static const struct bma180_part_info bma180_part_info[] = {
+ 		.sleep_mask = BMA250_SUSPEND_MASK,
+ 		.bw_reg = BMA250_BW_REG,
+ 		.bw_mask = BMA250_BW_MASK,
++		.bw_offset = BMA250_BW_OFFSET,
+ 		.scale_reg = BMA250_RANGE_REG,
+ 		.scale_mask = BMA250_RANGE_MASK,
+ 		.power_reg = BMA250_POWER_REG,
+@@ -905,6 +913,7 @@ static const struct bma180_part_info bma180_part_info[] = {
+ 		.sleep_mask = BMA254_SUSPEND_MASK,
+ 		.bw_reg = BMA254_BW_REG,
+ 		.bw_mask = BMA254_BW_MASK,
++		.bw_offset = BMA254_BW_OFFSET,
+ 		.scale_reg = BMA254_RANGE_REG,
+ 		.scale_mask = BMA254_RANGE_MASK,
+ 		.power_reg = BMA254_POWER_REG,
+@@ -938,12 +947,12 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
+ 			mutex_unlock(&data->mutex);
+ 			goto err;
+ 		}
+-		((s16 *)data->buff)[i++] = ret;
++		data->scan.chan[i++] = ret;
+ 	}
+ 
+ 	mutex_unlock(&data->mutex);
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buff, time_ns);
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns);
+ err:
+ 	iio_trigger_notify_done(indio_dev->trig);
+ 
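This bma180 change is one of roughly a dozen identical conversions in this patch. iio_push_to_buffers_with_timestamp() stores an s64 timestamp in the last eight bytes of the scan and requires that slot to be 8-byte aligned; a bare s16/u8 array only guarantees the alignment of its element type, so the timestamp store could land on a misaligned address (a fault on some architectures, undefined behaviour on all). The generic form of the idiom, with channel count and type varying per driver:

struct example_scan {
	s16 chans[3];			/* device data, packed first */
	s64 timestamp __aligned(8);	/* slot filled in by the IIO core */
};

/* in the trigger handler: */
iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, pf->timestamp);

Wrapping both in one struct lets the compiler insert the padding and size the allocation, instead of the driver hand-counting pad words in a comment.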
+diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
+index 3c9b0c6954e60..e8a9db1a82ad8 100644
+--- a/drivers/iio/accel/bma220_spi.c
++++ b/drivers/iio/accel/bma220_spi.c
+@@ -63,7 +63,11 @@ static const int bma220_scale_table[][2] = {
+ struct bma220_data {
+ 	struct spi_device *spi_device;
+ 	struct mutex lock;
+-	s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 8x8 timestamp */
++	struct {
++		s8 chans[3];
++		/* Ensure timestamp is naturally aligned. */
++		s64 timestamp __aligned(8);
++	} scan;
+ 	u8 tx_buf[2] ____cacheline_aligned;
+ };
+ 
+@@ -94,12 +98,12 @@ static irqreturn_t bma220_trigger_handler(int irq, void *p)
+ 
+ 	mutex_lock(&data->lock);
+ 	data->tx_buf[0] = BMA220_REG_ACCEL_X | BMA220_READ_MASK;
+-	ret = spi_write_then_read(spi, data->tx_buf, 1, data->buffer,
++	ret = spi_write_then_read(spi, data->tx_buf, 1, &data->scan.chans,
+ 				  ARRAY_SIZE(bma220_channels) - 1);
+ 	if (ret < 0)
+ 		goto err;
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 					   pf->timestamp);
+ err:
+ 	mutex_unlock(&data->lock);
+diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
+index 7e425ebcd7ea8..dca1f00d65e50 100644
+--- a/drivers/iio/accel/bmc150-accel-core.c
++++ b/drivers/iio/accel/bmc150-accel-core.c
+@@ -1171,11 +1171,12 @@ static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
+ 		/*
+ 		 * The datasheet page 17 says:
+ 		 * 15.6, 31.3, 62.5 and 125 mg per LSB.
++		 * IIO unit is m/s^2 so multiply by g = 9.80665 m/s^2.
+ 		 */
+-		.scale_table = { {156000, BMC150_ACCEL_DEF_RANGE_2G},
+-				 {313000, BMC150_ACCEL_DEF_RANGE_4G},
+-				 {625000, BMC150_ACCEL_DEF_RANGE_8G},
+-				 {1250000, BMC150_ACCEL_DEF_RANGE_16G} },
++		.scale_table = { {152984, BMC150_ACCEL_DEF_RANGE_2G},
++				 {306948, BMC150_ACCEL_DEF_RANGE_4G},
++				 {612916, BMC150_ACCEL_DEF_RANGE_8G},
++				 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
+ 	},
+ 	[bma222e] = {
+ 		.name = "BMA222E",
+@@ -1804,21 +1805,17 @@ EXPORT_SYMBOL_GPL(bmc150_accel_core_probe);
+ 
+ struct i2c_client *bmc150_get_second_device(struct i2c_client *client)
+ {
+-	struct bmc150_accel_data *data = i2c_get_clientdata(client);
+-
+-	if (!data)
+-		return NULL;
++	struct bmc150_accel_data *data = iio_priv(i2c_get_clientdata(client));
+ 
+ 	return data->second_device;
+ }
+ EXPORT_SYMBOL_GPL(bmc150_get_second_device);
+ 
+-void bmc150_set_second_device(struct i2c_client *client)
++void bmc150_set_second_device(struct i2c_client *client, struct i2c_client *second_dev)
+ {
+-	struct bmc150_accel_data *data = i2c_get_clientdata(client);
++	struct bmc150_accel_data *data = iio_priv(i2c_get_clientdata(client));
+ 
+-	if (data)
+-		data->second_device = client;
++	data->second_device = second_dev;
+ }
+ EXPORT_SYMBOL_GPL(bmc150_set_second_device);
+ 
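The corrected scale entries are the datasheet LSB sizes converted from milli-g into the IIO unit, m/s^2, expressed in the table's micro scale: micro_scale = mg_per_lsb * 9.80665 * 1000. A quick standalone check of all four ranges:

#include <stdio.h>

int main(void)
{
	/* datasheet LSB sizes in mg for the 2/4/8/16 g ranges */
	static const double mg_per_lsb[] = { 15.6, 31.3, 62.5, 125.0 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%5.1f mg/LSB -> %.0f micro m/s^2\n",
		       mg_per_lsb[i], mg_per_lsb[i] * 9.80665 * 1000.0);
	/* prints 152984, 306948, 612916 and 1225831,
	 * matching the new table entries */
	return 0;
}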
+diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
+index 69f709319484f..2afaae0294eef 100644
+--- a/drivers/iio/accel/bmc150-accel-i2c.c
++++ b/drivers/iio/accel/bmc150-accel-i2c.c
+@@ -70,7 +70,7 @@ static int bmc150_accel_probe(struct i2c_client *client,
+ 
+ 		second_dev = i2c_acpi_new_device(&client->dev, 1, &board_info);
+ 		if (!IS_ERR(second_dev))
+-			bmc150_set_second_device(second_dev);
++			bmc150_set_second_device(client, second_dev);
+ 	}
+ #endif
+ 
+diff --git a/drivers/iio/accel/bmc150-accel.h b/drivers/iio/accel/bmc150-accel.h
+index 6024f15b97004..e30c1698f6fbd 100644
+--- a/drivers/iio/accel/bmc150-accel.h
++++ b/drivers/iio/accel/bmc150-accel.h
+@@ -18,7 +18,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
+ 			    const char *name, bool block_supported);
+ int bmc150_accel_core_remove(struct device *dev);
+ struct i2c_client *bmc150_get_second_device(struct i2c_client *second_device);
+-void bmc150_set_second_device(struct i2c_client *second_device);
++void bmc150_set_second_device(struct i2c_client *client, struct i2c_client *second_dev);
+ extern const struct dev_pm_ops bmc150_accel_pm_ops;
+ extern const struct regmap_config bmc150_regmap_conf;
+ 
+diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
+index 5d63ed19e6e25..d35d039c79cb5 100644
+--- a/drivers/iio/accel/hid-sensor-accel-3d.c
++++ b/drivers/iio/accel/hid-sensor-accel-3d.c
+@@ -28,8 +28,11 @@ struct accel_3d_state {
+ 	struct hid_sensor_hub_callbacks callbacks;
+ 	struct hid_sensor_common common_attributes;
+ 	struct hid_sensor_hub_attribute_info accel[ACCEL_3D_CHANNEL_MAX];
+-	/* Reserve for 3 channels + padding + timestamp */
+-	u32 accel_val[ACCEL_3D_CHANNEL_MAX + 3];
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		u32 accel_val[3];
++		s64 timestamp __aligned(8);
++	} scan;
+ 	int scale_pre_decml;
+ 	int scale_post_decml;
+ 	int scale_precision;
+@@ -241,8 +244,8 @@ static int accel_3d_proc_event(struct hid_sensor_hub_device *hsdev,
+ 			accel_state->timestamp = iio_get_time_ns(indio_dev);
+ 
+ 		hid_sensor_push_data(indio_dev,
+-				     accel_state->accel_val,
+-				     sizeof(accel_state->accel_val),
++				     &accel_state->scan,
++				     sizeof(accel_state->scan),
+ 				     accel_state->timestamp);
+ 
+ 		accel_state->timestamp = 0;
+@@ -267,7 +270,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
+ 	case HID_USAGE_SENSOR_ACCEL_Y_AXIS:
+ 	case HID_USAGE_SENSOR_ACCEL_Z_AXIS:
+ 		offset = usage_id - HID_USAGE_SENSOR_ACCEL_X_AXIS;
+-		accel_state->accel_val[CHANNEL_SCAN_INDEX_X + offset] =
++		accel_state->scan.accel_val[CHANNEL_SCAN_INDEX_X + offset] =
+ 						*(u32 *)raw_data;
+ 		ret = 0;
+ 	break;
+diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
+index 2fadafc860fd6..5a19b5041e282 100644
+--- a/drivers/iio/accel/kxcjk-1013.c
++++ b/drivers/iio/accel/kxcjk-1013.c
+@@ -133,6 +133,13 @@ enum kx_acpi_type {
+ 	ACPI_KIOX010A,
+ };
+ 
++enum kxcjk1013_axis {
++	AXIS_X,
++	AXIS_Y,
++	AXIS_Z,
++	AXIS_MAX
++};
++
+ struct kxcjk1013_data {
+ 	struct regulator_bulk_data regulators[2];
+ 	struct i2c_client *client;
+@@ -140,7 +147,11 @@ struct kxcjk1013_data {
+ 	struct iio_trigger *motion_trig;
+ 	struct iio_mount_matrix orientation;
+ 	struct mutex mutex;
+-	s16 buffer[8];
++	/* Ensure timestamp naturally aligned */
++	struct {
++		s16 chans[AXIS_MAX];
++		s64 timestamp __aligned(8);
++	} scan;
+ 	u8 odr_bits;
+ 	u8 range;
+ 	int wake_thres;
+@@ -154,13 +165,6 @@ struct kxcjk1013_data {
+ 	enum kx_acpi_type acpi_type;
+ };
+ 
+-enum kxcjk1013_axis {
+-	AXIS_X,
+-	AXIS_Y,
+-	AXIS_Z,
+-	AXIS_MAX,
+-};
+-
+ enum kxcjk1013_mode {
+ 	STANDBY,
+ 	OPERATION,
+@@ -1094,12 +1098,12 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
+ 	ret = i2c_smbus_read_i2c_block_data_or_emulated(data->client,
+ 							KXCJK1013_REG_XOUT_L,
+ 							AXIS_MAX * 2,
+-							(u8 *)data->buffer);
++							(u8 *)data->scan.chans);
+ 	mutex_unlock(&data->mutex);
+ 	if (ret < 0)
+ 		goto err;
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 					   data->timestamp);
+ err:
+ 	iio_trigger_notify_done(indio_dev->trig);
+diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
+index 0f8fd687866d4..381c6b1be5f55 100644
+--- a/drivers/iio/accel/mxc4005.c
++++ b/drivers/iio/accel/mxc4005.c
+@@ -56,7 +56,11 @@ struct mxc4005_data {
+ 	struct mutex mutex;
+ 	struct regmap *regmap;
+ 	struct iio_trigger *dready_trig;
+-	__be16 buffer[8];
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		__be16 chans[3];
++		s64 timestamp __aligned(8);
++	} scan;
+ 	bool trigger_enabled;
+ };
+ 
+@@ -135,7 +139,7 @@ static int mxc4005_read_xyz(struct mxc4005_data *data)
+ 	int ret;
+ 
+ 	ret = regmap_bulk_read(data->regmap, MXC4005_REG_XOUT_UPPER,
+-			       data->buffer, sizeof(data->buffer));
++			       data->scan.chans, sizeof(data->scan.chans));
+ 	if (ret < 0) {
+ 		dev_err(data->dev, "failed to read axes\n");
+ 		return ret;
+@@ -301,7 +305,7 @@ static irqreturn_t mxc4005_trigger_handler(int irq, void *private)
+ 	if (ret < 0)
+ 		goto err;
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 					   pf->timestamp);
+ 
+ err:
+diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
+index 3b59887a8581b..7d24801e8aa7c 100644
+--- a/drivers/iio/accel/stk8312.c
++++ b/drivers/iio/accel/stk8312.c
+@@ -103,7 +103,11 @@ struct stk8312_data {
+ 	u8 mode;
+ 	struct iio_trigger *dready_trig;
+ 	bool dready_trigger_on;
+-	s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 64-bit timestamp */
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		s8 chans[3];
++		s64 timestamp __aligned(8);
++	} scan;
+ };
+ 
+ static IIO_CONST_ATTR(in_accel_scale_available, STK8312_SCALE_AVAIL);
+@@ -438,7 +442,7 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
+ 		ret = i2c_smbus_read_i2c_block_data(data->client,
+ 						    STK8312_REG_XOUT,
+ 						    STK8312_ALL_CHANNEL_SIZE,
+-						    data->buffer);
++						    data->scan.chans);
+ 		if (ret < STK8312_ALL_CHANNEL_SIZE) {
+ 			dev_err(&data->client->dev, "register read failed\n");
+ 			mutex_unlock(&data->lock);
+@@ -452,12 +456,12 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
+ 				mutex_unlock(&data->lock);
+ 				goto err;
+ 			}
+-			data->buffer[i++] = ret;
++			data->scan.chans[i++] = ret;
+ 		}
+ 	}
+ 	mutex_unlock(&data->lock);
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 					   pf->timestamp);
+ err:
+ 	iio_trigger_notify_done(indio_dev->trig);
+diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
+index 3ead378b02c9b..e8087d7ee49f9 100644
+--- a/drivers/iio/accel/stk8ba50.c
++++ b/drivers/iio/accel/stk8ba50.c
+@@ -91,12 +91,11 @@ struct stk8ba50_data {
+ 	u8 sample_rate_idx;
+ 	struct iio_trigger *dready_trig;
+ 	bool dready_trigger_on;
+-	/*
+-	 * 3 x 16-bit channels (10-bit data, 6-bit padding) +
+-	 * 1 x 16 padding +
+-	 * 4 x 16 64-bit timestamp
+-	 */
+-	s16 buffer[8];
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		s16 chans[3];
++		s64 timestamp __aligned(8);
++	} scan;
+ };
+ 
+ #define STK8BA50_ACCEL_CHANNEL(index, reg, axis) {			\
+@@ -324,7 +323,7 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
+ 		ret = i2c_smbus_read_i2c_block_data(data->client,
+ 						    STK8BA50_REG_XOUT,
+ 						    STK8BA50_ALL_CHANNEL_SIZE,
+-						    (u8 *)data->buffer);
++						    (u8 *)data->scan.chans);
+ 		if (ret < STK8BA50_ALL_CHANNEL_SIZE) {
+ 			dev_err(&data->client->dev, "register read failed\n");
+ 			goto err;
+@@ -337,10 +336,10 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
+ 			if (ret < 0)
+ 				goto err;
+ 
+-			data->buffer[i++] = ret;
++			data->scan.chans[i++] = ret;
+ 		}
+ 	}
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 					   pf->timestamp);
+ err:
+ 	mutex_unlock(&data->lock);
+diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
+index a7826f097b95c..d356b515df090 100644
+--- a/drivers/iio/adc/at91-sama5d2_adc.c
++++ b/drivers/iio/adc/at91-sama5d2_adc.c
+@@ -403,7 +403,8 @@ struct at91_adc_state {
+ 	struct at91_adc_dma		dma_st;
+ 	struct at91_adc_touch		touch_st;
+ 	struct iio_dev			*indio_dev;
+-	u16				buffer[AT91_BUFFER_MAX_HWORDS];
++	/* Ensure naturally aligned timestamp */
++	u16				buffer[AT91_BUFFER_MAX_HWORDS] __aligned(8);
+ 	/*
+ 	 * lock to prevent concurrent 'single conversion' requests through
+ 	 * sysfs.
+diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
+index 6a173531d355b..f7ee856a6b8b6 100644
+--- a/drivers/iio/adc/hx711.c
++++ b/drivers/iio/adc/hx711.c
+@@ -86,9 +86,9 @@ struct hx711_data {
+ 	struct mutex		lock;
+ 	/*
+ 	 * triggered buffer
+-	 * 2x32-bit channel + 64-bit timestamp
++	 * 2x32-bit channel + 64-bit naturally aligned timestamp
+ 	 */
+-	u32			buffer[4];
++	u32			buffer[4] __aligned(8);
+ 	/*
+ 	 * delay after a rising edge on SCK until the data is ready DOUT
+ 	 * this is dependent on the hx711 where the datasheet tells a
+diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
+index 30e29f44ebd2e..c480cb489c1a3 100644
+--- a/drivers/iio/adc/mxs-lradc-adc.c
++++ b/drivers/iio/adc/mxs-lradc-adc.c
+@@ -115,7 +115,8 @@ struct mxs_lradc_adc {
+ 	struct device		*dev;
+ 
+ 	void __iomem		*base;
+-	u32			buffer[10];
++	/* Maximum of 8 channels + 8 byte ts */
++	u32			buffer[10] __aligned(8);
+ 	struct iio_trigger	*trig;
+ 	struct completion	completion;
+ 	spinlock_t		lock;
+diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
+index 9fef39bcf997b..5b828428be77c 100644
+--- a/drivers/iio/adc/ti-ads1015.c
++++ b/drivers/iio/adc/ti-ads1015.c
+@@ -395,10 +395,14 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
+ 	struct iio_poll_func *pf = p;
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct ads1015_data *data = iio_priv(indio_dev);
+-	s16 buf[8]; /* 1x s16 ADC val + 3x s16 padding +  4x s16 timestamp */
++	/* Ensure natural alignment of timestamp */
++	struct {
++		s16 chan;
++		s64 timestamp __aligned(8);
++	} scan;
+ 	int chan, ret, res;
+ 
+-	memset(buf, 0, sizeof(buf));
++	memset(&scan, 0, sizeof(scan));
+ 
+ 	mutex_lock(&data->lock);
+ 	chan = find_first_bit(indio_dev->active_scan_mask,
+@@ -409,10 +413,10 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
+ 		goto err;
+ 	}
+ 
+-	buf[0] = res;
++	scan.chan = res;
+ 	mutex_unlock(&data->lock);
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, buf,
++	iio_push_to_buffers_with_timestamp(indio_dev, &scan,
+ 					   iio_get_time_ns(indio_dev));
+ 
+ err:
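Unlike the per-device scan structs elsewhere in this patch, this one lives on the stack, so the memset is more than tidiness: the six padding bytes between the s16 channel value and the 8-byte-aligned s64 timestamp would otherwise contain stale stack data, which iio_push_to_buffers_with_timestamp() copies out toward userspace. A userspace model of the layout:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct scan {			/* model of the driver's stack struct */
	int16_t chan;
	int64_t timestamp __attribute__((aligned(8)));
};

int main(void)
{
	struct scan s;

	memset(&s, 0, sizeof(s));	/* what the handler does first */
	/* 2 data bytes + 6 padding bytes + 8 timestamp bytes */
	printf("sizeof=%zu padding=%zu\n", sizeof(s),
	       sizeof(s) - sizeof(s.chan) - sizeof(s.timestamp));
	return 0;	/* prints: sizeof=16 padding=6 */
}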
+diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
+index 16bcb37eebb72..79c803537dc42 100644
+--- a/drivers/iio/adc/ti-ads8688.c
++++ b/drivers/iio/adc/ti-ads8688.c
+@@ -383,7 +383,8 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
+ {
+ 	struct iio_poll_func *pf = p;
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+-	u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
++	/* Ensure naturally aligned timestamp */
++	u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8);
+ 	int i, j = 0;
+ 
+ 	for (i = 0; i < indio_dev->masklength; i++) {
+diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
+index 1d794cf3e3f13..fd57fc43e8e5c 100644
+--- a/drivers/iio/adc/vf610_adc.c
++++ b/drivers/iio/adc/vf610_adc.c
+@@ -167,7 +167,11 @@ struct vf610_adc {
+ 	u32 sample_freq_avail[5];
+ 
+ 	struct completion completion;
+-	u16 buffer[8];
++	/* Ensure the timestamp is naturally aligned */
++	struct {
++		u16 chan;
++		s64 timestamp __aligned(8);
++	} scan;
+ };
+ 
+ static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
+@@ -579,9 +583,9 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
+ 	if (coco & VF610_ADC_HS_COCO0) {
+ 		info->value = vf610_adc_read_data(info);
+ 		if (iio_buffer_enabled(indio_dev)) {
+-			info->buffer[0] = info->value;
++			info->scan.chan = info->value;
+ 			iio_push_to_buffers_with_timestamp(indio_dev,
+-					info->buffer,
++					&info->scan,
+ 					iio_get_time_ns(indio_dev));
+ 			iio_trigger_notify_done(indio_dev->trig);
+ 		} else
+diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
+index cdab9d04dedd0..0c8a50de89408 100644
+--- a/drivers/iio/chemical/atlas-sensor.c
++++ b/drivers/iio/chemical/atlas-sensor.c
+@@ -91,8 +91,8 @@ struct atlas_data {
+ 	struct regmap *regmap;
+ 	struct irq_work work;
+ 	unsigned int interrupt_enabled;
+-
+-	__be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
++	/* 96-bit data + 32-bit pad + 64-bit timestamp */
++	__be32 buffer[6] __aligned(8);
+ };
+ 
+ static const struct regmap_config atlas_regmap_config = {
+diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
+index 1462a6a5bc6da..3d9eba716b691 100644
+--- a/drivers/iio/frequency/adf4350.c
++++ b/drivers/iio/frequency/adf4350.c
+@@ -563,8 +563,10 @@ static int adf4350_probe(struct spi_device *spi)
+ 
+ 	st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
+ 							GPIOD_IN);
+-	if (IS_ERR(st->lock_detect_gpiod))
+-		return PTR_ERR(st->lock_detect_gpiod);
++	if (IS_ERR(st->lock_detect_gpiod)) {
++		ret = PTR_ERR(st->lock_detect_gpiod);
++		goto error_disable_reg;
++	}
+ 
+ 	if (pdata->power_up_frequency) {
+ 		ret = adf4350_set_freq(st, pdata->power_up_frequency);
+diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
+index 029ef4c346046..457fa8702d19d 100644
+--- a/drivers/iio/gyro/bmg160_core.c
++++ b/drivers/iio/gyro/bmg160_core.c
+@@ -98,7 +98,11 @@ struct bmg160_data {
+ 	struct iio_trigger *motion_trig;
+ 	struct iio_mount_matrix orientation;
+ 	struct mutex mutex;
+-	s16 buffer[8];
++	/* Ensure naturally aligned timestamp */
++	struct {
++		s16 chans[3];
++		s64 timestamp __aligned(8);
++	} scan;
+ 	u32 dps_range;
+ 	int ev_enable_state;
+ 	int slope_thres;
+@@ -882,12 +886,12 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
+ 
+ 	mutex_lock(&data->mutex);
+ 	ret = regmap_bulk_read(data->regmap, BMG160_REG_XOUT_L,
+-			       data->buffer, AXIS_MAX * 2);
++			       data->scan.chans, AXIS_MAX * 2);
+ 	mutex_unlock(&data->mutex);
+ 	if (ret < 0)
+ 		goto err;
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 					   pf->timestamp);
+ err:
+ 	iio_trigger_notify_done(indio_dev->trig);
+diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
+index 02ad1767c845e..3398fa413ec5c 100644
+--- a/drivers/iio/humidity/am2315.c
++++ b/drivers/iio/humidity/am2315.c
+@@ -33,7 +33,11 @@
+ struct am2315_data {
+ 	struct i2c_client *client;
+ 	struct mutex lock;
+-	s16 buffer[8]; /* 2x16-bit channels + 2x16 padding + 4x16 timestamp */
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		s16 chans[2];
++		s64 timestamp __aligned(8);
++	} scan;
+ };
+ 
+ struct am2315_sensor_data {
+@@ -167,20 +171,20 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
+ 
+ 	mutex_lock(&data->lock);
+ 	if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
+-		data->buffer[0] = sensor_data.hum_data;
+-		data->buffer[1] = sensor_data.temp_data;
++		data->scan.chans[0] = sensor_data.hum_data;
++		data->scan.chans[1] = sensor_data.temp_data;
+ 	} else {
+ 		i = 0;
+ 		for_each_set_bit(bit, indio_dev->active_scan_mask,
+ 				 indio_dev->masklength) {
+-			data->buffer[i] = (bit ? sensor_data.temp_data :
+-						 sensor_data.hum_data);
++			data->scan.chans[i] = (bit ? sensor_data.temp_data :
++					       sensor_data.hum_data);
+ 			i++;
+ 		}
+ 	}
+ 	mutex_unlock(&data->lock);
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 					   pf->timestamp);
+ err:
+ 	iio_trigger_notify_done(indio_dev->trig);
+diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
+index 785a4ce606d89..4aff16466da02 100644
+--- a/drivers/iio/imu/adis16400.c
++++ b/drivers/iio/imu/adis16400.c
+@@ -647,9 +647,6 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
+ 	void *buffer;
+ 	int ret;
+ 
+-	if (!adis->buffer)
+-		return -ENOMEM;
+-
+ 	if (!(st->variant->flags & ADIS16400_NO_BURST) &&
+ 		st->adis.spi->max_speed_hz > ADIS16400_SPI_BURST) {
+ 		st->adis.spi->max_speed_hz = ADIS16400_SPI_BURST;
+diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
+index 197d482409911..3c4e4deb87608 100644
+--- a/drivers/iio/imu/adis16475.c
++++ b/drivers/iio/imu/adis16475.c
+@@ -990,7 +990,7 @@ static irqreturn_t adis16475_trigger_handler(int irq, void *p)
+ 
+ 	ret = spi_sync(adis->spi, &adis->msg);
+ 	if (ret)
+-		return ret;
++		goto check_burst32;
+ 
+ 	adis->spi->max_speed_hz = cached_spi_speed_hz;
+ 	buffer = adis->buffer;
+diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
+index ac354321f63a3..175af154e4437 100644
+--- a/drivers/iio/imu/adis_buffer.c
++++ b/drivers/iio/imu/adis_buffer.c
+@@ -129,9 +129,6 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
+ 	struct adis *adis = iio_device_get_drvdata(indio_dev);
+ 	int ret;
+ 
+-	if (!adis->buffer)
+-		return -ENOMEM;
+-
+ 	if (adis->data->has_paging) {
+ 		mutex_lock(&adis->state_lock);
+ 		if (adis->current_page != 0) {
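Both adis hunks remove or reroute error returns from pollfunc handlers. These run as threaded interrupt handlers, so their return type is irqreturn_t: returning a negative errno such as -ENOMEM is misinterpreted as an irqreturn value, and an early return also skips iio_trigger_notify_done(), which can leave the trigger stuck. A skeleton with the invariants the fixes restore (example_read_sample and the scan variable are hypothetical):

static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;

	if (example_read_sample(indio_dev) < 0)
		goto out;	/* drop the sample, keep the trigger alive */

	iio_push_to_buffers_with_timestamp(indio_dev, &example_scan,
					   pf->timestamp);
out:
	iio_trigger_notify_done(indio_dev->trig);	/* always */
	return IRQ_HANDLED;				/* never an errno */
}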
+diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
+index b93b85dbc3a6a..ba53b50d711a1 100644
+--- a/drivers/iio/light/isl29125.c
++++ b/drivers/iio/light/isl29125.c
+@@ -51,7 +51,11 @@
+ struct isl29125_data {
+ 	struct i2c_client *client;
+ 	u8 conf1;
+-	u16 buffer[8]; /* 3x 16-bit, padding, 8 bytes timestamp */
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		u16 chans[3];
++		s64 timestamp __aligned(8);
++	} scan;
+ };
+ 
+ #define ISL29125_CHANNEL(_color, _si) { \
+@@ -184,10 +188,10 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
+ 		if (ret < 0)
+ 			goto done;
+ 
+-		data->buffer[j++] = ret;
++		data->scan.chans[j++] = ret;
+ 	}
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 		iio_get_time_ns(indio_dev));
+ 
+ done:
+diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
+index b4323d2db0b19..74ed2d88a3ed3 100644
+--- a/drivers/iio/light/ltr501.c
++++ b/drivers/iio/light/ltr501.c
+@@ -32,9 +32,12 @@
+ #define LTR501_PART_ID 0x86
+ #define LTR501_MANUFAC_ID 0x87
+ #define LTR501_ALS_DATA1 0x88 /* 16-bit, little endian */
++#define LTR501_ALS_DATA1_UPPER 0x89 /* upper 8 bits of LTR501_ALS_DATA1 */
+ #define LTR501_ALS_DATA0 0x8a /* 16-bit, little endian */
++#define LTR501_ALS_DATA0_UPPER 0x8b /* upper 8 bits of LTR501_ALS_DATA0 */
+ #define LTR501_ALS_PS_STATUS 0x8c
+ #define LTR501_PS_DATA 0x8d /* 16-bit, little endian */
++#define LTR501_PS_DATA_UPPER 0x8e /* upper 8 bits of LTR501_PS_DATA */
+ #define LTR501_INTR 0x8f /* output mode, polarity, mode */
+ #define LTR501_PS_THRESH_UP 0x90 /* 11 bit, ps upper threshold */
+ #define LTR501_PS_THRESH_LOW 0x92 /* 11 bit, ps lower threshold */
+@@ -406,18 +409,19 @@ static int ltr501_read_als(const struct ltr501_data *data, __le16 buf[2])
+ 
+ static int ltr501_read_ps(const struct ltr501_data *data)
+ {
+-	int ret, status;
++	__le16 status;
++	int ret;
+ 
+ 	ret = ltr501_drdy(data, LTR501_STATUS_PS_RDY);
+ 	if (ret < 0)
+ 		return ret;
+ 
+ 	ret = regmap_bulk_read(data->regmap, LTR501_PS_DATA,
+-			       &status, 2);
++			       &status, sizeof(status));
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	return status;
++	return le16_to_cpu(status);
+ }
+ 
+ static int ltr501_read_intr_prst(const struct ltr501_data *data,
+@@ -1205,7 +1209,7 @@ static struct ltr501_chip_info ltr501_chip_info_tbl[] = {
+ 		.als_gain_tbl_size = ARRAY_SIZE(ltr559_als_gain_tbl),
+ 		.ps_gain = ltr559_ps_gain_tbl,
+ 		.ps_gain_tbl_size = ARRAY_SIZE(ltr559_ps_gain_tbl),
+-		.als_mode_active = BIT(1),
++		.als_mode_active = BIT(0),
+ 		.als_gain_mask = BIT(2) | BIT(3) | BIT(4),
+ 		.als_gain_shift = 2,
+ 		.info = &ltr501_info,
+@@ -1354,9 +1358,12 @@ static bool ltr501_is_volatile_reg(struct device *dev, unsigned int reg)
+ {
+ 	switch (reg) {
+ 	case LTR501_ALS_DATA1:
++	case LTR501_ALS_DATA1_UPPER:
+ 	case LTR501_ALS_DATA0:
++	case LTR501_ALS_DATA0_UPPER:
+ 	case LTR501_ALS_PS_STATUS:
+ 	case LTR501_PS_DATA:
++	case LTR501_PS_DATA_UPPER:
+ 		return true;
+ 	default:
+ 		return false;
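Two independent ltr501 fixes land here. First, the raw proximity value is little-endian on the wire, so it is read into a __le16 of exactly that size and converted once with le16_to_cpu(), rather than bulk-read into a host-endian int, which only fills two of its four bytes and skips byte swapping on big-endian hosts. Second, each 16-bit data value spans two 8-bit register addresses, so the upper byte's address must also be marked volatile, or regmap may serve it from its cache. A minimal form of the corrected read helper:

static int example_read_u16(struct regmap *map, unsigned int reg)
{
	__le16 raw;
	int ret;

	/* sizeof(raw) == 2 reads here because the map uses 8-bit registers */
	ret = regmap_bulk_read(map, reg, &raw, sizeof(raw));
	if (ret < 0)
		return ret;
	return le16_to_cpu(raw);	/* no-op on LE, swap on BE hosts */
}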
+diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
+index 6fe5d46f80d40..0593abd600ec2 100644
+--- a/drivers/iio/light/tcs3414.c
++++ b/drivers/iio/light/tcs3414.c
+@@ -53,7 +53,11 @@ struct tcs3414_data {
+ 	u8 control;
+ 	u8 gain;
+ 	u8 timing;
+-	u16 buffer[8]; /* 4x 16-bit + 8 bytes timestamp */
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		u16 chans[4];
++		s64 timestamp __aligned(8);
++	} scan;
+ };
+ 
+ #define TCS3414_CHANNEL(_color, _si, _addr) { \
+@@ -209,10 +213,10 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
+ 		if (ret < 0)
+ 			goto done;
+ 
+-		data->buffer[j++] = ret;
++		data->scan.chans[j++] = ret;
+ 	}
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 		iio_get_time_ns(indio_dev));
+ 
+ done:
+diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
+index a0dc447aeb68b..371c6a39a1654 100644
+--- a/drivers/iio/light/tcs3472.c
++++ b/drivers/iio/light/tcs3472.c
+@@ -64,7 +64,11 @@ struct tcs3472_data {
+ 	u8 control;
+ 	u8 atime;
+ 	u8 apers;
+-	u16 buffer[8]; /* 4 16-bit channels + 64-bit timestamp */
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		u16 chans[4];
++		s64 timestamp __aligned(8);
++	} scan;
+ };
+ 
+ static const struct iio_event_spec tcs3472_events[] = {
+@@ -386,10 +390,10 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
+ 		if (ret < 0)
+ 			goto done;
+ 
+-		data->buffer[j++] = ret;
++		data->scan.chans[j++] = ret;
+ 	}
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 		iio_get_time_ns(indio_dev));
+ 
+ done:
+@@ -531,7 +535,8 @@ static int tcs3472_probe(struct i2c_client *client,
+ 	return 0;
+ 
+ free_irq:
+-	free_irq(client->irq, indio_dev);
++	if (client->irq)
++		free_irq(client->irq, indio_dev);
+ buffer_cleanup:
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ 	return ret;
+@@ -559,7 +564,8 @@ static int tcs3472_remove(struct i2c_client *client)
+ 	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ 
+ 	iio_device_unregister(indio_dev);
+-	free_irq(client->irq, indio_dev);
++	if (client->irq)
++		free_irq(client->irq, indio_dev);
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ 	tcs3472_powerdown(iio_priv(indio_dev));
+ 
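The guards added above mirror the request side: the driver only calls request_threaded_irq() when client->irq is nonzero, so the probe error path and remove must free under the same condition; free_irq() on a never-requested IRQ triggers a warning. The general rule is that a conditional acquisition needs the identical condition on release, as in this sketch (example_handler and the flags are illustrative):

static int example_setup_irq(struct i2c_client *client, void *dev_id)
{
	if (!client->irq)
		return 0;	/* polling-only, nothing to request */
	return request_threaded_irq(client->irq, NULL, example_handler,
				    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				    "example", dev_id);
}

static void example_free_irq(struct i2c_client *client, void *dev_id)
{
	if (client->irq)	/* mirror the request-side condition */
		free_irq(client->irq, dev_id);
}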
+diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
+index fff4b36b8b58d..f4feb44903b3f 100644
+--- a/drivers/iio/light/vcnl4000.c
++++ b/drivers/iio/light/vcnl4000.c
+@@ -910,7 +910,7 @@ static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct vcnl4000_data *data = iio_priv(indio_dev);
+ 	const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
+-	u16 buffer[8] = {0}; /* 1x16-bit + ts */
++	u16 buffer[8] __aligned(8) = {0}; /* 1x16-bit + naturally aligned ts */
+ 	bool data_read = false;
+ 	unsigned long isr;
+ 	int val = 0;
+diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
+index 73a28e30dddcc..1342bbe111ed5 100644
+--- a/drivers/iio/light/vcnl4035.c
++++ b/drivers/iio/light/vcnl4035.c
+@@ -102,7 +102,8 @@ static irqreturn_t vcnl4035_trigger_consumer_handler(int irq, void *p)
+ 	struct iio_poll_func *pf = p;
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct vcnl4035_data *data = iio_priv(indio_dev);
+-	u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)];
++	/* Ensure naturally aligned timestamp */
++	u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8);
+ 	int ret;
+ 
+ 	ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, (int *)buffer);
+diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
+index b2f3129e1b4f3..20a0842f0e3a6 100644
+--- a/drivers/iio/magnetometer/bmc150_magn.c
++++ b/drivers/iio/magnetometer/bmc150_magn.c
+@@ -138,8 +138,11 @@ struct bmc150_magn_data {
+ 	struct regmap *regmap;
+ 	struct regulator_bulk_data regulators[2];
+ 	struct iio_mount_matrix orientation;
+-	/* 4 x 32 bits for x, y z, 4 bytes align, 64 bits timestamp */
+-	s32 buffer[6];
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		s32 chans[3];
++		s64 timestamp __aligned(8);
++	} scan;
+ 	struct iio_trigger *dready_trig;
+ 	bool dready_trigger_on;
+ 	int max_odr;
+@@ -675,11 +678,11 @@ static irqreturn_t bmc150_magn_trigger_handler(int irq, void *p)
+ 	int ret;
+ 
+ 	mutex_lock(&data->mutex);
+-	ret = bmc150_magn_read_xyz(data, data->buffer);
++	ret = bmc150_magn_read_xyz(data, data->scan.chans);
+ 	if (ret < 0)
+ 		goto err;
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 					   pf->timestamp);
+ 
+ err:
+diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
+index 3f6c0b6629415..242f742f2643a 100644
+--- a/drivers/iio/magnetometer/hmc5843.h
++++ b/drivers/iio/magnetometer/hmc5843.h
+@@ -33,7 +33,8 @@ enum hmc5843_ids {
+  * @lock:		update and read regmap data
+  * @regmap:		hardware access register maps
+  * @variant:		describe chip variants
+- * @buffer:		3x 16-bit channels + padding + 64-bit timestamp
++ * @scan:		buffer to pack data for passing to
++ *			iio_push_to_buffers_with_timestamp()
+  */
+ struct hmc5843_data {
+ 	struct device *dev;
+@@ -41,7 +42,10 @@ struct hmc5843_data {
+ 	struct regmap *regmap;
+ 	const struct hmc5843_chip_info *variant;
+ 	struct iio_mount_matrix orientation;
+-	__be16 buffer[8];
++	struct {
++		__be16 chans[3];
++		s64 timestamp __aligned(8);
++	} scan;
+ };
+ 
+ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
+diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
+index 780faea61d82e..221563e0c18fd 100644
+--- a/drivers/iio/magnetometer/hmc5843_core.c
++++ b/drivers/iio/magnetometer/hmc5843_core.c
+@@ -446,13 +446,13 @@ static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
+ 	}
+ 
+ 	ret = regmap_bulk_read(data->regmap, HMC5843_DATA_OUT_MSB_REGS,
+-			       data->buffer, 3 * sizeof(__be16));
++			       data->scan.chans, sizeof(data->scan.chans));
+ 
+ 	mutex_unlock(&data->lock);
+ 	if (ret < 0)
+ 		goto done;
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 					   iio_get_time_ns(indio_dev));
+ 
+ done:
+diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
+index 7242897a05e95..720234a91db11 100644
+--- a/drivers/iio/magnetometer/rm3100-core.c
++++ b/drivers/iio/magnetometer/rm3100-core.c
+@@ -78,7 +78,8 @@ struct rm3100_data {
+ 	bool use_interrupt;
+ 	int conversion_time;
+ 	int scale;
+-	u8 buffer[RM3100_SCAN_BYTES];
++	/* Ensure naturally aligned timestamp */
++	u8 buffer[RM3100_SCAN_BYTES] __aligned(8);
+ 	struct iio_trigger *drdy_trig;
+ 
+ 	/*
+diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
+index f34ca769dc20d..d7ff74a798ba3 100644
+--- a/drivers/iio/potentiostat/lmp91000.c
++++ b/drivers/iio/potentiostat/lmp91000.c
+@@ -71,8 +71,8 @@ struct lmp91000_data {
+ 
+ 	struct completion completion;
+ 	u8 chan_select;
+-
+-	u32 buffer[4]; /* 64-bit data + 64-bit timestamp */
++	/* 64-bit data + 64-bit naturally aligned timestamp */
++	u32 buffer[4] __aligned(8);
+ };
+ 
+ static const struct iio_chan_spec lmp91000_channels[] = {
+diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
+index b79ada839e012..98330e26ac3bd 100644
+--- a/drivers/iio/proximity/as3935.c
++++ b/drivers/iio/proximity/as3935.c
+@@ -59,7 +59,11 @@ struct as3935_state {
+ 	unsigned long noise_tripped;
+ 	u32 tune_cap;
+ 	u32 nflwdth_reg;
+-	u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		u8 chan;
++		s64 timestamp __aligned(8);
++	} scan;
+ 	u8 buf[2] ____cacheline_aligned;
+ };
+ 
+@@ -225,8 +229,8 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
+ 	if (ret)
+ 		goto err_read;
+ 
+-	st->buffer[0] = val & AS3935_DATA_MASK;
+-	iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
++	st->scan.chan = val & AS3935_DATA_MASK;
++	iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
+ 					   iio_get_time_ns(indio_dev));
+ err_read:
+ 	iio_trigger_notify_done(indio_dev->trig);
+diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
+index 90e76451c972a..5b6ea783795d9 100644
+--- a/drivers/iio/proximity/isl29501.c
++++ b/drivers/iio/proximity/isl29501.c
+@@ -938,7 +938,7 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p)
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct isl29501_private *isl29501 = iio_priv(indio_dev);
+ 	const unsigned long *active_mask = indio_dev->active_scan_mask;
+-	u32 buffer[4] = {}; /* 1x16-bit + ts */
++	u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */
+ 
+ 	if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask))
+ 		isl29501_register_read(isl29501, REG_DISTANCE, buffer);
+diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+index cc206bfa09c78..d854b8d5fbbaf 100644
+--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
++++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+@@ -44,7 +44,11 @@ struct lidar_data {
+ 	int (*xfer)(struct lidar_data *data, u8 reg, u8 *val, int len);
+ 	int i2c_enabled;
+ 
+-	u16 buffer[8]; /* 2 byte distance + 8 byte timestamp */
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		u16 chan;
++		s64 timestamp __aligned(8);
++	} scan;
+ };
+ 
+ static const struct iio_chan_spec lidar_channels[] = {
+@@ -230,9 +234,9 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
+ 	struct lidar_data *data = iio_priv(indio_dev);
+ 	int ret;
+ 
+-	ret = lidar_get_measurement(data, data->buffer);
++	ret = lidar_get_measurement(data, &data->scan.chan);
+ 	if (!ret) {
+-		iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++		iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ 						   iio_get_time_ns(indio_dev));
+ 	} else if (ret != -EINVAL) {
+ 		dev_err(&data->client->dev, "cannot read LIDAR measurement");
+diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
+index 70beac5c9c1df..9b0886760f76d 100644
+--- a/drivers/iio/proximity/srf08.c
++++ b/drivers/iio/proximity/srf08.c
+@@ -63,11 +63,11 @@ struct srf08_data {
+ 	int			range_mm;
+ 	struct mutex		lock;
+ 
+-	/*
+-	 * triggered buffer
+-	 * 1x16-bit channel + 3x16 padding + 4x16 timestamp
+-	 */
+-	s16			buffer[8];
++	/* Ensure timestamp is naturally aligned */
++	struct {
++		s16 chan;
++		s64 timestamp __aligned(8);
++	} scan;
+ 
+ 	/* Sensor-Type */
+ 	enum srf08_sensor_type	sensor_type;
+@@ -190,9 +190,9 @@ static irqreturn_t srf08_trigger_handler(int irq, void *p)
+ 
+ 	mutex_lock(&data->lock);
+ 
+-	data->buffer[0] = sensor_data;
++	data->scan.chan = sensor_data;
+ 	iio_push_to_buffers_with_timestamp(indio_dev,
+-						data->buffer, pf->timestamp);
++					   &data->scan, pf->timestamp);
+ 
+ 	mutex_unlock(&data->lock);
+ err:
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 5b9022a8c9ece..bb46f794f3240 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1856,6 +1856,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
+ {
+ 	cma_cancel_operation(id_priv, state);
+ 
++	rdma_restrack_del(&id_priv->res);
+ 	if (id_priv->cma_dev) {
+ 		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
+ 			if (id_priv->cm_id.ib)
+@@ -1865,7 +1866,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
+ 				iw_destroy_cm_id(id_priv->cm_id.iw);
+ 		}
+ 		cma_leave_mc_groups(id_priv);
+-		rdma_restrack_del(&id_priv->res);
+ 		cma_release_dev(id_priv);
+ 	}
+ 
+@@ -2476,8 +2476,10 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
+ 	if (IS_ERR(id))
+ 		return PTR_ERR(id);
+ 
++	mutex_lock(&id_priv->qp_mutex);
+ 	id->tos = id_priv->tos;
+ 	id->tos_set = id_priv->tos_set;
++	mutex_unlock(&id_priv->qp_mutex);
+ 	id_priv->cm_id.iw = id;
+ 
+ 	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
+@@ -2537,8 +2539,10 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
+ 	cma_id_get(id_priv);
+ 	dev_id_priv->internal_id = 1;
+ 	dev_id_priv->afonly = id_priv->afonly;
++	mutex_lock(&id_priv->qp_mutex);
+ 	dev_id_priv->tos_set = id_priv->tos_set;
+ 	dev_id_priv->tos = id_priv->tos;
++	mutex_unlock(&id_priv->qp_mutex);
+ 
+ 	ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
+ 	if (ret)
+@@ -2585,8 +2589,10 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos)
+ 	struct rdma_id_private *id_priv;
+ 
+ 	id_priv = container_of(id, struct rdma_id_private, id);
++	mutex_lock(&id_priv->qp_mutex);
+ 	id_priv->tos = (u8) tos;
+ 	id_priv->tos_set = true;
++	mutex_unlock(&id_priv->qp_mutex);
+ }
+ EXPORT_SYMBOL(rdma_set_service_type);
+ 
+@@ -2613,8 +2619,10 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
+ 		return -EINVAL;
+ 
+ 	id_priv = container_of(id, struct rdma_id_private, id);
++	mutex_lock(&id_priv->qp_mutex);
+ 	id_priv->timeout = timeout;
+ 	id_priv->timeout_set = true;
++	mutex_unlock(&id_priv->qp_mutex);
+ 
+ 	return 0;
+ }
+@@ -3000,8 +3008,11 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+ 
+ 	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
+ 					rdma_start_port(id_priv->cma_dev->device)];
+-	u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
++	u8 tos;
+ 
++	mutex_lock(&id_priv->qp_mutex);
++	tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
++	mutex_unlock(&id_priv->qp_mutex);
+ 
+ 	work = kzalloc(sizeof *work, GFP_KERNEL);
+ 	if (!work)
+@@ -3048,8 +3059,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+ 	 * PacketLifeTime = local ACK timeout/2
+ 	 * as a reasonable approximation for RoCE networks.
+ 	 */
+-	route->path_rec->packet_life_time = id_priv->timeout_set ?
+-		id_priv->timeout - 1 : CMA_IBOE_PACKET_LIFETIME;
++	mutex_lock(&id_priv->qp_mutex);
++	if (id_priv->timeout_set && id_priv->timeout)
++		route->path_rec->packet_life_time = id_priv->timeout - 1;
++	else
++		route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
++	mutex_unlock(&id_priv->qp_mutex);
+ 
+ 	if (!route->path_rec->mtu) {
+ 		ret = -EINVAL;
+@@ -4073,8 +4088,11 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
+ 	if (IS_ERR(cm_id))
+ 		return PTR_ERR(cm_id);
+ 
++	mutex_lock(&id_priv->qp_mutex);
+ 	cm_id->tos = id_priv->tos;
+ 	cm_id->tos_set = id_priv->tos_set;
++	mutex_unlock(&id_priv->qp_mutex);
++
+ 	id_priv->cm_id.iw = cm_id;
+ 
+ 	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
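
Every cma.c hunk above serializes the tos/tos_set and timeout/timeout_set pairs under id_priv->qp_mutex, so a reader can never observe a *_set flag raised while its paired value is still being stored. A minimal sketch of that pattern with hypothetical names (example_id and friends), not the RDMA CM API itself:

#include <linux/mutex.h>
#include <linux/types.h>

struct example_id {
	struct mutex qp_mutex;	/* guards tos and tos_set together */
	u8 tos;
	bool tos_set;
};

static void example_set_tos(struct example_id *id, u8 tos)
{
	mutex_lock(&id->qp_mutex);
	id->tos = tos;
	id->tos_set = true;
	mutex_unlock(&id->qp_mutex);
}

static u8 example_read_tos(struct example_id *id, u8 fallback)
{
	u8 tos;

	mutex_lock(&id->qp_mutex);
	tos = id->tos_set ? id->tos : fallback;
	mutex_unlock(&id->qp_mutex);

	return tos;
}
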
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index ab55f8b3190eb..92ae454d500a6 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -3033,12 +3033,29 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
+ 	if (!wq)
+ 		return -EINVAL;
+ 
+-	wq_attr.curr_wq_state = cmd.curr_wq_state;
+-	wq_attr.wq_state = cmd.wq_state;
+ 	if (cmd.attr_mask & IB_WQ_FLAGS) {
+ 		wq_attr.flags = cmd.flags;
+ 		wq_attr.flags_mask = cmd.flags_mask;
+ 	}
++
++	if (cmd.attr_mask & IB_WQ_CUR_STATE) {
++		if (cmd.curr_wq_state > IB_WQS_ERR)
++			return -EINVAL;
++
++		wq_attr.curr_wq_state = cmd.curr_wq_state;
++	} else {
++		wq_attr.curr_wq_state = wq->state;
++	}
++
++	if (cmd.attr_mask & IB_WQ_STATE) {
++		if (cmd.wq_state > IB_WQS_ERR)
++			return -EINVAL;
++
++		wq_attr.wq_state = cmd.wq_state;
++	} else {
++		wq_attr.wq_state = wq_attr.curr_wq_state;
++	}
++
+ 	ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
+ 					&attrs->driver_udata);
+ 	rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
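
The uverbs change centralizes the WQ state checks: each attribute is range-checked only when its bit is set in attr_mask, and otherwise defaults to the current state, which is what lets the mlx4 and mlx5 hunks below delete their per-driver copies of the same validation. A generic sketch of this validate-or-default idiom, with hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>

/*
 * Hypothetical helper: take the user value when its mask bit is set
 * (after range-checking it), otherwise fall back to the current
 * state. Returns a negative errno on invalid input.
 */
static int example_pick_state(u32 mask, u32 bit, u32 user_val,
			      u32 max_val, u32 cur_val, u32 *out)
{
	if (mask & bit) {
		if (user_val > max_val)
			return -EINVAL;
		*out = user_val;
	} else {
		*out = cur_val;
	}

	return 0;
}
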
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index ad3cee54140e1..851acc9d050f7 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -268,8 +268,6 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
+ 
+ 	dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
+ 
+-	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
+-
+ 	if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
+ 		roce_set_bit(rc_sq_wqe->byte_20,
+ 			     V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);
+@@ -314,6 +312,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ 		       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ 		       (*sge_ind) & (qp->sge.sge_cnt - 1));
+ 
++	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
++		     !!(wr->send_flags & IB_SEND_INLINE));
+ 	if (wr->send_flags & IB_SEND_INLINE)
+ 		return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
+ 
+@@ -750,8 +750,7 @@ out:
+ 		qp->sq.head += nreq;
+ 		qp->next_sge = sge_idx;
+ 
+-		if (nreq == 1 && qp->sq.head == qp->sq.tail + 1 &&
+-		    (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
++		if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
+ 			write_dwqe(hr_dev, qp, wqe);
+ 		else
+ 			update_sq_db(hr_dev, qp);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 79b3c3023fe7a..b8454dcb03183 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -776,7 +776,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	struct hns_roce_buf_region *r;
+ 	unsigned int i, mapped_cnt;
+-	int ret;
++	int ret = 0;
+ 
+ 	/*
+ 	 * Only use the first page address as root ba when hopnum is 0, this
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 651785bd57f2d..18a47248e4441 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -4254,13 +4254,8 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
+ 	if (wq_attr_mask & IB_WQ_FLAGS)
+ 		return -EOPNOTSUPP;
+ 
+-	cur_state = wq_attr_mask & IB_WQ_CUR_STATE ? wq_attr->curr_wq_state :
+-						     ibwq->state;
+-	new_state = wq_attr_mask & IB_WQ_STATE ? wq_attr->wq_state : cur_state;
+-
+-	if (cur_state  < IB_WQS_RESET || cur_state  > IB_WQS_ERR ||
+-	    new_state < IB_WQS_RESET || new_state > IB_WQS_ERR)
+-		return -EINVAL;
++	cur_state = wq_attr->curr_wq_state;
++	new_state = wq_attr->wq_state;
+ 
+ 	if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
+ 		return -EINVAL;
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 59ffbbdda3179..fc531a5069126 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -3393,8 +3393,6 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ 
+ 	port->mp.mpi = NULL;
+ 
+-	list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+-
+ 	spin_unlock(&port->mp.mpi_lock);
+ 
+ 	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
+@@ -3541,6 +3539,8 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
+ 				dev->port[i].mp.mpi = NULL;
+ 			} else {
+ 				mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
++				list_add_tail(&dev->port[i].mp.mpi->list,
++					      &mlx5_ib_unaffiliated_port_list);
+ 				mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
+ 			}
+ 		}
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 843f9e7fe96ff..bcaaf238b364d 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -5309,10 +5309,8 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ 
+ 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+ 
+-	curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
+-		wq_attr->curr_wq_state : wq->state;
+-	wq_state = (wq_attr_mask & IB_WQ_STATE) ?
+-		wq_attr->wq_state : curr_wq_state;
++	curr_wq_state = wq_attr->curr_wq_state;
++	wq_state = wq_attr->wq_state;
+ 	if (curr_wq_state == IB_WQS_ERR)
+ 		curr_wq_state = MLX5_RQC_STATE_ERR;
+ 	if (wq_state == IB_WQS_ERR)
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 01662727dca08..fc1ba49042792 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -207,10 +207,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
+ 
+ 	/* Create UDP socket */
+ 	err = udp_sock_create(net, &udp_cfg, &sock);
+-	if (err < 0) {
+-		pr_err("failed to create udp socket. err = %d\n", err);
++	if (err < 0)
+ 		return ERR_PTR(err);
+-	}
+ 
+ 	tnl_cfg.encap_type = 1;
+ 	tnl_cfg.encap_rcv = rxe_udp_encap_recv;
+@@ -619,6 +617,12 @@ static int rxe_net_ipv6_init(void)
+ 
+ 	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
+ 						htons(ROCE_V2_UDP_DPORT), true);
++	if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) {
++		recv_sockets.sk6 = NULL;
++		pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n");
++		return 0;
++	}
++
+ 	if (IS_ERR(recv_sockets.sk6)) {
+ 		recv_sockets.sk6 = NULL;
+ 		pr_err("Failed to create IPv6 UDP tunnel\n");
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index b0f350d674fdb..93a41ebda1a85 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -136,7 +136,6 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
+ void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
+ {
+ 	if (res->type == RXE_ATOMIC_MASK) {
+-		rxe_drop_ref(qp);
+ 		kfree_skb(res->atomic.skb);
+ 	} else if (res->type == RXE_READ_MASK) {
+ 		if (res->read.mr)
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index 8e237b623b316..ae97bebc0f34f 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -966,8 +966,6 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ 		goto out;
+ 	}
+ 
+-	rxe_add_ref(qp);
+-
+ 	res = &qp->resp.resources[qp->resp.res_head];
+ 	free_rd_atomic_resource(qp, res);
+ 	rxe_advance_resp_resource(qp);
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 8fcaa1136f2cd..776e46ee95dad 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -506,6 +506,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ 	iser_conn->iscsi_conn = conn;
+ 
+ out:
++	iscsi_put_endpoint(ep);
+ 	mutex_unlock(&iser_conn->state_mutex);
+ 	return error;
+ }
+@@ -1002,6 +1003,7 @@ static struct iscsi_transport iscsi_iser_transport = {
+ 	/* connection management */
+ 	.create_conn            = iscsi_iser_conn_create,
+ 	.bind_conn              = iscsi_iser_conn_bind,
++	.unbind_conn		= iscsi_conn_unbind,
+ 	.destroy_conn           = iscsi_conn_teardown,
+ 	.attr_is_visible	= iser_attr_is_visible,
+ 	.set_param              = iscsi_iser_set_param,
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 959ba0462ef07..49d12dd4a5039 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -805,6 +805,9 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
+ 	int inflight;
+ 
+ 	list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
++		if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
++			continue;
++
+ 		if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+ 			continue;
+ 
+@@ -1713,7 +1716,19 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ 				  queue_depth);
+ 			return -ECONNRESET;
+ 		}
+-		if (!sess->rbufs || sess->queue_depth < queue_depth) {
++		if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
++			rtrs_err(clt, "Error: queue depth changed\n");
++
++			/*
++			 * Stop any more reconnection attempts
++			 */
++			sess->reconnect_attempts = -1;
++			rtrs_err(clt,
++				"Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
++			return -ECONNRESET;
++		}
++
++		if (!sess->rbufs) {
+ 			kfree(sess->rbufs);
+ 			sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
+ 					      GFP_KERNEL);
+@@ -1727,7 +1742,7 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ 		sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
+ 
+ 		/*
+-		 * Global queue depth and IO size is always a minimum.
++		 * Global IO size is always a minimum.
+ 		 * If while a reconnection server sends us a value a bit
+ 		 * higher - client does not care and uses cached minimum.
+ 		 *
+@@ -1735,8 +1750,7 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ 		 * connections in parallel, use lock.
+ 		 */
+ 		mutex_lock(&clt->paths_mutex);
+-		clt->queue_depth = min_not_zero(sess->queue_depth,
+-						clt->queue_depth);
++		clt->queue_depth = sess->queue_depth;
+ 		clt->max_io_size = min_not_zero(sess->max_io_size,
+ 						clt->max_io_size);
+ 		mutex_unlock(&clt->paths_mutex);
+@@ -2675,6 +2689,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ 		if (err) {
+ 			list_del_rcu(&sess->s.entry);
+ 			rtrs_clt_close_conns(sess, true);
++			free_percpu(sess->stats->pcpu_stats);
++			kfree(sess->stats);
+ 			free_sess(sess);
+ 			goto close_all_sess;
+ 		}
+@@ -2683,6 +2699,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ 		if (err) {
+ 			list_del_rcu(&sess->s.entry);
+ 			rtrs_clt_close_conns(sess, true);
++			free_percpu(sess->stats->pcpu_stats);
++			kfree(sess->stats);
+ 			free_sess(sess);
+ 			goto close_all_sess;
+ 		}
+@@ -2940,6 +2958,8 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+ close_sess:
+ 	rtrs_clt_remove_path_from_arr(sess);
+ 	rtrs_clt_close_conns(sess, true);
++	free_percpu(sess->stats->pcpu_stats);
++	kfree(sess->stats);
+ 	free_sess(sess);
+ 
+ 	return err;
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+index 126a96e75c621..e499f64ae608b 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+@@ -211,6 +211,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+ 		device_del(&srv->dev);
+ 		put_device(&srv->dev);
+ 	} else {
++		put_device(&srv->dev);
+ 		mutex_unlock(&srv->paths_mutex);
+ 	}
+ }
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index d071809e3ed2f..57a9d396ab75d 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -1477,6 +1477,7 @@ static void free_sess(struct rtrs_srv_sess *sess)
+ 		kobject_del(&sess->kobj);
+ 		kobject_put(&sess->kobj);
+ 	} else {
++		kfree(sess->stats);
+ 		kfree(sess);
+ 	}
+ }
+@@ -1600,7 +1601,7 @@ static int create_con(struct rtrs_srv_sess *sess,
+ 	struct rtrs_sess *s = &sess->s;
+ 	struct rtrs_srv_con *con;
+ 
+-	u32 cq_size, wr_queue_size;
++	u32 cq_size, max_send_wr, max_recv_wr, wr_limit;
+ 	int err, cq_vector;
+ 
+ 	con = kzalloc(sizeof(*con), GFP_KERNEL);
+@@ -1621,30 +1622,42 @@ static int create_con(struct rtrs_srv_sess *sess,
+ 		 * All receive and all send (each requiring invalidate)
+ 		 * + 2 for drain and heartbeat
+ 		 */
+-		wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+-		cq_size = wr_queue_size;
++		max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
++		max_recv_wr = SERVICE_CON_QUEUE_DEPTH + 2;
++		cq_size = max_send_wr + max_recv_wr;
+ 	} else {
+-		/*
+-		 * If we have all receive requests posted and
+-		 * all write requests posted and each read request
+-		 * requires an invalidate request + drain
+-		 * and qp gets into error state.
+-		 */
+-		cq_size = srv->queue_depth * 3 + 1;
+ 		/*
+ 		 * In theory we might have queue_depth * 32
+ 		 * outstanding requests if an unsafe global key is used
+ 		 * and we have queue_depth read requests each consisting
+ 		 * of 32 different addresses. div 3 for mlx5.
+ 		 */
+-		wr_queue_size = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
++		wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
++		/* when always_invalidate is enabled, we need linv+rinv+mr+imm */
++		if (always_invalidate)
++			max_send_wr =
++				min_t(int, wr_limit,
++				      srv->queue_depth * (1 + 4) + 1);
++		else
++			max_send_wr =
++				min_t(int, wr_limit,
++				      srv->queue_depth * (1 + 2) + 1);
++
++		max_recv_wr = srv->queue_depth + 1;
++		/*
++		 * If we have all receive requests posted and
++		 * all write requests posted and each read request
++		 * requires an invalidate request + drain
++		 * and qp gets into error state.
++		 */
++		cq_size = max_send_wr + max_recv_wr;
+ 	}
+-	atomic_set(&con->sq_wr_avail, wr_queue_size);
++	atomic_set(&con->sq_wr_avail, max_send_wr);
+ 	cq_vector = rtrs_srv_get_next_cq_vector(sess);
+ 
+ 	/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
+ 	err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
+-				 wr_queue_size, wr_queue_size,
++				 max_send_wr, max_recv_wr,
+ 				 IB_POLL_WORKQUEUE);
+ 	if (err) {
+ 		rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
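
The create_con() rework splits the old single wr_queue_size into separate send and receive budgets: each queued request costs a fixed number of send WRs (1 + 4 with always_invalidate, for linv+rinv+mr+imm; 1 + 2 otherwise), the total is capped at a third of the device's max_qp_wr, and the CQ is sized for the sum of both budgets. A sketch of that arithmetic only, with hypothetical names:

#include <linux/kernel.h>

/* hypothetical sizing helper mirroring the budget above */
static void example_size_qp(int queue_depth, int max_qp_wr,
			    bool always_invalidate, u32 *max_send_wr,
			    u32 *max_recv_wr, u32 *cq_size)
{
	int wr_limit = max_qp_wr / 3;	/* div 3 for mlx5 */
	int per_req = always_invalidate ? 1 + 4 : 1 + 2;

	*max_send_wr = min_t(int, wr_limit, queue_depth * per_req + 1);
	*max_recv_wr = queue_depth + 1;
	*cq_size = *max_send_wr + *max_recv_wr;
}
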
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index d13aff0aa8165..4629bb758126a 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -373,7 +373,6 @@ void rtrs_stop_hb(struct rtrs_sess *sess)
+ {
+ 	cancel_delayed_work_sync(&sess->hb_dwork);
+ 	sess->hb_missed_cnt = 0;
+-	sess->hb_missed_max = 0;
+ }
+ EXPORT_SYMBOL_GPL(rtrs_stop_hb);
+ 
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 31f8aa2c40ed8..168705c88e2fa 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -998,7 +998,6 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
+ 	struct srp_device *srp_dev = target->srp_host->srp_dev;
+ 	struct ib_device *ibdev = srp_dev->dev;
+ 	struct srp_request *req;
+-	void *mr_list;
+ 	dma_addr_t dma_addr;
+ 	int i, ret = -ENOMEM;
+ 
+@@ -1009,12 +1008,12 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
+ 
+ 	for (i = 0; i < target->req_ring_size; ++i) {
+ 		req = &ch->req_ring[i];
+-		mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
+-					GFP_KERNEL);
+-		if (!mr_list)
+-			goto out;
+-		if (srp_dev->use_fast_reg)
+-			req->fr_list = mr_list;
++		if (srp_dev->use_fast_reg) {
++			req->fr_list = kmalloc_array(target->mr_per_cmd,
++						sizeof(void *), GFP_KERNEL);
++			if (!req->fr_list)
++				goto out;
++		}
+ 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
+ 		if (!req->indirect_desc)
+ 			goto out;
+diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
+index da8963a9f044c..947d440a3be63 100644
+--- a/drivers/input/joydev.c
++++ b/drivers/input/joydev.c
+@@ -499,7 +499,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
+ 	memcpy(joydev->keypam, keypam, len);
+ 
+ 	for (i = 0; i < joydev->nkey; i++)
+-		joydev->keymap[keypam[i] - BTN_MISC] = i;
++		joydev->keymap[joydev->keypam[i] - BTN_MISC] = i;
+ 
+  out:
+ 	kfree(keypam);
+diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
+index 32d15809ae586..40a070a2e7f5b 100644
+--- a/drivers/input/keyboard/Kconfig
++++ b/drivers/input/keyboard/Kconfig
+@@ -67,9 +67,6 @@ config KEYBOARD_AMIGA
+ 	  To compile this driver as a module, choose M here: the
+ 	  module will be called amikbd.
+ 
+-config ATARI_KBD_CORE
+-	bool
+-
+ config KEYBOARD_APPLESPI
+ 	tristate "Apple SPI keyboard and trackpad"
+ 	depends on ACPI && EFI
+diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
+index bb29a7c9a1c0c..54afb38601b9f 100644
+--- a/drivers/input/keyboard/hil_kbd.c
++++ b/drivers/input/keyboard/hil_kbd.c
+@@ -512,6 +512,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
+ 		    HIL_IDD_NUM_AXES_PER_SET(*idd)) {
+ 			printk(KERN_INFO PREFIX
+ 				"combo devices are not supported.\n");
++			error = -EINVAL;
+ 			goto bail1;
+ 		}
+ 
+diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
+index 17540bdb1eaf7..0f9e3ec99aae1 100644
+--- a/drivers/input/touchscreen/elants_i2c.c
++++ b/drivers/input/touchscreen/elants_i2c.c
+@@ -1396,7 +1396,7 @@ static int elants_i2c_probe(struct i2c_client *client,
+ 	init_completion(&ts->cmd_done);
+ 
+ 	ts->client = client;
+-	ts->chip_id = (enum elants_chip_id)id->driver_data;
++	ts->chip_id = (enum elants_chip_id)(uintptr_t)device_get_match_data(&client->dev);
+ 	i2c_set_clientdata(client, ts);
+ 
+ 	ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
+@@ -1636,8 +1636,8 @@ MODULE_DEVICE_TABLE(acpi, elants_acpi_id);
+ 
+ #ifdef CONFIG_OF
+ static const struct of_device_id elants_of_match[] = {
+-	{ .compatible = "elan,ekth3500" },
+-	{ .compatible = "elan,ektf3624" },
++	{ .compatible = "elan,ekth3500", .data = (void *)EKTH3500 },
++	{ .compatible = "elan,ektf3624", .data = (void *)EKTF3624 },
+ 	{ /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, elants_of_match);
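
The elants_i2c change stops deriving the chip variant from the I2C id table and instead attaches it to each OF match entry, reading it back with device_get_match_data(); the double cast through uintptr_t avoids a pointer-to-integer truncation warning on 64-bit builds. A minimal sketch with a hypothetical example_chip_id enum and compatible strings:

#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/types.h>

enum example_chip_id { EXAMPLE_CHIP_A, EXAMPLE_CHIP_B };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = (void *)EXAMPLE_CHIP_A },
	{ .compatible = "vendor,chip-b", .data = (void *)EXAMPLE_CHIP_B },
	{ /* sentinel */ }
};

static enum example_chip_id example_get_id(struct device *dev)
{
	/* works for both OF and ACPI match tables */
	return (enum example_chip_id)(uintptr_t)device_get_match_data(dev);
}
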
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index c682b028f0a29..4f53d3c57e698 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -178,51 +178,6 @@ static const unsigned long goodix_irq_flags[] = {
+ 	IRQ_TYPE_LEVEL_HIGH,
+ };
+ 
+-/*
+- * Those tablets have their coordinates origin at the bottom right
+- * of the tablet, as if rotated 180 degrees
+- */
+-static const struct dmi_system_id rotated_screen[] = {
+-#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+-	{
+-		.ident = "Teclast X89",
+-		.matches = {
+-			/* tPAD is too generic, also match on bios date */
+-			DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+-			DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
+-			DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
+-		},
+-	},
+-	{
+-		.ident = "Teclast X98 Pro",
+-		.matches = {
+-			/*
+-			 * Only match BIOS date, because the manufacturers
+-			 * BIOS does not report the board name at all
+-			 * (sometimes)...
+-			 */
+-			DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+-			DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
+-		},
+-	},
+-	{
+-		.ident = "WinBook TW100",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
+-		}
+-	},
+-	{
+-		.ident = "WinBook TW700",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
+-		},
+-	},
+-#endif
+-	{}
+-};
+-
+ static const struct dmi_system_id nine_bytes_report[] = {
+ #if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ 	{
+@@ -1123,13 +1078,6 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
+ 				  ABS_MT_POSITION_Y, ts->prop.max_y);
+ 	}
+ 
+-	if (dmi_check_system(rotated_screen)) {
+-		ts->prop.invert_x = true;
+-		ts->prop.invert_y = true;
+-		dev_dbg(&ts->client->dev,
+-			"Applying '180 degrees rotated screen' quirk\n");
+-	}
+-
+ 	if (dmi_check_system(nine_bytes_report)) {
+ 		ts->contact_size = 9;
+ 
+diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
+index c847453a03c26..43c521f50c851 100644
+--- a/drivers/input/touchscreen/usbtouchscreen.c
++++ b/drivers/input/touchscreen/usbtouchscreen.c
+@@ -251,7 +251,7 @@ static int e2i_init(struct usbtouch_usb *usbtouch)
+ 	int ret;
+ 	struct usb_device *udev = interface_to_usbdev(usbtouch->interface);
+ 
+-	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
++	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ 	                      0x01, 0x02, 0x0000, 0x0081,
+ 	                      NULL, 0, USB_CTRL_SET_TIMEOUT);
+ 
+@@ -531,7 +531,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
++	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ 	                      MTOUCHUSB_RESET,
+ 	                      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 	                      1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+@@ -543,7 +543,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
+ 	msleep(150);
+ 
+ 	for (i = 0; i < 3; i++) {
+-		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
++		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ 				      MTOUCHUSB_ASYNC_REPORT,
+ 				      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 				      1, 1, NULL, 0, USB_CTRL_SET_TIMEOUT);
+@@ -722,7 +722,7 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
+ 	}
+ 
+ 	/* start sending data */
+-	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
++	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 	                      TSC10_CMD_DATA1,
+ 	                      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 	                      0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
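
All four usbtouchscreen hunks fix the same mismatch: a control transfer whose bRequestType carries USB_DIR_OUT must be submitted on the send control pipe, and usb_rcvctrlpipe() makes the pipe direction contradict the request, which the USB core's sanity checks reject. A minimal sketch with a hypothetical vendor request value:

#include <linux/usb.h>

/* pipe direction (snd) must agree with the request direction (OUT) */
static int example_vendor_write(struct usb_device *udev)
{
	/* returns bytes transferred or a negative errno */
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			       0x01, /* hypothetical bRequest */
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
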
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index df7b19ff0a9ed..ecc7308130ba6 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -1911,8 +1911,8 @@ static void print_iommu_info(void)
+ 		pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
+ 
+ 		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
+-			pci_info(pdev, "Extended features (%#llx):",
+-				 iommu->features);
++			pr_info("Extended features (%#llx):", iommu->features);
++
+ 			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+ 				if (iommu_feature(iommu, (1ULL << i)))
+ 					pr_cont(" %s", feat_str[i]);
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index fdd095e1fa521..53e5f41278853 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -252,9 +252,11 @@ resv_iova:
+ 			lo = iova_pfn(iovad, start);
+ 			hi = iova_pfn(iovad, end);
+ 			reserve_iova(iovad, lo, hi);
+-		} else {
++		} else if (end < start) {
+ 			/* dma_ranges list should be sorted */
+-			dev_err(&dev->dev, "Failed to reserve IOVA\n");
++			dev_err(&dev->dev,
++				"Failed to reserve IOVA [%pa-%pa]\n",
++				&start, &end);
+ 			return -EINVAL;
+ 		}
+ 
+diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
+index b6742b4231bf8..258247dd5e3de 100644
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -199,6 +199,7 @@ config LEDS_LM3530
+ 
+ config LEDS_LM3532
+ 	tristate "LCD Backlight driver for LM3532"
++	select REGMAP_I2C
+ 	depends on LEDS_CLASS
+ 	depends on I2C
+ 	help
+diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
+index 7d5c9ca007d66..7d5f0bf2817ad 100644
+--- a/drivers/leds/blink/leds-lgm-sso.c
++++ b/drivers/leds/blink/leds-lgm-sso.c
+@@ -132,8 +132,7 @@ struct sso_led_priv {
+ 	struct regmap *mmap;
+ 	struct device *dev;
+ 	struct platform_device *pdev;
+-	struct clk *gclk;
+-	struct clk *fpid_clk;
++	struct clk_bulk_data clocks[2];
+ 	u32 fpid_clkrate;
+ 	u32 gptc_clkrate;
+ 	u32 freq[MAX_FREQ_RANK];
+@@ -763,12 +762,11 @@ static int sso_probe_gpios(struct sso_led_priv *priv)
+ 	return sso_gpio_gc_init(dev, priv);
+ }
+ 
+-static void sso_clk_disable(void *data)
++static void sso_clock_disable_unprepare(void *data)
+ {
+ 	struct sso_led_priv *priv = data;
+ 
+-	clk_disable_unprepare(priv->fpid_clk);
+-	clk_disable_unprepare(priv->gclk);
++	clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
+ }
+ 
+ static int intel_sso_led_probe(struct platform_device *pdev)
+@@ -785,36 +783,30 @@ static int intel_sso_led_probe(struct platform_device *pdev)
+ 	priv->dev = dev;
+ 
+ 	/* gate clock */
+-	priv->gclk = devm_clk_get(dev, "sso");
+-	if (IS_ERR(priv->gclk)) {
+-		dev_err(dev, "get sso gate clock failed!\n");
+-		return PTR_ERR(priv->gclk);
+-	}
++	priv->clocks[0].id = "sso";
++
++	/* fpid clock */
++	priv->clocks[1].id = "fpid";
+ 
+-	ret = clk_prepare_enable(priv->gclk);
++	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks), priv->clocks);
+ 	if (ret) {
+-		dev_err(dev, "Failed to prepate/enable sso gate clock!\n");
++		dev_err(dev, "Getting clocks failed!\n");
+ 		return ret;
+ 	}
+ 
+-	priv->fpid_clk = devm_clk_get(dev, "fpid");
+-	if (IS_ERR(priv->fpid_clk)) {
+-		dev_err(dev, "Failed to get fpid clock!\n");
+-		return PTR_ERR(priv->fpid_clk);
+-	}
+-
+-	ret = clk_prepare_enable(priv->fpid_clk);
++	ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
+ 	if (ret) {
+-		dev_err(dev, "Failed to prepare/enable fpid clock!\n");
++		dev_err(dev, "Failed to prepare and enable clocks!\n");
+ 		return ret;
+ 	}
+-	priv->fpid_clkrate = clk_get_rate(priv->fpid_clk);
+ 
+-	ret = devm_add_action_or_reset(dev, sso_clk_disable, priv);
+-	if (ret) {
+-		dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret);
++	ret = devm_add_action_or_reset(dev, sso_clock_disable_unprepare, priv);
++	if (ret)
+ 		return ret;
+-	}
++
++	priv->fpid_clkrate = clk_get_rate(priv->clocks[1].clk);
++
++	priv->mmap = syscon_node_to_regmap(dev->of_node);
+ 
+ 	priv->mmap = syscon_node_to_regmap(dev->of_node);
+ 	if (IS_ERR(priv->mmap)) {
+@@ -859,8 +851,6 @@ static int intel_sso_led_remove(struct platform_device *pdev)
+ 		sso_led_shutdown(led);
+ 	}
+ 
+-	clk_disable_unprepare(priv->fpid_clk);
+-	clk_disable_unprepare(priv->gclk);
+ 	regmap_exit(priv->mmap);
+ 
+ 	return 0;
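
The lgm-sso conversion replaces two individually managed clocks with the clk_bulk API, collapsing get/prepare/enable and the managed disable into one call each. A minimal sketch of the same shape, assuming a hypothetical two-clock consumer:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

struct example_priv {
	struct clk_bulk_data clocks[2];
};

static void example_clocks_off(void *data)
{
	struct example_priv *priv = data;

	clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
}

static int example_clocks_on(struct device *dev, struct example_priv *priv)
{
	int ret;

	priv->clocks[0].id = "sso";
	priv->clocks[1].id = "fpid";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks), priv->clocks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
	if (ret)
		return ret;

	/* undo prepare/enable automatically on probe failure or unbind */
	return devm_add_action_or_reset(dev, example_clocks_off, priv);
}
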
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index 2e495ff678562..fa3f5f504ff7d 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -285,10 +285,6 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
+ 	if (!dev)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	/* Not using device tree? */
+-	if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
+-		return ERR_PTR(-ENOTSUPP);
+-
+ 	led = of_led_get(dev->of_node, index);
+ 	if (IS_ERR(led))
+ 		return led;
+diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
+index e8922fa033796..80411d41e802d 100644
+--- a/drivers/leds/leds-as3645a.c
++++ b/drivers/leds/leds-as3645a.c
+@@ -545,6 +545,7 @@ static int as3645a_parse_node(struct as3645a *flash,
+ 	if (!flash->indicator_node) {
+ 		dev_warn(&flash->client->dev,
+ 			 "can't find indicator node\n");
++		rval = -ENODEV;
+ 		goto out_err;
+ 	}
+ 
+diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
+index 632f10db4b3ff..f341da1503a49 100644
+--- a/drivers/leds/leds-ktd2692.c
++++ b/drivers/leds/leds-ktd2692.c
+@@ -256,6 +256,17 @@ static void ktd2692_setup(struct ktd2692_context *led)
+ 				 | KTD2692_REG_FLASH_CURRENT_BASE);
+ }
+ 
++static void regulator_disable_action(void *_data)
++{
++	struct device *dev = _data;
++	struct ktd2692_context *led = dev_get_drvdata(dev);
++	int ret;
++
++	ret = regulator_disable(led->regulator);
++	if (ret)
++		dev_err(dev, "Failed to disable supply: %d\n", ret);
++}
++
+ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
+ 			    struct ktd2692_led_config_data *cfg)
+ {
+@@ -286,8 +297,14 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
+ 
+ 	if (led->regulator) {
+ 		ret = regulator_enable(led->regulator);
+-		if (ret)
++		if (ret) {
+ 			dev_err(dev, "Failed to enable supply: %d\n", ret);
++		} else {
++			ret = devm_add_action_or_reset(dev,
++						regulator_disable_action, dev);
++			if (ret)
++				return ret;
++		}
+ 	}
+ 
+ 	child_node = of_get_next_available_child(np, NULL);
+@@ -377,17 +394,9 @@ static int ktd2692_probe(struct platform_device *pdev)
+ static int ktd2692_remove(struct platform_device *pdev)
+ {
+ 	struct ktd2692_context *led = platform_get_drvdata(pdev);
+-	int ret;
+ 
+ 	led_classdev_flash_unregister(&led->fled_cdev);
+ 
+-	if (led->regulator) {
+-		ret = regulator_disable(led->regulator);
+-		if (ret)
+-			dev_err(&pdev->dev,
+-				"Failed to disable supply: %d\n", ret);
+-	}
+-
+ 	mutex_destroy(&led->lock);
+ 
+ 	return 0;
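
The ktd2692 fix registers regulator_disable() as a devm action immediately after a successful regulator_enable(), keeping the enable count balanced on every probe-error and remove path without hand-written cleanup (which is why the remove-side disable above can be deleted). A minimal sketch with hypothetical names:

#include <linux/device.h>
#include <linux/printk.h>
#include <linux/regulator/consumer.h>

static void example_regulator_off(void *data)
{
	struct regulator *reg = data;
	int ret;

	ret = regulator_disable(reg);
	if (ret)
		pr_err("Failed to disable supply: %d\n", ret);
}

static int example_enable_supply(struct device *dev, struct regulator *reg)
{
	int ret;

	ret = regulator_enable(reg);
	if (ret)
		return ret;

	/* pairs the disable with the enable on all exit paths */
	return devm_add_action_or_reset(dev, example_regulator_off, reg);
}
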
+diff --git a/drivers/leds/leds-lm36274.c b/drivers/leds/leds-lm36274.c
+index aadb03468a40a..a23a9424c2f38 100644
+--- a/drivers/leds/leds-lm36274.c
++++ b/drivers/leds/leds-lm36274.c
+@@ -127,6 +127,7 @@ static int lm36274_probe(struct platform_device *pdev)
+ 
+ 	ret = lm36274_init(chip);
+ 	if (ret) {
++		fwnode_handle_put(init_data.fwnode);
+ 		dev_err(chip->dev, "Failed to init the device\n");
+ 		return ret;
+ 	}
+diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
+index e945de45388ca..55e6443997ec9 100644
+--- a/drivers/leds/leds-lm3692x.c
++++ b/drivers/leds/leds-lm3692x.c
+@@ -435,6 +435,7 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
+ 
+ 	ret = fwnode_property_read_u32(child, "reg", &led->led_enable);
+ 	if (ret) {
++		fwnode_handle_put(child);
+ 		dev_err(&led->client->dev, "reg DT property missing\n");
+ 		return ret;
+ 	}
+@@ -449,12 +450,11 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
+ 
+ 	ret = devm_led_classdev_register_ext(&led->client->dev, &led->led_dev,
+ 					     &init_data);
+-	if (ret) {
++	if (ret)
+ 		dev_err(&led->client->dev, "led register err: %d\n", ret);
+-		return ret;
+-	}
+ 
+-	return 0;
++	fwnode_handle_put(init_data.fwnode);
++	return ret;
+ }
+ 
+ static int lm3692x_probe(struct i2c_client *client,
+diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
+index 7d216cdb91a8a..912e8bb22a995 100644
+--- a/drivers/leds/leds-lm3697.c
++++ b/drivers/leds/leds-lm3697.c
+@@ -203,11 +203,9 @@ static int lm3697_probe_dt(struct lm3697 *priv)
+ 
+ 	priv->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ 						    GPIOD_OUT_LOW);
+-	if (IS_ERR(priv->enable_gpio)) {
+-		ret = PTR_ERR(priv->enable_gpio);
+-		dev_err(dev, "Failed to get enable gpio: %d\n", ret);
+-		return ret;
+-	}
++	if (IS_ERR(priv->enable_gpio))
++		return dev_err_probe(dev, PTR_ERR(priv->enable_gpio),
++					  "Failed to get enable GPIO\n");
+ 
+ 	priv->regulator = devm_regulator_get(dev, "vled");
+ 	if (IS_ERR(priv->regulator))
+diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
+index 06230614fdc56..401df1e2e05d0 100644
+--- a/drivers/leds/leds-lp50xx.c
++++ b/drivers/leds/leds-lp50xx.c
+@@ -490,6 +490,7 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
+ 			ret = fwnode_property_read_u32(led_node, "color",
+ 						       &color_id);
+ 			if (ret) {
++				fwnode_handle_put(led_node);
+ 				dev_err(priv->dev, "Cannot read color\n");
+ 				goto child_out;
+ 			}
+@@ -512,7 +513,6 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
+ 			goto child_out;
+ 		}
+ 		i++;
+-		fwnode_handle_put(child);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+index f25324d03842e..15236d7296258 100644
+--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
++++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+@@ -132,7 +132,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+ 	if (apcs_data->clk_name) {
+ 		apcs->clk = platform_device_register_data(&pdev->dev,
+ 							  apcs_data->clk_name,
+-							  PLATFORM_DEVID_NONE,
++							  PLATFORM_DEVID_AUTO,
+ 							  NULL, 0);
+ 		if (IS_ERR(apcs->clk))
+ 			dev_err(&pdev->dev, "failed to register APCS clk\n");
+diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
+index 2d13c72944c6f..584700cd15855 100644
+--- a/drivers/mailbox/qcom-ipcc.c
++++ b/drivers/mailbox/qcom-ipcc.c
+@@ -155,6 +155,11 @@ static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
+ 	return 0;
+ }
+ 
++static void qcom_ipcc_mbox_shutdown(struct mbox_chan *chan)
++{
++	chan->con_priv = NULL;
++}
++
+ static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
+ 					const struct of_phandle_args *ph)
+ {
+@@ -184,6 +189,7 @@ static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
+ 
+ static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
+ 	.send_data = qcom_ipcc_mbox_send_data,
++	.shutdown = qcom_ipcc_mbox_shutdown,
+ };
+ 
+ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 2a9553efc2d1b..c21ce8070d3c8 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -441,30 +441,6 @@ check_suspended:
+ }
+ EXPORT_SYMBOL(md_handle_request);
+ 
+-struct md_io {
+-	struct mddev *mddev;
+-	bio_end_io_t *orig_bi_end_io;
+-	void *orig_bi_private;
+-	struct block_device *orig_bi_bdev;
+-	unsigned long start_time;
+-};
+-
+-static void md_end_io(struct bio *bio)
+-{
+-	struct md_io *md_io = bio->bi_private;
+-	struct mddev *mddev = md_io->mddev;
+-
+-	bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev);
+-
+-	bio->bi_end_io = md_io->orig_bi_end_io;
+-	bio->bi_private = md_io->orig_bi_private;
+-
+-	mempool_free(md_io, &mddev->md_io_pool);
+-
+-	if (bio->bi_end_io)
+-		bio->bi_end_io(bio);
+-}
+-
+ static blk_qc_t md_submit_bio(struct bio *bio)
+ {
+ 	const int rw = bio_data_dir(bio);
+@@ -489,21 +465,6 @@ static blk_qc_t md_submit_bio(struct bio *bio)
+ 		return BLK_QC_T_NONE;
+ 	}
+ 
+-	if (bio->bi_end_io != md_end_io) {
+-		struct md_io *md_io;
+-
+-		md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
+-		md_io->mddev = mddev;
+-		md_io->orig_bi_end_io = bio->bi_end_io;
+-		md_io->orig_bi_private = bio->bi_private;
+-		md_io->orig_bi_bdev = bio->bi_bdev;
+-
+-		bio->bi_end_io = md_end_io;
+-		bio->bi_private = md_io;
+-
+-		md_io->start_time = bio_start_io_acct(bio);
+-	}
+-
+ 	/* bio could be mergeable after passing to underlayer */
+ 	bio->bi_opf &= ~REQ_NOMERGE;
+ 
+@@ -5614,7 +5575,6 @@ static void md_free(struct kobject *ko)
+ 
+ 	bioset_exit(&mddev->bio_set);
+ 	bioset_exit(&mddev->sync_set);
+-	mempool_exit(&mddev->md_io_pool);
+ 	kfree(mddev);
+ }
+ 
+@@ -5710,11 +5670,6 @@ static int md_alloc(dev_t dev, char *name)
+ 		 */
+ 		mddev->hold_active = UNTIL_STOP;
+ 
+-	error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
+-					  sizeof(struct md_io));
+-	if (error)
+-		goto abort;
+-
+ 	error = -ENOMEM;
+ 	mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
+ 	if (!mddev->queue)
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index bcbba1b5ec4a7..5b2da02e2e758 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -487,7 +487,6 @@ struct mddev {
+ 	struct bio_set			sync_set; /* for sync operations like
+ 						   * metadata and bitmap writes
+ 						   */
+-	mempool_t			md_io_pool;
+ 
+ 	/* Generic flush handling.
+ 	 * The last to finish preflush schedules a worker to submit
+diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c
+index 2a3e7ffefe0a2..028a09a7531ef 100644
+--- a/drivers/media/cec/platform/s5p/s5p_cec.c
++++ b/drivers/media/cec/platform/s5p/s5p_cec.c
+@@ -35,10 +35,13 @@ MODULE_PARM_DESC(debug, "debug level (0-2)");
+ 
+ static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
+ {
++	int ret;
+ 	struct s5p_cec_dev *cec = cec_get_drvdata(adap);
+ 
+ 	if (enable) {
+-		pm_runtime_get_sync(cec->dev);
++		ret = pm_runtime_resume_and_get(cec->dev);
++		if (ret < 0)
++			return ret;
+ 
+ 		s5p_cec_reset(cec);
+ 
+@@ -51,7 +54,7 @@ static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
+ 	} else {
+ 		s5p_cec_mask_tx_interrupts(cec);
+ 		s5p_cec_mask_rx_interrupts(cec);
+-		pm_runtime_disable(cec->dev);
++		pm_runtime_put(cec->dev);
+ 	}
+ 
+ 	return 0;
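
s5p-cec here (and imx334 further down) move from pm_runtime_get_sync() to pm_runtime_resume_and_get(), which drops its usage-count reference by itself when resume fails, so callers need no pm_runtime_put_noidle() on the error path; the disable leg likewise pairs the get with pm_runtime_put() instead of wrongly disabling runtime PM. A minimal sketch with hypothetical callbacks:

#include <linux/pm_runtime.h>

static int example_enable(struct device *dev)
{
	int ret;

	/* on failure the usage count is already dropped for us */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... program the hardware ... */

	return 0;
}

static void example_disable(struct device *dev)
{
	/* balances the successful resume_and_get above */
	pm_runtime_put(dev);
}
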
+diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
+index c1511094fdc7b..b735e23701373 100644
+--- a/drivers/media/common/siano/smscoreapi.c
++++ b/drivers/media/common/siano/smscoreapi.c
+@@ -908,7 +908,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
+ 					 void *buffer, size_t size)
+ {
+ 	struct sms_firmware *firmware = (struct sms_firmware *) buffer;
+-	struct sms_msg_data4 *msg;
++	struct sms_msg_data5 *msg;
+ 	u32 mem_address,  calc_checksum = 0;
+ 	u32 i, *ptr;
+ 	u8 *payload = firmware->payload;
+@@ -989,24 +989,20 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
+ 		goto exit_fw_download;
+ 
+ 	if (coredev->mode == DEVICE_MODE_NONE) {
+-		struct sms_msg_data *trigger_msg =
+-			(struct sms_msg_data *) msg;
+-
+ 		pr_debug("sending MSG_SMS_SWDOWNLOAD_TRIGGER_REQ\n");
+ 		SMS_INIT_MSG(&msg->x_msg_header,
+ 				MSG_SMS_SWDOWNLOAD_TRIGGER_REQ,
+-				sizeof(struct sms_msg_hdr) +
+-				sizeof(u32) * 5);
++				sizeof(*msg));
+ 
+-		trigger_msg->msg_data[0] = firmware->start_address;
++		msg->msg_data[0] = firmware->start_address;
+ 					/* Entry point */
+-		trigger_msg->msg_data[1] = 6; /* Priority */
+-		trigger_msg->msg_data[2] = 0x200; /* Stack size */
+-		trigger_msg->msg_data[3] = 0; /* Parameter */
+-		trigger_msg->msg_data[4] = 4; /* Task ID */
++		msg->msg_data[1] = 6; /* Priority */
++		msg->msg_data[2] = 0x200; /* Stack size */
++		msg->msg_data[3] = 0; /* Parameter */
++		msg->msg_data[4] = 4; /* Task ID */
+ 
+-		rc = smscore_sendrequest_and_wait(coredev, trigger_msg,
+-					trigger_msg->x_msg_header.msg_length,
++		rc = smscore_sendrequest_and_wait(coredev, msg,
++					msg->x_msg_header.msg_length,
+ 					&coredev->trigger_done);
+ 	} else {
+ 		SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_EXEC_REQ,
+diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
+index b3b793b5caf35..16c45afabc530 100644
+--- a/drivers/media/common/siano/smscoreapi.h
++++ b/drivers/media/common/siano/smscoreapi.h
+@@ -629,9 +629,9 @@ struct sms_msg_data2 {
+ 	u32 msg_data[2];
+ };
+ 
+-struct sms_msg_data4 {
++struct sms_msg_data5 {
+ 	struct sms_msg_hdr x_msg_header;
+-	u32 msg_data[4];
++	u32 msg_data[5];
+ };
+ 
+ struct sms_data_download {
+diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
+index ae17407e477a4..7cc654bc52d37 100644
+--- a/drivers/media/common/siano/smsdvb-main.c
++++ b/drivers/media/common/siano/smsdvb-main.c
+@@ -1176,6 +1176,10 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
+ 	return 0;
+ 
+ media_graph_error:
++	mutex_lock(&g_smsdvb_clientslock);
++	list_del(&client->entry);
++	mutex_unlock(&g_smsdvb_clientslock);
++
+ 	smsdvb_debugfs_release(client);
+ 
+ client_error:
+diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
+index 89620da983bab..dddebea644bb8 100644
+--- a/drivers/media/dvb-core/dvb_net.c
++++ b/drivers/media/dvb-core/dvb_net.c
+@@ -45,6 +45,7 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/netdevice.h>
++#include <linux/nospec.h>
+ #include <linux/etherdevice.h>
+ #include <linux/dvb/net.h>
+ #include <linux/uio.h>
+@@ -1462,14 +1463,20 @@ static int dvb_net_do_ioctl(struct file *file,
+ 		struct net_device *netdev;
+ 		struct dvb_net_priv *priv_data;
+ 		struct dvb_net_if *dvbnetif = parg;
++		int if_num = dvbnetif->if_num;
+ 
+-		if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
+-		    !dvbnet->state[dvbnetif->if_num]) {
++		if (if_num >= DVB_NET_DEVICES_MAX) {
+ 			ret = -EINVAL;
+ 			goto ioctl_error;
+ 		}
++		if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX);
+ 
+-		netdev = dvbnet->device[dvbnetif->if_num];
++		if (!dvbnet->state[if_num]) {
++			ret = -EINVAL;
++			goto ioctl_error;
++		}
++
++		netdev = dvbnet->device[if_num];
+ 
+ 		priv_data = netdev_priv(netdev);
+ 		dvbnetif->pid=priv_data->pid;
+@@ -1522,14 +1529,20 @@ static int dvb_net_do_ioctl(struct file *file,
+ 		struct net_device *netdev;
+ 		struct dvb_net_priv *priv_data;
+ 		struct __dvb_net_if_old *dvbnetif = parg;
++		int if_num = dvbnetif->if_num;
++
++		if (if_num >= DVB_NET_DEVICES_MAX) {
++			ret = -EINVAL;
++			goto ioctl_error;
++		}
++		if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX);
+ 
+-		if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
+-		    !dvbnet->state[dvbnetif->if_num]) {
++		if (!dvbnet->state[if_num]) {
+ 			ret = -EINVAL;
+ 			goto ioctl_error;
+ 		}
+ 
+-		netdev = dvbnet->device[dvbnetif->if_num];
++		netdev = dvbnet->device[if_num];
+ 
+ 		priv_data = netdev_priv(netdev);
+ 		dvbnetif->pid=priv_data->pid;
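
The dvb_net change is the standard Spectre-v1 hardening sequence: bounds-check the user-controlled index first, then clamp it with array_index_nospec() before any array access, so a speculatively out-of-bounds load cannot leak memory contents. A minimal sketch with a hypothetical table:

#include <linux/netdevice.h>
#include <linux/nospec.h>

#define EXAMPLE_MAX 10

static struct net_device *example_table[EXAMPLE_MAX];

static struct net_device *example_lookup(unsigned int if_num)
{
	if (if_num >= EXAMPLE_MAX)
		return NULL;

	/* clamp the index under speculation before dereferencing */
	if_num = array_index_nospec(if_num, EXAMPLE_MAX);

	return example_table[if_num];
}
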
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 3862ddc86ec48..795d9bfaba5cf 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -506,6 +506,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 			break;
+ 
+ 	if (minor == MAX_DVB_MINORS) {
++		list_del (&dvbdev->list_head);
+ 		kfree(dvbdevfops);
+ 		kfree(dvbdev);
+ 		up_write(&minor_rwsem);
+@@ -526,6 +527,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 		      __func__);
+ 
+ 		dvb_media_device_free(dvbdev);
++		list_del (&dvbdev->list_head);
+ 		kfree(dvbdevfops);
+ 		kfree(dvbdev);
+ 		mutex_unlock(&dvbdev_register_lock);
+@@ -541,6 +543,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 		pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
+ 		       __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
+ 		dvb_media_device_free(dvbdev);
++		list_del (&dvbdev->list_head);
+ 		kfree(dvbdevfops);
+ 		kfree(dvbdev);
+ 		return PTR_ERR(clsdev);
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index 4505594996bd8..fde0c51f04069 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3093,7 +3093,7 @@ static int __maybe_unused ccs_suspend(struct device *dev)
+ 	if (rval < 0) {
+ 		pm_runtime_put_noidle(dev);
+ 
+-		return -EAGAIN;
++		return rval;
+ 	}
+ 
+ 	if (sensor->streaming)
+diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
+index ad530f0d338a1..02d22907c75c2 100644
+--- a/drivers/media/i2c/imx334.c
++++ b/drivers/media/i2c/imx334.c
+@@ -717,9 +717,9 @@ static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
+ 	}
+ 
+ 	if (enable) {
+-		ret = pm_runtime_get_sync(imx334->dev);
+-		if (ret)
+-			goto error_power_off;
++		ret = pm_runtime_resume_and_get(imx334->dev);
++		if (ret < 0)
++			goto error_unlock;
+ 
+ 		ret = imx334_start_streaming(imx334);
+ 		if (ret)
+@@ -737,6 +737,7 @@ static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
+ 
+ error_power_off:
+ 	pm_runtime_put(imx334->dev);
++error_unlock:
+ 	mutex_unlock(&imx334->mutex);
+ 
+ 	return ret;
+diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
+index e8119ad0bc71d..92376592455ee 100644
+--- a/drivers/media/i2c/ir-kbd-i2c.c
++++ b/drivers/media/i2c/ir-kbd-i2c.c
+@@ -678,8 +678,8 @@ static int zilog_tx(struct rc_dev *rcdev, unsigned int *txbuf,
+ 		goto out_unlock;
+ 	}
+ 
+-	i = i2c_master_recv(ir->tx_c, buf, 1);
+-	if (i != 1) {
++	ret = i2c_master_recv(ir->tx_c, buf, 1);
++	if (ret != 1) {
+ 		dev_err(&ir->rc->dev, "i2c_master_recv failed with %d\n", ret);
+ 		ret = -EIO;
+ 		goto out_unlock;
+diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
+index 42f64175a6dff..fb78a1cedc03b 100644
+--- a/drivers/media/i2c/ov2659.c
++++ b/drivers/media/i2c/ov2659.c
+@@ -204,6 +204,7 @@ struct ov2659 {
+ 	struct i2c_client *client;
+ 	struct v4l2_ctrl_handler ctrls;
+ 	struct v4l2_ctrl *link_frequency;
++	struct clk *clk;
+ 	const struct ov2659_framesize *frame_size;
+ 	struct sensor_register *format_ctrl_regs;
+ 	struct ov2659_pll_ctrl pll;
+@@ -1270,6 +1271,8 @@ static int ov2659_power_off(struct device *dev)
+ 
+ 	gpiod_set_value(ov2659->pwdn_gpio, 1);
+ 
++	clk_disable_unprepare(ov2659->clk);
++
+ 	return 0;
+ }
+ 
+@@ -1278,9 +1281,17 @@ static int ov2659_power_on(struct device *dev)
+ 	struct i2c_client *client = to_i2c_client(dev);
+ 	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ 	struct ov2659 *ov2659 = to_ov2659(sd);
++	int ret;
+ 
+ 	dev_dbg(&client->dev, "%s:\n", __func__);
+ 
++	ret = clk_prepare_enable(ov2659->clk);
++	if (ret) {
++		dev_err(&client->dev, "%s: failed to enable clock\n",
++			__func__);
++		return ret;
++	}
++
+ 	gpiod_set_value(ov2659->pwdn_gpio, 0);
+ 
+ 	if (ov2659->resetb_gpio) {
+@@ -1425,7 +1436,6 @@ static int ov2659_probe(struct i2c_client *client)
+ 	const struct ov2659_platform_data *pdata = ov2659_get_pdata(client);
+ 	struct v4l2_subdev *sd;
+ 	struct ov2659 *ov2659;
+-	struct clk *clk;
+ 	int ret;
+ 
+ 	if (!pdata) {
+@@ -1440,11 +1450,11 @@ static int ov2659_probe(struct i2c_client *client)
+ 	ov2659->pdata = pdata;
+ 	ov2659->client = client;
+ 
+-	clk = devm_clk_get(&client->dev, "xvclk");
+-	if (IS_ERR(clk))
+-		return PTR_ERR(clk);
++	ov2659->clk = devm_clk_get(&client->dev, "xvclk");
++	if (IS_ERR(ov2659->clk))
++		return PTR_ERR(ov2659->clk);
+ 
+-	ov2659->xvclk_frequency = clk_get_rate(clk);
++	ov2659->xvclk_frequency = clk_get_rate(ov2659->clk);
+ 	if (ov2659->xvclk_frequency < 6000000 ||
+ 	    ov2659->xvclk_frequency > 27000000)
+ 		return -EINVAL;
+@@ -1506,7 +1516,9 @@ static int ov2659_probe(struct i2c_client *client)
+ 	ov2659->frame_size = &ov2659_framesizes[2];
+ 	ov2659->format_ctrl_regs = ov2659_formats[0].format_ctrl_regs;
+ 
+-	ov2659_power_on(&client->dev);
++	ret = ov2659_power_on(&client->dev);
++	if (ret < 0)
++		goto error;
+ 
+ 	ret = ov2659_detect(sd);
+ 	if (ret < 0)
+diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
+index 179d107f494ca..50e2af5227603 100644
+--- a/drivers/media/i2c/rdacm21.c
++++ b/drivers/media/i2c/rdacm21.c
+@@ -69,6 +69,7 @@
+ #define OV490_ISP_VSIZE_LOW		0x80820062
+ #define OV490_ISP_VSIZE_HIGH		0x80820063
+ 
++#define OV10640_PID_TIMEOUT		20
+ #define OV10640_ID_HIGH			0xa6
+ #define OV10640_CHIP_ID			0x300a
+ #define OV10640_PIXEL_RATE		55000000
+@@ -329,30 +330,51 @@ static const struct v4l2_subdev_ops rdacm21_subdev_ops = {
+ 	.pad		= &rdacm21_subdev_pad_ops,
+ };
+ 
+-static int ov10640_initialize(struct rdacm21_device *dev)
++static void ov10640_power_up(struct rdacm21_device *dev)
+ {
+-	u8 val;
+-
+-	/* Power-up OV10640 by setting RESETB and PWDNB pins high. */
++	/* Enable GPIO0#0 (reset) and GPIO1#0 (pwdn) as output lines. */
+ 	ov490_write_reg(dev, OV490_GPIO_SEL0, OV490_GPIO0);
+ 	ov490_write_reg(dev, OV490_GPIO_SEL1, OV490_SPWDN0);
+ 	ov490_write_reg(dev, OV490_GPIO_DIRECTION0, OV490_GPIO0);
+ 	ov490_write_reg(dev, OV490_GPIO_DIRECTION1, OV490_SPWDN0);
++
++	/* Power up OV10640 and then reset it. */
++	ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE1, OV490_SPWDN0);
++	usleep_range(1500, 3000);
++
++	ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE0, 0x00);
++	usleep_range(1500, 3000);
+ 	ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE0, OV490_GPIO0);
+-	ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE0, OV490_SPWDN0);
+ 	usleep_range(3000, 5000);
++}
+ 
+-	/* Read OV10640 ID to test communications. */
+-	ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR, OV490_SCCB_SLAVE_READ);
+-	ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH, OV10640_CHIP_ID >> 8);
+-	ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, OV10640_CHIP_ID & 0xff);
+-
+-	/* Trigger SCCB slave transaction and give it some time to complete. */
+-	ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
+-	usleep_range(1000, 1500);
++static int ov10640_check_id(struct rdacm21_device *dev)
++{
++	unsigned int i;
++	u8 val;
+ 
+-	ov490_read_reg(dev, OV490_SCCB_SLAVE0_DIR, &val);
+-	if (val != OV10640_ID_HIGH) {
++	/* Read OV10640 ID to test communications. */
++	for (i = 0; i < OV10640_PID_TIMEOUT; ++i) {
++		ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR,
++				OV490_SCCB_SLAVE_READ);
++		ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH,
++				OV10640_CHIP_ID >> 8);
++		ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW,
++				OV10640_CHIP_ID & 0xff);
++
++		/*
++		 * Trigger SCCB slave transaction and give it some time
++		 * to complete.
++		 */
++		ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
++		usleep_range(1000, 1500);
++
++		ov490_read_reg(dev, OV490_SCCB_SLAVE0_DIR, &val);
++		if (val == OV10640_ID_HIGH)
++			break;
++		usleep_range(1000, 1500);
++	}
++	if (i == OV10640_PID_TIMEOUT) {
+ 		dev_err(dev->dev, "OV10640 ID mismatch: (0x%02x)\n", val);
+ 		return -ENODEV;
+ 	}
+@@ -368,6 +390,8 @@ static int ov490_initialize(struct rdacm21_device *dev)
+ 	unsigned int i;
+ 	int ret;
+ 
++	ov10640_power_up(dev);
++
+ 	/*
+ 	 * Read OV490 Id to test communications. Give it up to 40msec to
+ 	 * exit from reset.
+@@ -405,7 +429,7 @@ static int ov490_initialize(struct rdacm21_device *dev)
+ 		return -ENODEV;
+ 	}
+ 
+-	ret = ov10640_initialize(dev);
++	ret = ov10640_check_id(dev);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+index 5b4c4a3547c93..71804a70bc6d7 100644
+--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
++++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+@@ -1386,7 +1386,7 @@ static int __s5c73m3_power_on(struct s5c73m3 *state)
+ 	s5c73m3_gpio_deassert(state, STBY);
+ 	usleep_range(100, 200);
+ 
+-	s5c73m3_gpio_deassert(state, RST);
++	s5c73m3_gpio_deassert(state, RSET);
+ 	usleep_range(50, 100);
+ 
+ 	return 0;
+@@ -1401,7 +1401,7 @@ static int __s5c73m3_power_off(struct s5c73m3 *state)
+ {
+ 	int i, ret;
+ 
+-	if (s5c73m3_gpio_assert(state, RST))
++	if (s5c73m3_gpio_assert(state, RSET))
+ 		usleep_range(10, 50);
+ 
+ 	if (s5c73m3_gpio_assert(state, STBY))
+@@ -1606,7 +1606,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
+ 
+ 		state->mclk_frequency = pdata->mclk_frequency;
+ 		state->gpio[STBY] = pdata->gpio_stby;
+-		state->gpio[RST] = pdata->gpio_reset;
++		state->gpio[RSET] = pdata->gpio_reset;
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
+index ef7e85b34263b..c3fcfdd3ea66d 100644
+--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
++++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
+@@ -353,7 +353,7 @@ struct s5c73m3_ctrls {
+ 
+ enum s5c73m3_gpio_id {
+ 	STBY,
+-	RST,
++	RSET,
+ 	GPIO_NUM,
+ };
+ 
+diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
+index b2d53417badf6..4e97309a67f41 100644
+--- a/drivers/media/i2c/s5k4ecgx.c
++++ b/drivers/media/i2c/s5k4ecgx.c
+@@ -173,7 +173,7 @@ static const char * const s5k4ecgx_supply_names[] = {
+ 
+ enum s5k4ecgx_gpio_id {
+ 	STBY,
+-	RST,
++	RSET,
+ 	GPIO_NUM,
+ };
+ 
+@@ -476,7 +476,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv)
+ 	if (s5k4ecgx_gpio_set_value(priv, STBY, priv->gpio[STBY].level))
+ 		usleep_range(30, 50);
+ 
+-	if (s5k4ecgx_gpio_set_value(priv, RST, priv->gpio[RST].level))
++	if (s5k4ecgx_gpio_set_value(priv, RSET, priv->gpio[RSET].level))
+ 		usleep_range(30, 50);
+ 
+ 	return 0;
+@@ -484,7 +484,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv)
+ 
+ static int __s5k4ecgx_power_off(struct s5k4ecgx *priv)
+ {
+-	if (s5k4ecgx_gpio_set_value(priv, RST, !priv->gpio[RST].level))
++	if (s5k4ecgx_gpio_set_value(priv, RSET, !priv->gpio[RSET].level))
+ 		usleep_range(30, 50);
+ 
+ 	if (s5k4ecgx_gpio_set_value(priv, STBY, !priv->gpio[STBY].level))
+@@ -872,7 +872,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv,
+ 	int ret;
+ 
+ 	priv->gpio[STBY].gpio = -EINVAL;
+-	priv->gpio[RST].gpio  = -EINVAL;
++	priv->gpio[RSET].gpio  = -EINVAL;
+ 
+ 	ret = s5k4ecgx_config_gpio(gpio->gpio, gpio->level, "S5K4ECGX_STBY");
+ 
+@@ -891,7 +891,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv,
+ 		s5k4ecgx_free_gpios(priv);
+ 		return ret;
+ 	}
+-	priv->gpio[RST] = *gpio;
++	priv->gpio[RSET] = *gpio;
+ 	if (gpio_is_valid(gpio->gpio))
+ 		gpio_set_value(gpio->gpio, 0);
+ 
+diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
+index ec6f22efe19ad..ec65a8e084c6a 100644
+--- a/drivers/media/i2c/s5k5baf.c
++++ b/drivers/media/i2c/s5k5baf.c
+@@ -235,7 +235,7 @@ struct s5k5baf_gpio {
+ 
+ enum s5k5baf_gpio_id {
+ 	STBY,
+-	RST,
++	RSET,
+ 	NUM_GPIOS,
+ };
+ 
+@@ -969,7 +969,7 @@ static int s5k5baf_power_on(struct s5k5baf *state)
+ 
+ 	s5k5baf_gpio_deassert(state, STBY);
+ 	usleep_range(50, 100);
+-	s5k5baf_gpio_deassert(state, RST);
++	s5k5baf_gpio_deassert(state, RSET);
+ 	return 0;
+ 
+ err_reg_dis:
+@@ -987,7 +987,7 @@ static int s5k5baf_power_off(struct s5k5baf *state)
+ 	state->apply_cfg = 0;
+ 	state->apply_crop = 0;
+ 
+-	s5k5baf_gpio_assert(state, RST);
++	s5k5baf_gpio_assert(state, RSET);
+ 	s5k5baf_gpio_assert(state, STBY);
+ 
+ 	if (!IS_ERR(state->clock))
+diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
+index 72439fae7968b..6516e205e9a3d 100644
+--- a/drivers/media/i2c/s5k6aa.c
++++ b/drivers/media/i2c/s5k6aa.c
+@@ -177,7 +177,7 @@ static const char * const s5k6aa_supply_names[] = {
+ 
+ enum s5k6aa_gpio_id {
+ 	STBY,
+-	RST,
++	RSET,
+ 	GPIO_NUM,
+ };
+ 
+@@ -841,7 +841,7 @@ static int __s5k6aa_power_on(struct s5k6aa *s5k6aa)
+ 		ret = s5k6aa->s_power(1);
+ 	usleep_range(4000, 5000);
+ 
+-	if (s5k6aa_gpio_deassert(s5k6aa, RST))
++	if (s5k6aa_gpio_deassert(s5k6aa, RSET))
+ 		msleep(20);
+ 
+ 	return ret;
+@@ -851,7 +851,7 @@ static int __s5k6aa_power_off(struct s5k6aa *s5k6aa)
+ {
+ 	int ret;
+ 
+-	if (s5k6aa_gpio_assert(s5k6aa, RST))
++	if (s5k6aa_gpio_assert(s5k6aa, RSET))
+ 		usleep_range(100, 150);
+ 
+ 	if (s5k6aa->s_power) {
+@@ -1510,7 +1510,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa,
+ 	int ret;
+ 
+ 	s5k6aa->gpio[STBY].gpio = -EINVAL;
+-	s5k6aa->gpio[RST].gpio  = -EINVAL;
++	s5k6aa->gpio[RSET].gpio  = -EINVAL;
+ 
+ 	gpio = &pdata->gpio_stby;
+ 	if (gpio_is_valid(gpio->gpio)) {
+@@ -1533,7 +1533,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa,
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		s5k6aa->gpio[RST] = *gpio;
++		s5k6aa->gpio[RSET] = *gpio;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index 1b309bb743c7b..f21da11caf224 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -1974,6 +1974,7 @@ static int tc358743_probe_of(struct tc358743_state *state)
+ 	bps_pr_lane = 2 * endpoint.link_frequencies[0];
+ 	if (bps_pr_lane < 62500000U || bps_pr_lane > 1000000000U) {
+ 		dev_err(dev, "unsupported bps per lane: %u bps\n", bps_pr_lane);
++		ret = -EINVAL;
+ 		goto disable_clk;
+ 	}
+ 
+diff --git a/drivers/media/mc/Makefile b/drivers/media/mc/Makefile
+index 119037f0e686d..2b7af42ba59c1 100644
+--- a/drivers/media/mc/Makefile
++++ b/drivers/media/mc/Makefile
+@@ -3,7 +3,7 @@
+ mc-objs	:= mc-device.o mc-devnode.o mc-entity.o \
+ 	   mc-request.o
+ 
+-ifeq ($(CONFIG_USB),y)
++ifneq ($(CONFIG_USB),)
+ 	mc-objs += mc-dev-allocator.o
+ endif
+ 
+diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
+index 78dd35c9b65d7..90972d6952f1c 100644
+--- a/drivers/media/pci/bt8xx/bt878.c
++++ b/drivers/media/pci/bt8xx/bt878.c
+@@ -300,7 +300,8 @@ static irqreturn_t bt878_irq(int irq, void *dev_id)
+ 		}
+ 		if (astat & BT878_ARISCI) {
+ 			bt->finished_block = (stat & BT878_ARISCS) >> 28;
+-			tasklet_schedule(&bt->tasklet);
++			if (bt->tasklet.callback)
++				tasklet_schedule(&bt->tasklet);
+ 			break;
+ 		}
+ 		count++;
+@@ -477,6 +478,9 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
+ 	btwrite(0, BT878_AINT_MASK);
+ 	bt878_num++;
+ 
++	if (!bt->tasklet.func)
++		tasklet_disable(&bt->tasklet);
++
+ 	return 0;
+ 
+       fail2:
+diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
+index 0695078ef8125..1bd8bbe57a30e 100644
+--- a/drivers/media/pci/cobalt/cobalt-driver.c
++++ b/drivers/media/pci/cobalt/cobalt-driver.c
+@@ -667,6 +667,7 @@ static int cobalt_probe(struct pci_dev *pci_dev,
+ 		return -ENOMEM;
+ 	cobalt->pci_dev = pci_dev;
+ 	cobalt->instance = i;
++	mutex_init(&cobalt->pci_lock);
+ 
+ 	retval = v4l2_device_register(&pci_dev->dev, &cobalt->v4l2_dev);
+ 	if (retval) {
+diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
+index bca68572b3242..12c33e035904c 100644
+--- a/drivers/media/pci/cobalt/cobalt-driver.h
++++ b/drivers/media/pci/cobalt/cobalt-driver.h
+@@ -251,6 +251,8 @@ struct cobalt {
+ 	int instance;
+ 	struct pci_dev *pci_dev;
+ 	struct v4l2_device v4l2_dev;
++	/* serialize PCI access in cobalt_s_bit_sysctrl() */
++	struct mutex pci_lock;
+ 
+ 	void __iomem *bar0, *bar1;
+ 
+@@ -320,10 +322,13 @@ static inline u32 cobalt_g_sysctrl(struct cobalt *cobalt)
+ static inline void cobalt_s_bit_sysctrl(struct cobalt *cobalt,
+ 					int bit, int val)
+ {
+-	u32 ctrl = cobalt_read_bar1(cobalt, COBALT_SYS_CTRL_BASE);
++	u32 ctrl;
+ 
++	mutex_lock(&cobalt->pci_lock);
++	ctrl = cobalt_read_bar1(cobalt, COBALT_SYS_CTRL_BASE);
+ 	cobalt_write_bar1(cobalt, COBALT_SYS_CTRL_BASE,
+ 			(ctrl & ~(1UL << bit)) | (val << bit));
++	mutex_unlock(&cobalt->pci_lock);
+ }
+ 
+ static inline u32 cobalt_g_sysstat(struct cobalt *cobalt)
+diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
+index c2199042d3db5..85f8b587405e0 100644
+--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
++++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
+@@ -173,14 +173,15 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
+ 	int ret;
+ 
+ 	for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+-		if (!adev->status.enabled)
++		if (!adev->status.enabled) {
++			acpi_dev_put(adev);
+ 			continue;
++		}
+ 
+ 		if (bridge->n_sensors >= CIO2_NUM_PORTS) {
++			acpi_dev_put(adev);
+ 			dev_err(&cio2->dev, "Exceeded available CIO2 ports\n");
+-			cio2_bridge_unregister_sensors(bridge);
+-			ret = -EINVAL;
+-			goto err_out;
++			return -EINVAL;
+ 		}
+ 
+ 		sensor = &bridge->sensors[bridge->n_sensors];
+@@ -228,7 +229,6 @@ err_free_swnodes:
+ 	software_node_unregister_nodes(sensor->swnodes);
+ err_put_adev:
+ 	acpi_dev_put(sensor->adev);
+-err_out:
+ 	return ret;
+ }
+ 
+diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
+index 6cdc77dda0e49..1c9cb9e05fdf6 100644
+--- a/drivers/media/platform/am437x/am437x-vpfe.c
++++ b/drivers/media/platform/am437x/am437x-vpfe.c
+@@ -1021,7 +1021,9 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe)
+ 	if (ret)
+ 		return ret;
+ 
+-	pm_runtime_get_sync(vpfe->pdev);
++	ret = pm_runtime_resume_and_get(vpfe->pdev);
++	if (ret < 0)
++		return ret;
+ 
+ 	vpfe_config_enable(&vpfe->ccdc, 1);
+ 
+@@ -2443,7 +2445,11 @@ static int vpfe_probe(struct platform_device *pdev)
+ 	pm_runtime_enable(&pdev->dev);
+ 
+ 	/* for now just enable it here instead of waiting for the open */
+-	pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
++	if (ret < 0) {
++		vpfe_err(vpfe, "Unable to resume device.\n");
++		goto probe_out_v4l2_unregister;
++	}
+ 
+ 	vpfe_ccdc_config_defaults(ccdc);
+ 
+@@ -2530,6 +2536,11 @@ static int vpfe_suspend(struct device *dev)
+ 
+ 	/* only do full suspend if streaming has started */
+ 	if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
++		/*
++		 * ignore RPM resume errors here, as it is already too late.
++		 * A check like that should happen earlier, either at
++		 * open() or just before start streaming.
++		 */
+ 		pm_runtime_get_sync(dev);
+ 		vpfe_config_enable(ccdc, 1);
+ 
+diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
+index 27a3c92c73bce..f1cf847d1cc2d 100644
+--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
++++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
+@@ -56,10 +56,8 @@ static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
+ static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+ {
+ 	struct gsc_ctx *ctx = q->drv_priv;
+-	int ret;
+ 
+-	ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev);
+-	return ret > 0 ? 0 : ret;
++	return pm_runtime_resume_and_get(&ctx->gsc_dev->pdev->dev);
+ }
+ 
+ static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
+diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
+index 13c838d3f9473..0da36443173c1 100644
+--- a/drivers/media/platform/exynos4-is/fimc-capture.c
++++ b/drivers/media/platform/exynos4-is/fimc-capture.c
+@@ -478,11 +478,9 @@ static int fimc_capture_open(struct file *file)
+ 		goto unlock;
+ 
+ 	set_bit(ST_CAPT_BUSY, &fimc->state);
+-	ret = pm_runtime_get_sync(&fimc->pdev->dev);
+-	if (ret < 0) {
+-		pm_runtime_put_sync(&fimc->pdev->dev);
++	ret = pm_runtime_resume_and_get(&fimc->pdev->dev);
++	if (ret < 0)
+ 		goto unlock;
+-	}
+ 
+ 	ret = v4l2_fh_open(file);
+ 	if (ret) {
+diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
+index 972d9601d2360..1b24f5bfc4af4 100644
+--- a/drivers/media/platform/exynos4-is/fimc-is.c
++++ b/drivers/media/platform/exynos4-is/fimc-is.c
+@@ -828,9 +828,9 @@ static int fimc_is_probe(struct platform_device *pdev)
+ 			goto err_irq;
+ 	}
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+-		goto err_pm;
++		goto err_irq;
+ 
+ 	vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ 
+diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
+index 612b9872afc87..83688a7982f70 100644
+--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
++++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
+@@ -275,7 +275,7 @@ static int isp_video_open(struct file *file)
+ 	if (ret < 0)
+ 		goto unlock;
+ 
+-	ret = pm_runtime_get_sync(&isp->pdev->dev);
++	ret = pm_runtime_resume_and_get(&isp->pdev->dev);
+ 	if (ret < 0)
+ 		goto rel_fh;
+ 
+@@ -293,7 +293,6 @@ static int isp_video_open(struct file *file)
+ 	if (!ret)
+ 		goto unlock;
+ rel_fh:
+-	pm_runtime_put_noidle(&isp->pdev->dev);
+ 	v4l2_fh_release(file);
+ unlock:
+ 	mutex_unlock(&isp->video_lock);
+@@ -306,17 +305,20 @@ static int isp_video_release(struct file *file)
+ 	struct fimc_is_video *ivc = &isp->video_capture;
+ 	struct media_entity *entity = &ivc->ve.vdev.entity;
+ 	struct media_device *mdev = entity->graph_obj.mdev;
++	bool is_singular_file;
+ 
+ 	mutex_lock(&isp->video_lock);
+ 
+-	if (v4l2_fh_is_singular_file(file) && ivc->streaming) {
++	is_singular_file = v4l2_fh_is_singular_file(file);
++
++	if (is_singular_file && ivc->streaming) {
+ 		media_pipeline_stop(entity);
+ 		ivc->streaming = 0;
+ 	}
+ 
+ 	_vb2_fop_release(file, NULL);
+ 
+-	if (v4l2_fh_is_singular_file(file)) {
++	if (is_singular_file) {
+ 		fimc_pipeline_call(&ivc->ve, close);
+ 
+ 		mutex_lock(&mdev->graph_mutex);
+diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
+index a77c49b185115..74b49d30901ed 100644
+--- a/drivers/media/platform/exynos4-is/fimc-isp.c
++++ b/drivers/media/platform/exynos4-is/fimc-isp.c
+@@ -304,11 +304,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
+ 	pr_debug("on: %d\n", on);
+ 
+ 	if (on) {
+-		ret = pm_runtime_get_sync(&is->pdev->dev);
+-		if (ret < 0) {
+-			pm_runtime_put(&is->pdev->dev);
++		ret = pm_runtime_resume_and_get(&is->pdev->dev);
++		if (ret < 0)
+ 			return ret;
+-		}
++
+ 		set_bit(IS_ST_PWR_ON, &is->state);
+ 
+ 		ret = fimc_is_start_firmware(is);
+diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
+index fe20af3a7178a..4d8b18078ff37 100644
+--- a/drivers/media/platform/exynos4-is/fimc-lite.c
++++ b/drivers/media/platform/exynos4-is/fimc-lite.c
+@@ -469,9 +469,9 @@ static int fimc_lite_open(struct file *file)
+ 	}
+ 
+ 	set_bit(ST_FLITE_IN_USE, &fimc->state);
+-	ret = pm_runtime_get_sync(&fimc->pdev->dev);
++	ret = pm_runtime_resume_and_get(&fimc->pdev->dev);
+ 	if (ret < 0)
+-		goto err_pm;
++		goto err_in_use;
+ 
+ 	ret = v4l2_fh_open(file);
+ 	if (ret < 0)
+@@ -499,6 +499,7 @@ static int fimc_lite_open(struct file *file)
+ 	v4l2_fh_release(file);
+ err_pm:
+ 	pm_runtime_put_sync(&fimc->pdev->dev);
++err_in_use:
+ 	clear_bit(ST_FLITE_IN_USE, &fimc->state);
+ unlock:
+ 	mutex_unlock(&fimc->lock);
+diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
+index c9704a147e5cf..df8e2aa454d8f 100644
+--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
++++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
+@@ -73,17 +73,14 @@ static void fimc_m2m_shutdown(struct fimc_ctx *ctx)
+ static int start_streaming(struct vb2_queue *q, unsigned int count)
+ {
+ 	struct fimc_ctx *ctx = q->drv_priv;
+-	int ret;
+ 
+-	ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
+-	return ret > 0 ? 0 : ret;
++	return pm_runtime_resume_and_get(&ctx->fimc_dev->pdev->dev);
+ }
+ 
+ static void stop_streaming(struct vb2_queue *q)
+ {
+ 	struct fimc_ctx *ctx = q->drv_priv;
+ 
+-
+ 	fimc_m2m_shutdown(ctx);
+ 	fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ 	pm_runtime_put(&ctx->fimc_dev->pdev->dev);
+diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
+index 8e1e892085ec0..f7b08dbe25edb 100644
+--- a/drivers/media/platform/exynos4-is/media-dev.c
++++ b/drivers/media/platform/exynos4-is/media-dev.c
+@@ -510,11 +510,9 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
+ 	if (!fmd->pmf)
+ 		return -ENXIO;
+ 
+-	ret = pm_runtime_get_sync(fmd->pmf);
+-	if (ret < 0) {
+-		pm_runtime_put(fmd->pmf);
++	ret = pm_runtime_resume_and_get(fmd->pmf);
++	if (ret < 0)
+ 		return ret;
+-	}
+ 
+ 	fmd->num_sensors = 0;
+ 
+@@ -1284,13 +1282,11 @@ static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
+ static int cam_clk_prepare(struct clk_hw *hw)
+ {
+ 	struct cam_clk *camclk = to_cam_clk(hw);
+-	int ret;
+ 
+ 	if (camclk->fmd->pmf == NULL)
+ 		return -ENODEV;
+ 
+-	ret = pm_runtime_get_sync(camclk->fmd->pmf);
+-	return ret < 0 ? ret : 0;
++	return pm_runtime_resume_and_get(camclk->fmd->pmf);
+ }
+ 
+ static void cam_clk_unprepare(struct clk_hw *hw)
+diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
+index 1aac167abb175..ebf39c8568943 100644
+--- a/drivers/media/platform/exynos4-is/mipi-csis.c
++++ b/drivers/media/platform/exynos4-is/mipi-csis.c
+@@ -494,7 +494,7 @@ static int s5pcsis_s_power(struct v4l2_subdev *sd, int on)
+ 	struct device *dev = &state->pdev->dev;
+ 
+ 	if (on)
+-		return pm_runtime_get_sync(dev);
++		return pm_runtime_resume_and_get(dev);
+ 
+ 	return pm_runtime_put_sync(dev);
+ }
+@@ -509,11 +509,9 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
+ 
+ 	if (enable) {
+ 		s5pcsis_clear_counters(state);
+-		ret = pm_runtime_get_sync(&state->pdev->dev);
+-		if (ret && ret != 1) {
+-			pm_runtime_put_noidle(&state->pdev->dev);
++		ret = pm_runtime_resume_and_get(&state->pdev->dev);
++		if (ret < 0)
+ 			return ret;
+-		}
+ 	}
+ 
+ 	mutex_lock(&state->lock);
+@@ -535,7 +533,7 @@ unlock:
+ 	if (!enable)
+ 		pm_runtime_put(&state->pdev->dev);
+ 
+-	return ret == 1 ? 0 : ret;
++	return ret;
+ }
+ 
+ static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
+diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
+index 141bf5d97a044..ea87110d90738 100644
+--- a/drivers/media/platform/marvell-ccic/mcam-core.c
++++ b/drivers/media/platform/marvell-ccic/mcam-core.c
+@@ -918,6 +918,7 @@ static int mclk_enable(struct clk_hw *hw)
+ 	struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
+ 	int mclk_src;
+ 	int mclk_div;
++	int ret;
+ 
+ 	/*
+ 	 * Clock the sensor appropriately.  Controller clock should
+@@ -931,7 +932,9 @@ static int mclk_enable(struct clk_hw *hw)
+ 		mclk_div = 2;
+ 	}
+ 
+-	pm_runtime_get_sync(cam->dev);
++	ret = pm_runtime_resume_and_get(cam->dev);
++	if (ret < 0)
++		return ret;
+ 	clk_enable(cam->clk[0]);
+ 	mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
+ 	mcam_ctlr_power_up(cam);
+@@ -1611,7 +1614,9 @@ static int mcam_v4l_open(struct file *filp)
+ 		ret = sensor_call(cam, core, s_power, 1);
+ 		if (ret)
+ 			goto out;
+-		pm_runtime_get_sync(cam->dev);
++		ret = pm_runtime_resume_and_get(cam->dev);
++		if (ret < 0)
++			goto out;
+ 		__mcam_cam_reset(cam);
+ 		mcam_set_config_needed(cam, 1);
+ 	}
+diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+index ace4528cdc5ef..f14779e7596e5 100644
+--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
++++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+@@ -391,12 +391,12 @@ static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+ 	struct mtk_mdp_ctx *ctx = q->drv_priv;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(&ctx->mdp_dev->pdev->dev);
++	ret = pm_runtime_resume_and_get(&ctx->mdp_dev->pdev->dev);
+ 	if (ret < 0)
+-		mtk_mdp_dbg(1, "[%d] pm_runtime_get_sync failed:%d",
++		mtk_mdp_dbg(1, "[%d] pm_runtime_resume_and_get failed:%d",
+ 			    ctx->id, ret);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static void *mtk_mdp_m2m_buf_remove(struct mtk_mdp_ctx *ctx,
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+index 147dfef1638d2..f87dc47d9e638 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+@@ -126,7 +126,9 @@ static int fops_vcodec_open(struct file *file)
+ 	mtk_vcodec_dec_set_default_params(ctx);
+ 
+ 	if (v4l2_fh_is_singular(&ctx->fh)) {
+-		mtk_vcodec_dec_pw_on(&dev->pm);
++		ret = mtk_vcodec_dec_pw_on(&dev->pm);
++		if (ret < 0)
++			goto err_load_fw;
+ 		/*
+ 		 * Does nothing if firmware was already loaded.
+ 		 */
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
+index ddee7046ce422..6038db96f71c3 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
+@@ -88,13 +88,15 @@ void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
+ 	put_device(dev->pm.larbvdec);
+ }
+ 
+-void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
++int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
+ {
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(pm->dev);
++	ret = pm_runtime_resume_and_get(pm->dev);
+ 	if (ret)
+-		mtk_v4l2_err("pm_runtime_get_sync fail %d", ret);
++		mtk_v4l2_err("pm_runtime_resume_and_get fail %d", ret);
++
++	return ret;
+ }
+ 
+ void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
+index 872d8bf8cfaf3..280aeaefdb651 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
+@@ -12,7 +12,7 @@
+ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *dev);
+ void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev);
+ 
+-void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
++int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
+ void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm);
+ void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm);
+ void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm);
+diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
+index 043894f7188c8..f49f6d53a941f 100644
+--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
++++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
+@@ -987,6 +987,12 @@ static int mtk_vpu_suspend(struct device *dev)
+ 		return ret;
+ 	}
+ 
++	if (!vpu_running(vpu)) {
++		vpu_clock_disable(vpu);
++		clk_unprepare(vpu->clk);
++		return 0;
++	}
++
+ 	mutex_lock(&vpu->vpu_mutex);
+ 	/* disable vpu timer interrupt */
+ 	vpu_cfg_writel(vpu, vpu_cfg_readl(vpu, VPU_INT_STATUS) | VPU_IDLE_STATE,
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index ae374bb2a48f0..28443547ae8f2 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -76,22 +76,32 @@ static const struct hfi_core_ops venus_core_ops = {
+ 	.event_notify = venus_event_notify,
+ };
+ 
++#define RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS 10
++
+ static void venus_sys_error_handler(struct work_struct *work)
+ {
+ 	struct venus_core *core =
+ 			container_of(work, struct venus_core, work.work);
+-	int ret = 0;
+-
+-	pm_runtime_get_sync(core->dev);
++	int ret, i, max_attempts = RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS;
++	const char *err_msg = "";
++	bool failed = false;
++
++	ret = pm_runtime_get_sync(core->dev);
++	if (ret < 0) {
++		err_msg = "resume runtime PM";
++		max_attempts = 0;
++		failed = true;
++	}
+ 
+ 	hfi_core_deinit(core, true);
+ 
+-	dev_warn(core->dev, "system error has occurred, starting recovery!\n");
+-
+ 	mutex_lock(&core->lock);
+ 
+-	while (pm_runtime_active(core->dev_dec) || pm_runtime_active(core->dev_enc))
++	for (i = 0; i < max_attempts; i++) {
++		if (!pm_runtime_active(core->dev_dec) && !pm_runtime_active(core->dev_enc))
++			break;
+ 		msleep(10);
++	}
+ 
+ 	venus_shutdown(core);
+ 
+@@ -99,31 +109,55 @@ static void venus_sys_error_handler(struct work_struct *work)
+ 
+ 	pm_runtime_put_sync(core->dev);
+ 
+-	while (core->pmdomains[0] && pm_runtime_active(core->pmdomains[0]))
++	for (i = 0; i < max_attempts; i++) {
++		if (!core->pmdomains[0] || !pm_runtime_active(core->pmdomains[0]))
++			break;
+ 		usleep_range(1000, 1500);
++	}
+ 
+ 	hfi_reinit(core);
+ 
+-	pm_runtime_get_sync(core->dev);
++	ret = pm_runtime_get_sync(core->dev);
++	if (ret < 0) {
++		err_msg = "resume runtime PM";
++		failed = true;
++	}
+ 
+-	ret |= venus_boot(core);
+-	ret |= hfi_core_resume(core, true);
++	ret = venus_boot(core);
++	if (ret && !failed) {
++		err_msg = "boot Venus";
++		failed = true;
++	}
++
++	ret = hfi_core_resume(core, true);
++	if (ret && !failed) {
++		err_msg = "resume HFI";
++		failed = true;
++	}
+ 
+ 	enable_irq(core->irq);
+ 
+ 	mutex_unlock(&core->lock);
+ 
+-	ret |= hfi_core_init(core);
++	ret = hfi_core_init(core);
++	if (ret && !failed) {
++		err_msg = "init HFI";
++		failed = true;
++	}
+ 
+ 	pm_runtime_put_sync(core->dev);
+ 
+-	if (ret) {
++	if (failed) {
+ 		disable_irq_nosync(core->irq);
+-		dev_warn(core->dev, "recovery failed (%d)\n", ret);
++		dev_warn_ratelimited(core->dev,
++				     "System error has occurred, recovery failed to %s\n",
++				     err_msg);
+ 		schedule_delayed_work(&core->work, msecs_to_jiffies(10));
+ 		return;
+ 	}
+ 
++	dev_warn(core->dev, "system error has occurred (recovered)\n");
++
+ 	mutex_lock(&core->lock);
+ 	core->sys_error = false;
+ 	mutex_unlock(&core->lock);
+diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
+index 15bcb7f6e113c..1cb5eaabf340b 100644
+--- a/drivers/media/platform/s5p-g2d/g2d.c
++++ b/drivers/media/platform/s5p-g2d/g2d.c
+@@ -276,6 +276,9 @@ static int g2d_release(struct file *file)
+ 	struct g2d_dev *dev = video_drvdata(file);
+ 	struct g2d_ctx *ctx = fh2ctx(file->private_data);
+ 
++	mutex_lock(&dev->mutex);
++	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
++	mutex_unlock(&dev->mutex);
+ 	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ 	v4l2_fh_del(&ctx->fh);
+ 	v4l2_fh_exit(&ctx->fh);
+diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
+index 026111505f5a5..d402e456f27df 100644
+--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
++++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
+@@ -2566,11 +2566,8 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
+ static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
+ {
+ 	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+-	int ret;
+-
+-	ret = pm_runtime_get_sync(ctx->jpeg->dev);
+ 
+-	return ret > 0 ? 0 : ret;
++	return pm_runtime_resume_and_get(ctx->jpeg->dev);
+ }
+ 
+ static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
+diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
+index 4ac48441f22c4..ca4310e26c49e 100644
+--- a/drivers/media/platform/sh_vou.c
++++ b/drivers/media/platform/sh_vou.c
+@@ -1133,7 +1133,11 @@ static int sh_vou_open(struct file *file)
+ 	if (v4l2_fh_is_singular_file(file) &&
+ 	    vou_dev->status == SH_VOU_INITIALISING) {
+ 		/* First open */
+-		pm_runtime_get_sync(vou_dev->v4l2_dev.dev);
++		err = pm_runtime_resume_and_get(vou_dev->v4l2_dev.dev);
++		if (err < 0) {
++			v4l2_fh_release(file);
++			goto done_open;
++		}
+ 		err = sh_vou_hw_init(vou_dev);
+ 		if (err < 0) {
+ 			pm_runtime_put(vou_dev->v4l2_dev.dev);
+diff --git a/drivers/media/platform/sti/bdisp/Makefile b/drivers/media/platform/sti/bdisp/Makefile
+index caf7ccd193eaa..39ade0a347236 100644
+--- a/drivers/media/platform/sti/bdisp/Makefile
++++ b/drivers/media/platform/sti/bdisp/Makefile
+@@ -1,4 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-obj-$(CONFIG_VIDEO_STI_BDISP) := bdisp.o
++obj-$(CONFIG_VIDEO_STI_BDISP) += bdisp.o
+ 
+ bdisp-objs := bdisp-v4l2.o bdisp-hw.o bdisp-debug.o
+diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+index 060ca85f64d5d..85288da9d2ae6 100644
+--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
++++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+@@ -499,7 +499,7 @@ static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
+ {
+ 	struct bdisp_ctx *ctx = q->drv_priv;
+ 	struct vb2_v4l2_buffer *buf;
+-	int ret = pm_runtime_get_sync(ctx->bdisp_dev->dev);
++	int ret = pm_runtime_resume_and_get(ctx->bdisp_dev->dev);
+ 
+ 	if (ret < 0) {
+ 		dev_err(ctx->bdisp_dev->dev, "failed to set runtime PM\n");
+@@ -1364,10 +1364,10 @@ static int bdisp_probe(struct platform_device *pdev)
+ 
+ 	/* Power management */
+ 	pm_runtime_enable(dev);
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0) {
+ 		dev_err(dev, "failed to set PM\n");
+-		goto err_pm;
++		goto err_remove;
+ 	}
+ 
+ 	/* Filters */
+@@ -1395,6 +1395,7 @@ err_filter:
+ 	bdisp_hw_free_filters(bdisp->dev);
+ err_pm:
+ 	pm_runtime_put(dev);
++err_remove:
+ 	bdisp_debugfs_remove(bdisp);
+ 	v4l2_device_unregister(&bdisp->v4l2_dev);
+ err_clk:
+diff --git a/drivers/media/platform/sti/delta/Makefile b/drivers/media/platform/sti/delta/Makefile
+index 92b37e216f004..32412fa4c6328 100644
+--- a/drivers/media/platform/sti/delta/Makefile
++++ b/drivers/media/platform/sti/delta/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) := st-delta.o
++obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) += st-delta.o
+ st-delta-y := delta-v4l2.o delta-mem.o delta-ipc.o delta-debug.o
+ 
+ # MJPEG support
+diff --git a/drivers/media/platform/sti/hva/Makefile b/drivers/media/platform/sti/hva/Makefile
+index 74b41ec52f976..b5a5478bdd016 100644
+--- a/drivers/media/platform/sti/hva/Makefile
++++ b/drivers/media/platform/sti/hva/Makefile
+@@ -1,4 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-obj-$(CONFIG_VIDEO_STI_HVA) := st-hva.o
++obj-$(CONFIG_VIDEO_STI_HVA) += st-hva.o
+ st-hva-y := hva-v4l2.o hva-hw.o hva-mem.o hva-h264.o
+ st-hva-$(CONFIG_VIDEO_STI_HVA_DEBUGFS) += hva-debugfs.o
+diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
+index f59811e27f51f..6eeee5017fac4 100644
+--- a/drivers/media/platform/sti/hva/hva-hw.c
++++ b/drivers/media/platform/sti/hva/hva-hw.c
+@@ -130,8 +130,7 @@ static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
+ 	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
+ 	if (ctx_id >= HVA_MAX_INSTANCES) {
+ 		dev_err(dev, "%s     %s: bad context identifier: %d\n",
+-			ctx->name, __func__, ctx_id);
+-		ctx->hw_err = true;
++			HVA_PREFIX, __func__, ctx_id);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
+index 3f81dd17755cb..fbcca59a0517c 100644
+--- a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
++++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
+@@ -494,7 +494,7 @@ static int rotate_start_streaming(struct vb2_queue *vq, unsigned int count)
+ 		struct device *dev = ctx->dev->dev;
+ 		int ret;
+ 
+-		ret = pm_runtime_get_sync(dev);
++		ret = pm_runtime_resume_and_get(dev);
+ 		if (ret < 0) {
+ 			dev_err(dev, "Failed to enable module\n");
+ 
+diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
+index 133122e385150..9bc0b4d8de095 100644
+--- a/drivers/media/platform/video-mux.c
++++ b/drivers/media/platform/video-mux.c
+@@ -362,7 +362,7 @@ static int video_mux_async_register(struct video_mux *vmux,
+ 
+ 	for (i = 0; i < num_input_pads; i++) {
+ 		struct v4l2_async_subdev *asd;
+-		struct fwnode_handle *ep;
++		struct fwnode_handle *ep, *remote_ep;
+ 
+ 		ep = fwnode_graph_get_endpoint_by_id(
+ 			dev_fwnode(vmux->subdev.dev), i, 0,
+@@ -370,6 +370,14 @@ static int video_mux_async_register(struct video_mux *vmux,
+ 		if (!ep)
+ 			continue;
+ 
++		/* Skip dangling endpoints for backwards compatibility */
++		remote_ep = fwnode_graph_get_remote_endpoint(ep);
++		if (!remote_ep) {
++			fwnode_handle_put(ep);
++			continue;
++		}
++		fwnode_handle_put(remote_ep);
++
+ 		asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ 			&vmux->notifier, ep, struct v4l2_async_subdev);
+ 
+diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
+index a8a72d5fbd129..caefac07af927 100644
+--- a/drivers/media/usb/au0828/au0828-core.c
++++ b/drivers/media/usb/au0828/au0828-core.c
+@@ -199,8 +199,8 @@ static int au0828_media_device_init(struct au0828_dev *dev,
+ 	struct media_device *mdev;
+ 
+ 	mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
+-	if (!mdev)
+-		return -ENOMEM;
++	if (IS_ERR(mdev))
++		return PTR_ERR(mdev);
+ 
+ 	dev->media_dev = mdev;
+ #endif
+diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h
+index 50835f5f7512c..57b7f1ea68da5 100644
+--- a/drivers/media/usb/cpia2/cpia2.h
++++ b/drivers/media/usb/cpia2/cpia2.h
+@@ -429,6 +429,7 @@ int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd);
+ int cpia2_do_command(struct camera_data *cam,
+ 		     unsigned int command,
+ 		     unsigned char direction, unsigned char param);
++void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf);
+ struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf);
+ int cpia2_init_camera(struct camera_data *cam);
+ int cpia2_allocate_buffers(struct camera_data *cam);
+diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
+index e747548ab2869..b5a2d06fb356b 100644
+--- a/drivers/media/usb/cpia2/cpia2_core.c
++++ b/drivers/media/usb/cpia2/cpia2_core.c
+@@ -2163,6 +2163,18 @@ static void reset_camera_struct(struct camera_data *cam)
+ 	cam->height = cam->params.roi.height;
+ }
+ 
++/******************************************************************************
++ *
++ *  cpia2_init_camera_struct
++ *
++ *  Deinitialize camera struct
++ *****************************************************************************/
++void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf)
++{
++	v4l2_device_unregister(&cam->v4l2_dev);
++	kfree(cam);
++}
++
+ /******************************************************************************
+  *
+  *  cpia2_init_camera_struct
+diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
+index 3ab80a7b44985..76aac06f9fb8e 100644
+--- a/drivers/media/usb/cpia2/cpia2_usb.c
++++ b/drivers/media/usb/cpia2/cpia2_usb.c
+@@ -844,15 +844,13 @@ static int cpia2_usb_probe(struct usb_interface *intf,
+ 	ret = set_alternate(cam, USBIF_CMDONLY);
+ 	if (ret < 0) {
+ 		ERR("%s: usb_set_interface error (ret = %d)\n", __func__, ret);
+-		kfree(cam);
+-		return ret;
++		goto alt_err;
+ 	}
+ 
+ 
+ 	if((ret = cpia2_init_camera(cam)) < 0) {
+ 		ERR("%s: failed to initialize cpia2 camera (ret = %d)\n", __func__, ret);
+-		kfree(cam);
+-		return ret;
++		goto alt_err;
+ 	}
+ 	LOG("  CPiA Version: %d.%02d (%d.%d)\n",
+ 	       cam->params.version.firmware_revision_hi,
+@@ -872,11 +870,14 @@ static int cpia2_usb_probe(struct usb_interface *intf,
+ 	ret = cpia2_register_camera(cam);
+ 	if (ret < 0) {
+ 		ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret);
+-		kfree(cam);
+-		return ret;
++		goto alt_err;
+ 	}
+ 
+ 	return 0;
++
++alt_err:
++	cpia2_deinit_camera_struct(cam, intf);
++	return ret;
+ }
+ 
+ /******************************************************************************
+diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
+index 969a7ec71dff7..4116ba5c45fcb 100644
+--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
++++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
+@@ -78,6 +78,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
+ 
+ 	ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
+ 	if (ret < 0) {
++		if (adap->fe_adap[0].fe)
++			adap->fe_adap[0].fe->ops.release(adap->fe_adap[0].fe);
+ 		deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep state info\n");
+ 	}
+ 	mutex_unlock(&d->data_mutex);
+diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
+index 761992ad05e2a..7707de7bae7ca 100644
+--- a/drivers/media/usb/dvb-usb/cxusb.c
++++ b/drivers/media/usb/dvb-usb/cxusb.c
+@@ -1947,7 +1947,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
+ 
+ 	.size_of_priv     = sizeof(struct cxusb_state),
+ 
+-	.num_adapters = 2,
++	.num_adapters = 1,
+ 	.adapter = {
+ 		{
+ 		.num_frontends = 1,
+diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
+index 5aa15a7a49def..59529cbf9cd0b 100644
+--- a/drivers/media/usb/em28xx/em28xx-input.c
++++ b/drivers/media/usb/em28xx/em28xx-input.c
+@@ -720,7 +720,8 @@ static int em28xx_ir_init(struct em28xx *dev)
+ 			dev->board.has_ir_i2c = 0;
+ 			dev_warn(&dev->intf->dev,
+ 				 "No i2c IR remote control device found.\n");
+-			return -ENODEV;
++			err = -ENODEV;
++			goto ref_put;
+ 		}
+ 	}
+ 
+@@ -735,7 +736,7 @@ static int em28xx_ir_init(struct em28xx *dev)
+ 
+ 	ir = kzalloc(sizeof(*ir), GFP_KERNEL);
+ 	if (!ir)
+-		return -ENOMEM;
++		goto ref_put;
+ 	rc = rc_allocate_device(RC_DRIVER_SCANCODE);
+ 	if (!rc)
+ 		goto error;
+@@ -839,6 +840,9 @@ error:
+ 	dev->ir = NULL;
+ 	rc_free_device(rc);
+ 	kfree(ir);
++ref_put:
++	em28xx_shutdown_buttons(dev);
++	kref_put(&dev->ref, em28xx_free_device);
+ 	return err;
+ }
+ 
+diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c
+index 2c05ea2598e76..ce4ee8bc75c85 100644
+--- a/drivers/media/usb/gspca/gl860/gl860.c
++++ b/drivers/media/usb/gspca/gl860/gl860.c
+@@ -561,8 +561,8 @@ int gl860_RTx(struct gspca_dev *gspca_dev,
+ 					len, 400 + 200 * (len > 1));
+ 			memcpy(pdata, gspca_dev->usb_buf, len);
+ 		} else {
+-			r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+-					req, pref, val, index, NULL, len, 400);
++			gspca_err(gspca_dev, "zero-length read request\n");
++			r = -EINVAL;
+ 		}
+ 	}
+ 
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+index f4a727918e352..d38dee1792e41 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+@@ -2676,9 +2676,8 @@ void pvr2_hdw_destroy(struct pvr2_hdw *hdw)
+ 		pvr2_stream_destroy(hdw->vid_stream);
+ 		hdw->vid_stream = NULL;
+ 	}
+-	pvr2_i2c_core_done(hdw);
+ 	v4l2_device_unregister(&hdw->v4l2_dev);
+-	pvr2_hdw_remove_usb_stuff(hdw);
++	pvr2_hdw_disconnect(hdw);
+ 	mutex_lock(&pvr2_unit_mtx);
+ 	do {
+ 		if ((hdw->unit_number >= 0) &&
+@@ -2705,6 +2704,7 @@ void pvr2_hdw_disconnect(struct pvr2_hdw *hdw)
+ {
+ 	pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_disconnect(hdw=%p)",hdw);
+ 	LOCK_TAKE(hdw->big_lock);
++	pvr2_i2c_core_done(hdw);
+ 	LOCK_TAKE(hdw->ctl_lock);
+ 	pvr2_hdw_remove_usb_stuff(hdw);
+ 	LOCK_GIVE(hdw->ctl_lock);
+diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
+index 684574f58e82d..90eec79ee995a 100644
+--- a/drivers/media/v4l2-core/v4l2-fh.c
++++ b/drivers/media/v4l2-core/v4l2-fh.c
+@@ -96,6 +96,7 @@ int v4l2_fh_release(struct file *filp)
+ 		v4l2_fh_del(fh);
+ 		v4l2_fh_exit(fh);
+ 		kfree(fh);
++		filp->private_data = NULL;
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index 31d1342e61e88..7e8bf4b1ab2e5 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -3114,8 +3114,8 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+ 
+ static unsigned int video_translate_cmd(unsigned int cmd)
+ {
++#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
+ 	switch (cmd) {
+-#ifdef CONFIG_COMPAT_32BIT_TIME
+ 	case VIDIOC_DQEVENT_TIME32:
+ 		return VIDIOC_DQEVENT;
+ 	case VIDIOC_QUERYBUF_TIME32:
+@@ -3126,8 +3126,8 @@ static unsigned int video_translate_cmd(unsigned int cmd)
+ 		return VIDIOC_DQBUF;
+ 	case VIDIOC_PREPARE_BUF_TIME32:
+ 		return VIDIOC_PREPARE_BUF;
+-#endif
+ 	}
++#endif
+ 	if (in_compat_syscall())
+ 		return v4l2_compat_translate_cmd(cmd);
+ 
+@@ -3168,8 +3168,8 @@ static int video_get_user(void __user *arg, void *parg,
+ 	} else if (in_compat_syscall()) {
+ 		err = v4l2_compat_get_user(arg, parg, cmd);
+ 	} else {
++#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
+ 		switch (cmd) {
+-#ifdef CONFIG_COMPAT_32BIT_TIME
+ 		case VIDIOC_QUERYBUF_TIME32:
+ 		case VIDIOC_QBUF_TIME32:
+ 		case VIDIOC_DQBUF_TIME32:
+@@ -3197,8 +3197,8 @@ static int video_get_user(void __user *arg, void *parg,
+ 			};
+ 			break;
+ 		}
+-#endif
+ 		}
++#endif
+ 	}
+ 
+ 	/* zero out anything we don't copy from userspace */
+@@ -3223,8 +3223,8 @@ static int video_put_user(void __user *arg, void *parg,
+ 	if (in_compat_syscall())
+ 		return v4l2_compat_put_user(arg, parg, cmd);
+ 
++#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
+ 	switch (cmd) {
+-#ifdef CONFIG_COMPAT_32BIT_TIME
+ 	case VIDIOC_DQEVENT_TIME32: {
+ 		struct v4l2_event *ev = parg;
+ 		struct v4l2_event_time32 ev32;
+@@ -3272,8 +3272,8 @@ static int video_put_user(void __user *arg, void *parg,
+ 			return -EFAULT;
+ 		break;
+ 	}
+-#endif
+ 	}
++#endif
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
+index 956dafab43d49..bf3aa92524584 100644
+--- a/drivers/media/v4l2-core/v4l2-subdev.c
++++ b/drivers/media/v4l2-core/v4l2-subdev.c
+@@ -428,30 +428,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+ 
+ 		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
+ 
+-	case VIDIOC_DQEVENT_TIME32: {
+-		struct v4l2_event_time32 *ev32 = arg;
+-		struct v4l2_event ev = { };
+-
+-		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+-			return -ENOIOCTLCMD;
+-
+-		rval = v4l2_event_dequeue(vfh, &ev, file->f_flags & O_NONBLOCK);
+-
+-		*ev32 = (struct v4l2_event_time32) {
+-			.type		= ev.type,
+-			.pending	= ev.pending,
+-			.sequence	= ev.sequence,
+-			.timestamp.tv_sec  = ev.timestamp.tv_sec,
+-			.timestamp.tv_nsec = ev.timestamp.tv_nsec,
+-			.id		= ev.id,
+-		};
+-
+-		memcpy(&ev32->u, &ev.u, sizeof(ev.u));
+-		memcpy(&ev32->reserved, &ev.reserved, sizeof(ev.reserved));
+-
+-		return rval;
+-	}
+-
+ 	case VIDIOC_SUBSCRIBE_EVENT:
+ 		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+ 
+diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
+index 102dbb8080da5..29271ad4728a2 100644
+--- a/drivers/memstick/host/rtsx_usb_ms.c
++++ b/drivers/memstick/host/rtsx_usb_ms.c
+@@ -799,9 +799,9 @@ static int rtsx_usb_ms_drv_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ err_out:
+-	memstick_free_host(msh);
+ 	pm_runtime_disable(ms_dev(host));
+ 	pm_runtime_put_noidle(ms_dev(host));
++	memstick_free_host(msh);
+ 	return err;
+ }
+ 
+@@ -828,9 +828,6 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
+ 	}
+ 	mutex_unlock(&host->host_mutex);
+ 
+-	memstick_remove_host(msh);
+-	memstick_free_host(msh);
+-
+ 	/* Balance possible unbalanced usage count
+ 	 * e.g. unconditional module removal
+ 	 */
+@@ -838,10 +835,11 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
+ 		pm_runtime_put(ms_dev(host));
+ 
+ 	pm_runtime_disable(ms_dev(host));
+-	platform_set_drvdata(pdev, NULL);
+-
++	memstick_remove_host(msh);
+ 	dev_dbg(ms_dev(host),
+ 		": Realtek USB Memstick controller has been removed\n");
++	memstick_free_host(msh);
++	platform_set_drvdata(pdev, NULL);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index b74efa469e909..8b421b21a2320 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -465,6 +465,7 @@ config MFD_MP2629
+ 	tristate "Monolithic Power Systems MP2629 ADC and Battery charger"
+ 	depends on I2C
+ 	select REGMAP_I2C
++	select MFD_CORE
+ 	help
+ 	  Select this option to enable support for Monolithic Power Systems
+ 	  battery charger. This provides ADC, thermal and battery charger power
+diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
+index dc452df1f1bfe..652a5e60067f8 100644
+--- a/drivers/mfd/rn5t618.c
++++ b/drivers/mfd/rn5t618.c
+@@ -104,7 +104,7 @@ static int rn5t618_irq_init(struct rn5t618 *rn5t618)
+ 
+ 	ret = devm_regmap_add_irq_chip(rn5t618->dev, rn5t618->regmap,
+ 				       rn5t618->irq,
+-				       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
++				       IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ 				       0, irq_chip, &rn5t618->irq_data);
+ 	if (ret)
+ 		dev_err(rn5t618->dev, "Failed to register IRQ chip\n");
+diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
+index 81c70e5bc168f..3e4a594c110b3 100644
+--- a/drivers/misc/eeprom/idt_89hpesx.c
++++ b/drivers/misc/eeprom/idt_89hpesx.c
+@@ -1126,11 +1126,10 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
+ 
+ 	device_for_each_child_node(dev, fwnode) {
+ 		ee_id = idt_ee_match_id(fwnode);
+-		if (!ee_id) {
+-			dev_warn(dev, "Skip unsupported EEPROM device");
+-			continue;
+-		} else
++		if (ee_id)
+ 			break;
++
++		dev_warn(dev, "Skip unsupported EEPROM device %pfw\n", fwnode);
+ 	}
+ 
+ 	/* If there is no fwnode EEPROM device, then set zero size */
+@@ -1161,6 +1160,7 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
+ 	else /* if (!fwnode_property_read_bool(node, "read-only")) */
+ 		pdev->eero = false;
+ 
++	fwnode_handle_put(fwnode);
+ 	dev_info(dev, "EEPROM of %d bytes found by 0x%x",
+ 		pdev->eesize, pdev->eeaddr);
+ }
+diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
+index 032d114f01ea5..8271406262445 100644
+--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
++++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
+@@ -437,6 +437,7 @@ static int hl_pci_probe(struct pci_dev *pdev,
+ 	return 0;
+ 
+ disable_device:
++	pci_disable_pcie_error_reporting(pdev);
+ 	pci_set_drvdata(pdev, NULL);
+ 	destroy_hdev(hdev);
+ 
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index a4c06ef673943..6573ec3792d60 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1004,6 +1004,12 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
+ 
+ 	switch (mq_rq->drv_op) {
+ 	case MMC_DRV_OP_IOCTL:
++		if (card->ext_csd.cmdq_en) {
++			ret = mmc_cmdq_disable(card);
++			if (ret)
++				break;
++		}
++		fallthrough;
+ 	case MMC_DRV_OP_IOCTL_RPMB:
+ 		idata = mq_rq->drv_op_data;
+ 		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
+@@ -1014,6 +1020,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
+ 		/* Always switch back to main area after RPMB access */
+ 		if (rpmb_ioctl)
+ 			mmc_blk_part_switch(card, 0);
++		else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
++			mmc_cmdq_enable(card);
+ 		break;
+ 	case MMC_DRV_OP_BOOT_WP:
+ 		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
+diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
+index 7d8692e909961..b6ac2af199b8d 100644
+--- a/drivers/mmc/host/sdhci-of-aspeed.c
++++ b/drivers/mmc/host/sdhci-of-aspeed.c
+@@ -150,7 +150,7 @@ static int aspeed_sdhci_phase_to_tap(struct device *dev, unsigned long rate_hz,
+ 
+ 	tap = div_u64(phase_period_ps, prop_delay_ps);
+ 	if (tap > ASPEED_SDHCI_NR_TAPS) {
+-		dev_warn(dev,
++		dev_dbg(dev,
+ 			 "Requested out of range phase tap %d for %d degrees of phase compensation at %luHz, clamping to tap %d\n",
+ 			 tap, phase_deg, rate_hz, ASPEED_SDHCI_NR_TAPS);
+ 		tap = ASPEED_SDHCI_NR_TAPS;
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index 5dc36efff47ff..11e375579cfb9 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -393,6 +393,7 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
+ static struct sdhci_ops sdhci_sprd_ops = {
+ 	.read_l = sdhci_sprd_readl,
+ 	.write_l = sdhci_sprd_writel,
++	.write_w = sdhci_sprd_writew,
+ 	.write_b = sdhci_sprd_writeb,
+ 	.set_clock = sdhci_sprd_set_clock,
+ 	.get_max_clock = sdhci_sprd_get_max_clock,
+diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
+index 615f3d008af1e..b9b79b1089a00 100644
+--- a/drivers/mmc/host/usdhi6rol0.c
++++ b/drivers/mmc/host/usdhi6rol0.c
+@@ -1801,6 +1801,7 @@ static int usdhi6_probe(struct platform_device *pdev)
+ 
+ 	version = usdhi6_read(host, USDHI6_VERSION);
+ 	if ((version & 0xfff) != 0xa0d) {
++		ret = -EPERM;
+ 		dev_err(dev, "Version not recognized %x\n", version);
+ 		goto e_clk_off;
+ 	}
+diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
+index 4f4c0813f9fdc..350e67056fa62 100644
+--- a/drivers/mmc/host/via-sdmmc.c
++++ b/drivers/mmc/host/via-sdmmc.c
+@@ -857,6 +857,9 @@ static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
+ {
+ 	BUG_ON(intmask == 0);
+ 
++	if (!host->data)
++		return;
++
+ 	if (intmask & VIA_CRDR_SDSTS_DT)
+ 		host->data->error = -ETIMEDOUT;
+ 	else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 739cf63ef6e2f..4950d10d3a191 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2279,7 +2279,7 @@ static int vub300_probe(struct usb_interface *interface,
+ 	if (retval < 0)
+ 		goto error5;
+ 	retval =
+-		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
++		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+ 				SET_ROM_WAIT_STATES,
+ 				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 				firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
+diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
+index 549aac00228eb..390f8d719c258 100644
+--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
++++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
+@@ -273,6 +273,37 @@ static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
+ 	return 0;
+ }
+ 
++static int anfc_select_target(struct nand_chip *chip, int target)
++{
++	struct anand *anand = to_anand(chip);
++	struct arasan_nfc *nfc = to_anfc(chip->controller);
++	int ret;
++
++	/* Update the controller timings and the potential ECC configuration */
++	writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
++
++	/* Update clock frequency */
++	if (nfc->cur_clk != anand->clk) {
++		clk_disable_unprepare(nfc->controller_clk);
++		ret = clk_set_rate(nfc->controller_clk, anand->clk);
++		if (ret) {
++			dev_err(nfc->dev, "Failed to change clock rate\n");
++			return ret;
++		}
++
++		ret = clk_prepare_enable(nfc->controller_clk);
++		if (ret) {
++			dev_err(nfc->dev,
++				"Failed to re-enable the controller clock\n");
++			return ret;
++		}
++
++		nfc->cur_clk = anand->clk;
++	}
++
++	return 0;
++}
++
+ /*
+  * When using the embedded hardware ECC engine, the controller is in charge of
+  * feeding the engine with, first, the ECC residue present in the data array.
+@@ -401,6 +432,18 @@ static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ 	return 0;
+ }
+ 
++static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
++				     int oob_required, int page)
++{
++	int ret;
++
++	ret = anfc_select_target(chip, chip->cur_cs);
++	if (ret)
++		return ret;
++
++	return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
++};
++
+ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ 				  int oob_required, int page)
+ {
+@@ -461,6 +504,18 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ 	return ret;
+ }
+ 
++static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
++				      int oob_required, int page)
++{
++	int ret;
++
++	ret = anfc_select_target(chip, chip->cur_cs);
++	if (ret)
++		return ret;
++
++	return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
++};
++
+ /* NAND framework ->exec_op() hooks and related helpers */
+ static int anfc_parse_instructions(struct nand_chip *chip,
+ 				   const struct nand_subop *subop,
+@@ -753,37 +808,6 @@ static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
+ 		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ 	);
+ 
+-static int anfc_select_target(struct nand_chip *chip, int target)
+-{
+-	struct anand *anand = to_anand(chip);
+-	struct arasan_nfc *nfc = to_anfc(chip->controller);
+-	int ret;
+-
+-	/* Update the controller timings and the potential ECC configuration */
+-	writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
+-
+-	/* Update clock frequency */
+-	if (nfc->cur_clk != anand->clk) {
+-		clk_disable_unprepare(nfc->controller_clk);
+-		ret = clk_set_rate(nfc->controller_clk, anand->clk);
+-		if (ret) {
+-			dev_err(nfc->dev, "Failed to change clock rate\n");
+-			return ret;
+-		}
+-
+-		ret = clk_prepare_enable(nfc->controller_clk);
+-		if (ret) {
+-			dev_err(nfc->dev,
+-				"Failed to re-enable the controller clock\n");
+-			return ret;
+-		}
+-
+-		nfc->cur_clk = anand->clk;
+-	}
+-
+-	return 0;
+-}
+-
+ static int anfc_check_op(struct nand_chip *chip,
+ 			 const struct nand_operation *op)
+ {
+@@ -1007,8 +1031,8 @@ static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
+ 	if (!anand->bch)
+ 		return -EINVAL;
+ 
+-	ecc->read_page = anfc_read_page_hw_ecc;
+-	ecc->write_page = anfc_write_page_hw_ecc;
++	ecc->read_page = anfc_sel_read_page_hw_ecc;
++	ecc->write_page = anfc_sel_write_page_hw_ecc;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
+index 79da6b02e2095..f83525a1ab0e6 100644
+--- a/drivers/mtd/nand/raw/marvell_nand.c
++++ b/drivers/mtd/nand/raw/marvell_nand.c
+@@ -3030,8 +3030,10 @@ static int __maybe_unused marvell_nfc_resume(struct device *dev)
+ 		return ret;
+ 
+ 	ret = clk_prepare_enable(nfc->reg_clk);
+-	if (ret < 0)
++	if (ret < 0) {
++		clk_disable_unprepare(nfc->core_clk);
+ 		return ret;
++	}
+ 
+ 	/*
+ 	 * Reset nfc->selected_chip so the next command will cause the timing
+diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
+index 17f63f95f4a28..54ae540bc66b4 100644
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -290,6 +290,8 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
+ {
+ 	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
+ 	struct spinand_device *spinand = nand_to_spinand(nand);
++	struct mtd_info *mtd = spinand_to_mtd(spinand);
++	int ret;
+ 
+ 	if (req->mode == MTD_OPS_RAW)
+ 		return 0;
+@@ -299,7 +301,13 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
+ 		return 0;
+ 
+ 	/* Finish a page write: check the status, report errors/bitflips */
+-	return spinand_check_ecc_status(spinand, engine_conf->status);
++	ret = spinand_check_ecc_status(spinand, engine_conf->status);
++	if (ret == -EBADMSG)
++		mtd->ecc_stats.failed++;
++	else if (ret > 0)
++		mtd->ecc_stats.corrected += ret;
++
++	return ret;
+ }
+ 
+ static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
+@@ -620,13 +628,10 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ 		if (ret < 0 && ret != -EBADMSG)
+ 			break;
+ 
+-		if (ret == -EBADMSG) {
++		if (ret == -EBADMSG)
+ 			ecc_failed = true;
+-			mtd->ecc_stats.failed++;
+-		} else {
+-			mtd->ecc_stats.corrected += ret;
++		else
+ 			max_bitflips = max_t(unsigned int, max_bitflips, ret);
+-		}
+ 
+ 		ret = 0;
+ 		ops->retlen += iter.req.datalen;
+diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
+index d9083308f6ba6..06a818cd2433f 100644
+--- a/drivers/mtd/parsers/qcomsmempart.c
++++ b/drivers/mtd/parsers/qcomsmempart.c
+@@ -159,6 +159,15 @@ out_free_parts:
+ 	return ret;
+ }
+ 
++static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
++				   int nr_parts)
++{
++	int i;
++
++	for (i = 0; i < nr_parts; i++)
++		kfree(pparts[i].name);
++}
++
+ static const struct of_device_id qcomsmem_of_match_table[] = {
+ 	{ .compatible = "qcom,smem-part" },
+ 	{},
+@@ -167,6 +176,7 @@ MODULE_DEVICE_TABLE(of, qcomsmem_of_match_table);
+ 
+ static struct mtd_part_parser mtd_parser_qcomsmem = {
+ 	.parse_fn = parse_qcomsmem_part,
++	.cleanup = parse_qcomsmem_cleanup,
+ 	.name = "qcomsmem",
+ 	.of_match_table = qcomsmem_of_match_table,
+ };
+diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
+index 91146bdc47132..3ccd6363ee8cb 100644
+--- a/drivers/mtd/parsers/redboot.c
++++ b/drivers/mtd/parsers/redboot.c
+@@ -45,6 +45,7 @@ static inline int redboot_checksum(struct fis_image_desc *img)
+ static void parse_redboot_of(struct mtd_info *master)
+ {
+ 	struct device_node *np;
++	struct device_node *npart;
+ 	u32 dirblock;
+ 	int ret;
+ 
+@@ -52,7 +53,11 @@ static void parse_redboot_of(struct mtd_info *master)
+ 	if (!np)
+ 		return;
+ 
+-	ret = of_property_read_u32(np, "fis-index-block", &dirblock);
++	npart = of_get_child_by_name(np, "partitions");
++	if (!npart)
++		return;
++
++	ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
+ 	if (ret)
+ 		return;
+ 
+diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
+index 00847cbaf7b62..d08718e98e110 100644
+--- a/drivers/net/can/peak_canfd/peak_canfd.c
++++ b/drivers/net/can/peak_canfd/peak_canfd.c
+@@ -351,8 +351,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
+ 				return err;
+ 		}
+ 
+-		/* start network queue (echo_skb array is empty) */
+-		netif_start_queue(ndev);
++		/* wake network queue up (echo_skb array is empty) */
++		netif_wake_queue(ndev);
+ 
+ 		return 0;
+ 	}
+diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
+index 18f40eb203605..5cd26c1b78ad0 100644
+--- a/drivers/net/can/usb/ems_usb.c
++++ b/drivers/net/can/usb/ems_usb.c
+@@ -1053,7 +1053,6 @@ static void ems_usb_disconnect(struct usb_interface *intf)
+ 
+ 	if (dev) {
+ 		unregister_netdev(dev->netdev);
+-		free_candev(dev->netdev);
+ 
+ 		unlink_all_urbs(dev);
+ 
+@@ -1061,6 +1060,8 @@ static void ems_usb_disconnect(struct usb_interface *intf)
+ 
+ 		kfree(dev->intr_in_buffer);
+ 		kfree(dev->tx_msg_buffer);
++
++		free_candev(dev->netdev);
+ 	}
+ }
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index e08bf93771400..25363fceb45e8 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -1552,9 +1552,6 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
+ 	struct mv88e6xxx_vtu_entry vlan;
+ 	int i, err;
+ 
+-	if (!vid)
+-		return -EOPNOTSUPP;
+-
+ 	/* DSA and CPU ports have to be members of multiple vlans */
+ 	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ 		return 0;
+@@ -1993,6 +1990,9 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+ 	u8 member;
+ 	int err;
+ 
++	if (!vlan->vid)
++		return 0;
++
+ 	err = mv88e6xxx_port_vlan_prepare(ds, port, vlan);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index 926544440f02a..42a0fb588f647 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -1798,6 +1798,12 @@ static int sja1105_reload_cbs(struct sja1105_private *priv)
+ {
+ 	int rc = 0, i;
+ 
++	/* The credit based shapers are only allocated if
++	 * CONFIG_NET_SCH_CBS is enabled.
++	 */
++	if (!priv->cbs)
++		return 0;
++
+ 	for (i = 0; i < priv->info->num_cbs_shapers; i++) {
+ 		struct sja1105_cbs_entry *cbs = &priv->cbs[i];
+ 
+diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
+index 9c5891bbfe61a..f4f50b3a472e1 100644
+--- a/drivers/net/ethernet/aeroflex/greth.c
++++ b/drivers/net/ethernet/aeroflex/greth.c
+@@ -1539,10 +1539,11 @@ static int greth_of_remove(struct platform_device *of_dev)
+ 	mdiobus_unregister(greth->mdio);
+ 
+ 	unregister_netdev(ndev);
+-	free_netdev(ndev);
+ 
+ 	of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
+ 
++	free_netdev(ndev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
+index f5fba8b8cdea9..a47e2710487ec 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
+@@ -91,7 +91,7 @@ struct aq_macsec_txsc {
+ 	u32 hw_sc_idx;
+ 	unsigned long tx_sa_idx_busy;
+ 	const struct macsec_secy *sw_secy;
+-	u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
++	u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
+ 	struct aq_macsec_tx_sc_stats stats;
+ 	struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN];
+ };
+@@ -101,7 +101,7 @@ struct aq_macsec_rxsc {
+ 	unsigned long rx_sa_idx_busy;
+ 	const struct macsec_secy *sw_secy;
+ 	const struct macsec_rx_sc *sw_rxsc;
+-	u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
++	u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
+ 	struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN];
+ };
+ 
+diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
+index 65981931a7989..a31984cd0fb7b 100644
+--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
++++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
+@@ -165,9 +165,6 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
+ 	if (!ring->slots)
+ 		goto err_free_buf_descs;
+ 
+-	ring->read_idx = 0;
+-	ring->write_idx = 0;
+-
+ 	return 0;
+ 
+ err_free_buf_descs:
+@@ -295,6 +292,9 @@ static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
+ 
+ 	enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
+ 		   (uint32_t)ring->dma_addr);
++
++	ring->read_idx = 0;
++	ring->write_idx = 0;
+ }
+ 
+ static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index fcca023f22e54..41f7f078cd27c 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -4296,3 +4296,4 @@ MODULE_AUTHOR("Broadcom Corporation");
+ MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
+ MODULE_ALIAS("platform:bcmgenet");
+ MODULE_LICENSE("GPL");
++MODULE_SOFTDEP("pre: mdio-bcm-unimac");
+diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
+index 701c12c9e0337..649c5c429bd7c 100644
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
+@@ -550,7 +550,7 @@ int be_process_mcc(struct be_adapter *adapter)
+ 	int num = 0, status = 0;
+ 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ 
+-	spin_lock_bh(&adapter->mcc_cq_lock);
++	spin_lock(&adapter->mcc_cq_lock);
+ 
+ 	while ((compl = be_mcc_compl_get(adapter))) {
+ 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+@@ -566,7 +566,7 @@ int be_process_mcc(struct be_adapter *adapter)
+ 	if (num)
+ 		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
+ 
+-	spin_unlock_bh(&adapter->mcc_cq_lock);
++	spin_unlock(&adapter->mcc_cq_lock);
+ 	return status;
+ }
+ 
+@@ -581,7 +581,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
+ 		if (be_check_error(adapter, BE_ERROR_ANY))
+ 			return -EIO;
+ 
++		local_bh_disable();
+ 		status = be_process_mcc(adapter);
++		local_bh_enable();
+ 
+ 		if (atomic_read(&mcc_obj->q.used) == 0)
+ 			break;
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 7968568bbe214..361c1c87c1830 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -5501,7 +5501,9 @@ static void be_worker(struct work_struct *work)
+ 	 * mcc completions
+ 	 */
+ 	if (!netif_running(adapter->netdev)) {
++		local_bh_disable();
+ 		be_process_mcc(adapter);
++		local_bh_enable();
+ 		goto reschedule;
+ 	}
+ 
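
The two benet hunks above move bottom-half handling out of be_process_mcc() and into its process-context callers: the function now takes a plain spin_lock(), while be_mcc_wait_compl() and be_worker() bracket the call with local_bh_disable()/local_bh_enable(). The NAPI poll path, which appears to be the other caller, already runs with BHs disabled, so the lock is now acquired with BHs off on every path without be_process_mcc() itself toggling bottom halves from softirq context. A kernel-style sketch of the resulting convention (not standalone; ctx and drain_completions are illustrative names):

/* Locking contract: every caller guarantees BHs are disabled. */
static void drain_completions(struct ctx *c)
{
    spin_lock(&c->lock);
    /* ... consume completion entries ... */
    spin_unlock(&c->lock);
}

/* Softirq caller (NAPI poll): BHs are disabled by context. */
drain_completions(c);

/* Process-context caller: disable BHs explicitly around the call. */
local_bh_disable();
drain_completions(c);
local_bh_enable();
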
+diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
+index 815fb62c4b02e..3d74401b4f102 100644
+--- a/drivers/net/ethernet/ezchip/nps_enet.c
++++ b/drivers/net/ethernet/ezchip/nps_enet.c
+@@ -610,7 +610,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
+ 
+ 	/* Get IRQ number */
+ 	priv->irq = platform_get_irq(pdev, 0);
+-	if (!priv->irq) {
++	if (priv->irq < 0) {
+ 		dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
+ 		err = -ENODEV;
+ 		goto out_netdev;
+@@ -645,8 +645,8 @@ static s32 nps_enet_remove(struct platform_device *pdev)
+ 	struct nps_enet_priv *priv = netdev_priv(ndev);
+ 
+ 	unregister_netdev(ndev);
+-	free_netdev(ndev);
+ 	netif_napi_del(&priv->napi);
++	free_netdev(ndev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 04421aec2dfd6..11dbbfd38770c 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -1830,14 +1830,17 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ 	if (np && of_get_property(np, "use-ncsi", NULL)) {
+ 		if (!IS_ENABLED(CONFIG_NET_NCSI)) {
+ 			dev_err(&pdev->dev, "NCSI stack not enabled\n");
++			err = -EINVAL;
+ 			goto err_phy_connect;
+ 		}
+ 
+ 		dev_info(&pdev->dev, "Using NCSI interface\n");
+ 		priv->use_ncsi = true;
+ 		priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
+-		if (!priv->ndev)
++		if (!priv->ndev) {
++			err = -EINVAL;
+ 			goto err_phy_connect;
++		}
+ 	} else if (np && of_get_property(np, "phy-handle", NULL)) {
+ 		struct phy_device *phy;
+ 
+@@ -1856,6 +1859,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ 					     &ftgmac100_adjust_link);
+ 		if (!phy) {
+ 			dev_err(&pdev->dev, "Failed to connect to phy\n");
++			err = -EINVAL;
+ 			goto err_phy_connect;
+ 		}
+ 
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index bbc423e931223..79cefe85a799f 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -1295,8 +1295,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	gve_write_version(&reg_bar->driver_version);
+ 	/* Get max queues to alloc etherdev */
+-	max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
+-	max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
++	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
++	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
+ 	/* Alloc and setup the netdev and priv */
+ 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
+ 	if (!dev) {
+diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+index c2e7404757869..f630667364253 100644
+--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -2617,10 +2617,8 @@ static int ehea_restart_qps(struct net_device *dev)
+ 	u16 dummy16 = 0;
+ 
+ 	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
+-	if (!cb0) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
++	if (!cb0)
++		return -ENOMEM;
+ 
+ 	for (i = 0; i < (port->num_def_qps); i++) {
+ 		struct ehea_port_res *pr =  &port->port_res[i];
+@@ -2640,6 +2638,7 @@ static int ehea_restart_qps(struct net_device *dev)
+ 					    cb0);
+ 		if (hret != H_SUCCESS) {
+ 			netdev_err(dev, "query_ehea_qp failed (1)\n");
++			ret = -EFAULT;
+ 			goto out;
+ 		}
+ 
+@@ -2652,6 +2651,7 @@ static int ehea_restart_qps(struct net_device *dev)
+ 					     &dummy64, &dummy16, &dummy16);
+ 		if (hret != H_SUCCESS) {
+ 			netdev_err(dev, "modify_ehea_qp failed (1)\n");
++			ret = -EFAULT;
+ 			goto out;
+ 		}
+ 
+@@ -2660,6 +2660,7 @@ static int ehea_restart_qps(struct net_device *dev)
+ 					    cb0);
+ 		if (hret != H_SUCCESS) {
+ 			netdev_err(dev, "query_ehea_qp failed (2)\n");
++			ret = -EFAULT;
+ 			goto out;
+ 		}
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index ffb2a91750c7e..3c77897b3f31f 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -106,6 +106,8 @@ static void release_crq_queue(struct ibmvnic_adapter *);
+ static int __ibmvnic_set_mac(struct net_device *, u8 *);
+ static int init_crq_queue(struct ibmvnic_adapter *adapter);
+ static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
++static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
++					 struct ibmvnic_sub_crq_queue *tx_scrq);
+ 
+ struct ibmvnic_stat {
+ 	char name[ETH_GSTRING_LEN];
+@@ -209,12 +211,11 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
+ 	mutex_lock(&adapter->fw_lock);
+ 	adapter->fw_done_rc = 0;
+ 	reinit_completion(&adapter->fw_done);
+-	rc = send_request_map(adapter, ltb->addr,
+-			      ltb->size, ltb->map_id);
++
++	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+ 	if (rc) {
+-		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+-		mutex_unlock(&adapter->fw_lock);
+-		return rc;
++		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
++		goto out;
+ 	}
+ 
+ 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+@@ -222,20 +223,23 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
+ 		dev_err(dev,
+ 			"Long term map request aborted or timed out,rc = %d\n",
+ 			rc);
+-		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+-		mutex_unlock(&adapter->fw_lock);
+-		return rc;
++		goto out;
+ 	}
+ 
+ 	if (adapter->fw_done_rc) {
+ 		dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
+ 			adapter->fw_done_rc);
++		rc = -1;
++		goto out;
++	}
++	rc = 0;
++out:
++	if (rc) {
+ 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+-		mutex_unlock(&adapter->fw_lock);
+-		return -1;
++		ltb->buff = NULL;
+ 	}
+ 	mutex_unlock(&adapter->fw_lock);
+-	return 0;
++	return rc;
+ }
+ 
+ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
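
The rewritten alloc_long_term_buff() collapses three duplicated dma_free_coherent()/mutex_unlock() failure sequences into a single out: label that unwinds only when rc is nonzero, and it now also NULLs ltb->buff so later frees cannot touch stale memory. A runnable standalone sketch of the goto-unwind idiom (the steps are hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* One out: label owns the failure cleanup; every error path jumps there.
 * On success the buffer deliberately outlives the function, as the DMA
 * buffer does in the driver (so this demo keeps it alive by design). */
static char *buf;

static int setup(int fail_step)
{
    int rc;

    buf = malloc(64);
    if (!buf)
        return -1;

    if (fail_step == 1) { rc = -1; goto out; }  /* e.g. request failed  */
    if (fail_step == 2) { rc = -2; goto out; }  /* e.g. wait timed out  */
    rc = 0;
out:
    if (rc) {            /* unwind only on failure */
        free(buf);
        buf = NULL;
    }
    return rc;
}

int main(void)
{
    printf("fail_step=2 -> rc=%d, buf=%p\n", setup(2), (void *)buf);
    printf("fail_step=0 -> rc=%d, buf=%p\n", setup(0), (void *)buf);
    return 0;
}
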
+@@ -255,14 +259,44 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+ 	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
+ 		send_request_unmap(adapter, ltb->map_id);
+ 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
++	ltb->buff = NULL;
++	ltb->map_id = 0;
+ }
+ 
+-static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
++static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
++				struct ibmvnic_long_term_buff *ltb)
+ {
+-	if (!ltb->buff)
+-		return -EINVAL;
++	struct device *dev = &adapter->vdev->dev;
++	int rc;
+ 
+ 	memset(ltb->buff, 0, ltb->size);
++
++	mutex_lock(&adapter->fw_lock);
++	adapter->fw_done_rc = 0;
++
++	reinit_completion(&adapter->fw_done);
++	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
++	if (rc) {
++		mutex_unlock(&adapter->fw_lock);
++		return rc;
++	}
++
++	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
++	if (rc) {
++		dev_info(dev,
++			 "Reset failed, long term map request timed out or aborted\n");
++		mutex_unlock(&adapter->fw_lock);
++		return rc;
++	}
++
++	if (adapter->fw_done_rc) {
++		dev_info(dev,
++			 "Reset failed, attempting to free and reallocate buffer\n");
++		free_long_term_buff(adapter, ltb);
++		mutex_unlock(&adapter->fw_lock);
++		return alloc_long_term_buff(adapter, ltb, ltb->size);
++	}
++	mutex_unlock(&adapter->fw_lock);
+ 	return 0;
+ }
+ 
+@@ -298,7 +332,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
+ 
+ 	rx_scrq = adapter->rx_scrq[pool->index];
+ 	ind_bufp = &rx_scrq->ind_buf;
+-	for (i = 0; i < count; ++i) {
++
++	/* netdev_alloc_skb() could have failed after we saved a few skbs
++	 * in the indir_buf and we would not have sent them to VIOS yet.
++	 * To account for them, start the loop at ind_bufp->index rather
++	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
++	 * be 0.
++	 */
++	for (i = ind_bufp->index; i < count; ++i) {
+ 		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+ 		if (!skb) {
+ 			dev_err(dev, "Couldn't replenish rx buff\n");
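
The new loop start implements the comment above it: if a previous replenish pass staged entries in the indirect buffer but failed before flushing them to the hypervisor, restarting the fill at ind_bufp->index rather than 0 keeps those staged entries from being overwritten or double-counted. A runnable toy model of resuming a partially staged batch (names are hypothetical):

#include <stdio.h>

#define BATCH 4

static int ind_buf[BATCH];   /* entries staged but not yet flushed */
static int ind_index;        /* 0 only when everything was flushed */

static void replenish(int count)
{
    /* Resume at ind_index so entries staged by an earlier, partially
     * failed pass are kept instead of being overwritten. */
    for (int i = ind_index; i < count; i++) {
        ind_buf[i] = 100 + i;          /* stand-in for a fresh skb */
        ind_index = i + 1;
    }
}

int main(void)
{
    ind_index = 2;                     /* a prior pass staged slots 0-1 */
    ind_buf[0] = 100;
    ind_buf[1] = 101;

    replenish(BATCH);
    for (int i = 0; i < BATCH; i++)
        printf("slot %d -> %d\n", i, ind_buf[i]);
    return 0;
}
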
+@@ -484,7 +525,8 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
+ 						  rx_pool->size *
+ 						  rx_pool->buff_size);
+ 		} else {
+-			rc = reset_long_term_buff(&rx_pool->long_term_buff);
++			rc = reset_long_term_buff(adapter,
++						  &rx_pool->long_term_buff);
+ 		}
+ 
+ 		if (rc)
+@@ -607,11 +649,12 @@ static int init_rx_pools(struct net_device *netdev)
+ 	return 0;
+ }
+ 
+-static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
++static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
++			     struct ibmvnic_tx_pool *tx_pool)
+ {
+ 	int rc, i;
+ 
+-	rc = reset_long_term_buff(&tx_pool->long_term_buff);
++	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -638,10 +681,11 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
+ 
+ 	tx_scrqs = adapter->num_active_tx_pools;
+ 	for (i = 0; i < tx_scrqs; i++) {
+-		rc = reset_one_tx_pool(&adapter->tso_pool[i]);
++		ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
++		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
+ 		if (rc)
+ 			return rc;
+-		rc = reset_one_tx_pool(&adapter->tx_pool[i]);
++		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
+ 		if (rc)
+ 			return rc;
+ 	}
+@@ -734,8 +778,11 @@ static int init_tx_pools(struct net_device *netdev)
+ 
+ 	adapter->tso_pool = kcalloc(tx_subcrqs,
+ 				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+-	if (!adapter->tso_pool)
++	if (!adapter->tso_pool) {
++		kfree(adapter->tx_pool);
++		adapter->tx_pool = NULL;
+ 		return -1;
++	}
+ 
+ 	adapter->num_active_tx_pools = tx_subcrqs;
+ 
+@@ -1156,6 +1203,11 @@ static int __ibmvnic_open(struct net_device *netdev)
+ 
+ 	netif_tx_start_all_queues(netdev);
+ 
++	if (prev_state == VNIC_CLOSED) {
++		for (i = 0; i < adapter->req_rx_queues; i++)
++			napi_schedule(&adapter->napi[i]);
++	}
++
+ 	adapter->state = VNIC_OPEN;
+ 	return rc;
+ }
+@@ -1557,7 +1609,8 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+ 	ind_bufp->index = 0;
+ 	if (atomic_sub_return(entries, &tx_scrq->used) <=
+ 	    (adapter->req_tx_entries_per_subcrq / 2) &&
+-	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
++	    __netif_subqueue_stopped(adapter->netdev, queue_num) &&
++	    !test_bit(0, &adapter->resetting)) {
+ 		netif_wake_subqueue(adapter->netdev, queue_num);
+ 		netdev_dbg(adapter->netdev, "Started queue %d\n",
+ 			   queue_num);
+@@ -1650,7 +1703,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 		tx_send_failed++;
+ 		tx_dropped++;
+ 		ret = NETDEV_TX_OK;
+-		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ 		goto out;
+ 	}
+ 
+@@ -3088,6 +3140,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
+ 
+ 			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
+ 				   i);
++			ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
+ 			if (adapter->tx_scrq[i]->irq) {
+ 				free_irq(adapter->tx_scrq[i]->irq,
+ 					 adapter->tx_scrq[i]);
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index a0948002ddf85..b3ad95ac3d859 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5222,18 +5222,20 @@ static void e1000_watchdog_task(struct work_struct *work)
+ 			pm_runtime_resume(netdev->dev.parent);
+ 
+ 			/* Checking if MAC is in DMoff state*/
+-			pcim_state = er32(STATUS);
+-			while (pcim_state & E1000_STATUS_PCIM_STATE) {
+-				if (tries++ == dmoff_exit_timeout) {
+-					e_dbg("Error in exiting dmoff\n");
+-					break;
+-				}
+-				usleep_range(10000, 20000);
++			if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ 				pcim_state = er32(STATUS);
+-
+-				/* Checking if MAC exited DMoff state */
+-				if (!(pcim_state & E1000_STATUS_PCIM_STATE))
+-					e1000_phy_hw_reset(&adapter->hw);
++				while (pcim_state & E1000_STATUS_PCIM_STATE) {
++					if (tries++ == dmoff_exit_timeout) {
++						e_dbg("Error in exiting dmoff\n");
++						break;
++					}
++					usleep_range(10000, 20000);
++					pcim_state = er32(STATUS);
++
++					/* Checking if MAC exited DMoff state */
++					if (!(pcim_state & E1000_STATUS_PCIM_STATE))
++						e1000_phy_hw_reset(&adapter->hw);
++				}
+ 			}
+ 
+ 			/* update snapshot of PHY registers on LSC */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 93dd58fda272f..d558364e3a9fb 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -1262,8 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ 			if (ethtool_link_ksettings_test_link_mode(&safe_ks,
+ 								  supported,
+ 								  Autoneg) &&
+-			    hw->phy.link_info.phy_type !=
+-			    I40E_PHY_TYPE_10GBASE_T) {
++			    hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
+ 				netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+ 				err = -EINVAL;
+ 				goto done;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index ac4b44fc19f17..d5106a6afb453 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -31,7 +31,7 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
+ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
+ static int i40e_add_vsi(struct i40e_vsi *vsi);
+ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
+-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
++static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
+ static int i40e_setup_misc_vector(struct i40e_pf *pf);
+ static void i40e_determine_queue_usage(struct i40e_pf *pf);
+ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+@@ -8702,6 +8702,8 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
+ 			 dev_driver_string(&pf->pdev->dev),
+ 			 dev_name(&pf->pdev->dev));
+ 		err = i40e_vsi_request_irq(vsi, int_name);
++		if (err)
++			goto err_setup_rx;
+ 
+ 	} else {
+ 		err = -EINVAL;
+@@ -10568,7 +10570,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ #endif /* CONFIG_I40E_DCB */
+ 	if (!lock_acquired)
+ 		rtnl_lock();
+-	ret = i40e_setup_pf_switch(pf, reinit);
++	ret = i40e_setup_pf_switch(pf, reinit, true);
+ 	if (ret)
+ 		goto end_unlock;
+ 
+@@ -14621,10 +14623,11 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
+  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
+  * @pf: board private structure
+  * @reinit: if the Main VSI needs to re-initialized.
++ * @lock_acquired: indicates whether or not the lock has been acquired
+  *
+  * Returns 0 on success, negative value on failure
+  **/
+-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
++static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ {
+ 	u16 flags = 0;
+ 	int ret;
+@@ -14726,9 +14729,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
+ 
+ 	i40e_ptp_init(pf);
+ 
++	if (!lock_acquired)
++		rtnl_lock();
++
+ 	/* repopulate tunnel port filters */
+ 	udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
+ 
++	if (!lock_acquired)
++		rtnl_unlock();
++
+ 	return ret;
+ }
+ 
+@@ -15509,7 +15518,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ 	}
+ #endif
+-	err = i40e_setup_pf_switch(pf, false);
++	err = i40e_setup_pf_switch(pf, false, false);
+ 	if (err) {
+ 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
+ 		goto err_vsis;
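
Threading lock_acquired down into i40e_setup_pf_switch() lets one function serve both callers: probe passes false and the function takes rtnl_lock itself around udp_tunnel_nic_reset_ntf(), while the rebuild path, which grabbed RTNL a few lines earlier, passes true so the lock is never taken twice. A runnable pthread model of the same hand-off (hypothetical names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

/* Takes the lock only when the caller does not already hold it. */
static void setup_switch(bool lock_acquired)
{
    if (!lock_acquired)
        pthread_mutex_lock(&rtnl);

    puts("reset tunnel filters under the lock");

    if (!lock_acquired)
        pthread_mutex_unlock(&rtnl);
}

int main(void)
{
    setup_switch(false);           /* probe-style caller: we lock   */

    pthread_mutex_lock(&rtnl);     /* rebuild-style caller holds it */
    setup_switch(true);
    pthread_mutex_unlock(&rtnl);
    return 0;
}
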
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 6c81e4f175ac6..bf06f2d785db6 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -7589,6 +7589,8 @@ static int mvpp2_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ err_port_probe:
++	fwnode_handle_put(port_fwnode);
++
+ 	i = 0;
+ 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ 		if (priv->port_list[i])
+diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
+index 3712e1786091f..406fdfe968bfb 100644
+--- a/drivers/net/ethernet/marvell/pxa168_eth.c
++++ b/drivers/net/ethernet/marvell/pxa168_eth.c
+@@ -1533,6 +1533,7 @@ static int pxa168_eth_remove(struct platform_device *pdev)
+ 	struct net_device *dev = platform_get_drvdata(pdev);
+ 	struct pxa168_eth_private *pep = netdev_priv(dev);
+ 
++	cancel_work_sync(&pep->tx_timeout_task);
+ 	if (pep->htpr) {
+ 		dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+ 				  pep->htpr, pep->htpr_dma);
+@@ -1544,7 +1545,6 @@ static int pxa168_eth_remove(struct platform_device *pdev)
+ 	clk_disable_unprepare(pep->clk);
+ 	mdiobus_unregister(pep->smi_bus);
+ 	mdiobus_free(pep->smi_bus);
+-	cancel_work_sync(&pep->tx_timeout_task);
+ 	unregister_netdev(dev);
+ 	free_netdev(dev);
+ 	return 0;
+diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+index 140cee7c459d0..1b32a43f70242 100644
+--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -2531,9 +2531,13 @@ static int pch_gbe_probe(struct pci_dev *pdev,
+ 	adapter->pdev = pdev;
+ 	adapter->hw.back = adapter;
+ 	adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
++
+ 	adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
+-	if (adapter->pdata && adapter->pdata->platform_init)
+-		adapter->pdata->platform_init(pdev);
++	if (adapter->pdata && adapter->pdata->platform_init) {
++		ret = adapter->pdata->platform_init(pdev);
++		if (ret)
++			goto err_free_netdev;
++	}
+ 
+ 	adapter->ptp_pdev =
+ 		pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
+@@ -2628,7 +2632,7 @@ err_free_netdev:
+  */
+ static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
+ {
+-	unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
++	unsigned long flags = GPIOF_OUT_INIT_HIGH;
+ 	unsigned gpio = MINNOW_PHY_RESET_GPIO;
+ 	int ret;
+ 
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 638d7b03be4bf..a98182b2d19bb 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -1506,12 +1506,12 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
+ 	for (i = 0; i < common->tx_ch_num; i++) {
+ 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ 
+-		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+-			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+-
+ 		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
+ 			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+ 
++		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
++			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
++
+ 		memset(tx_chn, 0, sizeof(*tx_chn));
+ 	}
+ }
+@@ -1531,12 +1531,12 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+ 
+ 		netif_napi_del(&tx_chn->napi_tx);
+ 
+-		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+-			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+-
+ 		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
+ 			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+ 
++		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
++			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
++
+ 		memset(tx_chn, 0, sizeof(*tx_chn));
+ 	}
+ }
+@@ -1624,11 +1624,11 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
+ 
+ 	rx_chn = &common->rx_chns;
+ 
+-	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
+-		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+-
+ 	if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
+ 		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
++
++	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
++		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+ }
+ 
+ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
+index c0bf7d78276e4..626e1ce817fcf 100644
+--- a/drivers/net/ieee802154/mac802154_hwsim.c
++++ b/drivers/net/ieee802154/mac802154_hwsim.c
+@@ -480,7 +480,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
+ 	struct hwsim_edge *e;
+ 	u32 v0, v1;
+ 
+-	if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
++	if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
+ 	    !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
+ 		return -EINVAL;
+ 
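
The one-character change in hwsim_del_edge_nl() turns "reject only when both attributes are missing" into "reject when either is missing", which is the correct test for a handler that goes on to dereference both. A runnable distillation:

#include <stdio.h>

/* Both attributes are required, so reject when either is missing;
 * the old '&&' only rejected when both were absent. */
static int del_edge(const char *radio_id, const char *radio_edge)
{
    if (!radio_id || !radio_edge)
        return -22;                    /* -EINVAL */
    return 0;
}

int main(void)
{
    printf("only id:  %d\n", del_edge("radio0", NULL));   /* -22 now */
    printf("both set: %d\n", del_edge("radio0", "e1"));   /* 0 */
    return 0;
}
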
+@@ -715,6 +715,8 @@ static int hwsim_subscribe_all_others(struct hwsim_phy *phy)
+ 
+ 	return 0;
+ 
++sub_fail:
++	hwsim_edge_unsubscribe_me(phy);
+ me_fail:
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(e, &phy->edges, list) {
+@@ -722,8 +724,6 @@ me_fail:
+ 		hwsim_free_edge(e);
+ 	}
+ 	rcu_read_unlock();
+-sub_fail:
+-	hwsim_edge_unsubscribe_me(phy);
+ 	return -ENOMEM;
+ }
+ 
+@@ -824,12 +824,17 @@ err_pib:
+ static void hwsim_del(struct hwsim_phy *phy)
+ {
+ 	struct hwsim_pib *pib;
++	struct hwsim_edge *e;
+ 
+ 	hwsim_edge_unsubscribe_me(phy);
+ 
+ 	list_del(&phy->list);
+ 
+ 	rcu_read_lock();
++	list_for_each_entry_rcu(e, &phy->edges, list) {
++		list_del_rcu(&e->list);
++		hwsim_free_edge(e);
++	}
+ 	pib = rcu_dereference(phy->pib);
+ 	rcu_read_unlock();
+ 
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 92425e1fd70c0..93dc48b9b4f24 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1819,7 +1819,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 		ctx.sa.rx_sa = rx_sa;
+ 		ctx.secy = secy;
+ 		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+-		       MACSEC_KEYID_LEN);
++		       secy->key_len);
+ 
+ 		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+ 		if (err)
+@@ -2061,7 +2061,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 		ctx.sa.tx_sa = tx_sa;
+ 		ctx.secy = secy;
+ 		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+-		       MACSEC_KEYID_LEN);
++		       secy->key_len);
+ 
+ 		err = macsec_offload(ops->mdo_add_txsa, &ctx);
+ 		if (err)
+diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
+index 10be266e48e8b..b7b2521c73fb6 100644
+--- a/drivers/net/phy/mscc/mscc_macsec.c
++++ b/drivers/net/phy/mscc/mscc_macsec.c
+@@ -501,7 +501,7 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
+ }
+ 
+ /* Derive the AES key to get a key for the hash authentication */
+-static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
++static int vsc8584_macsec_derive_key(const u8 key[MACSEC_MAX_KEY_LEN],
+ 				     u16 key_len, u8 hkey[16])
+ {
+ 	const u8 input[AES_BLOCK_SIZE] = {0};
+diff --git a/drivers/net/phy/mscc/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h
+index 9c6d25e36de2a..453304bae7784 100644
+--- a/drivers/net/phy/mscc/mscc_macsec.h
++++ b/drivers/net/phy/mscc/mscc_macsec.h
+@@ -81,7 +81,7 @@ struct macsec_flow {
+ 	/* Highest takes precedence [0..15] */
+ 	u8 priority;
+ 
+-	u8 key[MACSEC_KEYID_LEN];
++	u8 key[MACSEC_MAX_KEY_LEN];
+ 
+ 	union {
+ 		struct macsec_rx_sa *rx_sa;
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 28a6c4cfe9b8c..414afcb0a23f8 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1366,22 +1366,22 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ 	int orig_iif = skb->skb_iif;
+ 	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
+ 	bool is_ndisc = ipv6_ndisc_frame(skb);
+-	bool is_ll_src;
+ 
+ 	/* loopback, multicast & non-ND link-local traffic; do not push through
+ 	 * packet taps again. Reset pkt_type for upper layers to process skb.
+-	 * for packets with lladdr src, however, skip so that the dst can be
+-	 * determine at input using original ifindex in the case that daddr
+-	 * needs strict
++	 * For strict packets with a source LLA, determine the dst using the
++	 * original ifindex.
+ 	 */
+-	is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
+-	if (skb->pkt_type == PACKET_LOOPBACK ||
+-	    (need_strict && !is_ndisc && !is_ll_src)) {
++	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
+ 		skb->dev = vrf_dev;
+ 		skb->skb_iif = vrf_dev->ifindex;
+ 		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
++
+ 		if (skb->pkt_type == PACKET_LOOPBACK)
+ 			skb->pkt_type = PACKET_HOST;
++		else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)
++			vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
++
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 53dbc67e8a34f..a3ec03ce3343a 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2164,6 +2164,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ 	struct neighbour *n;
+ 	struct nd_msg *msg;
+ 
++	rcu_read_lock();
+ 	in6_dev = __in6_dev_get(dev);
+ 	if (!in6_dev)
+ 		goto out;
+@@ -2215,6 +2216,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ 	}
+ 
+ out:
++	rcu_read_unlock();
+ 	consume_skb(skb);
+ 	return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index bb6c5ee43ac0c..def52df829d48 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -5590,6 +5590,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
+ 
+ 	if (arvif->nohwcrypt &&
+ 	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
++		ret = -EINVAL;
+ 		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
+ 		goto err;
+ 	}
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index e7fde635e0eef..71878ab35b93c 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -3685,8 +3685,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
+ 			ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ 		if (bus_params.chip_id != 0xffffffff) {
+ 			if (!ath10k_pci_chip_is_supported(pdev->device,
+-							  bus_params.chip_id))
++							  bus_params.chip_id)) {
++				ret = -ENODEV;
+ 				goto err_unsupported;
++			}
+ 		}
+ 	}
+ 
+@@ -3697,11 +3699,15 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
+ 	}
+ 
+ 	bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+-	if (bus_params.chip_id == 0xffffffff)
++	if (bus_params.chip_id == 0xffffffff) {
++		ret = -ENODEV;
+ 		goto err_unsupported;
++	}
+ 
+-	if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
+-		goto err_free_irq;
++	if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
++		ret = -ENODEV;
++		goto err_unsupported;
++	}
+ 
+ 	ret = ath10k_core_register(ar, &bus_params);
+ 	if (ret) {
+diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
+index 350b7913622cb..b55b6289eeb1a 100644
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -445,7 +445,8 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
+ 		if (len < ALIGN(ie_len, 4)) {
+ 			ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ 				   ie_id, ie_len, len);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto err;
+ 		}
+ 
+ 		switch (ie_id) {
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 7ad0383affcba..a0e7bc6dd8c78 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -5311,11 +5311,6 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
+ 		if (WARN_ON(!arvif->is_up))
+ 			continue;
+ 
+-		ret = ath11k_mac_setup_bcn_tmpl(arvif);
+-		if (ret)
+-			ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
+-				    ret);
+-
+ 		ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
+ 		if (ret) {
+ 			ath11k_warn(ab, "failed to restart vdev %d: %d\n",
+@@ -5323,6 +5318,11 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
+ 			continue;
+ 		}
+ 
++		ret = ath11k_mac_setup_bcn_tmpl(arvif);
++		if (ret)
++			ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
++				    ret);
++
+ 		ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ 					 arvif->bssid);
+ 		if (ret) {
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 45f6402478b50..97c3a53f9cef2 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -307,6 +307,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
+ 		hchan = ah->curchan;
+ 	}
+ 
++	if (!hchan) {
++		fastcc = false;
++		hchan = ath9k_cmn_get_channel(sc->hw, ah, &sc->cur_chan->chandef);
++	}
++
+ 	if (!ath_prepare_reset(sc))
+ 		fastcc = false;
+ 
+diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
+index b2d760873992f..ba9bea79381c5 100644
+--- a/drivers/net/wireless/ath/carl9170/Kconfig
++++ b/drivers/net/wireless/ath/carl9170/Kconfig
+@@ -16,13 +16,11 @@ config CARL9170
+ 
+ config CARL9170_LEDS
+ 	bool "SoftLED Support"
+-	depends on CARL9170
+-	select MAC80211_LEDS
+-	select LEDS_CLASS
+-	select NEW_LEDS
+ 	default y
++	depends on CARL9170
++	depends on MAC80211_LEDS
+ 	help
+-	  This option is necessary, if you want your device' LEDs to blink
++	  This option is necessary, if you want your device's LEDs to blink.
+ 
+ 	  Say Y, unless you need the LEDs for firmware debugging.
+ 
+diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
+index afb4877eaad8f..dabed4e3ca457 100644
+--- a/drivers/net/wireless/ath/wcn36xx/main.c
++++ b/drivers/net/wireless/ath/wcn36xx/main.c
+@@ -293,23 +293,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
+ 		goto out_free_dxe_pool;
+ 	}
+ 
+-	wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
+-	if (!wcn->hal_buf) {
+-		wcn36xx_err("Failed to allocate smd buf\n");
+-		ret = -ENOMEM;
+-		goto out_free_dxe_ctl;
+-	}
+-
+ 	ret = wcn36xx_smd_load_nv(wcn);
+ 	if (ret) {
+ 		wcn36xx_err("Failed to push NV to chip\n");
+-		goto out_free_smd_buf;
++		goto out_free_dxe_ctl;
+ 	}
+ 
+ 	ret = wcn36xx_smd_start(wcn);
+ 	if (ret) {
+ 		wcn36xx_err("Failed to start chip\n");
+-		goto out_free_smd_buf;
++		goto out_free_dxe_ctl;
+ 	}
+ 
+ 	if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+@@ -336,8 +329,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
+ 
+ out_smd_stop:
+ 	wcn36xx_smd_stop(wcn);
+-out_free_smd_buf:
+-	kfree(wcn->hal_buf);
+ out_free_dxe_ctl:
+ 	wcn36xx_dxe_free_ctl_blks(wcn);
+ out_free_dxe_pool:
+@@ -372,8 +363,6 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
+ 
+ 	wcn36xx_dxe_free_mem_pools(wcn);
+ 	wcn36xx_dxe_free_ctl_blks(wcn);
+-
+-	kfree(wcn->hal_buf);
+ }
+ 
+ static void wcn36xx_change_ps(struct wcn36xx *wcn, bool enable)
+@@ -1401,6 +1390,12 @@ static int wcn36xx_probe(struct platform_device *pdev)
+ 	mutex_init(&wcn->hal_mutex);
+ 	mutex_init(&wcn->scan_lock);
+ 
++	wcn->hal_buf = devm_kmalloc(wcn->dev, WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
++	if (!wcn->hal_buf) {
++		ret = -ENOMEM;
++		goto out_wq;
++	}
++
+ 	ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
+ 	if (ret < 0) {
+ 		wcn36xx_err("failed to set DMA mask: %d\n", ret);
+diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
+index 6746fd206d2a9..1ff2679963f06 100644
+--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
++++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
+@@ -2842,9 +2842,7 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
+ 	wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
+ 	mutex_unlock(&wil->vif_mutex);
+ 	if (p2p_wdev) {
+-		wiphy_lock(wil->wiphy);
+ 		cfg80211_unregister_wdev(p2p_wdev);
+-		wiphy_unlock(wil->wiphy);
+ 		kfree(p2p_wdev);
+ 	}
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index f4405d7861b69..d8822a01d277e 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -2767,8 +2767,9 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
+ 	struct brcmf_sta_info_le sta_info_le;
+ 	u32 sta_flags;
+ 	u32 is_tdls_peer;
+-	s32 total_rssi;
+-	s32 count_rssi;
++	s32 total_rssi_avg = 0;
++	s32 total_rssi = 0;
++	s32 count_rssi = 0;
+ 	int rssi;
+ 	u32 i;
+ 
+@@ -2834,25 +2835,27 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
+ 			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
+ 			sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
+ 		}
+-		total_rssi = 0;
+-		count_rssi = 0;
+ 		for (i = 0; i < BRCMF_ANT_MAX; i++) {
+-			if (sta_info_le.rssi[i]) {
+-				sinfo->chain_signal_avg[count_rssi] =
+-					sta_info_le.rssi[i];
+-				sinfo->chain_signal[count_rssi] =
+-					sta_info_le.rssi[i];
+-				total_rssi += sta_info_le.rssi[i];
+-				count_rssi++;
+-			}
++			if (sta_info_le.rssi[i] == 0 ||
++			    sta_info_le.rx_lastpkt_rssi[i] == 0)
++				continue;
++			sinfo->chains |= BIT(count_rssi);
++			sinfo->chain_signal[count_rssi] =
++				sta_info_le.rx_lastpkt_rssi[i];
++			sinfo->chain_signal_avg[count_rssi] =
++				sta_info_le.rssi[i];
++			total_rssi += sta_info_le.rx_lastpkt_rssi[i];
++			total_rssi_avg += sta_info_le.rssi[i];
++			count_rssi++;
+ 		}
+ 		if (count_rssi) {
+-			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+-			sinfo->chains = count_rssi;
+-
+ 			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+-			total_rssi /= count_rssi;
+-			sinfo->signal = total_rssi;
++			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
++			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
++			sinfo->filled |=
++				BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
++			sinfo->signal = total_rssi / count_rssi;
++			sinfo->signal_avg = total_rssi_avg / count_rssi;
+ 		} else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ 			&ifp->vif->sme_state)) {
+ 			memset(&scb_val, 0, sizeof(scb_val));
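
The reworked loop reports two per-chain series, last-packet RSSI in chain_signal[] and the firmware's running average in chain_signal_avg[], fills in the chains bitmap, and skips any antenna for which either sample is zero before deriving signal and signal_avg. A runnable reduction of the aggregation (the sample values are made up):

#include <stdio.h>

#define ANT_MAX 4

int main(void)
{
    /* Per-antenna samples as firmware reports them; 0 means no data. */
    int last_pkt[ANT_MAX] = { -48, 0, -52, -50 };
    int avg[ANT_MAX]      = { -47, 0, -53, -51 };

    int total = 0, total_avg = 0, count = 0;
    unsigned int chains = 0;

    for (int i = 0; i < ANT_MAX; i++) {
        if (last_pkt[i] == 0 || avg[i] == 0)
            continue;                  /* chain has no valid sample */
        chains |= 1u << count;
        total += last_pkt[i];
        total_avg += avg[i];
        count++;
    }

    if (count)
        printf("chains=0x%x signal=%d signal_avg=%d\n",
               chains, total / count, total_avg / count);
    return 0;
}
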
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index 16ed325795a8b..faf5f8e5eee33 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -626,8 +626,8 @@ BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
+ BRCMF_FW_DEF(43012, "brcmfmac43012-sdio");
+ 
+ /* firmware config files */
+-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-sdio.*.txt");
+-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-pcie.*.txt");
++MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt");
++MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
+ 
+ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
+ 	BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
+@@ -4162,7 +4162,6 @@ static int brcmf_sdio_bus_reset(struct device *dev)
+ 	if (ret) {
+ 		brcmf_err("Failed to probe after sdio device reset: ret %d\n",
+ 			  ret);
+-		brcmf_sdiod_remove(sdiodev);
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+index 39f3af2d0439b..eadac0f5590fc 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+@@ -1220,6 +1220,7 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
+ {
+ 	struct brcms_info *wl;
+ 	struct ieee80211_hw *hw;
++	int ret;
+ 
+ 	dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
+ 		 pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
+@@ -1244,11 +1245,16 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
+ 	wl = brcms_attach(pdev);
+ 	if (!wl) {
+ 		pr_err("%s: brcms_attach failed!\n", __func__);
+-		return -ENODEV;
++		ret = -ENODEV;
++		goto err_free_ieee80211;
+ 	}
+ 	brcms_led_register(wl);
+ 
+ 	return 0;
++
++err_free_ieee80211:
++	ieee80211_free_hw(hw);
++	return ret;
+ }
+ 
+ static int brcms_suspend(struct bcma_device *pdev)
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
+index e4f91bce222d8..61d3d4e0b7d94 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /******************************************************************************
+  *
+- * Copyright(c) 2020 Intel Corporation
++ * Copyright(c) 2020-2021 Intel Corporation
+  *
+  *****************************************************************************/
+ 
+@@ -10,7 +10,7 @@
+ 
+ #include "fw/notif-wait.h"
+ 
+-#define MVM_UCODE_PNVM_TIMEOUT	(HZ / 10)
++#define MVM_UCODE_PNVM_TIMEOUT	(HZ / 4)
+ 
+ int iwl_pnvm_load(struct iwl_trans *trans,
+ 		  struct iwl_notif_wait_data *notif_wait);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 1ad621d13ad3a..0a13c2bda2eed 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1032,6 +1032,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+ 	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
+ 		return -1;
+ 
++	if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he)
++		return -1;
++
+ 	if (unlikely(ieee80211_is_probe_resp(fc)))
+ 		iwl_mvm_probe_resp_set_noa(mvm, skb);
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 94228b316df1b..46517515ba728 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -1231,7 +1231,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
+ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
+ {
+ 	struct pcie_service_card *card = adapter->card;
+-	u32 tmp;
++	u32 *cookie;
+ 
+ 	card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev,
+ 						      sizeof(u32),
+@@ -1242,13 +1242,11 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
+ 			    "dma_alloc_coherent failed!\n");
+ 		return -ENOMEM;
+ 	}
++	cookie = (u32 *)card->sleep_cookie_vbase;
+ 	/* Init val of Sleep Cookie */
+-	tmp = FW_AWAKE_COOKIE;
+-	put_unaligned(tmp, card->sleep_cookie_vbase);
++	*cookie = FW_AWAKE_COOKIE;
+ 
+-	mwifiex_dbg(adapter, INFO,
+-		    "alloc_scook: sleep cookie=0x%x\n",
+-		    get_unaligned(card->sleep_cookie_vbase));
++	mwifiex_dbg(adapter, INFO, "alloc_scook: sleep cookie=0x%x\n", *cookie);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index 8dccb589b756d..d06e61cadc411 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1890,6 +1890,10 @@ void mt7615_pm_power_save_work(struct work_struct *work)
+ 						pm.ps_work.work);
+ 
+ 	delta = dev->pm.idle_timeout;
++	if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
++	    test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
++		goto out;
++
+ 	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
+ 		delta = dev->pm.last_activity + delta - jiffies;
+ 		goto out;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+index 1b4cb145f38e1..f2fbf11e0321d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+@@ -133,20 +133,21 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 			  struct mt76_tx_info *tx_info)
+ {
+ 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+-	struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ 	struct ieee80211_key_conf *key = info->control.hw_key;
+ 	int pid, id;
+ 	u8 *txwi = (u8 *)txwi_ptr;
+ 	struct mt76_txwi_cache *t;
++	struct mt7615_sta *msta;
+ 	void *txp;
+ 
++	msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
+ 	if (!wcid)
+ 		wcid = &dev->mt76.global_wcid;
+ 
+ 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+ 
+-	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
++	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
+ 		struct mt7615_phy *phy = &dev->phy;
+ 
+ 		if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+index f8d3673c2cae8..7010101f6b147 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+@@ -191,14 +191,15 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 				   struct ieee80211_sta *sta,
+ 				   struct mt76_tx_info *tx_info)
+ {
+-	struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
+ 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ 	struct sk_buff *skb = tx_info->skb;
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
++	struct mt7615_sta *msta;
+ 	int pad;
+ 
++	msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
+ 	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
+-	    !msta->rate_probe) {
++	    msta && !msta->rate_probe) {
+ 		/* request to configure sampling rate */
+ 		spin_lock_bh(&dev->mt76.lock);
+ 		mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index c5f5037f57570..cff60b699e319 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -16,10 +16,6 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
+ 	if (!test_bit(MT76_STATE_PM, &phy->state))
+ 		return 0;
+ 
+-	if (test_bit(MT76_HW_SCANNING, &phy->state) ||
+-	    test_bit(MT76_HW_SCHED_SCANNING, &phy->state))
+-		return 0;
+-
+ 	if (queue_work(dev->wq, &pm->wake_work))
+ 		reinit_completion(&pm->wake_cmpl);
+ 
+@@ -45,10 +41,6 @@ void mt76_connac_power_save_sched(struct mt76_phy *phy,
+ 
+ 	pm->last_activity = jiffies;
+ 
+-	if (test_bit(MT76_HW_SCANNING, &phy->state) ||
+-	    test_bit(MT76_HW_SCHED_SCANNING, &phy->state))
+-		return;
+-
+ 	if (!test_bit(MT76_STATE_PM, &phy->state))
+ 		queue_delayed_work(dev->wq, &pm->ps_work, pm->idle_timeout);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index cefd33b74a875..280aee1aa299f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -1732,7 +1732,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
+ 	ptlv->index = index;
+ 
+ 	memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
+-	memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8);
++	memcpy(ptlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8));
+ 
+ 	return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true);
+ }
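
The WoW mask carries one bit per pattern byte, so its size in bytes is pattern_len / 8 rounded up; plain integer division silently dropped the final partial byte whenever the pattern length wasn't a multiple of eight. A runnable check of the two computations:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* One mask bit per pattern byte: 20 bytes of pattern need 3 mask
     * bytes, but 20 / 8 truncates to 2 and drops the last 4 bits. */
    unsigned int pattern_len = 20;

    printf("truncating: %u bytes\n", pattern_len / 8);              /* 2 */
    printf("rounded up: %u bytes\n", DIV_ROUND_UP(pattern_len, 8)); /* 3 */
    return 0;
}
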
+@@ -1767,14 +1767,17 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 	};
+ 
+ 	if (wowlan->magic_pkt)
+-		req.wow_ctrl_tlv.trigger |= BIT(0);
++		req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_MAGIC;
+ 	if (wowlan->disconnect)
+-		req.wow_ctrl_tlv.trigger |= BIT(2);
++		req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT |
++					     UNI_WOW_DETECT_TYPE_BCN_LOST);
+ 	if (wowlan->nd_config) {
+ 		mt76_connac_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
+-		req.wow_ctrl_tlv.trigger |= BIT(5);
++		req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT;
+ 		mt76_connac_mcu_sched_scan_enable(phy, vif, suspend);
+ 	}
++	if (wowlan->n_patterns)
++		req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_BITMAP;
+ 
+ 	if (mt76_is_mmio(dev))
+ 		req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+index c1e1df5f7cd75..eea121101b5ee 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+@@ -589,6 +589,14 @@ enum {
+ 	UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
+ };
+ 
++#define UNI_WOW_DETECT_TYPE_MAGIC		BIT(0)
++#define UNI_WOW_DETECT_TYPE_ANY			BIT(1)
++#define UNI_WOW_DETECT_TYPE_DISCONNECT		BIT(2)
++#define UNI_WOW_DETECT_TYPE_GTK_REKEY_FAIL	BIT(3)
++#define UNI_WOW_DETECT_TYPE_BCN_LOST		BIT(4)
++#define UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT	BIT(5)
++#define UNI_WOW_DETECT_TYPE_BITMAP		BIT(6)
++
+ enum {
+ 	UNI_SUSPEND_MODE_SETTING,
+ 	UNI_SUSPEND_WOW_CTRL,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+index bd798df748ba5..8eb90722c5325 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+@@ -476,10 +476,17 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
+ static void
+ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
+ {
+-	if (en)
++	mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
++
++	if (en) {
++		struct mt7915_dev *dev = phy->dev;
++
+ 		mt7915_tm_update_channel(phy);
+ 
+-	mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
++		/* read-clear */
++		mt76_rr(dev, MT_MIB_SDR3(phy != &dev->phy));
++		mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
++	}
+ }
+ 
+ static int
+@@ -702,7 +709,11 @@ static int
+ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
+ {
+ 	struct mt7915_phy *phy = mphy->priv;
++	struct mt7915_dev *dev = phy->dev;
++	bool ext_phy = phy != &dev->phy;
++	enum mt76_rxq_id q;
+ 	void *rx, *rssi;
++	u16 fcs_err;
+ 	int i;
+ 
+ 	rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
+@@ -747,6 +758,12 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
+ 
+ 	nla_nest_end(msg, rx);
+ 
++	fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
++				 MT_MIB_SDR3_FCS_ERR_MASK);
++	q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
++	mphy->test.rx_stats.packets[q] += fcs_err;
++	mphy->test.rx_stats.fcs_error[q] += fcs_err;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index a0797cec136e9..f9bd907b90fa0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -110,30 +110,12 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
+ static void
+ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
+ {
+-	u32 mask, set;
+-
+ 	mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
+ 		       MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+ 	mt76_set(dev, MT_TMAC_CTCR0(band),
+ 		 MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ 		 MT_TMAC_CTCR0_INS_DDLMT_EN);
+ 
+-	mask = MT_MDP_RCFR0_MCU_RX_MGMT |
+-	       MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR |
+-	       MT_MDP_RCFR0_MCU_RX_CTL_BAR;
+-	set = FIELD_PREP(MT_MDP_RCFR0_MCU_RX_MGMT, MT_MDP_TO_HIF) |
+-	      FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR, MT_MDP_TO_HIF) |
+-	      FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_BAR, MT_MDP_TO_HIF);
+-	mt76_rmw(dev, MT_MDP_BNRCFR0(band), mask, set);
+-
+-	mask = MT_MDP_RCFR1_MCU_RX_BYPASS |
+-	       MT_MDP_RCFR1_RX_DROPPED_UCAST |
+-	       MT_MDP_RCFR1_RX_DROPPED_MCAST;
+-	set = FIELD_PREP(MT_MDP_RCFR1_MCU_RX_BYPASS, MT_MDP_TO_HIF) |
+-	      FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_UCAST, MT_MDP_TO_HIF) |
+-	      FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
+-	mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
+-
+ 	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
+ 	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index ce4eae7f1e448..c4b144391a8e2 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -420,16 +420,19 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+ 		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
+ 		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
+ 		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
+-		status->signal = status->chain_signal[0];
+-
+-		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
+-			if (!(status->chains & BIT(i)))
++		status->signal = -128;
++		for (i = 0; i < hweight8(mphy->antenna_mask); i++) {
++			if (!(status->chains & BIT(i)) ||
++			    status->chain_signal[i] >= 0)
+ 				continue;
+ 
+ 			status->signal = max(status->signal,
+ 					     status->chain_signal[i]);
+ 		}
+ 
++		if (status->signal == -128)
++			status->flag |= RX_FLAG_NO_SIGNAL_VAL;
++
+ 		stbc = FIELD_GET(MT_PRXV_STBC, v0);
+ 		gi = FIELD_GET(MT_PRXV_SGI, v0);
+ 		cck = false;
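
Rather than trusting chain 0 unconditionally, the receive path now starts from a -128 sentinel and folds in only chains that are present and carry a plausible (negative dBm) value; if none qualify, the sentinel survives and RX_FLAG_NO_SIGNAL_VAL tells mac80211 there is no signal estimate. A runnable reduction (the chain data is made up):

#include <stdio.h>

int main(void)
{
    int chain_signal[4] = { -60, 2 /* bogus */, -55, -70 };
    unsigned int chains = 0x0D;        /* chains 0, 2, 3 are valid */
    int signal = -128;                 /* sentinel: no value yet */

    for (int i = 0; i < 4; i++) {
        /* skip chains with no data or a non-negative (invalid) dBm */
        if (!(chains & (1u << i)) || chain_signal[i] >= 0)
            continue;
        if (chain_signal[i] > signal)
            signal = chain_signal[i];
    }

    if (signal == -128)
        puts("no usable signal, would set RX_FLAG_NO_SIGNAL_VAL");
    else
        printf("signal = %d dBm\n", signal);   /* -55 */
    return 0;
}
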
+@@ -1521,6 +1524,10 @@ void mt7921_pm_power_save_work(struct work_struct *work)
+ 						pm.ps_work.work);
+ 
+ 	delta = dev->pm.idle_timeout;
++	if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
++	    test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
++		goto out;
++
+ 	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
+ 		delta = dev->pm.last_activity + delta - jiffies;
+ 		goto out;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index c6e8857067a3a..1894ca6324d53 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -223,57 +223,6 @@ static void mt7921_stop(struct ieee80211_hw *hw)
+ 	mt7921_mutex_release(dev);
+ }
+ 
+-static inline int get_free_idx(u32 mask, u8 start, u8 end)
+-{
+-	return ffs(~mask & GENMASK(end, start));
+-}
+-
+-static int get_omac_idx(enum nl80211_iftype type, u64 mask)
+-{
+-	int i;
+-
+-	switch (type) {
+-	case NL80211_IFTYPE_STATION:
+-		/* prefer hw bssid slot 1-3 */
+-		i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
+-		if (i)
+-			return i - 1;
+-
+-		if (type != NL80211_IFTYPE_STATION)
+-			break;
+-
+-		/* next, try to find a free repeater entry for the sta */
+-		i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
+-				 REPEATER_BSSID_MAX - REPEATER_BSSID_START);
+-		if (i)
+-			return i + 32 - 1;
+-
+-		i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
+-		if (i)
+-			return i - 1;
+-
+-		if (~mask & BIT(HW_BSSID_0))
+-			return HW_BSSID_0;
+-
+-		break;
+-	case NL80211_IFTYPE_MONITOR:
+-		/* ap uses hw bssid 0 and ext bssid */
+-		if (~mask & BIT(HW_BSSID_0))
+-			return HW_BSSID_0;
+-
+-		i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
+-		if (i)
+-			return i - 1;
+-
+-		break;
+-	default:
+-		WARN_ON(1);
+-		break;
+-	}
+-
+-	return -1;
+-}
+-
+ static int mt7921_add_interface(struct ieee80211_hw *hw,
+ 				struct ieee80211_vif *vif)
+ {
+@@ -295,12 +244,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
+ 		goto out;
+ 	}
+ 
+-	idx = get_omac_idx(vif->type, phy->omac_mask);
+-	if (idx < 0) {
+-		ret = -ENOSPC;
+-		goto out;
+-	}
+-	mvif->mt76.omac_idx = idx;
++	mvif->mt76.omac_idx = mvif->mt76.idx;
+ 	mvif->phy = phy;
+ 	mvif->mt76.band_idx = 0;
+ 	mvif->mt76.wmm_idx = mvif->mt76.idx % MT7921_MAX_WMM_SETS;
+diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
+index 451ed60c62961..802e3d733959f 100644
+--- a/drivers/net/wireless/mediatek/mt76/tx.c
++++ b/drivers/net/wireless/mediatek/mt76/tx.c
+@@ -285,7 +285,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
+ 		skb_set_queue_mapping(skb, qid);
+ 	}
+ 
+-	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
++	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
+ 		ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ 				       info->control.rates, 1);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+index 448922cb2e63d..10bb3b5a8c22a 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+@@ -3529,26 +3529,28 @@ static void rtw8822c_pwrtrack_set(struct rtw_dev *rtwdev, u8 rf_path)
+ 	}
+ }
+ 
+-static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
+-				    struct rtw_swing_table *swing_table,
+-				    u8 path)
++static void rtw8822c_pwr_track_stats(struct rtw_dev *rtwdev, u8 path)
+ {
+-	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+-	u8 thermal_value, delta;
++	u8 thermal_value;
+ 
+ 	if (rtwdev->efuse.thermal_meter[path] == 0xff)
+ 		return;
+ 
+ 	thermal_value = rtw_read_rf(rtwdev, path, RF_T_METER, 0x7e);
+-
+ 	rtw_phy_pwrtrack_avg(rtwdev, thermal_value, path);
++}
+ 
+-	delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
++static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
++				    struct rtw_swing_table *swing_table,
++				    u8 path)
++{
++	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
++	u8 delta;
+ 
++	delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
+ 	dm_info->delta_power_index[path] =
+ 		rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table, path, path,
+ 					    delta);
+-
+ 	rtw8822c_pwrtrack_set(rtwdev, path);
+ }
+ 
+@@ -3559,12 +3561,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+ 
+ 	rtw_phy_config_swing_table(rtwdev, &swing_table);
+ 
++	for (i = 0; i < rtwdev->hal.rf_path_num; i++)
++		rtw8822c_pwr_track_stats(rtwdev, i);
+ 	if (rtw_phy_pwrtrack_need_lck(rtwdev))
+ 		rtw8822c_do_lck(rtwdev);
+-
+ 	for (i = 0; i < rtwdev->hal.rf_path_num; i++)
+ 		rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
+-
+ }
+ 
+ static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
+index ce9892152f4d4..99b21a2c83861 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
++++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
+@@ -203,7 +203,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
+ 		wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE);
+ 
+ 	if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
+-	    (common->secinfo.security_enable)) {
++	    info->control.hw_key) {
+ 		if (rsi_is_cipher_wep(common))
+ 			ieee80211_size += 4;
+ 		else
+@@ -470,9 +470,9 @@ int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
+ 	}
+ 
+ 	if (common->band == NL80211_BAND_2GHZ)
+-		bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_1);
++		bcn_frm->rate_info |= cpu_to_le16(RSI_RATE_1);
+ 	else
+-		bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_6);
++		bcn_frm->rate_info |= cpu_to_le16(RSI_RATE_6);
+ 
+ 	if (mac_bcn->data[tim_offset + 2] == 0)
+ 		bcn_frm->frame_info |= cpu_to_le16(RSI_DATA_DESC_DTIM_BEACON);
+diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+index 16025300cddb3..57c9e3559dfd1 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
++++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+@@ -1028,7 +1028,6 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
+ 	mutex_lock(&common->mutex);
+ 	switch (cmd) {
+ 	case SET_KEY:
+-		secinfo->security_enable = true;
+ 		status = rsi_hal_key_config(hw, vif, key, sta);
+ 		if (status) {
+ 			mutex_unlock(&common->mutex);
+@@ -1047,8 +1046,6 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
+ 		break;
+ 
+ 	case DISABLE_KEY:
+-		if (vif->type == NL80211_IFTYPE_STATION)
+-			secinfo->security_enable = false;
+ 		rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__);
+ 		memset(key, 0, sizeof(struct ieee80211_key_conf));
+ 		status = rsi_hal_key_config(hw, vif, key, sta);
+diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+index 33c76d39a8e96..b6d050a2fbe7e 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
++++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+@@ -1803,8 +1803,7 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
+ 			RSI_WIFI_MGMT_Q);
+ 	cmd_frame->desc.desc_dword0.frame_type = WOWLAN_CONFIG_PARAMS;
+ 	cmd_frame->host_sleep_status = sleep_status;
+-	if (common->secinfo.security_enable &&
+-	    common->secinfo.gtk_cipher)
++	if (common->secinfo.gtk_cipher)
+ 		flags |= RSI_WOW_GTK_REKEY;
+ 	if (sleep_status)
+ 		cmd_frame->wow_flags = flags;
+diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
+index 73a19e43106b1..b3e25bc28682c 100644
+--- a/drivers/net/wireless/rsi/rsi_main.h
++++ b/drivers/net/wireless/rsi/rsi_main.h
+@@ -151,7 +151,6 @@ enum edca_queue {
+ };
+ 
+ struct security_info {
+-	bool security_enable;
+ 	u32 ptk_cipher;
+ 	u32 gtk_cipher;
+ };
+diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
+index 988581cc134b7..1f856fbbc0ea4 100644
+--- a/drivers/net/wireless/st/cw1200/scan.c
++++ b/drivers/net/wireless/st/cw1200/scan.c
+@@ -75,30 +75,27 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
+ 	if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
+ 		return -EINVAL;
+ 
+-	/* will be unlocked in cw1200_scan_work() */
+-	down(&priv->scan.lock);
+-	mutex_lock(&priv->conf_mutex);
+-
+ 	frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
+ 		req->ie_len);
+-	if (!frame.skb) {
+-		mutex_unlock(&priv->conf_mutex);
+-		up(&priv->scan.lock);
++	if (!frame.skb)
+ 		return -ENOMEM;
+-	}
+ 
+ 	if (req->ie_len)
+ 		skb_put_data(frame.skb, req->ie, req->ie_len);
+ 
++	/* will be unlocked in cw1200_scan_work() */
++	down(&priv->scan.lock);
++	mutex_lock(&priv->conf_mutex);
++
+ 	ret = wsm_set_template_frame(priv, &frame);
+ 	if (!ret) {
+ 		/* Host wants to be the probe responder. */
+ 		ret = wsm_set_probe_responder(priv, true);
+ 	}
+ 	if (ret) {
+-		dev_kfree_skb(frame.skb);
+ 		mutex_unlock(&priv->conf_mutex);
+ 		up(&priv->scan.lock);
++		dev_kfree_skb(frame.skb);
+ 		return ret;
+ 	}
+ 
+@@ -120,8 +117,8 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
+ 		++priv->scan.n_ssids;
+ 	}
+ 
+-	dev_kfree_skb(frame.skb);
+ 	mutex_unlock(&priv->conf_mutex);
++	dev_kfree_skb(frame.skb);
+ 	queue_work(priv->workqueue, &priv->scan.work);
+ 	return 0;
+ }
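
The cw1200 hunk above is an ordering fix: the probe-request skb is now allocated before the scan semaphore and conf_mutex are taken, and freed only after both are dropped, so nothing that can block sits inside the critical section. A minimal userspace sketch of the same discipline, with hypothetical names and a single pthread mutex standing in for the driver's two locks:

#include <pthread.h>	/* build: cc -pthread sketch.c */
#include <stdlib.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;

struct frame { char *buf; };	/* hypothetical stand-in for the skb */

static int start_scan(size_t len)
{
	/* Allocate (which may block) before taking any locks ... */
	struct frame f = { .buf = malloc(len) };
	if (!f.buf)
		return -1;

	pthread_mutex_lock(&conf_mutex);
	/* ... keep only non-blocking template setup under the lock ... */
	int err = 0;	/* the wsm_set_template_frame() step would go here */
	pthread_mutex_unlock(&conf_mutex);

	/* ... and free outside the critical section, on every path. */
	free(f.buf);
	return err;
}

int main(void) { return start_scan(64) ? 1 : 0; }
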
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index c92a15c3fbc5e..2a3ef79f96f9c 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1027,7 +1027,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
+ 
+ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
+ {
+-	u16 tmp = nvmeq->cq_head + 1;
++	u32 tmp = nvmeq->cq_head + 1;
+ 
+ 	if (tmp == nvmeq->q_depth) {
+ 		nvmeq->cq_head = 0;
+@@ -2834,10 +2834,7 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ #ifdef CONFIG_ACPI
+ static bool nvme_acpi_storage_d3(struct pci_dev *dev)
+ {
+-	struct acpi_device *adev;
+-	struct pci_dev *root;
+-	acpi_handle handle;
+-	acpi_status status;
++	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ 	u8 val;
+ 
+ 	/*
+@@ -2845,28 +2842,9 @@ static bool nvme_acpi_storage_d3(struct pci_dev *dev)
+ 	 * must use D3 to support deep platform power savings during
+ 	 * suspend-to-idle.
+ 	 */
+-	root = pcie_find_root_port(dev);
+-	if (!root)
+-		return false;
+ 
+-	adev = ACPI_COMPANION(&root->dev);
+ 	if (!adev)
+ 		return false;
+-
+-	/*
+-	 * The property is defined in the PXSX device for South complex ports
+-	 * and in the PEGP device for North complex ports.
+-	 */
+-	status = acpi_get_handle(adev->handle, "PXSX", &handle);
+-	if (ACPI_FAILURE(status)) {
+-		status = acpi_get_handle(adev->handle, "PEGP", &handle);
+-		if (ACPI_FAILURE(status))
+-			return false;
+-	}
+-
+-	if (acpi_bus_get_device(handle, &adev))
+-		return false;
+-
+ 	if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+ 			&val))
+ 		return false;
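
Two nvme-pci fixes share the file above. The first widens the temporary in nvme_update_cq_head(): cq_head is compared against q_depth, which is a u32 and can in principle hold 0x10000, so a u16 increment wraps to zero before the comparison ever matches. The second simply reads StorageD3Enable from the device's own ACPI companion instead of walking to the root port. A standalone sketch of the wrap, with assumed values rather than real controller queue sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t q_depth = 0x10000;	/* 65536 entries: fits in u32, not u16 */
	uint16_t cq_head = 0xffff;	/* last slot in the queue */

	uint16_t tmp16 = cq_head + 1;		/* wraps to 0 */
	uint32_t tmp32 = (uint32_t)cq_head + 1;	/* 0x10000, as intended */

	printf("u16: tmp=%u hits depth? %d\n", (unsigned)tmp16, tmp16 == q_depth);
	printf("u32: tmp=%u hits depth? %d\n", tmp32, tmp32 == q_depth);
	return 0;
}
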
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index d375745fc4ed3..b81db5270018e 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -2494,13 +2494,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ 	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
+ 	int ret;
+ 
+-	/*
+-	 * if there is no nvmet mapping to the targetport there
+-	 * shouldn't be requests. just terminate them.
+-	 */
+-	if (!tgtport->pe)
+-		goto transport_error;
+-
+ 	/*
+ 	 * Fused commands are currently not supported in the linux
+ 	 * implementation.
+@@ -2528,7 +2521,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ 
+ 	fod->req.cmd = &fod->cmdiubuf.sqe;
+ 	fod->req.cqe = &fod->rspiubuf.cqe;
+-	fod->req.port = tgtport->pe->port;
++	if (tgtport->pe)
++		fod->req.port = tgtport->pe->port;
+ 
+ 	/* clear any response payload */
+ 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index adb26aff481d5..c485b2c7720d8 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -511,11 +511,11 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
+ 
+ 		if (size &&
+ 		    early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
+-			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
+-				uname, &base, (unsigned long)size / SZ_1M);
++			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
++				uname, &base, (unsigned long)(size / SZ_1M));
+ 		else
+-			pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
+-				uname, &base, (unsigned long)size / SZ_1M);
++			pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
++				uname, &base, (unsigned long)(size / SZ_1M));
+ 
+ 		len -= t_len;
+ 		if (first) {
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index a7fbc5e37e19e..6c95bbdf9265a 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -134,9 +134,9 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
+ 			ret = early_init_dt_alloc_reserved_memory_arch(size,
+ 					align, start, end, nomap, &base);
+ 			if (ret == 0) {
+-				pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
++				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
+ 					uname, &base,
+-					(unsigned long)size / SZ_1M);
++					(unsigned long)(size / SZ_1M));
+ 				break;
+ 			}
+ 			len -= t_len;
+@@ -146,8 +146,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
+ 		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
+ 							0, 0, nomap, &base);
+ 		if (ret == 0)
+-			pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
+-				uname, &base, (unsigned long)size / SZ_1M);
++			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
++				uname, &base, (unsigned long)(size / SZ_1M));
+ 	}
+ 
+ 	if (base == 0) {
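
The two of/ hunks above fix the same expression in four places: the cast binds tighter than the division, so (unsigned long)size / SZ_1M truncates a 64-bit size to unsigned long before dividing, which corrupts the printed value on 32-bit kernels, and %ld mismatched the unsigned operand. Dividing first and casting the now-small quotient is safe. A standalone demonstration, using fixed-width types to emulate a 32-bit unsigned long:

#include <stdint.h>
#include <stdio.h>

#define SZ_1M 0x00100000

int main(void)
{
	uint64_t size = 0x100000000ULL;	/* a 4 GiB reservation */

	/* Old order: truncate to 32 bits, then divide -> 0 MiB. */
	printf("cast-then-divide: %u MiB\n", (uint32_t)size / SZ_1M);

	/* Fixed order: divide, then cast -> 4096 MiB. */
	printf("divide-then-cast: %u MiB\n", (uint32_t)(size / SZ_1M));
	return 0;
}
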
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index 27a17a1e4a7c3..7479edf3676c1 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -3480,6 +3480,9 @@ static void __exit exit_hv_pci_drv(void)
+ 
+ static int __init init_hv_pci_drv(void)
+ {
++	if (!hv_is_hyperv_initialized())
++		return -ENODEV;
++
+ 	/* Set the invalid domain number's bit, so it will not be used */
+ 	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
+ 
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 1328159fe564d..a4339426664e5 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1212,7 +1212,7 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn)
+ 		irq = cmn->dtc[i].irq;
+ 		for (j = i; j--; ) {
+ 			if (cmn->dtc[j].irq == irq) {
+-				cmn->dtc[j].irq_friend = j - i;
++				cmn->dtc[j].irq_friend = i - j;
+ 				goto next;
+ 			}
+ 		}
+diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
+index 8ff7a67f691cf..4c3e5f2130807 100644
+--- a/drivers/perf/arm_smmuv3_pmu.c
++++ b/drivers/perf/arm_smmuv3_pmu.c
+@@ -277,7 +277,7 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
+ 				       struct perf_event *event, int idx)
+ {
+ 	u32 span, sid;
+-	unsigned int num_ctrs = smmu_pmu->num_counters;
++	unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
+ 	bool filter_en = !!get_filter_enable(event);
+ 
+ 	span = filter_en ? get_filter_span(event) :
+@@ -285,17 +285,19 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
+ 	sid = filter_en ? get_filter_stream_id(event) :
+ 			   SMMU_PMCG_DEFAULT_FILTER_SID;
+ 
+-	/* Support individual filter settings */
+-	if (!smmu_pmu->global_filter) {
++	cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
++	/*
++	 * Per-counter filtering, or scheduling the first globally-filtered
++	 * event into an empty PMU so idx == 0 and it works out equivalent.
++	 */
++	if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
+ 		smmu_pmu_set_event_filter(event, idx, span, sid);
+ 		return 0;
+ 	}
+ 
+-	/* Requested settings same as current global settings*/
+-	idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+-	if (idx == num_ctrs ||
+-	    smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
+-		smmu_pmu_set_event_filter(event, 0, span, sid);
++	/* Otherwise, must match whatever's currently scheduled */
++	if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
++		smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
+ 		return 0;
+ 	}
+ 
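
The rewritten SMMU PMU filter logic leans on the find_first_bit() contract that scanning an empty bitmap returns the bitmap size, so cur_idx == num_ctrs doubles as a 'nothing scheduled yet' test and the first globally-filtered event programs the filter itself instead of trampling whatever counter 0 was doing. A single-word toy model of that sentinel convention (not the kernel's bitmap API):

#include <stdio.h>

/* Index of the first set bit, or nbits if none are set, mirroring
 * the kernel's find_first_bit() contract (single-word toy only). */
static unsigned int find_first_bit_toy(unsigned long word, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (word & (1UL << i))
			return i;
	return nbits;
}

int main(void)
{
	unsigned int num_ctrs = 8;
	unsigned long used = 0;	/* no counters allocated yet */

	if (find_first_bit_toy(used, num_ctrs) == num_ctrs)
		printf("PMU empty: first event may set the global filter\n");

	used |= 1UL << 3;	/* counter 3 now in use */
	printf("first used counter: %u\n", find_first_bit_toy(used, num_ctrs));
	return 0;
}
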
+diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
+index be1f26b62ddb8..4a56849f04001 100644
+--- a/drivers/perf/fsl_imx8_ddr_perf.c
++++ b/drivers/perf/fsl_imx8_ddr_perf.c
+@@ -706,8 +706,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
+ 
+ 	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
+ 			      num);
+-	if (!name)
+-		return -ENOMEM;
++	if (!name) {
++		ret = -ENOMEM;
++		goto cpuhp_state_err;
++	}
+ 
+ 	pmu->devtype_data = of_device_get_match_data(&pdev->dev);
+ 
+diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
+index 753cb5bab9308..88e82ab81b61b 100644
+--- a/drivers/phy/ralink/phy-mt7621-pci.c
++++ b/drivers/phy/ralink/phy-mt7621-pci.c
+@@ -272,8 +272,8 @@ static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev,
+ 
+ 	mt7621_phy->has_dual_port = args->args[0];
+ 
+-	dev_info(dev, "PHY for 0x%08x (dual port = %d)\n",
+-		 (unsigned int)mt7621_phy->port_base, mt7621_phy->has_dual_port);
++	dev_dbg(dev, "PHY for 0x%px (dual port = %d)\n",
++		mt7621_phy->port_base, mt7621_phy->has_dual_port);
+ 
+ 	return mt7621_phy->phy;
+ }
+diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
+index e4adab375c737..6bdbd1f214dd4 100644
+--- a/drivers/phy/socionext/phy-uniphier-pcie.c
++++ b/drivers/phy/socionext/phy-uniphier-pcie.c
+@@ -24,11 +24,13 @@
+ #define PORT_SEL_1		FIELD_PREP(PORT_SEL_MASK, 1)
+ 
+ #define PCL_PHY_TEST_I		0x2000
+-#define PCL_PHY_TEST_O		0x2004
+ #define TESTI_DAT_MASK		GENMASK(13, 6)
+ #define TESTI_ADR_MASK		GENMASK(5, 1)
+ #define TESTI_WR_EN		BIT(0)
+ 
++#define PCL_PHY_TEST_O		0x2004
++#define TESTO_DAT_MASK		GENMASK(7, 0)
++
+ #define PCL_PHY_RESET		0x200c
+ #define PCL_PHY_RESET_N_MNMODE	BIT(8)	/* =1:manual */
+ #define PCL_PHY_RESET_N		BIT(0)	/* =1:deassert */
+@@ -77,11 +79,12 @@ static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv,
+ 	val  = FIELD_PREP(TESTI_DAT_MASK, 1);
+ 	val |= FIELD_PREP(TESTI_ADR_MASK, reg);
+ 	uniphier_pciephy_testio_write(priv, val);
+-	val = readl(priv->base + PCL_PHY_TEST_O);
++	val = readl(priv->base + PCL_PHY_TEST_O) & TESTO_DAT_MASK;
+ 
+ 	/* update value */
+-	val &= ~FIELD_PREP(TESTI_DAT_MASK, mask);
+-	val  = FIELD_PREP(TESTI_DAT_MASK, mask & param);
++	val &= ~mask;
++	val |= mask & param;
++	val = FIELD_PREP(TESTI_DAT_MASK, val);
+ 	val |= FIELD_PREP(TESTI_ADR_MASK, reg);
+ 	uniphier_pciephy_testio_write(priv, val);
+ 	uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN);
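
The UniPhier PHY hunk corrects a read-modify-write across two differently packed registers: the readout register holds the data in bits 7:0 (the new TESTO_DAT_MASK), while writes expect it repositioned into bits 13:6 (TESTI_DAT_MASK), and the old code cleared and set the field with an already-shifted mask. A standalone sketch of the corrected sequence, with local helpers in place of GENMASK()/FIELD_PREP() from <linux/bitfield.h>:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)	((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
#define TESTO_DAT_MASK	GENMASK32(7, 0)	/* data position on read */
#define TESTI_DAT_SHIFT	6		/* data position on write: bits 13:6 */

int main(void)
{
	uint32_t testo = 0x5a;	/* pretend readl(PCL_PHY_TEST_O) result */
	uint32_t mask = 0x0f, param = 0x03;

	uint32_t val = testo & TESTO_DAT_MASK;	/* keep only the data byte */
	val &= ~mask;				/* clear the field ...     */
	val |= mask & param;			/* ... update it ...       */
	val <<= TESTI_DAT_SHIFT;		/* ... and reposition      */

	printf("write value: 0x%04x\n", val);	/* 0x53 << 6 = 0x14c0 */
	return 0;
}
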
+diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
+index 57adc08a89b2d..9fe6ea6fdae55 100644
+--- a/drivers/phy/ti/phy-dm816x-usb.c
++++ b/drivers/phy/ti/phy-dm816x-usb.c
+@@ -242,19 +242,28 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_enable(phy->dev);
+ 	generic_phy = devm_phy_create(phy->dev, NULL, &ops);
+-	if (IS_ERR(generic_phy))
+-		return PTR_ERR(generic_phy);
++	if (IS_ERR(generic_phy)) {
++		error = PTR_ERR(generic_phy);
++		goto clk_unprepare;
++	}
+ 
+ 	phy_set_drvdata(generic_phy, phy);
+ 
+ 	phy_provider = devm_of_phy_provider_register(phy->dev,
+ 						     of_phy_simple_xlate);
+-	if (IS_ERR(phy_provider))
+-		return PTR_ERR(phy_provider);
++	if (IS_ERR(phy_provider)) {
++		error = PTR_ERR(phy_provider);
++		goto clk_unprepare;
++	}
+ 
+ 	usb_add_phy_dev(&phy->phy);
+ 
+ 	return 0;
++
++clk_unprepare:
++	pm_runtime_disable(phy->dev);
++	clk_unprepare(phy->refclk);
++	return error;
+ }
+ 
+ static int dm816x_usb_phy_remove(struct platform_device *pdev)
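
The dm816x hunk turns bare return PTR_ERR(...) exits into a clk_unprepare: label, so runtime PM and the prepared refclk are undone on every failure path that follows their acquisition. A generic sketch of the unwind idiom outside the kernel, with hypothetical acquire/release helpers:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for pm_runtime and clk handles. */
static int acquire_a(void) { puts("A acquired"); return 0; }
static int acquire_b(void) { puts("B acquired"); return -1; /* simulated failure */ }
static void release_a(void) { puts("A released"); }

static int probe(void)
{
	int err = acquire_a();
	if (err)
		return err;	/* nothing to unwind yet */

	err = acquire_b();
	if (err)
		goto err_release_a;	/* unwind in reverse order */

	return 0;

err_release_a:
	release_a();
	return err;
}

int main(void) { return probe() ? EXIT_FAILURE : EXIT_SUCCESS; }
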
+diff --git a/drivers/pinctrl/renesas/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c
+index 96b5b1509bb70..c4f1f5607601b 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a7796.c
++++ b/drivers/pinctrl/renesas/pfc-r8a7796.c
+@@ -68,6 +68,7 @@
+ 	PIN_NOGP_CFG(QSPI1_MOSI_IO0, "QSPI1_MOSI_IO0", fn, CFG_FLAGS),	\
+ 	PIN_NOGP_CFG(QSPI1_SPCLK, "QSPI1_SPCLK", fn, CFG_FLAGS),	\
+ 	PIN_NOGP_CFG(QSPI1_SSL, "QSPI1_SSL", fn, CFG_FLAGS),		\
++	PIN_NOGP_CFG(PRESET_N, "PRESET#", fn, SH_PFC_PIN_CFG_PULL_DOWN),\
+ 	PIN_NOGP_CFG(RPC_INT_N, "RPC_INT#", fn, CFG_FLAGS),		\
+ 	PIN_NOGP_CFG(RPC_RESET_N, "RPC_RESET#", fn, CFG_FLAGS),		\
+ 	PIN_NOGP_CFG(RPC_WP_N, "RPC_WP#", fn, CFG_FLAGS),		\
+@@ -6191,7 +6192,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ 		[ 4] = RCAR_GP_PIN(6, 29),	/* USB30_OVC */
+ 		[ 5] = RCAR_GP_PIN(6, 30),	/* GP6_30 */
+ 		[ 6] = RCAR_GP_PIN(6, 31),	/* GP6_31 */
+-		[ 7] = SH_PFC_PIN_NONE,
++		[ 7] = PIN_PRESET_N,		/* PRESET# */
+ 		[ 8] = SH_PFC_PIN_NONE,
+ 		[ 9] = SH_PFC_PIN_NONE,
+ 		[10] = SH_PFC_PIN_NONE,
+diff --git a/drivers/pinctrl/renesas/pfc-r8a77990.c b/drivers/pinctrl/renesas/pfc-r8a77990.c
+index 0a32e3c317c1a..95bcacf1275d9 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a77990.c
++++ b/drivers/pinctrl/renesas/pfc-r8a77990.c
+@@ -54,10 +54,10 @@
+ 	PIN_NOGP_CFG(FSCLKST_N, "FSCLKST_N", fn, CFG_FLAGS),		\
+ 	PIN_NOGP_CFG(MLB_REF, "MLB_REF", fn, CFG_FLAGS),		\
+ 	PIN_NOGP_CFG(PRESETOUT_N, "PRESETOUT_N", fn, CFG_FLAGS),	\
+-	PIN_NOGP_CFG(TCK, "TCK", fn, CFG_FLAGS),			\
+-	PIN_NOGP_CFG(TDI, "TDI", fn, CFG_FLAGS),			\
+-	PIN_NOGP_CFG(TMS, "TMS", fn, CFG_FLAGS),			\
+-	PIN_NOGP_CFG(TRST_N, "TRST_N", fn, CFG_FLAGS)
++	PIN_NOGP_CFG(TCK, "TCK", fn, SH_PFC_PIN_CFG_PULL_UP),		\
++	PIN_NOGP_CFG(TDI, "TDI", fn, SH_PFC_PIN_CFG_PULL_UP),		\
++	PIN_NOGP_CFG(TMS, "TMS", fn, SH_PFC_PIN_CFG_PULL_UP),		\
++	PIN_NOGP_CFG(TRST_N, "TRST_N", fn, SH_PFC_PIN_CFG_PULL_UP)
+ 
+ /*
+  * F_() : just information
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index d41d7ad14be0d..0cb927f0f301a 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -110,11 +110,6 @@ static struct quirk_entry quirk_asus_forceals = {
+ 	.wmi_force_als_set = true,
+ };
+ 
+-static struct quirk_entry quirk_asus_vendor_backlight = {
+-	.wmi_backlight_power = true,
+-	.wmi_backlight_set_devstate = true,
+-};
+-
+ static struct quirk_entry quirk_asus_use_kbd_dock_devid = {
+ 	.use_kbd_dock_devid = true,
+ };
+@@ -425,78 +420,6 @@ static const struct dmi_system_id asus_quirks[] = {
+ 		},
+ 		.driver_data = &quirk_asus_forceals,
+ 	},
+-	{
+-		.callback = dmi_matched,
+-		.ident = "ASUSTeK COMPUTER INC. GA401IH",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "GA401IH"),
+-		},
+-		.driver_data = &quirk_asus_vendor_backlight,
+-	},
+-	{
+-		.callback = dmi_matched,
+-		.ident = "ASUSTeK COMPUTER INC. GA401II",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "GA401II"),
+-		},
+-		.driver_data = &quirk_asus_vendor_backlight,
+-	},
+-	{
+-		.callback = dmi_matched,
+-		.ident = "ASUSTeK COMPUTER INC. GA401IU",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "GA401IU"),
+-		},
+-		.driver_data = &quirk_asus_vendor_backlight,
+-	},
+-	{
+-		.callback = dmi_matched,
+-		.ident = "ASUSTeK COMPUTER INC. GA401IV",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "GA401IV"),
+-		},
+-		.driver_data = &quirk_asus_vendor_backlight,
+-	},
+-	{
+-		.callback = dmi_matched,
+-		.ident = "ASUSTeK COMPUTER INC. GA401IVC",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "GA401IVC"),
+-		},
+-		.driver_data = &quirk_asus_vendor_backlight,
+-	},
+-		{
+-		.callback = dmi_matched,
+-		.ident = "ASUSTeK COMPUTER INC. GA502II",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "GA502II"),
+-		},
+-		.driver_data = &quirk_asus_vendor_backlight,
+-	},
+-	{
+-		.callback = dmi_matched,
+-		.ident = "ASUSTeK COMPUTER INC. GA502IU",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "GA502IU"),
+-		},
+-		.driver_data = &quirk_asus_vendor_backlight,
+-	},
+-	{
+-		.callback = dmi_matched,
+-		.ident = "ASUSTeK COMPUTER INC. GA502IV",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "GA502IV"),
+-		},
+-		.driver_data = &quirk_asus_vendor_backlight,
+-	},
+ 	{
+ 		.callback = dmi_matched,
+ 		.ident = "Asus Transformer T100TA / T100HA / T100CHI",
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index fa7232ad8c395..352508d304675 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -2831,6 +2831,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
+ 
+ 	if (!dev->info_supported && !dev->system_event_supported) {
+ 		pr_warn("No hotkey query interface found\n");
++		error = -EINVAL;
+ 		goto err_remove_filter;
+ 	}
+ 
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 8618c44106c2b..b47f6821615e6 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -299,6 +299,35 @@ static const struct ts_dmi_data estar_beauty_hd_data = {
+ 	.properties	= estar_beauty_hd_props,
+ };
+ 
++/* Generic props + data for upside-down mounted GDIX1001 touchscreens */
++static const struct property_entry gdix1001_upside_down_props[] = {
++	PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
++	PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
++	{ }
++};
++
++static const struct ts_dmi_data gdix1001_00_upside_down_data = {
++	.acpi_name	= "GDIX1001:00",
++	.properties	= gdix1001_upside_down_props,
++};
++
++static const struct ts_dmi_data gdix1001_01_upside_down_data = {
++	.acpi_name	= "GDIX1001:01",
++	.properties	= gdix1001_upside_down_props,
++};
++
++static const struct property_entry glavey_tm800a550l_props[] = {
++	PROPERTY_ENTRY_STRING("firmware-name", "gt912-glavey-tm800a550l.fw"),
++	PROPERTY_ENTRY_STRING("goodix,config-name", "gt912-glavey-tm800a550l.cfg"),
++	PROPERTY_ENTRY_U32("goodix,main-clk", 54),
++	{ }
++};
++
++static const struct ts_dmi_data glavey_tm800a550l_data = {
++	.acpi_name	= "GDIX1001:00",
++	.properties	= glavey_tm800a550l_props,
++};
++
+ static const struct property_entry gp_electronic_t701_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
+ 	PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+@@ -1012,6 +1041,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+ 		},
+ 	},
++	{	/* Glavey TM800A550L */
++		.driver_data = (void *)&glavey_tm800a550l_data,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++			/* Above strings are too generic, also match on BIOS version */
++			DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
++		},
++	},
+ 	{
+ 		/* GP-electronic T701 */
+ 		.driver_data = (void *)&gp_electronic_t701_data,
+@@ -1295,6 +1333,24 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "X3 Plus"),
+ 		},
+ 	},
++	{
++		/* Teclast X89 (Android version / BIOS) */
++		.driver_data = (void *)&gdix1001_00_upside_down_data,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
++			DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
++		},
++	},
++	{
++		/* Teclast X89 (Windows version / BIOS) */
++		.driver_data = (void *)&gdix1001_01_upside_down_data,
++		.matches = {
++			/* tPAD is too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
++			DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
++			DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
++		},
++	},
+ 	{
+ 		/* Teclast X98 Plus II */
+ 		.driver_data = (void *)&teclast_x98plus2_data,
+@@ -1303,6 +1359,19 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "X98 Plus II"),
+ 		},
+ 	},
++	{
++		/* Teclast X98 Pro */
++		.driver_data = (void *)&gdix1001_00_upside_down_data,
++		.matches = {
++			/*
++			 * Only match the BIOS date, because the manufacturer's
++			 * BIOS sometimes does not report the board name at
++			 * all.
++			 */
++			DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
++			DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
++		},
++	},
+ 	{
+ 		/* Trekstor Primebook C11 */
+ 		.driver_data = (void *)&trekstor_primebook_c11_data,
+@@ -1378,6 +1447,22 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "VINGA Twizzle J116"),
+ 		},
+ 	},
++	{
++		/* "WinBook TW100" */
++		.driver_data = (void *)&gdix1001_00_upside_down_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
++		}
++	},
++	{
++		/* WinBook TW700 */
++		.driver_data = (void *)&gdix1001_00_upside_down_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
++		},
++	},
+ 	{
+ 		/* Yours Y8W81, same case and touchscreen as Chuwi Vi8 */
+ 		.driver_data = (void *)&chuwi_vi8_data,
+diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
+index e18d291c7f21c..23fa429ebe760 100644
+--- a/drivers/regulator/da9052-regulator.c
++++ b/drivers/regulator/da9052-regulator.c
+@@ -250,7 +250,8 @@ static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ 	case DA9052_ID_BUCK3:
+ 	case DA9052_ID_LDO2:
+ 	case DA9052_ID_LDO3:
+-		ret = (new_sel - old_sel) * info->step_uV / 6250;
++		ret = DIV_ROUND_UP(abs(new_sel - old_sel) * info->step_uV,
++				   6250);
+ 		break;
+ 	}
+ 
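
The da9052 ramp-time fix addresses two defects at once: plain integer division truncates, under-reporting the settle time, and a downward voltage step makes new_sel - old_sel negative. DIV_ROUND_UP(abs(delta) * step_uV, slew) handles both. A standalone arithmetic check with illustrative values rather than datasheet numbers:

#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int step_uV = 10000, slew = 6250;	/* illustrative only */
	int old_sel = 10, new_sel = 9;		/* a downward step */

	int broken = (new_sel - old_sel) * step_uV / slew;
	int fixed = DIV_ROUND_UP(abs(new_sel - old_sel) * step_uV, slew);

	printf("broken: %d, fixed: %d\n", broken, fixed);	/* -1 vs 2 */
	return 0;
}
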
+diff --git a/drivers/regulator/fan53880.c b/drivers/regulator/fan53880.c
+index 1684faf82ed25..94f02f3099dd4 100644
+--- a/drivers/regulator/fan53880.c
++++ b/drivers/regulator/fan53880.c
+@@ -79,7 +79,7 @@ static const struct regulator_desc fan53880_regulators[] = {
+ 		.n_linear_ranges = 2,
+ 		.n_voltages =	   0xf8,
+ 		.vsel_reg =	   FAN53880_BUCKVOUT,
+-		.vsel_mask =	   0x7f,
++		.vsel_mask =	   0xff,
+ 		.enable_reg =	   FAN53880_ENABLE,
+ 		.enable_mask =	   0x10,
+ 		.enable_time =	   480,
+diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
+index ac2ee2030211a..b44f492a2b832 100644
+--- a/drivers/regulator/hi655x-regulator.c
++++ b/drivers/regulator/hi655x-regulator.c
+@@ -72,7 +72,7 @@ enum hi655x_regulator_id {
+ static int hi655x_is_enabled(struct regulator_dev *rdev)
+ {
+ 	unsigned int value = 0;
+-	struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
++	const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+ 
+ 	regmap_read(rdev->regmap, regulator->status_reg, &value);
+ 	return (value & rdev->desc->enable_mask);
+@@ -80,7 +80,7 @@ static int hi655x_is_enabled(struct regulator_dev *rdev)
+ 
+ static int hi655x_disable(struct regulator_dev *rdev)
+ {
+-	struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
++	const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+ 
+ 	return regmap_write(rdev->regmap, regulator->disable_reg,
+ 			    rdev->desc->enable_mask);
+@@ -169,7 +169,6 @@ static const struct hi655x_regulator regulators[] = {
+ static int hi655x_regulator_probe(struct platform_device *pdev)
+ {
+ 	unsigned int i;
+-	struct hi655x_regulator *regulator;
+ 	struct hi655x_pmic *pmic;
+ 	struct regulator_config config = { };
+ 	struct regulator_dev *rdev;
+@@ -180,22 +179,17 @@ static int hi655x_regulator_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	regulator = devm_kzalloc(&pdev->dev, sizeof(*regulator), GFP_KERNEL);
+-	if (!regulator)
+-		return -ENOMEM;
+-
+-	platform_set_drvdata(pdev, regulator);
+-
+ 	config.dev = pdev->dev.parent;
+ 	config.regmap = pmic->regmap;
+-	config.driver_data = regulator;
+ 	for (i = 0; i < ARRAY_SIZE(regulators); i++) {
++		config.driver_data = (void *) &regulators[i];
++
+ 		rdev = devm_regulator_register(&pdev->dev,
+ 					       &regulators[i].rdesc,
+ 					       &config);
+ 		if (IS_ERR(rdev)) {
+ 			dev_err(&pdev->dev, "failed to register regulator %s\n",
+-				regulator->rdesc.name);
++				regulators[i].rdesc.name);
+ 			return PTR_ERR(rdev);
+ 		}
+ 	}
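
The hi655x rework drops the single zero-filled struct that every registration shared as driver_data (its rdesc.name was never populated, so the error path printed an empty name) and instead points driver_data at each regulator's own const table entry. A compact model of the shared-copy bug and the per-entry fix:

#include <stdio.h>

struct desc { const char *name; };

static const struct desc regulators[] = {
	{ "ldo1" }, { "ldo2" }, { "ldo3" },
};

int main(void)
{
	/* Broken pattern: one zeroed copy shared by every registration,
	 * so lookups through driver_data never see per-entry data. */
	struct desc shared = { 0 };
	const struct desc *broken = &shared;
	printf("broken name: %s\n", broken->name ? broken->name : "(unset)");

	/* Fixed pattern: each registration carries its own const entry. */
	for (int i = 0; i < 3; i++) {
		const struct desc *fixed = &regulators[i];
		printf("fixed name:  %s\n", fixed->name);
	}
	return 0;
}
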
+diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c
+index 6b8be52c3772a..7514702f78cf7 100644
+--- a/drivers/regulator/mt6315-regulator.c
++++ b/drivers/regulator/mt6315-regulator.c
+@@ -223,8 +223,8 @@ static int mt6315_regulator_probe(struct spmi_device *pdev)
+ 	int i;
+ 
+ 	regmap = devm_regmap_init_spmi_ext(pdev, &mt6315_regmap_config);
+-	if (!regmap)
+-		return -ENODEV;
++	if (IS_ERR(regmap))
++		return PTR_ERR(regmap);
+ 
+ 	chip = devm_kzalloc(dev, sizeof(struct mt6315_chip), GFP_KERNEL);
+ 	if (!chip)
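
devm_regmap_init_spmi_ext() reports failure through an encoded error pointer, never NULL, so the old if (!regmap) test could not fire and the error pointer would have been dereferenced later; IS_ERR()/PTR_ERR() is the matching check. A self-contained miniature of the ERR_PTR convention, with toy helpers in place of <linux/err.h>:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *init_regmap(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000; /* fake handle */
}

int main(void)
{
	void *regmap = init_regmap(1);

	if (!regmap)		/* never true for an ERR_PTR value */
		printf("NULL check caught it\n");
	if (IS_ERR(regmap))	/* the correct test */
		printf("IS_ERR caught it: %ld\n", PTR_ERR(regmap));
	return 0;
}
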
+diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
+index 13cb6ac9a8929..1d4eb5dc4fac8 100644
+--- a/drivers/regulator/mt6358-regulator.c
++++ b/drivers/regulator/mt6358-regulator.c
+@@ -457,7 +457,7 @@ static struct mt6358_regulator_info mt6358_regulators[] = {
+ 	MT6358_REG_FIXED("ldo_vaud28", VAUD28,
+ 			 MT6358_LDO_VAUD28_CON0, 0, 2800000),
+ 	MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
+-		   MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0x10, 0),
++		   MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf, 0),
+ 	MT6358_LDO("ldo_vsim1", VSIM1, vsim_voltages, vsim_idx,
+ 		   MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
+ 	MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
+diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
+index 2e02e26b516c4..e75b0973e3256 100644
+--- a/drivers/regulator/uniphier-regulator.c
++++ b/drivers/regulator/uniphier-regulator.c
+@@ -201,6 +201,7 @@ static const struct of_device_id uniphier_regulator_match[] = {
+ 	},
+ 	{ /* Sentinel */ },
+ };
++MODULE_DEVICE_TABLE(of, uniphier_regulator_match);
+ 
+ static struct platform_driver uniphier_regulator_driver = {
+ 	.probe = uniphier_regulator_probe,
+diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
+index 75a8924ba12b3..ac9e228b56d0b 100644
+--- a/drivers/rtc/rtc-stm32.c
++++ b/drivers/rtc/rtc-stm32.c
+@@ -754,7 +754,7 @@ static int stm32_rtc_probe(struct platform_device *pdev)
+ 
+ 	ret = clk_prepare_enable(rtc->rtc_ck);
+ 	if (ret)
+-		goto err;
++		goto err_no_rtc_ck;
+ 
+ 	if (rtc->data->need_dbp)
+ 		regmap_update_bits(rtc->dbp, rtc->dbp_reg,
+@@ -830,10 +830,12 @@ static int stm32_rtc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	return 0;
++
+ err:
++	clk_disable_unprepare(rtc->rtc_ck);
++err_no_rtc_ck:
+ 	if (rtc->data->has_pclk)
+ 		clk_disable_unprepare(rtc->pclk);
+-	clk_disable_unprepare(rtc->rtc_ck);
+ 
+ 	if (rtc->data->need_dbp)
+ 		regmap_update_bits(rtc->dbp, rtc->dbp_reg, rtc->dbp_mask, 0);
+diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
+index 8d0de6adcad08..69d62421d5611 100644
+--- a/drivers/s390/cio/chp.c
++++ b/drivers/s390/cio/chp.c
+@@ -255,6 +255,9 @@ static ssize_t chp_status_write(struct device *dev,
+ 	if (!num_args)
+ 		return count;
+ 
++	/* Wait until previous actions have settled. */
++	css_wait_for_slow_path();
++
+ 	if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
+ 		mutex_lock(&cp->lock);
+ 		error = s390_vary_chpid(cp->chpid, 1);
+diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
+index c22d9ee27ba19..297fb399363cc 100644
+--- a/drivers/s390/cio/chsc.c
++++ b/drivers/s390/cio/chsc.c
+@@ -801,8 +801,6 @@ int chsc_chp_vary(struct chp_id chpid, int on)
+ {
+ 	struct channel_path *chp = chpid_to_chp(chpid);
+ 
+-	/* Wait until previous actions have settled. */
+-	css_wait_for_slow_path();
+ 	/*
+ 	 * Redo PathVerification on the devices the chpid connects to
+ 	 */
+diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
+index 24ace18240480..ec8a621d232d6 100644
+--- a/drivers/scsi/FlashPoint.c
++++ b/drivers/scsi/FlashPoint.c
+@@ -40,7 +40,7 @@ struct sccb_mgr_info {
+ 	u16 si_per_targ_ultra_nego;
+ 	u16 si_per_targ_no_disc;
+ 	u16 si_per_targ_wide_nego;
+-	u16 si_flags;
++	u16 si_mflags;
+ 	unsigned char si_card_family;
+ 	unsigned char si_bustype;
+ 	unsigned char si_card_model[3];
+@@ -1073,22 +1073,22 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
+ 		ScamFlg =
+ 		    (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
+ 
+-	pCardInfo->si_flags = 0x0000;
++	pCardInfo->si_mflags = 0x0000;
+ 
+ 	if (i & 0x01)
+-		pCardInfo->si_flags |= SCSI_PARITY_ENA;
++		pCardInfo->si_mflags |= SCSI_PARITY_ENA;
+ 
+ 	if (!(i & 0x02))
+-		pCardInfo->si_flags |= SOFT_RESET;
++		pCardInfo->si_mflags |= SOFT_RESET;
+ 
+ 	if (i & 0x10)
+-		pCardInfo->si_flags |= EXTENDED_TRANSLATION;
++		pCardInfo->si_mflags |= EXTENDED_TRANSLATION;
+ 
+ 	if (ScamFlg & SCAM_ENABLED)
+-		pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
++		pCardInfo->si_mflags |= FLAG_SCAM_ENABLED;
+ 
+ 	if (ScamFlg & SCAM_LEVEL2)
+-		pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
++		pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2;
+ 
+ 	j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+ 	if (i & 0x04) {
+@@ -1104,7 +1104,7 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
+ 
+ 	if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD))
+ 
+-		pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
++		pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN;
+ 
+ 	pCardInfo->si_card_family = HARPOON_FAMILY;
+ 	pCardInfo->si_bustype = BUSTYPE_PCI;
+@@ -1140,15 +1140,15 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
+ 
+ 	if (pCardInfo->si_card_model[1] == '3') {
+ 		if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
+-			pCardInfo->si_flags |= LOW_BYTE_TERM;
++			pCardInfo->si_mflags |= LOW_BYTE_TERM;
+ 	} else if (pCardInfo->si_card_model[2] == '0') {
+ 		temp = RD_HARPOON(ioport + hp_xfer_pad);
+ 		WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4)));
+ 		if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
+-			pCardInfo->si_flags |= LOW_BYTE_TERM;
++			pCardInfo->si_mflags |= LOW_BYTE_TERM;
+ 		WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4)));
+ 		if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
+-			pCardInfo->si_flags |= HIGH_BYTE_TERM;
++			pCardInfo->si_mflags |= HIGH_BYTE_TERM;
+ 		WR_HARPOON(ioport + hp_xfer_pad, temp);
+ 	} else {
+ 		temp = RD_HARPOON(ioport + hp_ee_ctrl);
+@@ -1166,9 +1166,9 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
+ 		WR_HARPOON(ioport + hp_ee_ctrl, temp);
+ 		WR_HARPOON(ioport + hp_xfer_pad, temp2);
+ 		if (!(temp3 & BIT(7)))
+-			pCardInfo->si_flags |= LOW_BYTE_TERM;
++			pCardInfo->si_mflags |= LOW_BYTE_TERM;
+ 		if (!(temp3 & BIT(6)))
+-			pCardInfo->si_flags |= HIGH_BYTE_TERM;
++			pCardInfo->si_mflags |= HIGH_BYTE_TERM;
+ 	}
+ 
+ 	ARAM_ACCESS(ioport);
+@@ -1275,7 +1275,7 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
+ 	WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id);
+ 	CurrCard->ourId = pCardInfo->si_id;
+ 
+-	i = (unsigned char)pCardInfo->si_flags;
++	i = (unsigned char)pCardInfo->si_mflags;
+ 	if (i & SCSI_PARITY_ENA)
+ 		WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P));
+ 
+@@ -1289,14 +1289,14 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
+ 		j |= SCSI_TERM_ENA_H;
+ 	WR_HARPOON(ioport + hp_ee_ctrl, j);
+ 
+-	if (!(pCardInfo->si_flags & SOFT_RESET)) {
++	if (!(pCardInfo->si_mflags & SOFT_RESET)) {
+ 
+ 		FPT_sresb(ioport, thisCard);
+ 
+ 		FPT_scini(thisCard, pCardInfo->si_id, 0);
+ 	}
+ 
+-	if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
++	if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS)
+ 		CurrCard->globalFlags |= F_NO_FILTER;
+ 
+ 	if (pCurrNvRam) {
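
The mechanical si_flags to si_mflags rename above appears to be motivated by a name clash: at least one architecture's siginfo headers define an accessor macro named si_flags, and a macro of that name silently rewrites the struct member everywhere it appears. A minimal reproduction of the failure class, with a hypothetical macro standing in for the real header:

#include <stdio.h>

/* Imagine a system header did this (as some siginfo headers do for si_* names): */
#define si_flags _sifields.flags

struct sccb_mgr_info {
	unsigned short si_mflags;	/* renamed member: immune to the macro */
	/* unsigned short si_flags; */	/* would expand to _sifields.flags and
					 * fail to compile with the macro live */
};

int main(void)
{
	struct sccb_mgr_info info = { .si_mflags = 0x10 };
	printf("flags: 0x%x\n", info.si_mflags);
	return 0;
}
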
+diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
+index a13c203ef7a9a..c4881657a807b 100644
+--- a/drivers/scsi/be2iscsi/be_iscsi.c
++++ b/drivers/scsi/be2iscsi/be_iscsi.c
+@@ -182,6 +182,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ 	struct beiscsi_endpoint *beiscsi_ep;
+ 	struct iscsi_endpoint *ep;
+ 	uint16_t cri_index;
++	int rc = 0;
+ 
+ 	ep = iscsi_lookup_endpoint(transport_fd);
+ 	if (!ep)
+@@ -189,15 +190,17 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ 
+ 	beiscsi_ep = ep->dd_data;
+ 
+-	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+-		return -EINVAL;
++	if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
++		rc = -EINVAL;
++		goto put_ep;
++	}
+ 
+ 	if (beiscsi_ep->phba != phba) {
+ 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ 			    "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
+ 			    beiscsi_ep->phba, phba);
+-
+-		return -EEXIST;
++		rc = -EEXIST;
++		goto put_ep;
+ 	}
+ 	cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
+ 	if (phba->conn_table[cri_index]) {
+@@ -209,7 +212,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ 				      beiscsi_ep->ep_cid,
+ 				      beiscsi_conn,
+ 				      phba->conn_table[cri_index]);
+-			return -EINVAL;
++			rc = -EINVAL;
++			goto put_ep;
+ 		}
+ 	}
+ 
+@@ -226,7 +230,10 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ 		    "BS_%d : cid %d phba->conn_table[%u]=%p\n",
+ 		    beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
+ 	phba->conn_table[cri_index] = beiscsi_conn;
+-	return 0;
++
++put_ep:
++	iscsi_put_endpoint(ep);
++	return rc;
+ }
+ 
+ static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
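
This bind_conn change, and the matching bnx2i, cxgbi, and qedi hunks below, share one shape: iscsi_lookup_endpoint() now returns a counted reference, so every exit path, success included, must drop it, which is why the early returns become goto put_ep. A generic get/put sketch of the rule:

#include <stdio.h>

struct endpoint { int refcount; };

static struct endpoint *lookup_endpoint(struct endpoint *ep)
{
	ep->refcount++;			/* lookup hands back a counted reference */
	return ep;
}

static void put_endpoint(struct endpoint *ep)
{
	ep->refcount--;
}

static int bind_conn(struct endpoint *pool, int should_fail)
{
	struct endpoint *ep = lookup_endpoint(pool);
	int rc = 0;

	if (should_fail) {
		rc = -1;
		goto put_ep;		/* never leak the reference on error */
	}
	/* ... bind work ... */

put_ep:
	put_endpoint(ep);		/* dropped on success and failure alike */
	return rc;
}

int main(void)
{
	struct endpoint ep = { 0 };
	bind_conn(&ep, 0);
	bind_conn(&ep, 1);
	printf("refcount after both paths: %d\n", ep.refcount);	/* 0 */
	return 0;
}
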
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 90fcddb76f46f..e9658a67d9da0 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -5809,6 +5809,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
+ 	.destroy_session = beiscsi_session_destroy,
+ 	.create_conn = beiscsi_conn_create,
+ 	.bind_conn = beiscsi_conn_bind,
++	.unbind_conn = iscsi_conn_unbind,
+ 	.destroy_conn = iscsi_conn_teardown,
+ 	.attr_is_visible = beiscsi_attr_is_visible,
+ 	.set_iface_param = beiscsi_iface_set_param,
+diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
+index 1e6d8f62ea3c2..2ad85c6b99fd2 100644
+--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
++++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
+@@ -1420,17 +1420,23 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+ 	 * Forcefully terminate all in progress connection recovery at the
+ 	 * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
+ 	 */
+-	if (bnx2i_adapter_ready(hba))
+-		return -EIO;
++	if (bnx2i_adapter_ready(hba)) {
++		ret_code = -EIO;
++		goto put_ep;
++	}
+ 
+ 	bnx2i_ep = ep->dd_data;
+ 	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+-	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
++	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) {
+ 		/* Peer disconnect via' FIN or RST */
+-		return -EINVAL;
++		ret_code = -EINVAL;
++		goto put_ep;
++	}
+ 
+-	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+-		return -EINVAL;
++	if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
++		ret_code = -EINVAL;
++		goto put_ep;
++	}
+ 
+ 	if (bnx2i_ep->hba != hba) {
+ 		/* Error - TCP connection does not belong to this device
+@@ -1441,7 +1447,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+ 		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+ 				  "belong to hba (%s)\n",
+ 				  hba->netdev->name);
+-		return -EEXIST;
++		ret_code = -EEXIST;
++		goto put_ep;
+ 	}
+ 	bnx2i_ep->conn = bnx2i_conn;
+ 	bnx2i_conn->ep = bnx2i_ep;
+@@ -1458,6 +1465,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+ 		bnx2i_put_rq_buf(bnx2i_conn, 0);
+ 
+ 	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
++put_ep:
++	iscsi_put_endpoint(ep);
+ 	return ret_code;
+ }
+ 
+@@ -2276,6 +2285,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
+ 	.destroy_session	= bnx2i_session_destroy,
+ 	.create_conn		= bnx2i_conn_create,
+ 	.bind_conn		= bnx2i_conn_bind,
++	.unbind_conn		= iscsi_conn_unbind,
+ 	.destroy_conn		= bnx2i_conn_destroy,
+ 	.attr_is_visible	= bnx2i_attr_is_visible,
+ 	.set_param		= iscsi_set_param,
+diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+index 37d99357120fa..edcd3fab6973c 100644
+--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
++++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+@@ -117,6 +117,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
+ 	/* connection management */
+ 	.create_conn	= cxgbi_create_conn,
+ 	.bind_conn	= cxgbi_bind_conn,
++	.unbind_conn	= iscsi_conn_unbind,
+ 	.destroy_conn	= iscsi_tcp_conn_teardown,
+ 	.start_conn	= iscsi_conn_start,
+ 	.stop_conn	= iscsi_conn_stop,
+diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+index 2c3491528d424..efb3e2b3398e2 100644
+--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
++++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+@@ -134,6 +134,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
+ 	/* connection management */
+ 	.create_conn	= cxgbi_create_conn,
+ 	.bind_conn		= cxgbi_bind_conn,
++	.unbind_conn	= iscsi_conn_unbind,
+ 	.destroy_conn	= iscsi_tcp_conn_teardown,
+ 	.start_conn		= iscsi_conn_start,
+ 	.stop_conn		= iscsi_conn_stop,
+diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
+index f078b3c4e083f..f6bcae829c29b 100644
+--- a/drivers/scsi/cxgbi/libcxgbi.c
++++ b/drivers/scsi/cxgbi/libcxgbi.c
+@@ -2690,11 +2690,13 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
+ 	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
+ 					     ppm->tformat.pgsz_idx_dflt);
+ 	if (err < 0)
+-		return err;
++		goto put_ep;
+ 
+ 	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+-	if (err)
+-		return -EINVAL;
++	if (err) {
++		err = -EINVAL;
++		goto put_ep;
++	}
+ 
+ 	/*  calculate the tag idx bits needed for this conn based on cmds_max */
+ 	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
+@@ -2715,7 +2717,9 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
+ 	/*  init recv engine */
+ 	iscsi_tcp_hdr_recv_prep(tcp_conn);
+ 
+-	return 0;
++put_ep:
++	iscsi_put_endpoint(ep);
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
+ 
+diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
+index 602c97a651bc0..9ea4ceadb5594 100644
+--- a/drivers/scsi/libfc/fc_encode.h
++++ b/drivers/scsi/libfc/fc_encode.h
+@@ -166,9 +166,11 @@ static inline int fc_ct_ns_fill(struct fc_lport *lport,
+ static inline void fc_ct_ms_fill_attr(struct fc_fdmi_attr_entry *entry,
+ 				    const char *in, size_t len)
+ {
+-	int copied = strscpy(entry->value, in, len);
+-	if (copied > 0)
+-		memset(entry->value, copied, len - copied);
++	int copied;
++
++	copied = strscpy((char *)&entry->value, in, len);
++	if (copied > 0 && (copied + 1) < len)
++		memset((entry->value + copied + 1), 0, len - copied - 1);
+ }
+ 
+ /**
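
The old FDMI padding call was doubly wrong: it passed copied as the memset fill byte and it overwrote the attribute from its start. The fix zero-fills only the bytes after the terminating NUL, so no stack garbage is sent on the wire. A userspace version of copy-then-pad, with a local bounded copy standing in for the kernel's strscpy():

#include <stdio.h>
#include <string.h>

/* Bounded copy returning the number of bytes copied (excluding NUL),
 * loosely mirroring strscpy()'s success case. */
static int bounded_copy(char *dst, const char *src, size_t len)
{
	size_t n = strnlen(src, len - 1);
	memcpy(dst, src, n);
	dst[n] = '\0';
	return (int)n;
}

int main(void)
{
	char value[16];
	memset(value, 0xAA, sizeof(value));	/* simulate stack garbage */

	int copied = bounded_copy(value, "host1", sizeof(value));
	if (copied > 0 && (size_t)(copied + 1) < sizeof(value))
		memset(value + copied + 1, 0, sizeof(value) - copied - 1);

	/* Everything past the NUL is now zero, safe to put on the wire. */
	for (size_t i = 0; i < sizeof(value); i++)
		printf("%02x ", (unsigned char)value[i]);
	printf("\n");
	return 0;
}
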
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 4834219497eeb..2aaf836786548 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1387,23 +1387,32 @@ void iscsi_session_failure(struct iscsi_session *session,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_failure);
+ 
+-void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
++static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
+ {
+ 	struct iscsi_session *session = conn->session;
+ 
+-	spin_lock_bh(&session->frwd_lock);
+-	if (session->state == ISCSI_STATE_FAILED) {
+-		spin_unlock_bh(&session->frwd_lock);
+-		return;
+-	}
++	if (session->state == ISCSI_STATE_FAILED)
++		return false;
+ 
+ 	if (conn->stop_stage == 0)
+ 		session->state = ISCSI_STATE_FAILED;
+-	spin_unlock_bh(&session->frwd_lock);
+ 
+ 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+-	iscsi_conn_error_event(conn->cls_conn, err);
++	return true;
++}
++
++void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
++{
++	struct iscsi_session *session = conn->session;
++	bool needs_evt;
++
++	spin_lock_bh(&session->frwd_lock);
++	needs_evt = iscsi_set_conn_failed(conn);
++	spin_unlock_bh(&session->frwd_lock);
++
++	if (needs_evt)
++		iscsi_conn_error_event(conn->cls_conn, err);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+ 
+@@ -2180,6 +2189,51 @@ done:
+ 	spin_unlock(&session->frwd_lock);
+ }
+ 
++/**
++ * iscsi_conn_unbind - prevent queueing to conn.
++ * @cls_conn: iscsi conn ep is bound to.
++ * @is_active: is the conn in use for boot or is this for EH/termination
++ *
++ * This must be called by drivers implementing the ep_disconnect callout.
++ * It disables queueing to the connection from libiscsi in preparation for
++ * an ep_disconnect call.
++ */
++void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
++{
++	struct iscsi_session *session;
++	struct iscsi_conn *conn;
++
++	if (!cls_conn)
++		return;
++
++	conn = cls_conn->dd_data;
++	session = conn->session;
++	/*
++	 * Wait for iscsi_eh calls to exit. We don't wait for the tmf to
++	 * complete or time out. The caller just needs to know that whatever
++	 * is still running is everything left to clean up, and that no new
++	 * cmds will be queued.
++	 */
++	mutex_lock(&session->eh_mutex);
++
++	iscsi_suspend_queue(conn);
++	iscsi_suspend_tx(conn);
++
++	spin_lock_bh(&session->frwd_lock);
++	if (!is_active) {
++		/*
++		 * if logout timed out before userspace could even send a PDU
++		 * the state might still be in ISCSI_STATE_LOGGED_IN and
++		 * allowing new cmds and TMFs.
++		 */
++		if (session->state == ISCSI_STATE_LOGGED_IN)
++			iscsi_set_conn_failed(conn);
++	}
++	spin_unlock_bh(&session->frwd_lock);
++	mutex_unlock(&session->eh_mutex);
++}
++EXPORT_SYMBOL_GPL(iscsi_conn_unbind);
++
+ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
+ 				      struct iscsi_tm *hdr)
+ {
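
The libiscsi refactor splits iscsi_conn_failure() so the state transition happens under frwd_lock while iscsi_conn_error_event() runs only after the lock drops; calling out to the event path with the spinlock held risks deadlock and inflates hold times. The new iscsi_conn_unbind() layers the same way for ep_disconnect. A generic sketch of decide-under-lock, act-outside-lock:

#include <pthread.h>	/* build: cc -pthread sketch.c */
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;	/* 0 = ok, 1 = failed */

static void error_event(void)
{
	/* May sleep, log, or take other locks: must run unlocked. */
	printf("error event delivered\n");
}

static void conn_failure(void)
{
	bool needs_evt;

	pthread_mutex_lock(&lock);
	needs_evt = (state == 0);	/* decide under the lock ... */
	if (needs_evt)
		state = 1;
	pthread_mutex_unlock(&lock);

	if (needs_evt)			/* ... act after dropping it */
		error_event();
}

int main(void)
{
	conn_failure();
	conn_failure();		/* already failed: no second event */
	return 0;
}
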
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index 46a8f2d1d2b83..8ef8a3672e494 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -868,11 +868,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
+ 		len += scnprintf(buf+len, size-len,
+ 				"WWNN x%llx ",
+ 				wwn_to_u64(ndlp->nlp_nodename.u.wwn));
+-		if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
+-			len += scnprintf(buf+len, size-len, "RPI:%03d ",
+-					ndlp->nlp_rpi);
+-		else
+-			len += scnprintf(buf+len, size-len, "RPI:none ");
++		len += scnprintf(buf+len, size-len, "RPI:x%04x ",
++				 ndlp->nlp_rpi);
+ 		len +=  scnprintf(buf+len, size-len, "flag:x%08x ",
+ 			ndlp->nlp_flag);
+ 		if (!ndlp->nlp_type)
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 3dd22da3153ff..5c4172e8c81b2 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -1985,9 +1985,20 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ 						NLP_EVT_CMPL_PLOGI);
+ 
+-		/* As long as this node is not registered with the scsi or nvme
+-		 * transport, it is no longer an active node.  Otherwise
+-		 * devloss handles the final cleanup.
++		/* If a PLOGI collision occurred, the node needs to continue
++		 * with the reglogin process.
++		 */
++		spin_lock_irq(&ndlp->lock);
++		if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
++		    ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
++			spin_unlock_irq(&ndlp->lock);
++			goto out;
++		}
++		spin_unlock_irq(&ndlp->lock);
++
++		/* No PLOGI collision and the node is not registered with the
++		 * scsi or nvme transport. It is no longer an active node. Just
++		 * start the device remove process.
+ 		 */
+ 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ 			spin_lock_irq(&ndlp->lock);
+@@ -2856,6 +2867,11 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	 * log into the remote port.
+ 	 */
+ 	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
++		spin_lock_irq(&ndlp->lock);
++		if (phba->sli_rev == LPFC_SLI_REV4)
++			ndlp->nlp_flag |= NLP_RELEASE_RPI;
++		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
++		spin_unlock_irq(&ndlp->lock);
+ 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ 					NLP_EVT_DEVICE_RM);
+ 		lpfc_els_free_iocb(phba, cmdiocb);
+@@ -4363,6 +4379,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ 	struct lpfc_vport *vport = cmdiocb->vport;
+ 	IOCB_t *irsp;
++	u32 xpt_flags = 0, did_mask = 0;
+ 
+ 	irsp = &rspiocb->iocb;
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+@@ -4378,9 +4395,20 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+ 		/* NPort Recovery mode or node is just allocated */
+ 		if (!lpfc_nlp_not_used(ndlp)) {
+-			/* If the ndlp is being used by another discovery
+-			 * thread, just unregister the RPI.
++			/* A LOGO is completing and the node is in NPR state.
++			 * If this a fabric node that cleared its transport
++			 * registration, release the rpi.
+ 			 */
++			xpt_flags = SCSI_XPT_REGD | NVME_XPT_REGD;
++			did_mask = ndlp->nlp_DID & Fabric_DID_MASK;
++			if (did_mask == Fabric_DID_MASK &&
++			    !(ndlp->fc4_xpt_flags & xpt_flags)) {
++				spin_lock_irq(&ndlp->lock);
++				ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
++				if (phba->sli_rev == LPFC_SLI_REV4)
++					ndlp->nlp_flag |= NLP_RELEASE_RPI;
++				spin_unlock_irq(&ndlp->lock);
++			}
+ 			lpfc_unreg_rpi(vport, ndlp);
+ 		} else {
+ 			/* Indicate the node has already released, should
+@@ -4416,28 +4444,37 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ {
+ 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
++	u32 mbx_flag = pmb->mbox_flag;
++	u32 mbx_cmd = pmb->u.mb.mbxCommand;
+ 
+ 	pmb->ctx_buf = NULL;
+ 	pmb->ctx_ndlp = NULL;
+ 
+-	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+-	kfree(mp);
+-	mempool_free(pmb, phba->mbox_mem_pool);
+ 	if (ndlp) {
+ 		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+-				 "0006 rpi x%x DID:%x flg:%x %d x%px\n",
++				 "0006 rpi x%x DID:%x flg:%x %d x%px "
++				 "mbx_cmd x%x mbx_flag x%x x%px\n",
+ 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+-				 kref_read(&ndlp->kref),
+-				 ndlp);
+-		/* This is the end of the default RPI cleanup logic for
+-		 * this ndlp and it could get released.  Clear the nlp_flags to
+-		 * prevent any further processing.
++				 kref_read(&ndlp->kref), ndlp, mbx_cmd,
++				 mbx_flag, pmb);
++
++		/* This ends the default/temporary RPI cleanup logic for this
++		 * ndlp, and the node and rpi need to be released. Free the rpi
++		 * first on an UNREG_LOGIN and then release the final
++		 * references.
+ 		 */
++		spin_lock_irq(&ndlp->lock);
+ 		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
++		if (mbx_cmd == MBX_UNREG_LOGIN)
++			ndlp->nlp_flag &= ~NLP_UNREG_INP;
++		spin_unlock_irq(&ndlp->lock);
+ 		lpfc_nlp_put(ndlp);
+-		lpfc_nlp_not_used(ndlp);
++		lpfc_drop_node(ndlp->vport, ndlp);
+ 	}
+ 
++	lpfc_mbuf_free(phba, mp->virt, mp->phys);
++	kfree(mp);
++	mempool_free(pmb, phba->mbox_mem_pool);
+ 	return;
+ }
+ 
+@@ -4495,11 +4532,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	/* ELS response tag <ulpIoTag> completes */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0110 ELS response tag x%x completes "
+-			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
++			 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n",
+ 			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
+ 			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+-			 ndlp->nlp_rpi);
++			 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox);
+ 	if (mbox) {
+ 		if ((rspiocb->iocb.ulpStatus == 0) &&
+ 		    (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+@@ -4579,6 +4616,20 @@ out:
+ 		spin_unlock_irq(&ndlp->lock);
+ 	}
+ 
++	/* An SLI4 NPIV instance wants to drop the node at this point under
++	 * these conditions and release the RPI.
++	 */
++	if (phba->sli_rev == LPFC_SLI_REV4 &&
++	    (vport && vport->port_type == LPFC_NPIV_PORT) &&
++	    ndlp->nlp_flag & NLP_RELEASE_RPI) {
++		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
++		spin_lock_irq(&ndlp->lock);
++		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
++		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
++		spin_unlock_irq(&ndlp->lock);
++		lpfc_drop_node(vport, ndlp);
++	}
++
+ 	/* Release the originating I/O reference. */
+ 	lpfc_els_free_iocb(phba, cmdiocb);
+ 	lpfc_nlp_put(ndlp);
+@@ -4762,10 +4813,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
+ 			 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
+-			 "RPI: x%x, fc_flag x%x\n",
++			 "RPI: x%x, fc_flag x%x refcnt %d\n",
+ 			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+-			 ndlp->nlp_rpi, vport->fc_flag);
++			 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
+ 	return 0;
+ 
+ io_err:
+@@ -5978,6 +6029,17 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
+ 		goto free_rdp_context;
+ 	}
+ 
++	/* The NPIV instance is rejecting this unsolicited ELS. Make sure the
++	 * node's assigned RPI is released, as this node will get
++	 * freed.
++	 */
++	if (phba->sli_rev == LPFC_SLI_REV4 &&
++	    vport->port_type == LPFC_NPIV_PORT) {
++		spin_lock_irq(&ndlp->lock);
++		ndlp->nlp_flag |= NLP_RELEASE_RPI;
++		spin_unlock_irq(&ndlp->lock);
++	}
++
+ 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ 	if (rc == IOCB_ERROR) {
+ 		lpfc_nlp_put(ndlp);
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index c5176f4063864..b77d0e1931f36 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -4789,12 +4789,17 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
+ 		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ 	} else {
++		/* NLP_RELEASE_RPI is only set for SLI4 ports. */
+ 		if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ 			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
++			spin_lock_irq(&ndlp->lock);
+ 			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ 			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
++			spin_unlock_irq(&ndlp->lock);
+ 		}
++		spin_lock_irq(&ndlp->lock);
+ 		ndlp->nlp_flag &= ~NLP_UNREG_INP;
++		spin_unlock_irq(&ndlp->lock);
+ 	}
+ }
+ 
+@@ -5129,8 +5134,10 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	list_del_init(&ndlp->dev_loss_evt.evt_listp);
+ 	list_del_init(&ndlp->recovery_evt.evt_listp);
+ 	lpfc_cleanup_vports_rrqs(vport, ndlp);
++
+ 	if (phba->sli_rev == LPFC_SLI_REV4)
+ 		ndlp->nlp_flag |= NLP_RELEASE_RPI;
++
+ 	return 0;
+ }
+ 
+@@ -6174,8 +6181,23 @@ lpfc_nlp_release(struct kref *kref)
+ 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ 	lpfc_cleanup_node(vport, ndlp);
+ 
+-	/* Clear Node key fields to give other threads notice
+-	 * that this node memory is not valid anymore.
++	/* Not all ELS transactions have registered the RPI with the port.
++	 * In these cases the rpi usage is temporary and the node is
++	 * released when the WQE is completed.  Catch this case to free the
++	 * RPI to the pool.  Because this node is in the release path, a lock
++	 * is unnecessary.  All references are gone and the node has been
++	 * dequeued.
++	 */
++	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
++		if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR &&
++		    !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) {
++			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
++			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
++		}
++	}
++
++	/* The node is not freed back to memory; it is released to a pool,
++	 * so the node fields need to be cleaned up.
+ 	 */
+ 	ndlp->vport = NULL;
+ 	ndlp->nlp_state = NLP_STE_FREED_NODE;
+@@ -6255,6 +6277,7 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
+ 		"node not used:   did:x%x flg:x%x refcnt:x%x",
+ 		ndlp->nlp_DID, ndlp->nlp_flag,
+ 		kref_read(&ndlp->kref));
++
+ 	if (kref_read(&ndlp->kref) == 1)
+ 		if (lpfc_nlp_put(ndlp))
+ 			return 1;
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index a67051ba3f127..d6819e2bc10b7 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -3532,13 +3532,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
+ 			list_for_each_entry_safe(ndlp, next_ndlp,
+ 						 &vports[i]->fc_nodes,
+ 						 nlp_listp) {
+-				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+-					/* Driver must assume RPI is invalid for
+-					 * any unused or inactive node.
+-					 */
+-					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+-					continue;
+-				}
+ 
+ 				spin_lock_irq(&ndlp->lock);
+ 				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
+index 9f05f5e329c6b..135f084d4de7e 100644
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -557,15 +557,24 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		/* no deferred ACC */
+ 		kfree(save_iocb);
+ 
+-		/* In order to preserve RPIs, we want to cleanup
+-		 * the default RPI the firmware created to rcv
+-		 * this ELS request. The only way to do this is
+-		 * to register, then unregister the RPI.
++		/* This is an NPIV SLI4 instance that does not need to register
++		 * a default RPI.
+ 		 */
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
+-				   NLP_RCV_PLOGI);
+-		spin_unlock_irq(&ndlp->lock);
++		if (phba->sli_rev == LPFC_SLI_REV4) {
++			mempool_free(login_mbox, phba->mbox_mem_pool);
++			login_mbox = NULL;
++		} else {
++			/* In order to preserve RPIs, we want to cleanup
++			 * the default RPI the firmware created to rcv
++			 * this ELS request. The only way to do this is
++			 * to register, then unregister the RPI.
++			 */
++			spin_lock_irq(&ndlp->lock);
++			ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
++					   NLP_RCV_PLOGI);
++			spin_unlock_irq(&ndlp->lock);
++		}
++
+ 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
+ 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ 		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index f8a5a4eb5bcef..7551743835fc0 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -13628,9 +13628,15 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+ 		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
+ 			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ 			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+-			/* Reg_LOGIN of dflt RPI was successful. Now lets get
+-			 * RID of the PPI using the same mbox buffer.
++
++			/* Reg_LOGIN of dflt RPI was successful. Mark the
++			 * node as having an UNREG_LOGIN in progress to stop
++			 * an unsolicited PLOGI from the same NPortId from
++			 * starting another mailbox transaction.
+ 			 */
++			spin_lock_irqsave(&ndlp->lock, iflags);
++			ndlp->nlp_flag |= NLP_UNREG_INP;
++			spin_unlock_irqrestore(&ndlp->lock, iflags);
+ 			lpfc_unreg_login(phba, vport->vpi,
+ 					 pmbox->un.varWords[0], pmb);
+ 			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 38fc9467c6258..73295cf74cbe3 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -3167,6 +3167,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
+ {
+ 	int sge_count;
+ 	u8  cmd_type;
++	u16 pd_index = 0;
++	u8 drive_type = 0;
+ 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
+ 	struct MR_PRIV_DEVICE *mr_device_priv_data;
+ 	mr_device_priv_data = scp->device->hostdata;
+@@ -3201,8 +3203,12 @@ megasas_build_io_fusion(struct megasas_instance *instance,
+ 		megasas_build_syspd_fusion(instance, scp, cmd, true);
+ 		break;
+ 	case NON_READ_WRITE_SYSPDIO:
+-		if (instance->secure_jbod_support ||
+-		    mr_device_priv_data->is_tm_capable)
++		pd_index = MEGASAS_PD_INDEX(scp);
++		drive_type = instance->pd_list[pd_index].driveType;
++		if ((instance->secure_jbod_support ||
++		     mr_device_priv_data->is_tm_capable) ||
++		     (instance->adapter_type >= VENTURA_SERIES &&
++		     drive_type == TYPE_ENCLOSURE))
+ 			megasas_build_syspd_fusion(instance, scp, cmd, false);
+ 		else
+ 			megasas_build_syspd_fusion(instance, scp, cmd, true);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index ae1973878cc7d..7824e77bc6e26 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -6883,8 +6883,10 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+ 		 handle, parent_handle,
+ 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
+ 
+-	if (!sas_expander->num_phys)
++	if (!sas_expander->num_phys) {
++		rc = -1;
+ 		goto out_fail;
++	}
+ 	sas_expander->phy = kcalloc(sas_expander->num_phys,
+ 	    sizeof(struct _sas_phy), GFP_KERNEL);
+ 	if (!sas_expander->phy) {
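+
+/* The mpt3sas hunk above shows a recurring goto-cleanup pitfall: every
+ * branch into an error label must set the return code first, or the
+ * function can report success after bailing out. One defensive shape
+ * (a sketch, not the driver's code) is to assume failure up front:
+ */
+	rc = -ENOMEM;			/* assume failure ... */
+	obj = kcalloc(n, sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		goto out_fail;		/* ... so bare gotos stay correct */
+	rc = 0;
+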
+diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
+index 08c05403cd720..087c7ff28cd52 100644
+--- a/drivers/scsi/qedi/qedi_iscsi.c
++++ b/drivers/scsi/qedi/qedi_iscsi.c
+@@ -377,6 +377,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
+ 	struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ 	struct qedi_endpoint *qedi_ep;
+ 	struct iscsi_endpoint *ep;
++	int rc = 0;
+ 
+ 	ep = iscsi_lookup_endpoint(transport_fd);
+ 	if (!ep)
+@@ -384,11 +385,16 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
+ 
+ 	qedi_ep = ep->dd_data;
+ 	if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+-	    (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
+-		return -EINVAL;
++	    (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) {
++		rc = -EINVAL;
++		goto put_ep;
++	}
++
++	if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
++		rc = -EINVAL;
++		goto put_ep;
++	}
+ 
+-	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+-		return -EINVAL;
+ 
+ 	qedi_ep->conn = qedi_conn;
+ 	qedi_conn->ep = qedi_ep;
+@@ -398,13 +404,18 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
+ 	qedi_conn->cmd_cleanup_req = 0;
+ 	qedi_conn->cmd_cleanup_cmpl = 0;
+ 
+-	if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
+-		return -EINVAL;
++	if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
++		rc = -EINVAL;
++		goto put_ep;
++	}
++
+ 
+ 	spin_lock_init(&qedi_conn->tmf_work_lock);
+ 	INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
+ 	init_waitqueue_head(&qedi_conn->wait_queue);
+-	return 0;
++put_ep:
++	iscsi_put_endpoint(ep);
++	return rc;
+ }
+ 
+ static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
+@@ -1401,6 +1412,7 @@ struct iscsi_transport qedi_iscsi_transport = {
+ 	.destroy_session = qedi_session_destroy,
+ 	.create_conn = qedi_conn_create,
+ 	.bind_conn = qedi_conn_bind,
++	.unbind_conn = iscsi_conn_unbind,
+ 	.start_conn = qedi_conn_start,
+ 	.stop_conn = iscsi_conn_stop,
+ 	.destroy_conn = qedi_conn_destroy,
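+
+/* A minimal caller-side sketch of the reference rule the qedi rework above
+ * follows, assuming a hypothetical do_bind_work() helper: once
+ * iscsi_lookup_endpoint() returns a counted endpoint, every exit path must
+ * drop it, so early returns funnel into one iscsi_put_endpoint() call.
+ */
+static int example_bind(u64 transport_fd)
+{
+	struct iscsi_endpoint *ep;
+	int rc = 0;
+
+	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
+
+	if (do_bind_work(ep))		/* hypothetical */
+		rc = -EINVAL;
+
+	iscsi_put_endpoint(ep);		/* dropped on every path */
+	return rc;
+}
+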
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 7bd9a4a04ad5d..ea128da08537e 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -259,6 +259,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
+ 	.start_conn             = qla4xxx_conn_start,
+ 	.create_conn            = qla4xxx_conn_create,
+ 	.bind_conn              = qla4xxx_conn_bind,
++	.unbind_conn		= iscsi_conn_unbind,
+ 	.stop_conn              = iscsi_conn_stop,
+ 	.destroy_conn           = qla4xxx_conn_destroy,
+ 	.set_param              = iscsi_set_param,
+@@ -3234,6 +3235,7 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
+ 	conn = cls_conn->dd_data;
+ 	qla_conn = conn->dd_data;
+ 	qla_conn->qla_ep = ep->dd_data;
++	iscsi_put_endpoint(ep);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 7d52a11e1b611..e172c660dcd53 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -761,6 +761,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
+ 				case 0x07: /* operation in progress */
+ 				case 0x08: /* Long write in progress */
+ 				case 0x09: /* self test in progress */
++				case 0x11: /* notify (enable spinup) required */
+ 				case 0x14: /* space allocation in progress */
+ 				case 0x1a: /* start stop unit in progress */
+ 				case 0x1b: /* sanitize in progress */
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 441f0152193f7..6ce1cc992d1d0 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -86,16 +86,10 @@ struct iscsi_internal {
+ 	struct transport_container session_cont;
+ };
+ 
+-/* Worker to perform connection failure on unresponsive connections
+- * completely in kernel space.
+- */
+-static void stop_conn_work_fn(struct work_struct *work);
+-static DECLARE_WORK(stop_conn_work, stop_conn_work_fn);
+-
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+ static struct workqueue_struct *iscsi_eh_timer_workq;
+ 
+-static struct workqueue_struct *iscsi_destroy_workq;
++static struct workqueue_struct *iscsi_conn_cleanup_workq;
+ 
+ static DEFINE_IDA(iscsi_sess_ida);
+ /*
+@@ -268,9 +262,20 @@ void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+ 
++void iscsi_put_endpoint(struct iscsi_endpoint *ep)
++{
++	put_device(&ep->dev);
++}
++EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
++
++/**
++ * iscsi_lookup_endpoint - get ep from handle
++ * @handle: endpoint handle
++ *
++ * Caller must do an iscsi_put_endpoint().
++ */
+ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+ {
+-	struct iscsi_endpoint *ep;
+ 	struct device *dev;
+ 
+ 	dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+@@ -278,13 +283,7 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+ 	if (!dev)
+ 		return NULL;
+ 
+-	ep = iscsi_dev_to_endpoint(dev);
+-	/*
+-	 * we can drop this now because the interface will prevent
+-	 * removals and lookups from racing.
+-	 */
+-	put_device(dev);
+-	return ep;
++	return iscsi_dev_to_endpoint(dev);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+ 
+@@ -1620,12 +1619,6 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
+ static struct sock *nls;
+ static DEFINE_MUTEX(rx_queue_mutex);
+ 
+-/*
+- * conn_mutex protects the {start,bind,stop,destroy}_conn from racing
+- * against the kernel stop_connection recovery mechanism
+- */
+-static DEFINE_MUTEX(conn_mutex);
+-
+ static LIST_HEAD(sesslist);
+ static DEFINE_SPINLOCK(sesslock);
+ static LIST_HEAD(connlist);
+@@ -1976,6 +1969,8 @@ static void __iscsi_unblock_session(struct work_struct *work)
+  */
+ void iscsi_unblock_session(struct iscsi_cls_session *session)
+ {
++	flush_work(&session->block_work);
++
+ 	queue_work(iscsi_eh_timer_workq, &session->unblock_work);
+ 	/*
+ 	 * Blocking the session can be done from any context so we only
+@@ -2242,6 +2237,123 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_remove_session);
+ 
++static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
++{
++	ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n");
++
++	switch (flag) {
++	case STOP_CONN_RECOVER:
++		conn->state = ISCSI_CONN_FAILED;
++		break;
++	case STOP_CONN_TERM:
++		conn->state = ISCSI_CONN_DOWN;
++		break;
++	default:
++		iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
++				      flag);
++		return;
++	}
++
++	conn->transport->stop_conn(conn, flag);
++	ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
++}
++
++static int iscsi_if_stop_conn(struct iscsi_transport *transport,
++			      struct iscsi_uevent *ev)
++{
++	int flag = ev->u.stop_conn.flag;
++	struct iscsi_cls_conn *conn;
++
++	conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
++	if (!conn)
++		return -EINVAL;
++
++	ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
++	/*
++	 * If this is a termination, we have to call stop_conn with that flag
++	 * so the correct states get set. If we haven't run the work yet, try
++	 * to avoid the extra run.
++	 */
++	if (flag == STOP_CONN_TERM) {
++		cancel_work_sync(&conn->cleanup_work);
++		iscsi_stop_conn(conn, flag);
++	} else {
++		/*
++		 * Figure out if it was the kernel or userspace initiating this.
++		 */
++		if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
++			iscsi_stop_conn(conn, flag);
++		} else {
++			ISCSI_DBG_TRANS_CONN(conn,
++					     "flush kernel conn cleanup.\n");
++			flush_work(&conn->cleanup_work);
++		}
++		/*
++		 * Only clear for recovery to avoid extra cleanup runs during
++		 * termination.
++		 */
++		clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
++	}
++	ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
++	return 0;
++}
++
++static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
++{
++	struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
++	struct iscsi_endpoint *ep;
++
++	ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
++	conn->state = ISCSI_CONN_FAILED;
++
++	if (!conn->ep || !session->transport->ep_disconnect)
++		return;
++
++	ep = conn->ep;
++	conn->ep = NULL;
++
++	session->transport->unbind_conn(conn, is_active);
++	session->transport->ep_disconnect(ep);
++	ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
++}
++
++static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
++{
++	struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
++						   cleanup_work);
++	struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
++
++	mutex_lock(&conn->ep_mutex);
++	/*
++	 * If we are not at least bound, there is nothing for us to do. Userspace
++	 * will do an ep_disconnect call if offload is used, but will not be
++	 * doing a stop since there is nothing to clean up, so we have to clear
++	 * the cleanup bit here.
++	 */
++	if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
++		ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
++		clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
++		mutex_unlock(&conn->ep_mutex);
++		return;
++	}
++
++	iscsi_ep_disconnect(conn, false);
++
++	if (system_state != SYSTEM_RUNNING) {
++		/*
++		 * If the user has set the session up to never time out, then
++		 * hang like they wanted. For all other cases fail right away,
++		 * since userspace is not going to relogin.
++		 */
++		if (session->recovery_tmo > 0)
++			session->recovery_tmo = 0;
++	}
++
++	iscsi_stop_conn(conn, STOP_CONN_RECOVER);
++	mutex_unlock(&conn->ep_mutex);
++	ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
++}
++
+ void iscsi_free_session(struct iscsi_cls_session *session)
+ {
+ 	ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
+@@ -2281,7 +2393,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
+ 
+ 	mutex_init(&conn->ep_mutex);
+ 	INIT_LIST_HEAD(&conn->conn_list);
+-	INIT_LIST_HEAD(&conn->conn_list_err);
++	INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
+ 	conn->transport = transport;
+ 	conn->cid = cid;
+ 	conn->state = ISCSI_CONN_DOWN;
+@@ -2338,7 +2450,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
+ 
+ 	spin_lock_irqsave(&connlock, flags);
+ 	list_del(&conn->conn_list);
+-	list_del(&conn->conn_list_err);
+ 	spin_unlock_irqrestore(&connlock, flags);
+ 
+ 	transport_unregister_device(&conn->dev);
+@@ -2453,77 +2564,6 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
+ 
+-/*
+- * This can be called without the rx_queue_mutex, if invoked by the kernel
+- * stop work. But, in that case, it is guaranteed not to race with
+- * iscsi_destroy by conn_mutex.
+- */
+-static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
+-{
+-	/*
+-	 * It is important that this path doesn't rely on
+-	 * rx_queue_mutex, otherwise, a thread doing allocation on a
+-	 * start_session/start_connection could sleep waiting on a
+-	 * writeback to a failed iscsi device, that cannot be recovered
+-	 * because the lock is held.  If we don't hold it here, the
+-	 * kernel stop_conn_work_fn has a chance to stop the broken
+-	 * session and resolve the allocation.
+-	 *
+-	 * Still, the user invoked .stop_conn() needs to be serialized
+-	 * with stop_conn_work_fn by a private mutex.  Not pretty, but
+-	 * it works.
+-	 */
+-	mutex_lock(&conn_mutex);
+-	switch (flag) {
+-	case STOP_CONN_RECOVER:
+-		conn->state = ISCSI_CONN_FAILED;
+-		break;
+-	case STOP_CONN_TERM:
+-		conn->state = ISCSI_CONN_DOWN;
+-		break;
+-	default:
+-		iscsi_cls_conn_printk(KERN_ERR, conn,
+-				      "invalid stop flag %d\n", flag);
+-		goto unlock;
+-	}
+-
+-	conn->transport->stop_conn(conn, flag);
+-unlock:
+-	mutex_unlock(&conn_mutex);
+-}
+-
+-static void stop_conn_work_fn(struct work_struct *work)
+-{
+-	struct iscsi_cls_conn *conn, *tmp;
+-	unsigned long flags;
+-	LIST_HEAD(recovery_list);
+-
+-	spin_lock_irqsave(&connlock, flags);
+-	if (list_empty(&connlist_err)) {
+-		spin_unlock_irqrestore(&connlock, flags);
+-		return;
+-	}
+-	list_splice_init(&connlist_err, &recovery_list);
+-	spin_unlock_irqrestore(&connlock, flags);
+-
+-	list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) {
+-		uint32_t sid = iscsi_conn_get_sid(conn);
+-		struct iscsi_cls_session *session;
+-
+-		session = iscsi_session_lookup(sid);
+-		if (session) {
+-			if (system_state != SYSTEM_RUNNING) {
+-				session->recovery_tmo = 0;
+-				iscsi_if_stop_conn(conn, STOP_CONN_TERM);
+-			} else {
+-				iscsi_if_stop_conn(conn, STOP_CONN_RECOVER);
+-			}
+-		}
+-
+-		list_del_init(&conn->conn_list_err);
+-	}
+-}
+-
+ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
+ {
+ 	struct nlmsghdr	*nlh;
+@@ -2531,12 +2571,9 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
+ 	struct iscsi_uevent *ev;
+ 	struct iscsi_internal *priv;
+ 	int len = nlmsg_total_size(sizeof(*ev));
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&connlock, flags);
+-	list_add(&conn->conn_list_err, &connlist_err);
+-	spin_unlock_irqrestore(&connlock, flags);
+-	queue_work(system_unbound_wq, &stop_conn_work);
++	if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
++		queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
+ 
+ 	priv = iscsi_if_transport_lookup(conn->transport);
+ 	if (!priv)
+@@ -2866,26 +2903,17 @@ static int
+ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ {
+ 	struct iscsi_cls_conn *conn;
+-	unsigned long flags;
+ 
+ 	conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
+ 	if (!conn)
+ 		return -EINVAL;
+ 
+-	spin_lock_irqsave(&connlock, flags);
+-	if (!list_empty(&conn->conn_list_err)) {
+-		spin_unlock_irqrestore(&connlock, flags);
+-		return -EAGAIN;
+-	}
+-	spin_unlock_irqrestore(&connlock, flags);
+-
++	ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n");
++	flush_work(&conn->cleanup_work);
+ 	ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
+ 
+-	mutex_lock(&conn_mutex);
+ 	if (transport->destroy_conn)
+ 		transport->destroy_conn(conn);
+-	mutex_unlock(&conn_mutex);
+-
+ 	return 0;
+ }
+ 
+@@ -2975,15 +3003,31 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
+ 	ep = iscsi_lookup_endpoint(ep_handle);
+ 	if (!ep)
+ 		return -EINVAL;
++
+ 	conn = ep->conn;
+-	if (conn) {
+-		mutex_lock(&conn->ep_mutex);
+-		conn->ep = NULL;
++	if (!conn) {
++		/*
++		 * conn was not even bound yet, so we can't get iscsi conn
++		 * failures yet.
++		 */
++		transport->ep_disconnect(ep);
++		goto put_ep;
++	}
++
++	mutex_lock(&conn->ep_mutex);
++	/* Check if this was a conn error and the kernel took ownership */
++	if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
++		ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+ 		mutex_unlock(&conn->ep_mutex);
+-		conn->state = ISCSI_CONN_FAILED;
++
++		flush_work(&conn->cleanup_work);
++		goto put_ep;
+ 	}
+ 
+-	transport->ep_disconnect(ep);
++	iscsi_ep_disconnect(conn, false);
++	mutex_unlock(&conn->ep_mutex);
++put_ep:
++	iscsi_put_endpoint(ep);
+ 	return 0;
+ }
+ 
+@@ -3009,6 +3053,7 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ 
+ 		ev->r.retcode = transport->ep_poll(ep,
+ 						   ev->u.ep_poll.timeout_ms);
++		iscsi_put_endpoint(ep);
+ 		break;
+ 	case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ 		rc = iscsi_if_ep_disconnect(transport,
+@@ -3639,18 +3684,129 @@ exit_host_stats:
+ 	return err;
+ }
+ 
++static int iscsi_if_transport_conn(struct iscsi_transport *transport,
++				   struct nlmsghdr *nlh)
++{
++	struct iscsi_uevent *ev = nlmsg_data(nlh);
++	struct iscsi_cls_session *session;
++	struct iscsi_cls_conn *conn = NULL;
++	struct iscsi_endpoint *ep;
++	uint32_t pdu_len;
++	int err = 0;
++
++	switch (nlh->nlmsg_type) {
++	case ISCSI_UEVENT_CREATE_CONN:
++		return iscsi_if_create_conn(transport, ev);
++	case ISCSI_UEVENT_DESTROY_CONN:
++		return iscsi_if_destroy_conn(transport, ev);
++	case ISCSI_UEVENT_STOP_CONN:
++		return iscsi_if_stop_conn(transport, ev);
++	}
++
++	/*
++	 * The following cmds need to be run under the ep_mutex so in-kernel
++	 * conn cleanup (ep_disconnect + unbind + stop conn) is not done while
++	 * these are running. They also must not run if we have just run a conn
++	 * cleanup because they would set the state in a way that might allow
++	 * IO or send IO themselves.
++	 */
++	switch (nlh->nlmsg_type) {
++	case ISCSI_UEVENT_START_CONN:
++		conn = iscsi_conn_lookup(ev->u.start_conn.sid,
++					 ev->u.start_conn.cid);
++		break;
++	case ISCSI_UEVENT_BIND_CONN:
++		conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
++		break;
++	case ISCSI_UEVENT_SEND_PDU:
++		conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
++		break;
++	}
++
++	if (!conn)
++		return -EINVAL;
++
++	mutex_lock(&conn->ep_mutex);
++	if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
++		mutex_unlock(&conn->ep_mutex);
++		ev->r.retcode = -ENOTCONN;
++		return 0;
++	}
++
++	switch (nlh->nlmsg_type) {
++	case ISCSI_UEVENT_BIND_CONN:
++		if (conn->ep) {
++			/*
++			 * For offload boot support where iscsid is restarted
++			 * during the pivot root stage, the ep will be intact
++			 * here when the new iscsid instance starts up and
++			 * reconnects.
++			 */
++			iscsi_ep_disconnect(conn, true);
++		}
++
++		session = iscsi_session_lookup(ev->u.b_conn.sid);
++		if (!session) {
++			err = -EINVAL;
++			break;
++		}
++
++		ev->r.retcode =	transport->bind_conn(session, conn,
++						ev->u.b_conn.transport_eph,
++						ev->u.b_conn.is_leading);
++		if (!ev->r.retcode)
++			conn->state = ISCSI_CONN_BOUND;
++
++		if (ev->r.retcode || !transport->ep_connect)
++			break;
++
++		ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
++		if (ep) {
++			ep->conn = conn;
++			conn->ep = ep;
++			iscsi_put_endpoint(ep);
++		} else {
++			err = -ENOTCONN;
++			iscsi_cls_conn_printk(KERN_ERR, conn,
++					      "Could not set ep conn binding\n");
++		}
++		break;
++	case ISCSI_UEVENT_START_CONN:
++		ev->r.retcode = transport->start_conn(conn);
++		if (!ev->r.retcode)
++			conn->state = ISCSI_CONN_UP;
++		break;
++	case ISCSI_UEVENT_SEND_PDU:
++		pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
++
++		if ((ev->u.send_pdu.hdr_size > pdu_len) ||
++		    (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
++			err = -EINVAL;
++			break;
++		}
++
++		ev->r.retcode =	transport->send_pdu(conn,
++				(struct iscsi_hdr *)((char *)ev + sizeof(*ev)),
++				(char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
++				ev->u.send_pdu.data_size);
++		break;
++	default:
++		err = -ENOSYS;
++	}
++
++	mutex_unlock(&conn->ep_mutex);
++	return err;
++}
+ 
+ static int
+ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ {
+ 	int err = 0;
+ 	u32 portid;
+-	u32 pdu_len;
+ 	struct iscsi_uevent *ev = nlmsg_data(nlh);
+ 	struct iscsi_transport *transport = NULL;
+ 	struct iscsi_internal *priv;
+ 	struct iscsi_cls_session *session;
+-	struct iscsi_cls_conn *conn;
+ 	struct iscsi_endpoint *ep = NULL;
+ 
+ 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
+@@ -3691,6 +3847,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 					ev->u.c_bound_session.initial_cmdsn,
+ 					ev->u.c_bound_session.cmds_max,
+ 					ev->u.c_bound_session.queue_depth);
++		iscsi_put_endpoint(ep);
+ 		break;
+ 	case ISCSI_UEVENT_DESTROY_SESSION:
+ 		session = iscsi_session_lookup(ev->u.d_session.sid);
+@@ -3715,7 +3872,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 			list_del_init(&session->sess_list);
+ 			spin_unlock_irqrestore(&sesslock, flags);
+ 
+-			queue_work(iscsi_destroy_workq, &session->destroy_work);
++			queue_work(system_unbound_wq, &session->destroy_work);
+ 		}
+ 		break;
+ 	case ISCSI_UEVENT_UNBIND_SESSION:
+@@ -3726,89 +3883,16 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 		else
+ 			err = -EINVAL;
+ 		break;
+-	case ISCSI_UEVENT_CREATE_CONN:
+-		err = iscsi_if_create_conn(transport, ev);
+-		break;
+-	case ISCSI_UEVENT_DESTROY_CONN:
+-		err = iscsi_if_destroy_conn(transport, ev);
+-		break;
+-	case ISCSI_UEVENT_BIND_CONN:
+-		session = iscsi_session_lookup(ev->u.b_conn.sid);
+-		conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
+-
+-		if (conn && conn->ep)
+-			iscsi_if_ep_disconnect(transport, conn->ep->id);
+-
+-		if (!session || !conn) {
+-			err = -EINVAL;
+-			break;
+-		}
+-
+-		mutex_lock(&conn_mutex);
+-		ev->r.retcode =	transport->bind_conn(session, conn,
+-						ev->u.b_conn.transport_eph,
+-						ev->u.b_conn.is_leading);
+-		if (!ev->r.retcode)
+-			conn->state = ISCSI_CONN_BOUND;
+-		mutex_unlock(&conn_mutex);
+-
+-		if (ev->r.retcode || !transport->ep_connect)
+-			break;
+-
+-		ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
+-		if (ep) {
+-			ep->conn = conn;
+-
+-			mutex_lock(&conn->ep_mutex);
+-			conn->ep = ep;
+-			mutex_unlock(&conn->ep_mutex);
+-		} else
+-			iscsi_cls_conn_printk(KERN_ERR, conn,
+-					      "Could not set ep conn "
+-					      "binding\n");
+-		break;
+ 	case ISCSI_UEVENT_SET_PARAM:
+ 		err = iscsi_set_param(transport, ev);
+ 		break;
+-	case ISCSI_UEVENT_START_CONN:
+-		conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
+-		if (conn) {
+-			mutex_lock(&conn_mutex);
+-			ev->r.retcode = transport->start_conn(conn);
+-			if (!ev->r.retcode)
+-				conn->state = ISCSI_CONN_UP;
+-			mutex_unlock(&conn_mutex);
+-		}
+-		else
+-			err = -EINVAL;
+-		break;
++	case ISCSI_UEVENT_CREATE_CONN:
++	case ISCSI_UEVENT_DESTROY_CONN:
+ 	case ISCSI_UEVENT_STOP_CONN:
+-		conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
+-		if (conn)
+-			iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
+-		else
+-			err = -EINVAL;
+-		break;
++	case ISCSI_UEVENT_START_CONN:
++	case ISCSI_UEVENT_BIND_CONN:
+ 	case ISCSI_UEVENT_SEND_PDU:
+-		pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
+-
+-		if ((ev->u.send_pdu.hdr_size > pdu_len) ||
+-		    (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
+-			err = -EINVAL;
+-			break;
+-		}
+-
+-		conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
+-		if (conn) {
+-			mutex_lock(&conn_mutex);
+-			ev->r.retcode =	transport->send_pdu(conn,
+-				(struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
+-				(char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
+-				ev->u.send_pdu.data_size);
+-			mutex_unlock(&conn_mutex);
+-		}
+-		else
+-			err = -EINVAL;
++		err = iscsi_if_transport_conn(transport, nlh);
+ 		break;
+ 	case ISCSI_UEVENT_GET_STATS:
+ 		err = iscsi_if_get_stats(transport, nlh);
+@@ -4656,6 +4740,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ 	int err;
+ 
+ 	BUG_ON(!tt);
++	WARN_ON(tt->ep_disconnect && !tt->unbind_conn);
+ 
+ 	priv = iscsi_if_transport_lookup(tt);
+ 	if (priv)
+@@ -4810,10 +4895,10 @@ static __init int iscsi_transport_init(void)
+ 		goto release_nls;
+ 	}
+ 
+-	iscsi_destroy_workq = alloc_workqueue("%s",
+-			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+-			1, "iscsi_destroy");
+-	if (!iscsi_destroy_workq) {
++	iscsi_conn_cleanup_workq = alloc_workqueue("%s",
++			WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
++			"iscsi_conn_cleanup");
++	if (!iscsi_conn_cleanup_workq) {
+ 		err = -ENOMEM;
+ 		goto destroy_wq;
+ 	}
+@@ -4843,7 +4928,7 @@ unregister_transport_class:
+ 
+ static void __exit iscsi_transport_exit(void)
+ {
+-	destroy_workqueue(iscsi_destroy_workq);
++	destroy_workqueue(iscsi_conn_cleanup_workq);
+ 	destroy_workqueue(iscsi_eh_timer_workq);
+ 	netlink_kernel_release(nls);
+ 	bus_unregister(&iscsi_flashnode_bus);
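+
+/* A minimal sketch of the cleanup-ownership handshake used above, assuming
+ * only a conn with a flags word and a cleanup work item: whoever wins the
+ * test_and_set_bit() race owns the teardown; everyone else waits for it to
+ * finish instead of running a second, racing cleanup.
+ */
+	if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
+		queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
+	else
+		flush_work(&conn->cleanup_work);	/* owner finishes it */
+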
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index a418c3c7001c0..304ff2ee7d75a 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -422,7 +422,6 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
+ 	struct completion *port_ready;
+ 	struct sdw_dpn_prop *dpn_prop;
+ 	struct sdw_prepare_ch prep_ch;
+-	unsigned int time_left;
+ 	bool intr = false;
+ 	int ret = 0, val;
+ 	u32 addr;
+@@ -479,15 +478,15 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
+ 
+ 		/* Wait for completion on port ready */
+ 		port_ready = &s_rt->slave->port_ready[prep_ch.num];
+-		time_left = wait_for_completion_timeout(port_ready,
+-				msecs_to_jiffies(dpn_prop->ch_prep_timeout));
++		wait_for_completion_timeout(port_ready,
++			msecs_to_jiffies(dpn_prop->ch_prep_timeout));
+ 
+ 		val = sdw_read(s_rt->slave, SDW_DPN_PREPARESTATUS(p_rt->num));
+-		val &= p_rt->ch_mask;
+-		if (!time_left || val) {
++		if ((val < 0) || (val & p_rt->ch_mask)) {
++			ret = (val < 0) ? val : -ETIMEDOUT;
+ 			dev_err(&s_rt->slave->dev,
+-				"Chn prep failed for port:%d\n", prep_ch.num);
+-			return -ETIMEDOUT;
++				"Chn prep failed for port %d: %d\n", prep_ch.num, ret);
++			return ret;
+ 		}
+ 	}
+ 
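+/* A minimal sketch of the completion idiom the soundwire change adopts:
+ * the timeout is only an upper bound, the device register is the source of
+ * truth, and a failed register read is propagated instead of being folded
+ * into -ETIMEDOUT. read_status() is a hypothetical stand-in for sdw_read().
+ */
+	int val;
+
+	wait_for_completion_timeout(port_ready, msecs_to_jiffies(tmo_ms));
+	val = read_status(slave);		/* hypothetical */
+	if (val < 0 || (val & ch_mask))
+		return (val < 0) ? val : -ETIMEDOUT;
+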
+diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
+index df981e55c24c9..89b91cdfb2a54 100644
+--- a/drivers/spi/spi-loopback-test.c
++++ b/drivers/spi/spi-loopback-test.c
+@@ -874,7 +874,7 @@ static int spi_test_run_iter(struct spi_device *spi,
+ 		test.transfers[i].len = len;
+ 		if (test.transfers[i].tx_buf)
+ 			test.transfers[i].tx_buf += tx_off;
+-		if (test.transfers[i].tx_buf)
++		if (test.transfers[i].rx_buf)
+ 			test.transfers[i].rx_buf += rx_off;
+ 	}
+ 
+diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
+index ecba6b4a5d85d..b2c4621db34d7 100644
+--- a/drivers/spi/spi-meson-spicc.c
++++ b/drivers/spi/spi-meson-spicc.c
+@@ -725,7 +725,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
+ 	ret = clk_prepare_enable(spicc->pclk);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "pclk clock enable failed\n");
+-		goto out_master;
++		goto out_core_clk;
+ 	}
+ 
+ 	device_reset_optional(&pdev->dev);
+@@ -752,7 +752,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
+ 	ret = meson_spicc_clk_init(spicc);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "clock registration failed\n");
+-		goto out_master;
++		goto out_clk;
+ 	}
+ 
+ 	ret = devm_spi_register_master(&pdev->dev, master);
+@@ -764,9 +764,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ out_clk:
+-	clk_disable_unprepare(spicc->core);
+ 	clk_disable_unprepare(spicc->pclk);
+ 
++out_core_clk:
++	clk_disable_unprepare(spicc->core);
++
+ out_master:
+ 	spi_master_put(master);
+ 
+diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
+index ccd817ee4917b..0d0cd061d3563 100644
+--- a/drivers/spi/spi-omap-100k.c
++++ b/drivers/spi/spi-omap-100k.c
+@@ -241,7 +241,7 @@ static int omap1_spi100k_setup_transfer(struct spi_device *spi,
+ 	else
+ 		word_len = spi->bits_per_word;
+ 
+-	if (spi->bits_per_word > 32)
++	if (word_len > 32)
+ 		return -EINVAL;
+ 	cs->word_len = word_len;
+ 
+diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
+index cc8401980125d..23ad052528dbe 100644
+--- a/drivers/spi/spi-sun6i.c
++++ b/drivers/spi/spi-sun6i.c
+@@ -379,6 +379,10 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+ 	}
+ 
+ 	sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
++	/* Finally enable the bus - doing so before might raise SCK to HIGH */
++	reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG);
++	reg |= SUN6I_GBL_CTL_BUS_ENABLE;
++	sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg);
+ 
+ 	/* Setup the transfer now... */
+ 	if (sspi->tx_buf)
+@@ -504,7 +508,7 @@ static int sun6i_spi_runtime_resume(struct device *dev)
+ 	}
+ 
+ 	sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
+-			SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
++			SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
+index b459e369079f8..7fb020a1d66aa 100644
+--- a/drivers/spi/spi-topcliff-pch.c
++++ b/drivers/spi/spi-topcliff-pch.c
+@@ -580,8 +580,10 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
+ 	data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
+ 	if (data->pkt_tx_buff != NULL) {
+ 		data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
+-		if (!data->pkt_rx_buff)
++		if (!data->pkt_rx_buff) {
+ 			kfree(data->pkt_tx_buff);
++			data->pkt_tx_buff = NULL;
++		}
+ 	}
+ 
+ 	if (!data->pkt_rx_buff) {
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index e067c54e87dd7..2350463bfb8f8 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -2066,6 +2066,7 @@ of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
+ 	/* Store a pointer to the node in the device structure */
+ 	of_node_get(nc);
+ 	spi->dev.of_node = nc;
++	spi->dev.fwnode = of_fwnode_handle(nc);
+ 
+ 	/* Register the new device */
+ 	rc = spi_add_device(spi);
+@@ -2629,9 +2630,10 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
+ 		native_cs_mask |= BIT(i);
+ 	}
+ 
+-	ctlr->unused_native_cs = ffz(native_cs_mask);
+-	if (num_cs_gpios && ctlr->max_native_cs &&
+-	    ctlr->unused_native_cs >= ctlr->max_native_cs) {
++	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
++
++	if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
++	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ 		dev_err(dev, "No unused native chip select available\n");
+ 		return -EINVAL;
+ 	}
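+
+/* A small standalone illustration of the bit-math swap above: ffs() is
+ * 1-based with ffs(0) == 0, so ffs(~mask) - 1 yields the index of the
+ * first zero bit, matching ffz() whenever a zero bit exists.
+ */
+	u32 mask = 0x0000000b;			/* native CS 0, 1 and 3 in use */
+	int first_free = ffs(~mask) - 1;	/* == 2, same as ffz(mask) */
+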
+diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
+index f49ab1aa2149a..4161e5d1f276e 100644
+--- a/drivers/ssb/scan.c
++++ b/drivers/ssb/scan.c
+@@ -325,6 +325,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
+ 	if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
+ 		pr_err("More than %d ssb cores found (%d)\n",
+ 		       SSB_MAX_NR_CORES, bus->nr_devices);
++		err = -EINVAL;
+ 		goto err_unmap;
+ 	}
+ 	if (bus->bustype == SSB_BUSTYPE_SSB) {
+diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c
+index 7fe0afb42234f..66c5c2169704b 100644
+--- a/drivers/ssb/sdio.c
++++ b/drivers/ssb/sdio.c
+@@ -411,7 +411,6 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer,
+ 	sdio_claim_host(bus->host_sdio);
+ 	if (unlikely(ssb_sdio_switch_core(bus, dev))) {
+ 		error = -EIO;
+-		memset((void *)buffer, 0xff, count);
+ 		goto err_out;
+ 	}
+ 	offset |= bus->sdio_sbaddr & 0xffff;
+diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
+index eeeeec97ad278..b545c2ca80a41 100644
+--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
++++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
+@@ -84,9 +84,9 @@ static void reset(struct fbtft_par *par)
+ 
+ 	dev_dbg(par->info->device, "%s()\n", __func__);
+ 
+-	gpiod_set_value(par->gpio.reset, 0);
+-	udelay(20);
+ 	gpiod_set_value(par->gpio.reset, 1);
++	udelay(20);
++	gpiod_set_value(par->gpio.reset, 0);
+ 	mdelay(120);
+ }
+ 
+@@ -194,12 +194,12 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
+ 	/* select chip */
+ 	if (*buf) {
+ 		/* cs1 */
+-		gpiod_set_value(par->CS0, 1);
+-		gpiod_set_value(par->CS1, 0);
+-	} else {
+-		/* cs0 */
+ 		gpiod_set_value(par->CS0, 0);
+ 		gpiod_set_value(par->CS1, 1);
++	} else {
++		/* cs0 */
++		gpiod_set_value(par->CS0, 1);
++		gpiod_set_value(par->CS1, 0);
+ 	}
+ 
+ 	gpiod_set_value(par->RS, 0); /* RS->0 (command mode) */
+@@ -397,8 +397,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+ 	}
+ 	kfree(convert_buf);
+ 
+-	gpiod_set_value(par->CS0, 1);
+-	gpiod_set_value(par->CS1, 1);
++	gpiod_set_value(par->CS0, 0);
++	gpiod_set_value(par->CS1, 0);
+ 
+ 	return ret;
+ }
+@@ -419,10 +419,10 @@ static int write(struct fbtft_par *par, void *buf, size_t len)
+ 		for (i = 0; i < 8; ++i)
+ 			gpiod_set_value(par->gpio.db[i], data & (1 << i));
+ 		/* set E */
+-		gpiod_set_value(par->EPIN, 1);
++		gpiod_set_value(par->EPIN, 0);
+ 		udelay(5);
+ 		/* unset E - write */
+-		gpiod_set_value(par->EPIN, 0);
++		gpiod_set_value(par->EPIN, 1);
+ 		udelay(1);
+ 	}
+ 
+diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
+index e2c7646588f8c..1629c2c440a97 100644
+--- a/drivers/staging/fbtft/fb_bd663474.c
++++ b/drivers/staging/fbtft/fb_bd663474.c
+@@ -12,7 +12,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/delay.h>
+ 
+ #include "fbtft.h"
+@@ -24,9 +23,6 @@
+ 
+ static int init_display(struct fbtft_par *par)
+ {
+-	if (par->gpio.cs)
+-		gpiod_set_value(par->gpio.cs, 0);  /* Activate chip */
+-
+ 	par->fbtftops.reset(par);
+ 
+ 	/* Initialization sequence from Lib_UTFT */
+diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
+index 05648c3ffe474..6582a2c90aafc 100644
+--- a/drivers/staging/fbtft/fb_ili9163.c
++++ b/drivers/staging/fbtft/fb_ili9163.c
+@@ -11,7 +11,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/delay.h>
+ #include <video/mipi_display.h>
+ 
+@@ -77,9 +76,6 @@ static int init_display(struct fbtft_par *par)
+ {
+ 	par->fbtftops.reset(par);
+ 
+-	if (par->gpio.cs)
+-		gpiod_set_value(par->gpio.cs, 0);  /* Activate chip */
+-
+ 	write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
+ 	mdelay(500);
+ 	write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); /* exit sleep */
+diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
+index f2e72d14431db..a8f4c618b754c 100644
+--- a/drivers/staging/fbtft/fb_ili9320.c
++++ b/drivers/staging/fbtft/fb_ili9320.c
+@@ -8,7 +8,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/spi/spi.h>
+ #include <linux/delay.h>
+ 
+diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
+index c9aa4cb431236..16d3b17ca2798 100644
+--- a/drivers/staging/fbtft/fb_ili9325.c
++++ b/drivers/staging/fbtft/fb_ili9325.c
+@@ -10,7 +10,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/delay.h>
+ 
+ #include "fbtft.h"
+@@ -85,9 +84,6 @@ static int init_display(struct fbtft_par *par)
+ {
+ 	par->fbtftops.reset(par);
+ 
+-	if (par->gpio.cs)
+-		gpiod_set_value(par->gpio.cs, 0);  /* Activate chip */
+-
+ 	bt &= 0x07;
+ 	vc &= 0x07;
+ 	vrh &= 0x0f;
+diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c
+index 415183c7054a8..704236bcaf3ff 100644
+--- a/drivers/staging/fbtft/fb_ili9340.c
++++ b/drivers/staging/fbtft/fb_ili9340.c
+@@ -8,7 +8,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/delay.h>
+ #include <video/mipi_display.h>
+ 
+diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
+index 8c7de32903434..62f27172f8449 100644
+--- a/drivers/staging/fbtft/fb_s6d1121.c
++++ b/drivers/staging/fbtft/fb_s6d1121.c
+@@ -12,7 +12,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/delay.h>
+ 
+ #include "fbtft.h"
+@@ -29,9 +28,6 @@ static int init_display(struct fbtft_par *par)
+ {
+ 	par->fbtftops.reset(par);
+ 
+-	if (par->gpio.cs)
+-		gpiod_set_value(par->gpio.cs, 0);  /* Activate chip */
+-
+ 	/* Initialization sequence from Lib_UTFT */
+ 
+ 	write_reg(par, 0x0011, 0x2004);
+diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
+index 6f7249493ea3b..7b9ab39e1c1a8 100644
+--- a/drivers/staging/fbtft/fb_sh1106.c
++++ b/drivers/staging/fbtft/fb_sh1106.c
+@@ -9,7 +9,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/delay.h>
+ 
+ #include "fbtft.h"
+diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
+index 7a3fe022cc69d..f27bab38b3ec4 100644
+--- a/drivers/staging/fbtft/fb_ssd1289.c
++++ b/drivers/staging/fbtft/fb_ssd1289.c
+@@ -10,7 +10,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ 
+ #include "fbtft.h"
+ 
+@@ -28,9 +27,6 @@ static int init_display(struct fbtft_par *par)
+ {
+ 	par->fbtftops.reset(par);
+ 
+-	if (par->gpio.cs)
+-		gpiod_set_value(par->gpio.cs, 0);  /* Activate chip */
+-
+ 	write_reg(par, 0x00, 0x0001);
+ 	write_reg(par, 0x03, 0xA8A4);
+ 	write_reg(par, 0x0C, 0x0000);
+diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
+index 8a3140d41d8bb..796a2ac3e1948 100644
+--- a/drivers/staging/fbtft/fb_ssd1325.c
++++ b/drivers/staging/fbtft/fb_ssd1325.c
+@@ -35,8 +35,6 @@ static int init_display(struct fbtft_par *par)
+ {
+ 	par->fbtftops.reset(par);
+ 
+-	gpiod_set_value(par->gpio.cs, 0);
+-
+ 	write_reg(par, 0xb3);
+ 	write_reg(par, 0xf0);
+ 	write_reg(par, 0xae);
+diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
+index 37622c9462aa7..ec5eced7f8cbd 100644
+--- a/drivers/staging/fbtft/fb_ssd1331.c
++++ b/drivers/staging/fbtft/fb_ssd1331.c
+@@ -81,8 +81,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
+ 	va_start(args, len);
+ 
+ 	*buf = (u8)va_arg(args, unsigned int);
+-	if (par->gpio.dc)
+-		gpiod_set_value(par->gpio.dc, 0);
++	gpiod_set_value(par->gpio.dc, 0);
+ 	ret = par->fbtftops.write(par, par->buf, sizeof(u8));
+ 	if (ret < 0) {
+ 		va_end(args);
+@@ -104,8 +103,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
+ 			return;
+ 		}
+ 	}
+-	if (par->gpio.dc)
+-		gpiod_set_value(par->gpio.dc, 1);
++	gpiod_set_value(par->gpio.dc, 1);
+ 	va_end(args);
+ }
+ 
+diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
+index 900b28d826b28..cf263a58a1489 100644
+--- a/drivers/staging/fbtft/fb_ssd1351.c
++++ b/drivers/staging/fbtft/fb_ssd1351.c
+@@ -2,7 +2,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/spi/spi.h>
+ #include <linux/delay.h>
+ 
+diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
+index c77832ae5e5ba..c680160d63807 100644
+--- a/drivers/staging/fbtft/fb_upd161704.c
++++ b/drivers/staging/fbtft/fb_upd161704.c
+@@ -12,7 +12,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/delay.h>
+ 
+ #include "fbtft.h"
+@@ -26,9 +25,6 @@ static int init_display(struct fbtft_par *par)
+ {
+ 	par->fbtftops.reset(par);
+ 
+-	if (par->gpio.cs)
+-		gpiod_set_value(par->gpio.cs, 0);  /* Activate chip */
+-
+ 	/* Initialization sequence from Lib_UTFT */
+ 
+ 	/* register reset */
+diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
+index 76b25df376b8f..a57e1f4feef35 100644
+--- a/drivers/staging/fbtft/fb_watterott.c
++++ b/drivers/staging/fbtft/fb_watterott.c
+@@ -8,7 +8,6 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/delay.h>
+ 
+ #include "fbtft.h"
+diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
+index 63c65dd67b175..3d422bc116411 100644
+--- a/drivers/staging/fbtft/fbtft-bus.c
++++ b/drivers/staging/fbtft/fbtft-bus.c
+@@ -135,8 +135,7 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
+ 	remain = len / 2;
+ 	vmem16 = (u16 *)(par->info->screen_buffer + offset);
+ 
+-	if (par->gpio.dc)
+-		gpiod_set_value(par->gpio.dc, 1);
++	gpiod_set_value(par->gpio.dc, 1);
+ 
+ 	/* non buffered write */
+ 	if (!par->txbuf.buf)
+diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
+index 4f362dad4436a..3723269890d5f 100644
+--- a/drivers/staging/fbtft/fbtft-core.c
++++ b/drivers/staging/fbtft/fbtft-core.c
+@@ -38,8 +38,7 @@ int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc)
+ {
+ 	int ret;
+ 
+-	if (par->gpio.dc)
+-		gpiod_set_value(par->gpio.dc, dc);
++	gpiod_set_value(par->gpio.dc, dc);
+ 
+ 	ret = par->fbtftops.write(par, buf, len);
+ 	if (ret < 0)
+@@ -76,20 +75,16 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
+ 				  struct gpio_desc **gpiop)
+ {
+ 	struct device *dev = par->info->device;
+-	int ret = 0;
+ 
+ 	*gpiop = devm_gpiod_get_index_optional(dev, name, index,
+-					       GPIOD_OUT_HIGH);
+-	if (IS_ERR(*gpiop)) {
+-		ret = PTR_ERR(*gpiop);
+-		dev_err(dev,
+-			"Failed to request %s GPIO: %d\n", name, ret);
+-		return ret;
+-	}
++					       GPIOD_OUT_LOW);
++	if (IS_ERR(*gpiop))
++		return dev_err_probe(dev, PTR_ERR(*gpiop), "Failed to request %s GPIO\n", name);
++
+ 	fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
+ 		      __func__, name);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int fbtft_request_gpios(struct fbtft_par *par)
+@@ -226,11 +221,15 @@ static void fbtft_reset(struct fbtft_par *par)
+ {
+ 	if (!par->gpio.reset)
+ 		return;
++
+ 	fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
++
+ 	gpiod_set_value_cansleep(par->gpio.reset, 1);
+ 	usleep_range(20, 40);
+ 	gpiod_set_value_cansleep(par->gpio.reset, 0);
+ 	msleep(120);
++
++	gpiod_set_value_cansleep(par->gpio.cs, 1);  /* Activate chip */
+ }
+ 
+ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
+@@ -922,8 +921,6 @@ static int fbtft_init_display_from_property(struct fbtft_par *par)
+ 		goto out_free;
+ 
+ 	par->fbtftops.reset(par);
+-	if (par->gpio.cs)
+-		gpiod_set_value(par->gpio.cs, 0);  /* Activate chip */
+ 
+ 	index = -1;
+ 	val = values[++index];
+@@ -1018,8 +1015,6 @@ int fbtft_init_display(struct fbtft_par *par)
+ 	}
+ 
+ 	par->fbtftops.reset(par);
+-	if (par->gpio.cs)
+-		gpiod_set_value(par->gpio.cs, 0);  /* Activate chip */
+ 
+ 	i = 0;
+ 	while (i < FBTFT_MAX_INIT_SEQUENCE) {
+diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
+index 0863d257d7620..de1904a443c27 100644
+--- a/drivers/staging/fbtft/fbtft-io.c
++++ b/drivers/staging/fbtft/fbtft-io.c
+@@ -142,12 +142,12 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
+ 		data = *(u8 *)buf;
+ 
+ 		/* Start writing by pulling down /WR */
+-		gpiod_set_value(par->gpio.wr, 0);
++		gpiod_set_value(par->gpio.wr, 1);
+ 
+ 		/* Set data */
+ #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ 		if (data == prev_data) {
+-			gpiod_set_value(par->gpio.wr, 0); /* used as delay */
++			gpiod_set_value(par->gpio.wr, 1); /* used as delay */
+ 		} else {
+ 			for (i = 0; i < 8; i++) {
+ 				if ((data & 1) != (prev_data & 1))
+@@ -165,7 +165,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
+ #endif
+ 
+ 		/* Pullup /WR */
+-		gpiod_set_value(par->gpio.wr, 1);
++		gpiod_set_value(par->gpio.wr, 0);
+ 
+ #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ 		prev_data = *(u8 *)buf;
+@@ -192,12 +192,12 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
+ 		data = *(u16 *)buf;
+ 
+ 		/* Start writing by pulling down /WR */
+-		gpiod_set_value(par->gpio.wr, 0);
++		gpiod_set_value(par->gpio.wr, 1);
+ 
+ 		/* Set data */
+ #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ 		if (data == prev_data) {
+-			gpiod_set_value(par->gpio.wr, 0); /* used as delay */
++			gpiod_set_value(par->gpio.wr, 1); /* used as delay */
+ 		} else {
+ 			for (i = 0; i < 16; i++) {
+ 				if ((data & 1) != (prev_data & 1))
+@@ -215,7 +215,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
+ #endif
+ 
+ 		/* Pullup /WR */
+-		gpiod_set_value(par->gpio.wr, 1);
++		gpiod_set_value(par->gpio.wr, 0);
+ 
+ #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ 		prev_data = *(u16 *)buf;
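+
+/* A minimal sketch of the gpiod polarity convention behind the fbtft
+ * changes above: gpiod_set_value() takes a logical value, so for a line
+ * declared active-low (e.g. GPIO_ACTIVE_LOW in the device tree) writing 1
+ * asserts it and drives the physical pin low. Strobing a /WR line is then
+ * written as assert/deassert rather than raw levels:
+ */
+	gpiod_set_value(wr, 1);	/* assert /WR: physical pin goes low */
+	/* ... place data on the bus ... */
+	gpiod_set_value(wr, 0);	/* deassert /WR: physical pin back high */
+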
+diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
+index 571f47d394843..bd5f874334043 100644
+--- a/drivers/staging/gdm724x/gdm_lte.c
++++ b/drivers/staging/gdm724x/gdm_lte.c
+@@ -611,10 +611,12 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
+ 						  * bytes (99,130,83,99 dec)
+ 						  */
+ 			} __packed;
+-			void *addr = buf + sizeof(struct iphdr) +
+-				sizeof(struct udphdr) +
+-				offsetof(struct dhcp_packet, chaddr);
+-			ether_addr_copy(nic->dest_mac_addr, addr);
++			int offset = sizeof(struct iphdr) +
++				     sizeof(struct udphdr) +
++				     offsetof(struct dhcp_packet, chaddr);
++			if (offset + ETH_ALEN > len)
++				return;
++			ether_addr_copy(nic->dest_mac_addr, buf + offset);
+ 		}
+ 	}
+ 
+@@ -677,6 +679,7 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
+ 	struct sdu *sdu = NULL;
+ 	u8 endian = phy_dev->get_endian(phy_dev->priv_dev);
+ 	u8 *data = (u8 *)multi_sdu->data;
++	int copied;
+ 	u16 i = 0;
+ 	u16 num_packet;
+ 	u16 hci_len;
+@@ -688,6 +691,12 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
+ 	num_packet = gdm_dev16_to_cpu(endian, multi_sdu->num_packet);
+ 
+ 	for (i = 0; i < num_packet; i++) {
++		copied = data - multi_sdu->data;
++		if (len < copied + sizeof(*sdu)) {
++			pr_err("rx prevent buffer overflow");
++			return;
++		}
++
+ 		sdu = (struct sdu *)data;
+ 
+ 		cmd_evt  = gdm_dev16_to_cpu(endian, sdu->cmd_evt);
+@@ -698,7 +707,8 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
+ 			pr_err("rx sdu wrong hci %04x\n", cmd_evt);
+ 			return;
+ 		}
+-		if (hci_len < 12) {
++		if (hci_len < 12 ||
++		    len < copied + sizeof(*sdu) + (hci_len - 12)) {
+ 			pr_err("rx sdu invalid len %d\n", hci_len);
+ 			return;
+ 		}
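+
+/* A minimal sketch of the bounds discipline the gdm_lte fix applies,
+ * assuming a hypothetical stream of variable-length records in buf/len:
+ * check that the header fits, then that its payload fits, before
+ * dereferencing either. consume() is a hypothetical handler.
+ */
+	size_t off = 0;
+
+	while (off + sizeof(struct rec_hdr) <= len) {
+		struct rec_hdr *h = (struct rec_hdr *)(buf + off);
+
+		if (h->payload_len > len - off - sizeof(*h))
+			break;			/* payload would overrun */
+		consume(h);			/* hypothetical */
+		off += sizeof(*h) + h->payload_len;
+	}
+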
+diff --git a/drivers/staging/hikey9xx/hi6421v600-regulator.c b/drivers/staging/hikey9xx/hi6421v600-regulator.c
+index e10fe3058176d..91136db3961ee 100644
+--- a/drivers/staging/hikey9xx/hi6421v600-regulator.c
++++ b/drivers/staging/hikey9xx/hi6421v600-regulator.c
+@@ -129,7 +129,7 @@ static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
+ {
+ 	struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+ 	struct hi6421_spmi_pmic *pmic = sreg->pmic;
+-	u32 reg_val;
++	unsigned int reg_val;
+ 
+ 	regmap_read(pmic->regmap, rdev->desc->enable_reg, &reg_val);
+ 
+@@ -144,14 +144,17 @@ static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
+ {
+ 	struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+ 	struct hi6421_spmi_pmic *pmic = sreg->pmic;
+-	u32 val;
++	unsigned int val;
+ 
+ 	switch (mode) {
+ 	case REGULATOR_MODE_NORMAL:
+ 		val = 0;
+ 		break;
+ 	case REGULATOR_MODE_IDLE:
+-		val = sreg->eco_mode_mask << (ffs(sreg->eco_mode_mask) - 1);
++		if (!sreg->eco_mode_mask)
++			return -EINVAL;
++
++		val = sreg->eco_mode_mask;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
+index e5f200e649933..2d6e0056be624 100644
+--- a/drivers/staging/media/hantro/hantro_drv.c
++++ b/drivers/staging/media/hantro/hantro_drv.c
+@@ -56,16 +56,12 @@ dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
+ 	return hantro_get_dec_buf_addr(ctx, buf);
+ }
+ 
+-static void hantro_job_finish(struct hantro_dev *vpu,
+-			      struct hantro_ctx *ctx,
+-			      enum vb2_buffer_state result)
++static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
++				    struct hantro_ctx *ctx,
++				    enum vb2_buffer_state result)
+ {
+ 	struct vb2_v4l2_buffer *src, *dst;
+ 
+-	pm_runtime_mark_last_busy(vpu->dev);
+-	pm_runtime_put_autosuspend(vpu->dev);
+-	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
+-
+ 	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ 	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ 
+@@ -81,6 +77,18 @@ static void hantro_job_finish(struct hantro_dev *vpu,
+ 					 result);
+ }
+ 
++static void hantro_job_finish(struct hantro_dev *vpu,
++			      struct hantro_ctx *ctx,
++			      enum vb2_buffer_state result)
++{
++	pm_runtime_mark_last_busy(vpu->dev);
++	pm_runtime_put_autosuspend(vpu->dev);
++
++	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
++
++	hantro_job_finish_no_pm(vpu, ctx, result);
++}
++
+ void hantro_irq_done(struct hantro_dev *vpu,
+ 		     enum vb2_buffer_state result)
+ {
+@@ -152,12 +160,15 @@ static void device_run(void *priv)
+ 	src = hantro_get_src_buf(ctx);
+ 	dst = hantro_get_dst_buf(ctx);
+ 
++	ret = pm_runtime_get_sync(ctx->dev->dev);
++	if (ret < 0) {
++		pm_runtime_put_noidle(ctx->dev->dev);
++		goto err_cancel_job;
++	}
++
+ 	ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
+ 	if (ret)
+ 		goto err_cancel_job;
+-	ret = pm_runtime_get_sync(ctx->dev->dev);
+-	if (ret < 0)
+-		goto err_cancel_job;
+ 
+ 	v4l2_m2m_buf_copy_metadata(src, dst, true);
+ 
+@@ -165,7 +176,7 @@ static void device_run(void *priv)
+ 	return;
+ 
+ err_cancel_job:
+-	hantro_job_finish(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
++	hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
+ }
+ 
+ static struct v4l2_m2m_ops vpu_m2m_ops = {
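+
+/* A minimal sketch of the runtime-PM rule behind the reordering above:
+ * pm_runtime_get_sync() bumps the usage count even when it fails, so an
+ * error path must drop it with pm_runtime_put_noidle(); the newer
+ * pm_runtime_resume_and_get() (used by rkvdec below) folds that in.
+ */
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret < 0)
+		return ret;	/* usage count already dropped on failure */
+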
+diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
+index 1bc118e375a12..7ccc6405036ae 100644
+--- a/drivers/staging/media/hantro/hantro_v4l2.c
++++ b/drivers/staging/media/hantro/hantro_v4l2.c
+@@ -639,7 +639,14 @@ static int hantro_buf_prepare(struct vb2_buffer *vb)
+ 	ret = hantro_buf_plane_check(vb, pix_fmt);
+ 	if (ret)
+ 		return ret;
+-	vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
++	/*
++	 * Buffer's bytesused must be written by the driver for CAPTURE buffers.
++	 * (For OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
++	 * it to the buffer length.)
++	 */
++	if (V4L2_TYPE_IS_CAPTURE(vq->type))
++		vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
+index ef5add079774e..7f4b967646d98 100644
+--- a/drivers/staging/media/imx/imx-media-csi.c
++++ b/drivers/staging/media/imx/imx-media-csi.c
+@@ -753,9 +753,10 @@ static int csi_setup(struct csi_priv *priv)
+ 
+ static int csi_start(struct csi_priv *priv)
+ {
+-	struct v4l2_fract *output_fi;
++	struct v4l2_fract *input_fi, *output_fi;
+ 	int ret;
+ 
++	input_fi = &priv->frame_interval[CSI_SINK_PAD];
+ 	output_fi = &priv->frame_interval[priv->active_output_pad];
+ 
+ 	/* start upstream */
+@@ -764,6 +765,17 @@ static int csi_start(struct csi_priv *priv)
+ 	if (ret)
+ 		return ret;
+ 
++	/* Skip first few frames from a BT.656 source */
++	if (priv->upstream_ep.bus_type == V4L2_MBUS_BT656) {
++		u32 delay_usec, bad_frames = 20;
++
++		delay_usec = DIV_ROUND_UP_ULL((u64)USEC_PER_SEC *
++			input_fi->numerator * bad_frames,
++			input_fi->denominator);
++
++		usleep_range(delay_usec, delay_usec + 1000);
++	}
++
+ 	if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ 		ret = csi_idmac_start(priv);
+ 		if (ret)
+diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
+index a01a7364b4b94..b365790256e42 100644
+--- a/drivers/staging/media/imx/imx7-mipi-csis.c
++++ b/drivers/staging/media/imx/imx7-mipi-csis.c
+@@ -597,13 +597,15 @@ static void mipi_csis_clear_counters(struct csi_state *state)
+ 
+ static void mipi_csis_log_counters(struct csi_state *state, bool non_errors)
+ {
+-	int i = non_errors ? MIPI_CSIS_NUM_EVENTS : MIPI_CSIS_NUM_EVENTS - 4;
++	unsigned int num_events = non_errors ? MIPI_CSIS_NUM_EVENTS
++				: MIPI_CSIS_NUM_EVENTS - 6;
+ 	struct device *dev = &state->pdev->dev;
+ 	unsigned long flags;
++	unsigned int i;
+ 
+ 	spin_lock_irqsave(&state->slock, flags);
+ 
+-	for (i--; i >= 0; i--) {
++	for (i = 0; i < num_events; ++i) {
+ 		if (state->events[i].counter > 0 || state->debug)
+ 			dev_info(dev, "%s events: %d\n", state->events[i].name,
+ 				 state->events[i].counter);
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index d821661d30f38..7131156c1f2cf 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -481,7 +481,15 @@ static int rkvdec_buf_prepare(struct vb2_buffer *vb)
+ 		if (vb2_plane_size(vb, i) < sizeimage)
+ 			return -EINVAL;
+ 	}
+-	vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
++
++	/*
++	 * Buffer's bytesused must be written by the driver for CAPTURE buffers.
++	 * (For OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
++	 * it to the buffer length.)
++	 */
++	if (V4L2_TYPE_IS_CAPTURE(vq->type))
++		vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
++
+ 	return 0;
+ }
+ 
+@@ -658,7 +666,7 @@ static void rkvdec_device_run(void *priv)
+ 	if (WARN_ON(!desc))
+ 		return;
+ 
+-	ret = pm_runtime_get_sync(rkvdec->dev);
++	ret = pm_runtime_resume_and_get(rkvdec->dev);
+ 	if (ret < 0) {
+ 		rkvdec_job_finish_no_pm(ctx, VB2_BUF_STATE_ERROR);
+ 		return;
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+index ce497d0197dfc..10744fab7ceaa 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+@@ -477,8 +477,8 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
+ 				slice_params->flags);
+ 
+ 	reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT,
+-				V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT,
+-				pps->flags);
++				V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT,
++				slice_params->flags);
+ 
+ 	/* FIXME: For multi-slice support. */
+ 	reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC;
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+index b62eb8e840573..bf731caf2ed51 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+@@ -457,7 +457,13 @@ static int cedrus_buf_prepare(struct vb2_buffer *vb)
+ 	if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
+ 		return -EINVAL;
+ 
+-	vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
++	/*
++	 * Buffer's bytesused must be written by the driver for CAPTURE buffers.
++	 * (For OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
++	 * it to the buffer length.)
++	 */
++	if (V4L2_TYPE_IS_CAPTURE(vq->type))
++		vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
+index 16fc94f654865..b3d08459acc85 100644
+--- a/drivers/staging/mt7621-dts/mt7621.dtsi
++++ b/drivers/staging/mt7621-dts/mt7621.dtsi
+@@ -508,7 +508,7 @@
+ 
+ 		bus-range = <0 255>;
+ 		ranges = <
+-			0x02000000 0 0x00000000 0x60000000 0 0x10000000 /* pci memory */
++			0x02000000 0 0x60000000 0x60000000 0 0x10000000 /* pci memory */
+ 			0x01000000 0 0x00000000 0x1e160000 0 0x00010000 /* io space */
+ 		>;
+ 
+diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
+index 715f1fe8b4726..22974277afa08 100644
+--- a/drivers/staging/rtl8712/hal_init.c
++++ b/drivers/staging/rtl8712/hal_init.c
+@@ -40,7 +40,10 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
+ 		dev_err(&udev->dev, "r8712u: Firmware request failed\n");
+ 		usb_put_dev(udev);
+ 		usb_set_intfdata(usb_intf, NULL);
++		r8712_free_drv_sw(adapter);
++		adapter->dvobj_deinit(adapter);
+ 		complete(&adapter->rtl8712_fw_ready);
++		free_netdev(adapter->pnetdev);
+ 		return;
+ 	}
+ 	adapter->fw = firmware;
+diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
+index 0c3ae8495afb7..2214aca097308 100644
+--- a/drivers/staging/rtl8712/os_intfs.c
++++ b/drivers/staging/rtl8712/os_intfs.c
+@@ -328,8 +328,6 @@ int r8712_init_drv_sw(struct _adapter *padapter)
+ 
+ void r8712_free_drv_sw(struct _adapter *padapter)
+ {
+-	struct net_device *pnetdev = padapter->pnetdev;
+-
+ 	r8712_free_cmd_priv(&padapter->cmdpriv);
+ 	r8712_free_evt_priv(&padapter->evtpriv);
+ 	r8712_DeInitSwLeds(padapter);
+@@ -339,8 +337,6 @@ void r8712_free_drv_sw(struct _adapter *padapter)
+ 	_r8712_free_sta_priv(&padapter->stapriv);
+ 	_r8712_free_recv_priv(&padapter->recvpriv);
+ 	mp871xdeinit(padapter);
+-	if (pnetdev)
+-		free_netdev(pnetdev);
+ }
+ 
+ static void enable_video_mode(struct _adapter *padapter, int cbw40_value)
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index dc21e7743349c..b760bc3559373 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -361,7 +361,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
+ 	/* step 1. */
+ 	pnetdev = r8712_init_netdev();
+ 	if (!pnetdev)
+-		goto error;
++		goto put_dev;
+ 	padapter = netdev_priv(pnetdev);
+ 	disable_ht_for_spec_devid(pdid, padapter);
+ 	pdvobjpriv = &padapter->dvobjpriv;
+@@ -381,16 +381,16 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
+ 	 * initialize the dvobj_priv
+ 	 */
+ 	if (!padapter->dvobj_init) {
+-		goto error;
++		goto put_dev;
+ 	} else {
+ 		status = padapter->dvobj_init(padapter);
+ 		if (status != _SUCCESS)
+-			goto error;
++			goto free_netdev;
+ 	}
+ 	/* step 4. */
+ 	status = r8712_init_drv_sw(padapter);
+ 	if (status)
+-		goto error;
++		goto dvobj_deinit;
+ 	/* step 5. read efuse/eeprom data and get mac_addr */
+ 	{
+ 		int i, offset;
+@@ -570,17 +570,20 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
+ 	}
+ 	/* step 6. Load the firmware asynchronously */
+ 	if (rtl871x_load_fw(padapter))
+-		goto error;
++		goto deinit_drv_sw;
+ 	spin_lock_init(&padapter->lock_rx_ff0_filter);
+ 	mutex_init(&padapter->mutex_start);
+ 	return 0;
+-error:
++
++deinit_drv_sw:
++	r8712_free_drv_sw(padapter);
++dvobj_deinit:
++	padapter->dvobj_deinit(padapter);
++free_netdev:
++	free_netdev(pnetdev);
++put_dev:
+ 	usb_put_dev(udev);
+ 	usb_set_intfdata(pusb_intf, NULL);
+-	if (padapter && padapter->dvobj_deinit)
+-		padapter->dvobj_deinit(padapter);
+-	if (pnetdev)
+-		free_netdev(pnetdev);
+ 	return -ENODEV;
+ }
+ 
+@@ -612,6 +615,7 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
+ 		r8712_stop_drv_timers(padapter);
+ 		r871x_dev_unload(padapter);
+ 		r8712_free_drv_sw(padapter);
++		free_netdev(pnetdev);
+ 
+ 		/* decrease the reference count of the usb device structure
+ 		 * when disconnect
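
The r871xu_drv_init() rework above replaces one catch-all error label with a label-per-resource unwind ladder, so each failure point only undoes the steps that actually completed. The idiom, as a minimal sketch with hypothetical init/deinit helpers:

static int example_init(struct foo *foo)
{
	int err;

	err = init_bar(foo);
	if (err)
		return err;		/* nothing to undo yet */

	err = init_baz(foo);
	if (err)
		goto deinit_bar;	/* undo step 1 only */

	err = init_qux(foo);
	if (err)
		goto deinit_baz;	/* undo step 2, then step 1 */

	return 0;

deinit_baz:
	deinit_baz(foo);
deinit_bar:
	deinit_bar(foo);
	return err;
}
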
+diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+index 9097bcbd67d82..d697ea55a0da1 100644
+--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
++++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+@@ -1862,7 +1862,7 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
+ 	int status;
+ 	int err = -ENODEV;
+ 	struct vchiq_mmal_instance *instance;
+-	static struct vchiq_instance *vchiq_instance;
++	struct vchiq_instance *vchiq_instance;
+ 	struct vchiq_service_params_kernel params = {
+ 		.version		= VC_MMAL_VER,
+ 		.version_min		= VC_MMAL_MIN_VER,
+diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+index af35251232eb3..b044999ad002b 100644
+--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
++++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+@@ -265,12 +265,13 @@ void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ 	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+ 
+ 	if (ccmd->release) {
+-		struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+-
+-		if (ttinfo->sgl) {
++		if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
++			put_page(sg_page(&ccmd->sg));
++		} else {
+ 			struct cxgbit_sock *csk = conn->context;
+ 			struct cxgbit_device *cdev = csk->com.cdev;
+ 			struct cxgbi_ppm *ppm = cdev2ppm(cdev);
++			struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+ 
+ 			/* Abort the TCP conn if DDP is not complete to
+ 			 * avoid any possibility of DDP after freeing
+@@ -280,14 +281,14 @@ void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ 				     cmd->se_cmd.data_length))
+ 				cxgbit_abort_conn(csk);
+ 
++			if (unlikely(ttinfo->sgl)) {
++				dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
++					     ttinfo->nents, DMA_FROM_DEVICE);
++				ttinfo->nents = 0;
++				ttinfo->sgl = NULL;
++			}
+ 			cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+-
+-			dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
+-				     ttinfo->nents, DMA_FROM_DEVICE);
+-		} else {
+-			put_page(sg_page(&ccmd->sg));
+ 		}
+-
+ 		ccmd->release = false;
+ 	}
+ }
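
The release path above now treats the scatterlist as optional state: unmap only if still mapped, then clear the pointers so a later pass over the same command (the data-out change in the next file adds such an early unmap) is a harmless no-op. The pattern in isolation:

	if (ttinfo->sgl) {
		dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
			     DMA_FROM_DEVICE);
		ttinfo->nents = 0;
		ttinfo->sgl = NULL;	/* makes the cleanup idempotent */
	}
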
+diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
+index b926e1d6c7b8e..282297ffc4044 100644
+--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
++++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
+@@ -997,17 +997,18 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
+ 	struct scatterlist *sg_start;
+ 	struct iscsi_conn *conn = csk->conn;
+ 	struct iscsi_cmd *cmd = NULL;
++	struct cxgbit_cmd *ccmd;
++	struct cxgbi_task_tag_info *ttinfo;
+ 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ 	struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
+ 	u32 data_offset = be32_to_cpu(hdr->offset);
+-	u32 data_len = pdu_cb->dlen;
++	u32 data_len = ntoh24(hdr->dlength);
+ 	int rc, sg_nents, sg_off;
+ 	bool dcrc_err = false;
+ 
+ 	if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
+ 		u32 offset = be32_to_cpu(hdr->offset);
+ 		u32 ddp_data_len;
+-		u32 payload_length = ntoh24(hdr->dlength);
+ 		bool success = false;
+ 
+ 		cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
+@@ -1022,7 +1023,7 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
+ 		cmd->data_sn = be32_to_cpu(hdr->datasn);
+ 
+ 		rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
+-						cmd, payload_length, &success);
++						cmd, data_len, &success);
+ 		if (rc < 0)
+ 			return rc;
+ 		else if (!success)
+@@ -1060,6 +1061,20 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
+ 		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
+ 	}
+ 
++	ccmd = iscsit_priv_cmd(cmd);
++	ttinfo = &ccmd->ttinfo;
++
++	if (ccmd->release && ttinfo->sgl &&
++	    (cmd->se_cmd.data_length ==	(cmd->write_data_done + data_len))) {
++		struct cxgbit_device *cdev = csk->com.cdev;
++		struct cxgbi_ppm *ppm = cdev2ppm(cdev);
++
++		dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
++			     DMA_FROM_DEVICE);
++		ttinfo->nents = 0;
++		ttinfo->sgl = NULL;
++	}
++
+ check_payload:
+ 
+ 	rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
+diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
+index 6956581ed7a4f..b8ded3aef371e 100644
+--- a/drivers/thermal/cpufreq_cooling.c
++++ b/drivers/thermal/cpufreq_cooling.c
+@@ -487,7 +487,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
+ 	ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
+ 	if (ret >= 0) {
+ 		cpufreq_cdev->cpufreq_state = state;
+-		cpus = cpufreq_cdev->policy->cpus;
++		cpus = cpufreq_cdev->policy->related_cpus;
+ 		max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
+ 		capacity = frequency * max_capacity;
+ 		capacity /= cpufreq_cdev->policy->cpuinfo.max_freq;
+diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
+index 464c2d37b992e..e254f8c37cb73 100644
+--- a/drivers/thunderbolt/test.c
++++ b/drivers/thunderbolt/test.c
+@@ -259,14 +259,14 @@ static struct tb_switch *alloc_dev_default(struct kunit *test,
+ 	if (port->dual_link_port && upstream_port->dual_link_port) {
+ 		port->dual_link_port->remote = upstream_port->dual_link_port;
+ 		upstream_port->dual_link_port->remote = port->dual_link_port;
+-	}
+ 
+-	if (bonded) {
+-		/* Bonding is used */
+-		port->bonded = true;
+-		port->dual_link_port->bonded = true;
+-		upstream_port->bonded = true;
+-		upstream_port->dual_link_port->bonded = true;
++		if (bonded) {
++			/* Bonding is used */
++			port->bonded = true;
++			port->dual_link_port->bonded = true;
++			upstream_port->bonded = true;
++			upstream_port->dual_link_port->bonded = true;
++		}
+ 	}
+ 
+ 	return sw;
+diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
+index 861e950431919..1076f884d9f9e 100644
+--- a/drivers/tty/nozomi.c
++++ b/drivers/tty/nozomi.c
+@@ -1391,7 +1391,7 @@ static int nozomi_card_init(struct pci_dev *pdev,
+ 			NOZOMI_NAME, dc);
+ 	if (unlikely(ret)) {
+ 		dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
+-		goto err_free_kfifo;
++		goto err_free_all_kfifo;
+ 	}
+ 
+ 	DBG1("base_addr: %p", dc->base_addr);
+@@ -1429,12 +1429,15 @@ static int nozomi_card_init(struct pci_dev *pdev,
+ 	return 0;
+ 
+ err_free_tty:
+-	for (i = 0; i < MAX_PORT; ++i) {
++	for (i--; i >= 0; i--) {
+ 		tty_unregister_device(ntty_driver, dc->index_start + i);
+ 		tty_port_destroy(&dc->port[i].port);
+ 	}
++	free_irq(pdev->irq, dc);
++err_free_all_kfifo:
++	i = MAX_PORT;
+ err_free_kfifo:
+-	for (i = 0; i < MAX_PORT; i++)
++	for (i--; i >= PORT_MDM; i--)
+ 		kfifo_free(&dc->port[i].fifo_ul);
+ err_free_sbuf:
+ 	kfree(dc->send_buf);
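
The nozomi error paths above are the standard partial-initialization unwind: if iteration i fails, only items 0..i-1 exist, so cleanup walks backwards from i-1 (which is why i must be signed). A generic sketch with hypothetical register/unregister helpers:

	int i, ret;

	for (i = 0; i < N; i++) {
		ret = register_item(i);
		if (ret)
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* item i failed to register; undo items i-1 .. 0 in reverse */
	for (i--; i >= 0; i--)
		unregister_item(i);
	return ret;
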
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 23e0decde33eb..c37468887fd2a 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -43,6 +43,7 @@
+ #define UART_ERRATA_CLOCK_DISABLE	(1 << 3)
+ #define	UART_HAS_EFR2			BIT(4)
+ #define UART_HAS_RHR_IT_DIS		BIT(5)
++#define UART_RX_TIMEOUT_QUIRK		BIT(6)
+ 
+ #define OMAP_UART_FCR_RX_TRIG		6
+ #define OMAP_UART_FCR_TX_TRIG		4
+@@ -104,6 +105,9 @@
+ #define UART_OMAP_EFR2			0x23
+ #define UART_OMAP_EFR2_TIMEOUT_BEHAVE	BIT(6)
+ 
++/* RX FIFO occupancy indicator */
++#define UART_OMAP_RX_LVL		0x64
++
+ struct omap8250_priv {
+ 	int line;
+ 	u8 habit;
+@@ -611,6 +615,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port);
+ static irqreturn_t omap8250_irq(int irq, void *dev_id)
+ {
+ 	struct uart_port *port = dev_id;
++	struct omap8250_priv *priv = port->private_data;
+ 	struct uart_8250_port *up = up_to_u8250p(port);
+ 	unsigned int iir;
+ 	int ret;
+@@ -625,6 +630,18 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
+ 	serial8250_rpm_get(up);
+ 	iir = serial_port_in(port, UART_IIR);
+ 	ret = serial8250_handle_irq(port, iir);
++
++	/*
++	 * On K3 SoCs, RX TIMEOUT is observed to be signalled after the
++	 * FIFO has been drained, in which case a dummy read of the RX FIFO
++	 * is required to clear the RX TIMEOUT condition.
++	 */
++	if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
++	    (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
++	    serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
++		serial_port_in(port, UART_RX);
++	}
++
+ 	serial8250_rpm_put(up);
+ 
+ 	return IRQ_RETVAL(ret);
+@@ -813,7 +830,7 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
+ 			       poll_count--)
+ 				cpu_relax();
+ 
+-			if (!poll_count)
++			if (poll_count == -1)
+ 				dev_err(p->port.dev, "teardown incomplete\n");
+ 		}
+ 	}
+@@ -1218,7 +1235,8 @@ static struct omap8250_dma_params am33xx_dma = {
+ 
+ static struct omap8250_platdata am654_platdata = {
+ 	.dma_params	= &am654_dma,
+-	.habit		= UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS,
++	.habit		= UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS |
++			  UART_RX_TIMEOUT_QUIRK,
+ };
+ 
+ static struct omap8250_platdata am33xx_platdata = {
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 6e141429c9808..6d9c494bed7d2 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2635,6 +2635,21 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
+ 					     struct ktermios *old)
+ {
+ 	unsigned int tolerance = port->uartclk / 100;
++	unsigned int min;
++	unsigned int max;
++
++	/*
++	 * Handle magic divisors for baud rates above baud_base on SMSC
++	 * Super I/O chips.  Enable custom rates of clk/4 and clk/8, but
++	 * disable divisor values beyond 32767, which are unavailable.
++	 */
++	if (port->flags & UPF_MAGIC_MULTIPLIER) {
++		min = port->uartclk / 16 / UART_DIV_MAX >> 1;
++		max = (port->uartclk + tolerance) / 4;
++	} else {
++		min = port->uartclk / 16 / UART_DIV_MAX;
++		max = (port->uartclk + tolerance) / 16;
++	}
+ 
+ 	/*
+ 	 * Ask the core to calculate the divisor for us.
+@@ -2642,9 +2657,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
+ 	 * slower than nominal still match standard baud rates without
+ 	 * causing transmission errors.
+ 	 */
+-	return uart_get_baud_rate(port, termios, old,
+-				  port->uartclk / 16 / UART_DIV_MAX,
+-				  (port->uartclk + tolerance) / 16);
++	return uart_get_baud_rate(port, termios, old, min, max);
+ }
+ 
+ /*
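
The UPF_MAGIC_MULTIPLIER branch widens the advertised baud range because the SMSC magic divisors select clk/4 and clk/8 instead of the usual clk/16. A worked example, assuming the common 1.8432 MHz clock (an illustration, not a value from the patch):

	/*
	 * uartclk = 1843200, tolerance = uartclk / 100 = 18432
	 *
	 * default x16 scheme:
	 *   max = (1843200 + 18432) / 16 = 116352   (covers 115200)
	 * UPF_MAGIC_MULTIPLIER (clk/4 mode):
	 *   max = (1843200 + 18432) / 4  = 465408   (covers 460800)
	 */
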
+diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
+index 35ff6627c61be..1cc749903d127 100644
+--- a/drivers/tty/serial/8250/serial_cs.c
++++ b/drivers/tty/serial/8250/serial_cs.c
+@@ -777,6 +777,7 @@ static const struct pcmcia_device_id serial_ids[] = {
+ 	PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT2834LT", 0x5f73be51, 0x4cd7c09e),
+ 	PCMCIA_DEVICE_PROD_ID12("OEM      ", "C288MX     ", 0xb572d360, 0xd2385b7a),
+ 	PCMCIA_DEVICE_PROD_ID12("Option International", "V34bis GSM/PSTN Data/Fax Modem", 0x9d7cd6f5, 0x5cb8bf41),
++	PCMCIA_DEVICE_PROD_ID12("Option International", "GSM-Ready 56K/ISDN", 0x9d7cd6f5, 0xb23844aa),
+ 	PCMCIA_DEVICE_PROD_ID12("PCMCIA   ", "C336MX     ", 0x99bcafe9, 0xaa25bcab),
+ 	PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "PCMCIA Dual RS-232 Serial Port Card", 0xc4420b35, 0x92abc92f),
+ 	PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "Dual RS-232 Serial Port PC Card", 0xc4420b35, 0x031a380d),
+@@ -804,7 +805,6 @@ static const struct pcmcia_device_id serial_ids[] = {
+ 	PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
+ 	PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
+ 	PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
+-	PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"),
+ 	PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100  1.00.", 0x19ca78af, 0xf964f42b),
+ 	PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100", 0x19ca78af, 0x71d98e83),
+ 	PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232  1.00.", 0x19ca78af, 0x69fb7490),
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 794035041744f..9c78e43e669d7 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1408,17 +1408,7 @@ static unsigned int lpuart_get_mctrl(struct uart_port *port)
+ 
+ static unsigned int lpuart32_get_mctrl(struct uart_port *port)
+ {
+-	unsigned int temp = 0;
+-	unsigned long reg;
+-
+-	reg = lpuart32_read(port, UARTMODIR);
+-	if (reg & UARTMODIR_TXCTSE)
+-		temp |= TIOCM_CTS;
+-
+-	if (reg & UARTMODIR_RXRTSE)
+-		temp |= TIOCM_RTS;
+-
+-	return temp;
++	return 0;
+ }
+ 
+ static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+@@ -1625,7 +1615,7 @@ static void lpuart_rx_dma_startup(struct lpuart_port *sport)
+ 	sport->lpuart_dma_rx_use = true;
+ 	rx_dma_timer_init(sport);
+ 
+-	if (sport->port.has_sysrq) {
++	if (sport->port.has_sysrq && !lpuart_is_32(sport)) {
+ 		cr3 = readb(sport->port.membase + UARTCR3);
+ 		cr3 |= UARTCR3_FEIE;
+ 		writeb(cr3, sport->port.membase + UARTCR3);
+diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
+index 51b0ecabf2ec9..1e26220c78527 100644
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -445,12 +445,11 @@ static void mvebu_uart_shutdown(struct uart_port *port)
+ 
+ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
+ {
+-	struct mvebu_uart *mvuart = to_mvuart(port);
+ 	unsigned int d_divisor, m_divisor;
+ 	u32 brdv, osamp;
+ 
+-	if (IS_ERR(mvuart->clk))
+-		return -PTR_ERR(mvuart->clk);
++	if (!port->uartclk)
++		return -EOPNOTSUPP;
+ 
+ 	/*
+ 	 * The baudrate is derived from the UART clock thanks to two divisors:
+@@ -463,7 +462,7 @@ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
+ 	 * makes use of D to configure the desired baudrate.
+ 	 */
+ 	m_divisor = OSAMP_DEFAULT_DIVISOR;
+-	d_divisor = DIV_ROUND_UP(port->uartclk, baud * m_divisor);
++	d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
+ 
+ 	brdv = readl(port->membase + UART_BRDV);
+ 	brdv &= ~BRDV_BAUD_MASK;
+@@ -482,7 +481,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
+ 				   struct ktermios *old)
+ {
+ 	unsigned long flags;
+-	unsigned int baud;
++	unsigned int baud, min_baud, max_baud;
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+ 
+@@ -501,16 +500,21 @@ static void mvebu_uart_set_termios(struct uart_port *port,
+ 		port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;
+ 
+ 	/*
++	 * The maximal divisor is 1023 * 16 when using the default (x16) scheme.
+ 	 * Maximum achievable frequency with simple baudrate divisor is 230400.
+ 	 * Since the error per bit frame would be of more than 15%, achieving
+ 	 * higher frequencies would require to implement the fractional divisor
+ 	 * feature.
+ 	 */
+-	baud = uart_get_baud_rate(port, termios, old, 0, 230400);
++	min_baud = DIV_ROUND_UP(port->uartclk, 1023 * 16);
++	max_baud = 230400;
++
++	baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
+ 	if (mvebu_uart_baud_rate_set(port, baud)) {
+ 		/* No clock available, baudrate cannot be changed */
+ 		if (old)
+-			baud = uart_get_baud_rate(port, old, NULL, 0, 230400);
++			baud = uart_get_baud_rate(port, old, NULL,
++						  min_baud, max_baud);
+ 	} else {
+ 		tty_termios_encode_baud_rate(termios, baud, baud);
+ 		uart_update_timeout(port, termios->c_cflag, baud);
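
The new lower bound follows directly from the 10-bit divisor: with the default x16 oversampling, the largest divisor is 1023, so rates below uartclk / (1023 * 16) are unreachable and must be clamped rather than silently wrapped. For example, assuming a 25 MHz UART clock (an assumption for illustration):

	/*
	 * min_baud = DIV_ROUND_UP(25000000, 1023 * 16)
	 *          = DIV_ROUND_UP(25000000, 16368)
	 *          = 1528
	 */
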
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 3b1aaa93d750e..70898a999a498 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -610,6 +610,14 @@ static void sci_stop_tx(struct uart_port *port)
+ 	ctrl &= ~SCSCR_TIE;
+ 
+ 	serial_port_out(port, SCSCR, ctrl);
++
++#ifdef CONFIG_SERIAL_SH_SCI_DMA
++	if (to_sci_port(port)->chan_tx &&
++	    !dma_submit_error(to_sci_port(port)->cookie_tx)) {
++		dmaengine_terminate_async(to_sci_port(port)->chan_tx);
++		to_sci_port(port)->cookie_tx = -EINVAL;
++	}
++#endif
+ }
+ 
+ static void sci_start_rx(struct uart_port *port)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index c103961c3fae9..68a282ceb4345 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1951,6 +1951,11 @@ static const struct usb_device_id acm_ids[] = {
+ 	.driver_info = IGNORE_DEVICE,
+ 	},
+ 
++	/* Exclude Heimann Sensor GmbH USB appset demo */
++	{ USB_DEVICE(0x32a7, 0x0000),
++	.driver_info = IGNORE_DEVICE,
++	},
++
+ 	/* control interfaces without any protocol set */
+ 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ 		USB_CDC_PROTO_NONE) },
+diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
+index fec17a2d2447d..15911ac7582b4 100644
+--- a/drivers/usb/dwc2/core.c
++++ b/drivers/usb/dwc2/core.c
+@@ -1167,15 +1167,6 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+ 		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
+ 		if (hsotg->params.phy_utmi_width == 16)
+ 			usbcfg |= GUSBCFG_PHYIF16;
+-
+-		/* Set turnaround time */
+-		if (dwc2_is_device_mode(hsotg)) {
+-			usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
+-			if (hsotg->params.phy_utmi_width == 16)
+-				usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
+-			else
+-				usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
+-		}
+ 		break;
+ 	default:
+ 		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
+@@ -1197,6 +1188,24 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+ 	return retval;
+ }
+ 
++static void dwc2_set_turnaround_time(struct dwc2_hsotg *hsotg)
++{
++	u32 usbcfg;
++
++	if (hsotg->params.phy_type != DWC2_PHY_TYPE_PARAM_UTMI)
++		return;
++
++	usbcfg = dwc2_readl(hsotg, GUSBCFG);
++
++	usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
++	if (hsotg->params.phy_utmi_width == 16)
++		usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
++	else
++		usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
++
++	dwc2_writel(hsotg, usbcfg, GUSBCFG);
++}
++
+ int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+ {
+ 	u32 usbcfg;
+@@ -1214,6 +1223,9 @@ int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+ 		retval = dwc2_hs_phy_init(hsotg, select_phy);
+ 		if (retval)
+ 			return retval;
++
++		if (dwc2_is_device_mode(hsotg))
++			dwc2_set_turnaround_time(hsotg);
+ 	}
+ 
+ 	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 0022039bc2355..8e740c7623e40 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1605,17 +1605,18 @@ static int dwc3_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	dwc3_check_params(dwc);
++	dwc3_debugfs_init(dwc);
+ 
+ 	ret = dwc3_core_init_mode(dwc);
+ 	if (ret)
+ 		goto err5;
+ 
+-	dwc3_debugfs_init(dwc);
+ 	pm_runtime_put(dev);
+ 
+ 	return 0;
+ 
+ err5:
++	dwc3_debugfs_exit(dwc);
+ 	dwc3_event_buffers_cleanup(dwc);
+ 
+ 	usb_phy_shutdown(dwc->usb2_phy);
+diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
+index 2cd9942707b46..5d38f29bda720 100644
+--- a/drivers/usb/gadget/function/f_eem.c
++++ b/drivers/usb/gadget/function/f_eem.c
+@@ -30,6 +30,11 @@ struct f_eem {
+ 	u8				ctrl_id;
+ };
+ 
++struct in_context {
++	struct sk_buff	*skb;
++	struct usb_ep	*ep;
++};
++
+ static inline struct f_eem *func_to_eem(struct usb_function *f)
+ {
+ 	return container_of(f, struct f_eem, port.func);
+@@ -320,9 +325,12 @@ fail:
+ 
+ static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+ {
+-	struct sk_buff *skb = (struct sk_buff *)req->context;
++	struct in_context *ctx = req->context;
+ 
+-	dev_kfree_skb_any(skb);
++	dev_kfree_skb_any(ctx->skb);
++	kfree(req->buf);
++	usb_ep_free_request(ctx->ep, req);
++	kfree(ctx);
+ }
+ 
+ /*
+@@ -410,7 +418,9 @@ static int eem_unwrap(struct gether *port,
+ 		 * b15:		bmType (0 == data, 1 == command)
+ 		 */
+ 		if (header & BIT(15)) {
+-			struct usb_request	*req = cdev->req;
++			struct usb_request	*req;
++			struct in_context	*ctx;
++			struct usb_ep		*ep;
+ 			u16			bmEEMCmd;
+ 
+ 			/* EEM command packet format:
+@@ -439,11 +449,36 @@ static int eem_unwrap(struct gether *port,
+ 				skb_trim(skb2, len);
+ 				put_unaligned_le16(BIT(15) | BIT(11) | len,
+ 							skb_push(skb2, 2));
++
++				ep = port->in_ep;
++				req = usb_ep_alloc_request(ep, GFP_ATOMIC);
++				if (!req) {
++					dev_kfree_skb_any(skb2);
++					goto next;
++				}
++
++				req->buf = kmalloc(skb2->len, GFP_KERNEL);
++				if (!req->buf) {
++					usb_ep_free_request(ep, req);
++					dev_kfree_skb_any(skb2);
++					goto next;
++				}
++
++				ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
++				if (!ctx) {
++					kfree(req->buf);
++					usb_ep_free_request(ep, req);
++					dev_kfree_skb_any(skb2);
++					goto next;
++				}
++				ctx->skb = skb2;
++				ctx->ep = ep;
++
+ 				skb_copy_bits(skb2, 0, req->buf, skb2->len);
+ 				req->length = skb2->len;
+ 				req->complete = eem_cmd_complete;
+ 				req->zero = 1;
+-				req->context = skb2;
++				req->context = ctx;
+ 				if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
+ 					DBG(cdev, "echo response queue fail\n");
+ 				break;
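
Previously the echo response was queued on the shared cdev->req, so two in-flight EEM echo commands raced on one request. The fix gives each command its own request plus an in_context recording what the completion must release; once usb_ep_queue() succeeds, the completion handler is the sole owner. Its shape, restated as a minimal sketch:

static void example_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct in_context *ctx = req->context;

	/* release everything that was queued along with this request */
	dev_kfree_skb_any(ctx->skb);
	kfree(req->buf);
	usb_ep_free_request(ctx->ep, req);
	kfree(ctx);
}
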
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index f29abc7867d59..366509e89b984 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -250,8 +250,8 @@ EXPORT_SYMBOL_GPL(ffs_lock);
+ static struct ffs_dev *_ffs_find_dev(const char *name);
+ static struct ffs_dev *_ffs_alloc_dev(void);
+ static void _ffs_free_dev(struct ffs_dev *dev);
+-static void *ffs_acquire_dev(const char *dev_name);
+-static void ffs_release_dev(struct ffs_data *ffs_data);
++static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data);
++static void ffs_release_dev(struct ffs_dev *ffs_dev);
+ static int ffs_ready(struct ffs_data *ffs);
+ static void ffs_closed(struct ffs_data *ffs);
+ 
+@@ -1554,8 +1554,8 @@ unmapped_value:
+ static int ffs_fs_get_tree(struct fs_context *fc)
+ {
+ 	struct ffs_sb_fill_data *ctx = fc->fs_private;
+-	void *ffs_dev;
+ 	struct ffs_data	*ffs;
++	int ret;
+ 
+ 	ENTER();
+ 
+@@ -1574,13 +1574,12 @@ static int ffs_fs_get_tree(struct fs_context *fc)
+ 		return -ENOMEM;
+ 	}
+ 
+-	ffs_dev = ffs_acquire_dev(ffs->dev_name);
+-	if (IS_ERR(ffs_dev)) {
++	ret = ffs_acquire_dev(ffs->dev_name, ffs);
++	if (ret) {
+ 		ffs_data_put(ffs);
+-		return PTR_ERR(ffs_dev);
++		return ret;
+ 	}
+ 
+-	ffs->private_data = ffs_dev;
+ 	ctx->ffs_data = ffs;
+ 	return get_tree_nodev(fc, ffs_sb_fill);
+ }
+@@ -1591,7 +1590,6 @@ static void ffs_fs_free_fc(struct fs_context *fc)
+ 
+ 	if (ctx) {
+ 		if (ctx->ffs_data) {
+-			ffs_release_dev(ctx->ffs_data);
+ 			ffs_data_put(ctx->ffs_data);
+ 		}
+ 
+@@ -1630,10 +1628,8 @@ ffs_fs_kill_sb(struct super_block *sb)
+ 	ENTER();
+ 
+ 	kill_litter_super(sb);
+-	if (sb->s_fs_info) {
+-		ffs_release_dev(sb->s_fs_info);
++	if (sb->s_fs_info)
+ 		ffs_data_closed(sb->s_fs_info);
+-	}
+ }
+ 
+ static struct file_system_type ffs_fs_type = {
+@@ -1703,6 +1699,7 @@ static void ffs_data_put(struct ffs_data *ffs)
+ 	if (refcount_dec_and_test(&ffs->ref)) {
+ 		pr_info("%s(): freeing\n", __func__);
+ 		ffs_data_clear(ffs);
++		ffs_release_dev(ffs->private_data);
+ 		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
+ 		       swait_active(&ffs->ep0req_completion.wait) ||
+ 		       waitqueue_active(&ffs->wait));
+@@ -3032,6 +3029,7 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
+ 	struct ffs_function *func = ffs_func_from_usb(f);
+ 	struct f_fs_opts *ffs_opts =
+ 		container_of(f->fi, struct f_fs_opts, func_inst);
++	struct ffs_data *ffs_data;
+ 	int ret;
+ 
+ 	ENTER();
+@@ -3046,12 +3044,13 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
+ 	if (!ffs_opts->no_configfs)
+ 		ffs_dev_lock();
+ 	ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
+-	func->ffs = ffs_opts->dev->ffs_data;
++	ffs_data = ffs_opts->dev->ffs_data;
+ 	if (!ffs_opts->no_configfs)
+ 		ffs_dev_unlock();
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
++	func->ffs = ffs_data;
+ 	func->conf = c;
+ 	func->gadget = c->cdev->gadget;
+ 
+@@ -3506,6 +3505,7 @@ static void ffs_free_inst(struct usb_function_instance *f)
+ 	struct f_fs_opts *opts;
+ 
+ 	opts = to_f_fs_opts(f);
++	ffs_release_dev(opts->dev);
+ 	ffs_dev_lock();
+ 	_ffs_free_dev(opts->dev);
+ 	ffs_dev_unlock();
+@@ -3693,47 +3693,48 @@ static void _ffs_free_dev(struct ffs_dev *dev)
+ {
+ 	list_del(&dev->entry);
+ 
+-	/* Clear the private_data pointer to stop incorrect dev access */
+-	if (dev->ffs_data)
+-		dev->ffs_data->private_data = NULL;
+-
+ 	kfree(dev);
+ 	if (list_empty(&ffs_devices))
+ 		functionfs_cleanup();
+ }
+ 
+-static void *ffs_acquire_dev(const char *dev_name)
++static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data)
+ {
++	int ret = 0;
+ 	struct ffs_dev *ffs_dev;
+ 
+ 	ENTER();
+ 	ffs_dev_lock();
+ 
+ 	ffs_dev = _ffs_find_dev(dev_name);
+-	if (!ffs_dev)
+-		ffs_dev = ERR_PTR(-ENOENT);
+-	else if (ffs_dev->mounted)
+-		ffs_dev = ERR_PTR(-EBUSY);
+-	else if (ffs_dev->ffs_acquire_dev_callback &&
+-	    ffs_dev->ffs_acquire_dev_callback(ffs_dev))
+-		ffs_dev = ERR_PTR(-ENOENT);
+-	else
++	if (!ffs_dev) {
++		ret = -ENOENT;
++	} else if (ffs_dev->mounted) {
++		ret = -EBUSY;
++	} else if (ffs_dev->ffs_acquire_dev_callback &&
++		   ffs_dev->ffs_acquire_dev_callback(ffs_dev)) {
++		ret = -ENOENT;
++	} else {
+ 		ffs_dev->mounted = true;
++		ffs_dev->ffs_data = ffs_data;
++		ffs_data->private_data = ffs_dev;
++	}
+ 
+ 	ffs_dev_unlock();
+-	return ffs_dev;
++	return ret;
+ }
+ 
+-static void ffs_release_dev(struct ffs_data *ffs_data)
++static void ffs_release_dev(struct ffs_dev *ffs_dev)
+ {
+-	struct ffs_dev *ffs_dev;
+-
+ 	ENTER();
+ 	ffs_dev_lock();
+ 
+-	ffs_dev = ffs_data->private_data;
+-	if (ffs_dev) {
++	if (ffs_dev && ffs_dev->mounted) {
+ 		ffs_dev->mounted = false;
++		if (ffs_dev->ffs_data) {
++			ffs_dev->ffs_data->private_data = NULL;
++			ffs_dev->ffs_data = NULL;
++		}
+ 
+ 		if (ffs_dev->ffs_release_dev_callback)
+ 			ffs_dev->ffs_release_dev_callback(ffs_dev);
+@@ -3761,7 +3762,6 @@ static int ffs_ready(struct ffs_data *ffs)
+ 	}
+ 
+ 	ffs_obj->desc_ready = true;
+-	ffs_obj->ffs_data = ffs;
+ 
+ 	if (ffs_obj->ffs_ready_callback) {
+ 		ret = ffs_obj->ffs_ready_callback(ffs);
+@@ -3789,7 +3789,6 @@ static void ffs_closed(struct ffs_data *ffs)
+ 		goto done;
+ 
+ 	ffs_obj->desc_ready = false;
+-	ffs_obj->ffs_data = NULL;
+ 
+ 	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
+ 	    ffs_obj->ffs_closed_callback)
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 717c122f94490..5143e63bcbca8 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1924,6 +1924,7 @@ no_bw:
+ 	xhci->hw_ports = NULL;
+ 	xhci->rh_bw = NULL;
+ 	xhci->ext_caps = NULL;
++	xhci->port_caps = NULL;
+ 
+ 	xhci->page_size = 0;
+ 	xhci->page_shift = 0;
+diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
+index f97ac9f52bf4d..431213cdf9e0e 100644
+--- a/drivers/usb/host/xhci-pci-renesas.c
++++ b/drivers/usb/host/xhci-pci-renesas.c
+@@ -207,7 +207,8 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
+ 			return 0;
+ 
+ 		case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
+-			return 0;
++			dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
++			break;
+ 
+ 		case RENESAS_ROM_STATUS_ERROR: /* Error State */
+ 		default: /* All other states are marked as "Reserved states" */
+@@ -224,13 +225,12 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
+ 	u8 fw_state;
+ 	int err;
+ 
+-	/* Check if device has ROM and loaded, if so skip everything */
+-	err = renesas_check_rom(pdev);
+-	if (err) { /* we have rom */
+-		err = renesas_check_rom_state(pdev);
+-		if (!err)
+-			return err;
+-	}
++	/*
++	 * Only if the device has a ROM and loaded FW can we skip loading and
++	 * return success. Otherwise (even in an unknown state), attempt to load FW.
++	 */
++	if (renesas_check_rom(pdev) && !renesas_check_rom_state(pdev))
++		return 0;
+ 
+ 	/*
+ 	 * Test if the device is actually needing the firmware. As most
+diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
+index a48452a6172b6..c0f432d509aab 100644
+--- a/drivers/usb/phy/phy-tegra-usb.c
++++ b/drivers/usb/phy/phy-tegra-usb.c
+@@ -58,12 +58,12 @@
+ #define   USB_WAKEUP_DEBOUNCE_COUNT(x)		(((x) & 0x7) << 16)
+ 
+ #define USB_PHY_VBUS_SENSORS			0x404
+-#define   B_SESS_VLD_WAKEUP_EN			BIT(6)
+-#define   B_VBUS_VLD_WAKEUP_EN			BIT(14)
++#define   B_SESS_VLD_WAKEUP_EN			BIT(14)
+ #define   A_SESS_VLD_WAKEUP_EN			BIT(22)
+ #define   A_VBUS_VLD_WAKEUP_EN			BIT(30)
+ 
+ #define USB_PHY_VBUS_WAKEUP_ID			0x408
++#define   VBUS_WAKEUP_STS			BIT(10)
+ #define   VBUS_WAKEUP_WAKEUP_EN			BIT(30)
+ 
+ #define USB1_LEGACY_CTRL			0x410
+@@ -544,7 +544,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
+ 
+ 		val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
+ 		val &= ~(A_VBUS_VLD_WAKEUP_EN | A_SESS_VLD_WAKEUP_EN);
+-		val &= ~(B_VBUS_VLD_WAKEUP_EN | B_SESS_VLD_WAKEUP_EN);
++		val &= ~(B_SESS_VLD_WAKEUP_EN);
+ 		writel_relaxed(val, base + USB_PHY_VBUS_SENSORS);
+ 
+ 		val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
+@@ -642,6 +642,15 @@ static int utmi_phy_power_off(struct tegra_usb_phy *phy)
+ 	void __iomem *base = phy->regs;
+ 	u32 val;
+ 
++	/*
++	 * Give hardware time to settle down after VBUS disconnection,
++	 * otherwise PHY will immediately wake up from suspend.
++	 */
++	if (phy->wakeup_enabled && phy->mode != USB_DR_MODE_HOST)
++		readl_relaxed_poll_timeout(base + USB_PHY_VBUS_WAKEUP_ID,
++					   val, !(val & VBUS_WAKEUP_STS),
++					   5000, 100000);
++
+ 	utmi_phy_clk_disable(phy);
+ 
+ 	/* PHY won't resume if reset is asserted */
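
readl_relaxed_poll_timeout() (from linux/iopoll.h) expands to a sleep-and-retry loop, so the hunk above simply waits up to 100 ms, sampling every 5 ms, for VBUS_WAKEUP_STS to clear. A standalone sketch of the same call, with the return value checked (the patch above ignores it):

	u32 val;
	int err;

	err = readl_relaxed_poll_timeout(base + USB_PHY_VBUS_WAKEUP_ID, val,
					 !(val & VBUS_WAKEUP_STS),
					 5000, 100000);
	if (err == -ETIMEDOUT)
		pr_warn("VBUS wakeup status did not clear\n");
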
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index 45f0bf65e9aba..ae0bfb8b9c71c 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -572,8 +572,10 @@ typec_register_altmode(struct device *parent,
+ 	int ret;
+ 
+ 	alt = kzalloc(sizeof(*alt), GFP_KERNEL);
+-	if (!alt)
++	if (!alt) {
++		altmode_id_remove(parent, id);
+ 		return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	alt->adev.svid = desc->svid;
+ 	alt->adev.mode = desc->mode;
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index 027afd7dfdce2..91a507eb514f1 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -21,8 +21,12 @@
+ #define	PD_RETRY_COUNT_DEFAULT			3
+ #define	PD_RETRY_COUNT_3_0_OR_HIGHER		2
+ #define	AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV	3500
+-#define	AUTO_DISCHARGE_PD_HEADROOM_MV		850
+-#define	AUTO_DISCHARGE_PPS_HEADROOM_MV		1250
++#define	VSINKPD_MIN_IR_DROP_MV			750
++#define	VSRC_NEW_MIN_PERCENT			95
++#define	VSRC_VALID_MIN_MV			500
++#define	VPPS_NEW_MIN_PERCENT			95
++#define	VPPS_VALID_MIN_MV			100
++#define	VSINKDISCONNECT_PD_MIN_PERCENT		90
+ 
+ #define tcpc_presenting_cc1_rd(reg) \
+ 	(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
+@@ -328,11 +332,13 @@ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum ty
+ 		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
+ 	} else if (mode == TYPEC_PWR_MODE_PD) {
+ 		if (pps_active)
+-			threshold = (95 * requested_vbus_voltage_mv / 100) -
+-				AUTO_DISCHARGE_PD_HEADROOM_MV;
++			threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
++				     VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
++				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
+ 		else
+-			threshold = (95 * requested_vbus_voltage_mv / 100) -
+-				AUTO_DISCHARGE_PPS_HEADROOM_MV;
++			threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
++				     VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
++				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
+ 	} else {
+ 		/* 3.5V for non-pd sink */
+ 		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
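
The new threshold terms are named after USB PD spec parameters: start from the lowest voltage the source may legally present, subtract the sink-side IR drop and the valid-detection floor, then keep 90% as the disconnect margin. A worked example for a 9.0 V fixed contract (illustrative numbers):

	/*
	 * vSrcNew(min)           = 95% of 9000     = 8550 mV
	 * minus vSinkPD IR drop:   8550 - 750      = 7800 mV
	 * minus vSrcValid floor:   7800 - 500      = 7300 mV
	 * times 90% margin:        7300 * 90 / 100 = 6570 mV threshold
	 */
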
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 6133c0679c273..07dee0118c27e 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2556,6 +2556,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 			} else {
+ 				next_state = SNK_WAIT_CAPABILITIES;
+ 			}
++
++			/* Threshold was relaxed before sending Request. Restore it. */
++			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
++							       port->pps_data.active,
++							       port->supply_voltage);
+ 			tcpm_set_state(port, next_state, 0);
+ 			break;
+ 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
+@@ -2569,6 +2574,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 			    port->send_discover)
+ 				port->vdm_sm_running = true;
+ 
++			/* Threshold was relaxed before sending Request. Restore it. */
++			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
++							       port->pps_data.active,
++							       port->supply_voltage);
++
+ 			tcpm_set_state(port, SNK_READY, 0);
+ 			break;
+ 		case DR_SWAP_SEND:
+@@ -3288,6 +3298,12 @@ static int tcpm_pd_send_request(struct tcpm_port *port)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	/*
++	 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
++	 * It is safer to modify the threshold here.
++	 */
++	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
++
+ 	memset(&msg, 0, sizeof(msg));
+ 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
+ 				  port->pwr_role,
+@@ -3385,6 +3401,9 @@ static int tcpm_pd_send_pps_request(struct tcpm_port *port)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	/* Relax the threshold as voltage will be adjusted right after Accept Message. */
++	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
++
+ 	memset(&msg, 0, sizeof(msg));
+ 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
+ 				  port->pwr_role,
+@@ -4161,6 +4180,10 @@ static void run_state_machine(struct tcpm_port *port)
+ 		port->hard_reset_count = 0;
+ 		ret = tcpm_pd_send_request(port);
+ 		if (ret < 0) {
++			/* Restore the original state */
++			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
++							       port->pps_data.active,
++							       port->supply_voltage);
+ 			/* Let the Source send capabilities again. */
+ 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
+ 		} else {
+@@ -4171,6 +4194,10 @@ static void run_state_machine(struct tcpm_port *port)
+ 	case SNK_NEGOTIATE_PPS_CAPABILITIES:
+ 		ret = tcpm_pd_send_pps_request(port);
+ 		if (ret < 0) {
++			/* Restore the original state */
++			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
++							       port->pps_data.active,
++							       port->supply_voltage);
+ 			port->pps_status = ret;
+ 			/*
+ 			 * If this was called due to updates to sink
+@@ -5160,6 +5187,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
+ 				tcpm_set_state(port, SNK_UNATTACHED, 0);
+ 		}
+ 		break;
++	case PR_SWAP_SNK_SRC_SINK_OFF:
++		/* Do nothing, vsafe0v is expected during transition */
++		break;
+ 	default:
+ 		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
+ 			tcpm_set_state(port, SNK_UNATTACHED, 0);
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index cb7f2dc09e9d4..94b1dc07baeeb 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -1612,6 +1612,7 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+ {
+ 	struct vm_area_struct *vma = vmf->vma;
+ 	struct vfio_pci_device *vdev = vma->vm_private_data;
++	struct vfio_pci_mmap_vma *mmap_vma;
+ 	vm_fault_t ret = VM_FAULT_NOPAGE;
+ 
+ 	mutex_lock(&vdev->vma_lock);
+@@ -1619,24 +1620,36 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+ 
+ 	if (!__vfio_pci_memory_enabled(vdev)) {
+ 		ret = VM_FAULT_SIGBUS;
+-		mutex_unlock(&vdev->vma_lock);
+ 		goto up_out;
+ 	}
+ 
+-	if (__vfio_pci_add_vma(vdev, vma)) {
+-		ret = VM_FAULT_OOM;
+-		mutex_unlock(&vdev->vma_lock);
+-		goto up_out;
++	/*
++	 * We populate the whole vma on fault, so we need to test whether
++	 * the vma has already been mapped, such as for concurrent faults
++	 * to the same vma.  io_remap_pfn_range() will trigger a BUG_ON if
++	 * we ask it to fill the same range again.
++	 */
++	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
++		if (mmap_vma->vma == vma)
++			goto up_out;
+ 	}
+ 
+-	mutex_unlock(&vdev->vma_lock);
+-
+ 	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+-			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
++			       vma->vm_end - vma->vm_start,
++			       vma->vm_page_prot)) {
+ 		ret = VM_FAULT_SIGBUS;
++		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
++		goto up_out;
++	}
++
++	if (__vfio_pci_add_vma(vdev, vma)) {
++		ret = VM_FAULT_OOM;
++		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
++	}
+ 
+ up_out:
+ 	up_read(&vdev->memory_lock);
++	mutex_unlock(&vdev->vma_lock);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
+index e88a2b0e59046..662029d6a3dc9 100644
+--- a/drivers/video/backlight/lm3630a_bl.c
++++ b/drivers/video/backlight/lm3630a_bl.c
+@@ -482,8 +482,10 @@ static int lm3630a_parse_node(struct lm3630a_chip *pchip,
+ 
+ 	device_for_each_child_node(pchip->dev, node) {
+ 		ret = lm3630a_parse_bank(pdata, node, &seen_led_sources);
+-		if (ret)
++		if (ret) {
++			fwnode_handle_put(node);
+ 			return ret;
++		}
+ 	}
+ 
+ 	return ret;
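
device_for_each_child_node() takes a reference on the node it hands to each iteration and drops it when advancing, so any early exit from the loop body leaks the current node's reference unless it is dropped explicitly; that is what the added fwnode_handle_put() restores. The pattern in general form (parse_one() is a hypothetical helper):

	struct fwnode_handle *child;
	int ret;

	device_for_each_child_node(dev, child) {
		ret = parse_one(child);
		if (ret) {
			fwnode_handle_put(child);	/* balance the held ref */
			return ret;
		}
	}
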
+diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
+index 7f8debd2da065..ad598257ab386 100644
+--- a/drivers/video/fbdev/imxfb.c
++++ b/drivers/video/fbdev/imxfb.c
+@@ -992,7 +992,7 @@ static int imxfb_probe(struct platform_device *pdev)
+ 	info->screen_buffer = dma_alloc_wc(&pdev->dev, fbi->map_size,
+ 					   &fbi->map_dma, GFP_KERNEL);
+ 	if (!info->screen_buffer) {
+-		dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
++		dev_err(&pdev->dev, "Failed to allocate video RAM\n");
+ 		ret = -ENOMEM;
+ 		goto failed_map;
+ 	}
+diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
+index cb1eb7e05f871..5668cad86e374 100644
+--- a/drivers/visorbus/visorchipset.c
++++ b/drivers/visorbus/visorchipset.c
+@@ -1561,7 +1561,7 @@ schedule_out:
+ 
+ static int visorchipset_init(struct acpi_device *acpi_device)
+ {
+-	int err = -ENODEV;
++	int err = -ENOMEM;
+ 	struct visorchannel *controlvm_channel;
+ 
+ 	chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
+@@ -1584,8 +1584,10 @@ static int visorchipset_init(struct acpi_device *acpi_device)
+ 				 "controlvm",
+ 				 sizeof(struct visor_controlvm_channel),
+ 				 VISOR_CONTROLVM_CHANNEL_VERSIONID,
+-				 VISOR_CHANNEL_SIGNATURE))
++				 VISOR_CHANNEL_SIGNATURE)) {
++		err = -ENODEV;
+ 		goto error_delete_groups;
++	}
+ 	/* if booting in a crash kernel */
+ 	if (is_kdump_kernel())
+ 		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
+diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
+index 68b95ad82126e..520a0f6a7d9e9 100644
+--- a/fs/btrfs/Kconfig
++++ b/fs/btrfs/Kconfig
+@@ -18,6 +18,8 @@ config BTRFS_FS
+ 	select RAID6_PQ
+ 	select XOR_BLOCKS
+ 	select SRCU
++	depends on !PPC_256K_PAGES	# powerpc
++	depends on !PAGE_SIZE_256KB	# hexagon
+ 
+ 	help
+ 	  Btrfs is a general purpose copy-on-write filesystem with extents,
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index f43ce82a6aedc..8f48553d861ec 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1498,7 +1498,6 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 		       trans->transid, fs_info->generation);
+ 
+ 	if (!should_cow_block(trans, root, buf)) {
+-		trans->dirty = true;
+ 		*cow_ret = buf;
+ 		return 0;
+ 	}
+@@ -2670,10 +2669,8 @@ again:
+ 			 * then we don't want to set the path blocking,
+ 			 * so we test it here
+ 			 */
+-			if (!should_cow_block(trans, root, b)) {
+-				trans->dirty = true;
++			if (!should_cow_block(trans, root, b))
+ 				goto cow_done;
+-			}
+ 
+ 			/*
+ 			 * must have write locks on this node and the
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index c1d2b67861292..55dad12a5ce04 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1025,12 +1025,10 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ 	nofs_flag = memalloc_nofs_save();
+ 	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
+ 	memalloc_nofs_restore(nofs_flag);
+-	if (ret > 0) {
+-		btrfs_release_path(path);
+-		return -ENOENT;
+-	} else if (ret < 0) {
+-		return ret;
+-	}
++	if (ret > 0)
++		ret = -ENOENT;
++	if (ret < 0)
++		goto out;
+ 
+ 	leaf = path->nodes[0];
+ 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
+@@ -1068,6 +1066,14 @@ err_out:
+ 	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
+ 	btrfs_release_delayed_inode(node);
+ 
++	/*
++	 * If we fail to update the delayed inode we need to abort the
++	 * transaction, because we could leave the inode with the improper
++	 * counts behind.
++	 */
++	if (ret && ret != -ENOENT)
++		btrfs_abort_transaction(trans, ret);
++
+ 	return ret;
+ 
+ search:
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 27c3680074814..1cde6f84f145e 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4799,7 +4799,6 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
+ 			 buf->start + buf->len - 1, GFP_NOFS);
+ 	}
+-	trans->dirty = true;
+ 	/* this returns a buffer locked for blocking */
+ 	return buf;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 3bb8ce4969f31..1b22ded1a7994 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -598,7 +598,7 @@ again:
+ 	 * inode has not been flagged as nocompress.  This flag can
+ 	 * change at any time if we discover bad compression ratios.
+ 	 */
+-	if (inode_need_compress(BTRFS_I(inode), start, end)) {
++	if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) {
+ 		WARN_ON(pages);
+ 		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
+ 		if (!pages) {
+@@ -8386,7 +8386,19 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
+ 	 */
+ 	wait_on_page_writeback(page);
+ 
+-	if (offset) {
++	/*
++	 * For subpage case, we have call sites like
++	 * btrfs_punch_hole_lock_range() which passes range not aligned to
++	 * sectorsize.
++	 * If the range doesn't cover the full page, we don't need to and
++	 * shouldn't clear page extent mapped, as page->private can still
++	 * record subpage dirty bits for other part of the range.
++	 *
++	 * For cases that invalidate the full page even when the range
++	 * doesn't cover the full page, like invalidating the last page,
++	 * we're still safe to wait for the ordered extent to finish.
++	 */
++	if (!(offset == 0 && length == PAGE_SIZE)) {
+ 		btrfs_releasepage(page, GFP_NOFS);
+ 		return;
+ 	}
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 8ae8f1732fd25..548ef38edaf5b 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -4064,6 +4064,17 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 				if (ret < 0)
+ 					goto out;
+ 			} else {
++				/*
++				 * If we previously orphanized a directory that
++				 * collided with a new reference that we already
++				 * processed, recompute the current path because
++				 * that directory may be part of the path.
++				 */
++				if (orphanized_dir) {
++					ret = refresh_ref_path(sctx, cur);
++					if (ret < 0)
++						goto out;
++				}
+ 				ret = send_unlink(sctx, cur->full_path);
+ 				if (ret < 0)
+ 					goto out;
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index f7a4ad86adee6..b3b7f3066cfac 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -273,17 +273,6 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 
+ 	WRITE_ONCE(trans->aborted, errno);
+-	/* Nothing used. The other threads that have joined this
+-	 * transaction may be able to continue. */
+-	if (!trans->dirty && list_empty(&trans->new_bgs)) {
+-		const char *errstr;
+-
+-		errstr = btrfs_decode_error(errno);
+-		btrfs_warn(fs_info,
+-		           "%s:%d: Aborting unused transaction(%s).",
+-		           function, line, errstr);
+-		return;
+-	}
+ 	WRITE_ONCE(trans->transaction->aborted, errno);
+ 	/* Wake up anybody who may be waiting on this transaction */
+ 	wake_up(&fs_info->transaction_wait);
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index 6eb1c50fa98c1..bd95446869d01 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -414,7 +414,7 @@ static ssize_t btrfs_discard_bitmap_bytes_show(struct kobject *kobj,
+ {
+ 	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
+ 
+-	return scnprintf(buf, PAGE_SIZE, "%lld\n",
++	return scnprintf(buf, PAGE_SIZE, "%llu\n",
+ 			fs_info->discard_ctl.discard_bitmap_bytes);
+ }
+ BTRFS_ATTR(discard, discard_bitmap_bytes, btrfs_discard_bitmap_bytes_show);
+@@ -436,7 +436,7 @@ static ssize_t btrfs_discard_extent_bytes_show(struct kobject *kobj,
+ {
+ 	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
+ 
+-	return scnprintf(buf, PAGE_SIZE, "%lld\n",
++	return scnprintf(buf, PAGE_SIZE, "%llu\n",
+ 			fs_info->discard_ctl.discard_extent_bytes);
+ }
+ BTRFS_ATTR(discard, discard_extent_bytes, btrfs_discard_extent_bytes_show);
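
Both discard counters here are u64, and %lld would reinterpret the same bits as signed, printing a negative number once the top bit is ever set; matching the signedness with %llu avoids that. In miniature:

	u64 bytes = 0x8000000000000000ULL;

	scnprintf(buf, PAGE_SIZE, "%lld\n", bytes);	/* -9223372036854775808 */
	scnprintf(buf, PAGE_SIZE, "%llu\n", bytes);	/*  9223372036854775808 */
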
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index d56d3e7ca3240..81c8567dffee9 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1393,8 +1393,10 @@ int btrfs_defrag_root(struct btrfs_root *root)
+ 
+ 	while (1) {
+ 		trans = btrfs_start_transaction(root, 0);
+-		if (IS_ERR(trans))
+-			return PTR_ERR(trans);
++		if (IS_ERR(trans)) {
++			ret = PTR_ERR(trans);
++			break;
++		}
+ 
+ 		ret = btrfs_defrag_leaves(trans, root);
+ 
+@@ -1461,7 +1463,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
+ 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+-		goto out;
++		return ret;
+ 	}
+ 
+ 	/*
+@@ -2054,14 +2056,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
+ 
+ 	ASSERT(refcount_read(&trans->use_count) == 1);
+ 
+-	/*
+-	 * Some places just start a transaction to commit it.  We need to make
+-	 * sure that if this commit fails that the abort code actually marks the
+-	 * transaction as failed, so set trans->dirty to make the abort code do
+-	 * the right thing.
+-	 */
+-	trans->dirty = true;
+-
+ 	/* Stop the commit early if ->aborted is set */
+ 	if (TRANS_ABORTED(cur_trans)) {
+ 		ret = cur_trans->aborted;
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 364cfbb4c5c59..c49e2266b28ba 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -143,7 +143,6 @@ struct btrfs_trans_handle {
+ 	bool allocating_chunk;
+ 	bool can_flush_pending_bgs;
+ 	bool reloc_reserved;
+-	bool dirty;
+ 	bool in_fsync;
+ 	struct btrfs_root *root;
+ 	struct btrfs_fs_info *fs_info;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 276b5511ff809..f4b0aecdaac72 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -6365,6 +6365,7 @@ next:
+ error:
+ 	if (wc.trans)
+ 		btrfs_end_transaction(wc.trans);
++	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
+ 	btrfs_free_path(path);
+ 	return ret;
+ }
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index cfff29718b840..40c53c8ea1b7d 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1140,6 +1140,10 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 		}
+ 
+ 		if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
++			btrfs_err_in_rcu(fs_info,
++	"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
++				zone.start << SECTOR_SHIFT,
++				rcu_str_deref(device->name), device->devid);
+ 			ret = -EIO;
+ 			goto out;
+ 		}
+@@ -1200,6 +1204,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 
+ 	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ 	case 0: /* single */
++		if (alloc_offsets[0] == WP_MISSING_DEV) {
++			btrfs_err(fs_info,
++			"zoned: cannot recover write pointer for zone %llu",
++				physical);
++			ret = -EIO;
++			goto out;
++		}
+ 		cache->alloc_offset = alloc_offsets[0];
+ 		break;
+ 	case BTRFS_BLOCK_GROUP_DUP:
+@@ -1217,6 +1228,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 	}
+ 
+ out:
++	if (cache->alloc_offset > fs_info->zone_size) {
++		btrfs_err(fs_info,
++			"zoned: invalid write pointer %llu in block group %llu",
++			cache->alloc_offset, cache->start);
++		ret = -EIO;
++	}
++
+ 	/* An extent is allocated after the write pointer */
+ 	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
+ 		btrfs_err(fs_info,
+diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
+index d829b8bf833e3..93b47818c6c2d 100644
+--- a/fs/cifs/cifs_swn.c
++++ b/fs/cifs/cifs_swn.c
+@@ -447,15 +447,13 @@ static int cifs_swn_store_swn_addr(const struct sockaddr_storage *new,
+ 				   const struct sockaddr_storage *old,
+ 				   struct sockaddr_storage *dst)
+ {
+-	__be16 port;
++	__be16 port = cpu_to_be16(CIFS_PORT);
+ 
+ 	if (old->ss_family == AF_INET) {
+ 		struct sockaddr_in *ipv4 = (struct sockaddr_in *)old;
+ 
+ 		port = ipv4->sin_port;
+-	}
+-
+-	if (old->ss_family == AF_INET6) {
++	} else if (old->ss_family == AF_INET6) {
+ 		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)old;
+ 
+ 		port = ipv6->sin6_port;
+@@ -465,9 +463,7 @@ static int cifs_swn_store_swn_addr(const struct sockaddr_storage *new,
+ 		struct sockaddr_in *ipv4 = (struct sockaddr_in *)new;
+ 
+ 		ipv4->sin_port = port;
+-	}
+-
+-	if (new->ss_family == AF_INET6) {
++	} else if (new->ss_family == AF_INET6) {
+ 		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)new;
+ 
+ 		ipv6->sin6_port = port;
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index d178cf85e926d..b80b6ba232aad 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -1310,7 +1310,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+ 		ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
+ 		ndacl_ptr->revision =
+ 			dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
+-		ndacl_ptr->num_aces = dacl_ptr->num_aces;
++		ndacl_ptr->num_aces = dacl_ptr ? dacl_ptr->num_aces : 0;
+ 
+ 		if (uid_valid(uid)) { /* chown */
+ 			uid_t id;
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index ec824ab8c5ca3..723b97002d8dd 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -896,7 +896,7 @@ struct cifs_ses {
+ 	struct mutex session_mutex;
+ 	struct TCP_Server_Info *server;	/* pointer to server info */
+ 	int ses_count;		/* reference counter */
+-	enum statusEnum status;
++	enum statusEnum status;  /* updates protected by GlobalMid_Lock */
+ 	unsigned overrideSecFlg;  /* if non-zero override global sec flags */
+ 	char *serverOS;		/* name of operating system underlying server */
+ 	char *serverNOS;	/* name of network operating system of server */
+@@ -1783,6 +1783,7 @@ require use of the stronger protocol */
+  *	list operations on pending_mid_q and oplockQ
+  *      updates to XID counters, multiplex id  and SMB sequence numbers
+  *      list operations on global DnotifyReqList
++ *      updates to ses->status
+  *  tcp_ses_lock protects:
+  *	list operations on tcp and SMB session lists
+  *  tcon->open_file_lock protects the list of open files hanging off the tcon
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 3d62d52d730b5..09a3939f25bf6 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1639,9 +1639,12 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
+ 		spin_unlock(&cifs_tcp_ses_lock);
+ 		return;
+ 	}
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	spin_lock(&GlobalMid_Lock);
+ 	if (ses->status == CifsGood)
+ 		ses->status = CifsExiting;
+-	spin_unlock(&cifs_tcp_ses_lock);
++	spin_unlock(&GlobalMid_Lock);
+ 
+ 	cifs_free_ipc(ses);
+ 
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 098b4bc8da59a..d2d686ee10a3f 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -25,8 +25,7 @@
+ #define CACHE_HTABLE_SIZE 32
+ #define CACHE_MAX_ENTRIES 64
+ 
+-#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
+-				    DFSREF_STORAGE_SERVER))
++#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
+ 
+ struct cache_dfs_tgt {
+ 	char *name;
+@@ -170,7 +169,7 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
+ 				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
+ 				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
+ 				   ce->ttl, ce->etime.tv_nsec, ce->ref_flags, ce->hdr_flags,
+-				   IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
++				   IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
+ 				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
+ 
+ 			list_for_each_entry(t, &ce->tlist, list) {
+@@ -239,7 +238,7 @@ static inline void dump_ce(const struct cache_entry *ce)
+ 		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
+ 		 ce->etime.tv_nsec,
+ 		 ce->hdr_flags, ce->ref_flags,
+-		 IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
++		 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
+ 		 ce->path_consumed,
+ 		 cache_entry_expired(ce) ? "yes" : "no");
+ 	dump_tgts(ce);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index e9a530da4255c..ea5e958fd6b04 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -3561,6 +3561,119 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+ 	return rc;
+ }
+ 
++static int smb3_simple_fallocate_write_range(unsigned int xid,
++					     struct cifs_tcon *tcon,
++					     struct cifsFileInfo *cfile,
++					     loff_t off, loff_t len,
++					     char *buf)
++{
++	struct cifs_io_parms io_parms = {0};
++	int nbytes;
++	struct kvec iov[2];
++
++	io_parms.netfid = cfile->fid.netfid;
++	io_parms.pid = current->tgid;
++	io_parms.tcon = tcon;
++	io_parms.persistent_fid = cfile->fid.persistent_fid;
++	io_parms.volatile_fid = cfile->fid.volatile_fid;
++	io_parms.offset = off;
++	io_parms.length = len;
++
++	/* iov[0] is reserved for smb header */
++	iov[1].iov_base = buf;
++	iov[1].iov_len = io_parms.length;
++	return SMB2_write(xid, &io_parms, &nbytes, iov, 1);
++}
++
++static int smb3_simple_fallocate_range(unsigned int xid,
++				       struct cifs_tcon *tcon,
++				       struct cifsFileInfo *cfile,
++				       loff_t off, loff_t len)
++{
++	struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
++	u32 out_data_len;
++	char *buf = NULL;
++	loff_t l;
++	int rc;
++
++	in_data.file_offset = cpu_to_le64(off);
++	in_data.length = cpu_to_le64(len);
++	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
++			cfile->fid.volatile_fid,
++			FSCTL_QUERY_ALLOCATED_RANGES, true,
++			(char *)&in_data, sizeof(in_data),
++			1024 * sizeof(struct file_allocated_range_buffer),
++			(char **)&out_data, &out_data_len);
++	if (rc)
++		goto out;
++	/*
++	 * The region is already fully allocated.
++	 */
++	if (out_data_len == 0)
++		goto out;
++
++	buf = kzalloc(1024 * 1024, GFP_KERNEL);
++	if (buf == NULL) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	tmp_data = out_data;
++	while (len) {
++		/*
++		 * The rest of the region is unmapped, so write it all.
++		 */
++		if (out_data_len == 0) {
++			rc = smb3_simple_fallocate_write_range(xid, tcon,
++					       cfile, off, len, buf);
++			goto out;
++		}
++
++		if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
++			rc = -EINVAL;
++			goto out;
++		}
++
++		if (off < le64_to_cpu(tmp_data->file_offset)) {
++			/*
++			 * We are at a hole. Write until the end of the region
++			 * or until the next allocated data,
++			 * whichever comes first.
++			 */
++			l = le64_to_cpu(tmp_data->file_offset) - off;
++			if (len < l)
++				l = len;
++			rc = smb3_simple_fallocate_write_range(xid, tcon,
++					       cfile, off, l, buf);
++			if (rc)
++				goto out;
++			off = off + l;
++			len = len - l;
++			if (len == 0)
++				goto out;
++		}
++		/*
++		 * We are at a section of allocated data, just skip forward
++		 * until the end of the data or the end of the region
++		 * we are supposed to fallocate, whichever comes first.
++		 */
++		l = le64_to_cpu(tmp_data->length);
++		if (len < l)
++			l = len;
++		off += l;
++		len -= l;
++
++		tmp_data = &tmp_data[1];
++		out_data_len -= sizeof(struct file_allocated_range_buffer);
++	}
++
++ out:
++	kfree(out_data);
++	kfree(buf);
++	return rc;
++}
++
++
+ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ 			    loff_t off, loff_t len, bool keep_size)
+ {
+@@ -3621,6 +3734,26 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ 	}
+ 
+ 	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
++		/*
++		 * At this point, we are trying to fallocate an internal
++		 * region of a sparse file. Since smb2 does not have a
++		 * fallocate command, we have two options for emulating this.
++		 * We can either turn the entire file into a non-sparse file,
++		 * which we only do if the fallocate covers virtually
++		 * the whole file, or we can overwrite the region with zeroes
++		 * using SMB2_write, which could be prohibitively expensive
++		 * if len is large.
++		 */
++		/*
++		 * We are only trying to fallocate a small region, so
++		 * just overwrite it with zeroes.
++		 */
++		if (len <= 1024 * 1024) {
++			rc = smb3_simple_fallocate_range(xid, tcon, cfile,
++							 off, len);
++			goto out;
++		}
++
+ 		/*
+ 		 * Check if falloc starts within first few pages of file
+ 		 * and ends within a few pages of the end of file to
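
For reference, the new smb3_simple_fallocate_range() above emulates
fallocate by querying the server's allocated extents and writing zeroes
only over the holes. A minimal userspace sketch of that walk, assuming a
sorted extent list and a hypothetical write_zeroes() standing in for
SMB2_write() of a zero-filled buffer:

	#include <stdint.h>
	#include <stdio.h>

	struct range { uint64_t off, len; };	/* one allocated extent */

	static void write_zeroes(uint64_t off, uint64_t len)
	{
		printf("zero-fill [%llu, %llu)\n", (unsigned long long)off,
		       (unsigned long long)(off + len));
	}

	/* Zero-fill every hole in [off, off+len); extents sorted by offset. */
	static void fill_holes(uint64_t off, uint64_t len,
			       const struct range *ext, int nr)
	{
		for (int i = 0; i < nr && len; i++) {
			if (off < ext[i].off) {		/* hole before extent */
				uint64_t l = ext[i].off - off;

				if (l > len)
					l = len;
				write_zeroes(off, l);
				off += l;
				len -= l;
			}
			if (len) {			/* skip allocated data */
				uint64_t l = ext[i].len;

				if (l > len)
					l = len;
				off += l;
				len -= l;
			}
		}
		if (len)				/* trailing hole */
			write_zeroes(off, len);
	}

	int main(void)
	{
		const struct range ext[] = { { 4096, 4096 }, { 16384, 8192 } };

		fill_holes(0, 32768, ext, 2);	/* fills the three holes */
		return 0;
	}
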
+diff --git a/fs/configfs/file.c b/fs/configfs/file.c
+index da8351d1e4552..4d0825213116a 100644
+--- a/fs/configfs/file.c
++++ b/fs/configfs/file.c
+@@ -482,13 +482,13 @@ static int configfs_release_bin_file(struct inode *inode, struct file *file)
+ 					buffer->bin_buffer_size);
+ 		}
+ 		up_read(&frag->frag_sem);
+-		/* vfree on NULL is safe */
+-		vfree(buffer->bin_buffer);
+-		buffer->bin_buffer = NULL;
+-		buffer->bin_buffer_size = 0;
+-		buffer->needs_read_fill = 1;
+ 	}
+ 
++	vfree(buffer->bin_buffer);
++	buffer->bin_buffer = NULL;
++	buffer->bin_buffer_size = 0;
++	buffer->needs_read_fill = 1;
++
+ 	configfs_release(inode, file);
+ 	return 0;
+ }
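
The configfs hunk above hoists the buffer teardown out of the
bin_buffer check because vfree(NULL) is a no-op, so the reset can run
unconditionally. The same idiom in hosted C, where free(NULL) is
likewise defined to do nothing:

	#include <stdlib.h>

	struct buf { char *data; size_t size; };

	/* free(NULL) is a no-op, so no guard around the reset is needed */
	static void buf_reset(struct buf *b)
	{
		free(b->data);
		b->data = NULL;
		b->size = 0;
	}
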
+diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
+index 6ca7d16593ff6..d00455440d087 100644
+--- a/fs/crypto/fname.c
++++ b/fs/crypto/fname.c
+@@ -344,13 +344,9 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
+ 		     offsetof(struct fscrypt_nokey_name, sha256));
+ 	BUILD_BUG_ON(BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX) > NAME_MAX);
+ 
+-	if (hash) {
+-		nokey_name.dirhash[0] = hash;
+-		nokey_name.dirhash[1] = minor_hash;
+-	} else {
+-		nokey_name.dirhash[0] = 0;
+-		nokey_name.dirhash[1] = 0;
+-	}
++	nokey_name.dirhash[0] = hash;
++	nokey_name.dirhash[1] = minor_hash;
++
+ 	if (iname->len <= sizeof(nokey_name.bytes)) {
+ 		memcpy(nokey_name.bytes, iname->name, iname->len);
+ 		size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]);
+diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
+index 261293fb70974..bca9c6658a7c5 100644
+--- a/fs/crypto/keysetup.c
++++ b/fs/crypto/keysetup.c
+@@ -210,15 +210,40 @@ out_unlock:
+ 	return err;
+ }
+ 
++/*
++ * Derive a SipHash key from the given fscrypt master key and the given
++ * application-specific information string.
++ *
++ * Note that the KDF produces a byte array, but the SipHash APIs expect the key
++ * as a pair of 64-bit words.  Therefore, on big endian CPUs we have to do an
++ * endianness swap in order to get the same results as on little endian CPUs.
++ */
++static int fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
++				      u8 context, const u8 *info,
++				      unsigned int infolen, siphash_key_t *key)
++{
++	int err;
++
++	err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, context, info, infolen,
++				  (u8 *)key, sizeof(*key));
++	if (err)
++		return err;
++
++	BUILD_BUG_ON(sizeof(*key) != 16);
++	BUILD_BUG_ON(ARRAY_SIZE(key->key) != 2);
++	le64_to_cpus(&key->key[0]);
++	le64_to_cpus(&key->key[1]);
++	return 0;
++}
++
+ int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
+ 			       const struct fscrypt_master_key *mk)
+ {
+ 	int err;
+ 
+-	err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY,
+-				  ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
+-				  (u8 *)&ci->ci_dirhash_key,
+-				  sizeof(ci->ci_dirhash_key));
++	err = fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_DIRHASH_KEY,
++					 ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
++					 &ci->ci_dirhash_key);
+ 	if (err)
+ 		return err;
+ 	ci->ci_dirhash_key_initialized = true;
+@@ -253,10 +278,9 @@ static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci,
+ 		if (mk->mk_ino_hash_key_initialized)
+ 			goto unlock;
+ 
+-		err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
+-					  HKDF_CONTEXT_INODE_HASH_KEY, NULL, 0,
+-					  (u8 *)&mk->mk_ino_hash_key,
+-					  sizeof(mk->mk_ino_hash_key));
++		err = fscrypt_derive_siphash_key(mk,
++						 HKDF_CONTEXT_INODE_HASH_KEY,
++						 NULL, 0, &mk->mk_ino_hash_key);
+ 		if (err)
+ 			goto unlock;
+ 		/* pairs with smp_load_acquire() above */
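
The helper added above exists because the HKDF output is a byte stream
while siphash_key_t is a pair of u64s; reading the bytes as little
endian on every CPU is what keeps the hash values identical across
architectures. A hosted-C sketch of just that conversion step, with
the le64_to_cpus() step open-coded as a portable little-endian load:

	#include <stdint.h>

	/* Read 8 little-endian bytes as a native u64 on any host order. */
	static uint64_t load_le64(const unsigned char *p)
	{
		uint64_t v = 0;

		for (int i = 7; i >= 0; i--)
			v = (v << 8) | p[i];
		return v;
	}

	struct siphash_key { uint64_t key[2]; };

	/* kdf_out is the raw 16-byte KDF output. */
	static void bytes_to_siphash_key(const unsigned char kdf_out[16],
					 struct siphash_key *k)
	{
		k->key[0] = load_le64(kdf_out);
		k->key[1] = load_le64(kdf_out + 8);
	}
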
+diff --git a/fs/dax.c b/fs/dax.c
+index df5485b4bddf1..d5d7b9393bcaa 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -488,10 +488,11 @@ static void *grab_mapping_entry(struct xa_state *xas,
+ 		struct address_space *mapping, unsigned int order)
+ {
+ 	unsigned long index = xas->xa_index;
+-	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
++	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
+ 	void *entry;
+ 
+ retry:
++	pmd_downgrade = false;
+ 	xas_lock_irq(xas);
+ 	entry = get_unlocked_entry(xas, order);
+ 
+diff --git a/fs/dlm/config.c b/fs/dlm/config.c
+index 88d95d96e36c5..52bcda64172aa 100644
+--- a/fs/dlm/config.c
++++ b/fs/dlm/config.c
+@@ -79,6 +79,9 @@ struct dlm_cluster {
+ 	unsigned int cl_new_rsb_count;
+ 	unsigned int cl_recover_callbacks;
+ 	char cl_cluster_name[DLM_LOCKSPACE_LEN];
++
++	struct dlm_spaces *sps;
++	struct dlm_comms *cms;
+ };
+ 
+ static struct dlm_cluster *config_item_to_cluster(struct config_item *i)
+@@ -409,6 +412,9 @@ static struct config_group *make_cluster(struct config_group *g,
+ 	if (!cl || !sps || !cms)
+ 		goto fail;
+ 
++	cl->sps = sps;
++	cl->cms = cms;
++
+ 	config_group_init_type_name(&cl->group, name, &cluster_type);
+ 	config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
+ 	config_group_init_type_name(&cms->cs_group, "comms", &comms_type);
+@@ -458,6 +464,9 @@ static void drop_cluster(struct config_group *g, struct config_item *i)
+ static void release_cluster(struct config_item *i)
+ {
+ 	struct dlm_cluster *cl = config_item_to_cluster(i);
++
++	kfree(cl->sps);
++	kfree(cl->cms);
+ 	kfree(cl);
+ }
+ 
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 45c2fdaf34c4d..d2a0ea0acca34 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -79,6 +79,8 @@ struct connection {
+ #define CF_CLOSING 8
+ #define CF_SHUTDOWN 9
+ #define CF_CONNECTED 10
++#define CF_RECONNECT 11
++#define CF_DELAY_CONNECT 12
+ 	struct list_head writequeue;  /* List of outgoing writequeue_entries */
+ 	spinlock_t writequeue_lock;
+ 	void (*connect_action) (struct connection *);	/* What to do to connect */
+@@ -87,6 +89,7 @@ struct connection {
+ #define MAX_CONNECT_RETRIES 3
+ 	struct hlist_node list;
+ 	struct connection *othercon;
++	struct connection *sendcon;
+ 	struct work_struct rwork; /* Receive workqueue */
+ 	struct work_struct swork; /* Send workqueue */
+ 	wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
+@@ -584,6 +587,22 @@ static void lowcomms_error_report(struct sock *sk)
+ 				   dlm_config.ci_tcp_port, sk->sk_err,
+ 				   sk->sk_err_soft);
+ 	}
++
++	/* below is sendcon-only handling */
++	if (test_bit(CF_IS_OTHERCON, &con->flags))
++		con = con->sendcon;
++
++	switch (sk->sk_err) {
++	case ECONNREFUSED:
++		set_bit(CF_DELAY_CONNECT, &con->flags);
++		break;
++	default:
++		break;
++	}
++
++	if (!test_and_set_bit(CF_RECONNECT, &con->flags))
++		queue_work(send_workqueue, &con->swork);
++
+ out:
+ 	read_unlock_bh(&sk->sk_callback_lock);
+ 	if (orig_report)
+@@ -695,12 +714,14 @@ static void close_connection(struct connection *con, bool and_other,
+ 
+ 	if (con->othercon && and_other) {
+ 		/* Will only re-enter once. */
+-		close_connection(con->othercon, false, true, true);
++		close_connection(con->othercon, false, tx, rx);
+ 	}
+ 
+ 	con->rx_leftover = 0;
+ 	con->retries = 0;
+ 	clear_bit(CF_CONNECTED, &con->flags);
++	clear_bit(CF_DELAY_CONNECT, &con->flags);
++	clear_bit(CF_RECONNECT, &con->flags);
+ 	mutex_unlock(&con->sock_mutex);
+ 	clear_bit(CF_CLOSING, &con->flags);
+ }
+@@ -839,18 +860,15 @@ out_resched:
+ 
+ out_close:
+ 	mutex_unlock(&con->sock_mutex);
+-	if (ret != -EAGAIN) {
+-		/* Reconnect when there is something to send */
++	if (ret == 0) {
+ 		close_connection(con, false, true, false);
+-		if (ret == 0) {
+-			log_print("connection %p got EOF from %d",
+-				  con, con->nodeid);
+-			/* handling for tcp shutdown */
+-			clear_bit(CF_SHUTDOWN, &con->flags);
+-			wake_up(&con->shutdown_wait);
+-			/* signal to breaking receive worker */
+-			ret = -1;
+-		}
++		log_print("connection %p got EOF from %d",
++			  con, con->nodeid);
++		/* handling for tcp shutdown */
++		clear_bit(CF_SHUTDOWN, &con->flags);
++		wake_up(&con->shutdown_wait);
++		/* signal the receive worker to break */
++		ret = -1;
+ 	}
+ 	return ret;
+ }
+@@ -933,6 +951,7 @@ static int accept_from_sock(struct listen_connection *con)
+ 			}
+ 
+ 			newcon->othercon = othercon;
++			othercon->sendcon = newcon;
+ 		} else {
+ 			/* close other sock con if we have something new */
+ 			close_connection(othercon, false, true, false);
+@@ -1478,7 +1497,7 @@ static void send_to_sock(struct connection *con)
+ 				cond_resched();
+ 				goto out;
+ 			} else if (ret < 0)
+-				goto send_error;
++				goto out;
+ 		}
+ 
+ 		/* Don't starve people filling buffers */
+@@ -1495,14 +1514,6 @@ out:
+ 	mutex_unlock(&con->sock_mutex);
+ 	return;
+ 
+-send_error:
+-	mutex_unlock(&con->sock_mutex);
+-	close_connection(con, false, false, true);
+-	/* Requeue the send work. When the work daemon runs again, it will try
+-	   a new connection, then call this function again. */
+-	queue_work(send_workqueue, &con->swork);
+-	return;
+-
+ out_connect:
+ 	mutex_unlock(&con->sock_mutex);
+ 	queue_work(send_workqueue, &con->swork);
+@@ -1574,18 +1585,30 @@ static void process_send_sockets(struct work_struct *work)
+ 	struct connection *con = container_of(work, struct connection, swork);
+ 
+ 	clear_bit(CF_WRITE_PENDING, &con->flags);
+-	if (con->sock == NULL) /* not mutex protected so check it inside too */
++
++	if (test_and_clear_bit(CF_RECONNECT, &con->flags))
++		close_connection(con, false, false, true);
++
++	if (con->sock == NULL) { /* not mutex protected so check it inside too */
++		if (test_and_clear_bit(CF_DELAY_CONNECT, &con->flags))
++			msleep(1000);
+ 		con->connect_action(con);
++	}
+ 	if (!list_empty(&con->writequeue))
+ 		send_to_sock(con);
+ }
+ 
+ static void work_stop(void)
+ {
+-	if (recv_workqueue)
++	if (recv_workqueue) {
+ 		destroy_workqueue(recv_workqueue);
+-	if (send_workqueue)
++		recv_workqueue = NULL;
++	}
++
++	if (send_workqueue) {
+ 		destroy_workqueue(send_workqueue);
++		send_workqueue = NULL;
++	}
+ }
+ 
+ static int work_start(void)
+@@ -1602,6 +1625,7 @@ static int work_start(void)
+ 	if (!send_workqueue) {
+ 		log_print("can't start dlm_send");
+ 		destroy_workqueue(recv_workqueue);
++		recv_workqueue = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -1733,7 +1757,7 @@ int dlm_lowcomms_start(void)
+ 
+ 	error = work_start();
+ 	if (error)
+-		goto fail;
++		goto fail_local;
+ 
+ 	dlm_allow_conn = 1;
+ 
+@@ -1750,6 +1774,9 @@ int dlm_lowcomms_start(void)
+ fail_unlisten:
+ 	dlm_allow_conn = 0;
+ 	dlm_close_sock(&listen_con.sock);
++	work_stop();
++fail_local:
++	deinit_local();
+ fail:
+ 	return error;
+ }
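
Taken together, the dlm changes above retire the send_error teardown in
favour of two flags: CF_RECONNECT makes the send worker close and redial
the connection itself, and CF_DELAY_CONNECT adds a back-off after
ECONNREFUSED. A simplified single-threaded sketch of that handshake,
with the locking, workqueue, and socket details elided:

	#include <errno.h>
	#include <stdbool.h>

	struct conn {
		bool reconnect;		/* CF_RECONNECT */
		bool delay_connect;	/* CF_DELAY_CONNECT */
		bool has_sock;
	};

	/* error-report side: only mark the connection, never tear down */
	static void on_sock_error(struct conn *c, int err)
	{
		if (err == ECONNREFUSED)
			c->delay_connect = true;
		c->reconnect = true;	/* send worker will close + redial */
	}

	/* send-worker side: all teardown and redial in one context */
	static void send_worker(struct conn *c)
	{
		if (c->reconnect) {
			c->reconnect = false;
			c->has_sock = false;	/* close_connection() */
		}
		if (!c->has_sock) {
			if (c->delay_connect) {
				c->delay_connect = false;
				/* msleep(1000) before redialing */
			}
			c->has_sock = true;	/* connect_action() */
		}
		/* ...then flush the write queue */
	}

	int main(void)
	{
		struct conn c = { .has_sock = true };

		on_sock_error(&c, ECONNREFUSED);
		send_worker(&c);	/* closes, backs off, then redials */
		return 0;
	}
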
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index d5a6b9b888a56..f31a08d86be89 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -155,6 +155,7 @@ static int erofs_read_superblock(struct super_block *sb)
+ 			goto out;
+ 	}
+ 
++	ret = -EINVAL;
+ 	blkszbits = dsb->blkszbits;
+ 	/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
+ 	if (blkszbits != LOG_BLOCK_SIZE) {
+diff --git a/fs/exec.c b/fs/exec.c
+index 18594f11c31fe..d7c4187ca023e 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1360,6 +1360,10 @@ int begin_new_exec(struct linux_binprm * bprm)
+ 	WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
+ 	flush_signal_handlers(me, 0);
+ 
++	retval = set_cred_ucounts(bprm->cred);
++	if (retval < 0)
++		goto out_unlock;
++
+ 	/*
+ 	 * install the new credentials for this executable
+ 	 */
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index 916797077aad4..dedbc55cd48f5 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -62,7 +62,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
+ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_entry *dir_entry)
+ {
+ 	int i, dentries_per_clu, dentries_per_clu_bits = 0, num_ext;
+-	unsigned int type, clu_offset;
++	unsigned int type, clu_offset, max_dentries;
+ 	sector_t sector;
+ 	struct exfat_chain dir, clu;
+ 	struct exfat_uni_name uni_name;
+@@ -85,6 +85,8 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
+ 
+ 	dentries_per_clu = sbi->dentries_per_clu;
+ 	dentries_per_clu_bits = ilog2(dentries_per_clu);
++	max_dentries = (unsigned int)min_t(u64, MAX_EXFAT_DENTRIES,
++					   (u64)sbi->num_clusters << dentries_per_clu_bits);
+ 
+ 	clu_offset = dentry >> dentries_per_clu_bits;
+ 	exfat_chain_dup(&clu, &dir);
+@@ -108,7 +110,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
+ 		}
+ 	}
+ 
+-	while (clu.dir != EXFAT_EOF_CLUSTER) {
++	while (clu.dir != EXFAT_EOF_CLUSTER && dentry < max_dentries) {
+ 		i = dentry & (dentries_per_clu - 1);
+ 
+ 		for ( ; i < dentries_per_clu; i++, dentry++) {
+@@ -244,7 +246,7 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx)
+ 	if (err)
+ 		goto unlock;
+ get_new:
+-	if (cpos >= i_size_read(inode))
++	if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode))
+ 		goto end_of_dir;
+ 
+ 	err = exfat_readdir(inode, &cpos, &de);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index cbf37b2cf871e..1293de50c8d48 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -825,6 +825,7 @@ void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
+ 	eh->eh_entries = 0;
+ 	eh->eh_magic = EXT4_EXT_MAGIC;
+ 	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
++	eh->eh_generation = 0;
+ 	ext4_mark_inode_dirty(handle, inode);
+ }
+ 
+@@ -1090,6 +1091,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
+ 	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
+ 	neh->eh_magic = EXT4_EXT_MAGIC;
+ 	neh->eh_depth = 0;
++	neh->eh_generation = 0;
+ 
+ 	/* move remainder of path[depth] to the new leaf */
+ 	if (unlikely(path[depth].p_hdr->eh_entries !=
+@@ -1167,6 +1169,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
+ 		neh->eh_magic = EXT4_EXT_MAGIC;
+ 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
+ 		neh->eh_depth = cpu_to_le16(depth - i);
++		neh->eh_generation = 0;
+ 		fidx = EXT_FIRST_INDEX(neh);
+ 		fidx->ei_block = border;
+ 		ext4_idx_store_pblock(fidx, oldblock);
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 0a729027322dd..9a3a8996aacf7 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -1574,11 +1574,9 @@ static unsigned long ext4_es_scan(struct shrinker *shrink,
+ 	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
+ 	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
+ 
+-	if (!nr_to_scan)
+-		return ret;
+-
+ 	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);
+ 
++	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
+ 	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
+ 	return nr_shrunk;
+ }
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index edbaed073ac5c..1b6bfb3f303c3 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -402,7 +402,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+  *
+  * We always try to spread first-level directories.
+  *
+- * If there are blockgroups with both free inodes and free blocks counts
++ * If there are blockgroups with both free inodes and free clusters counts
+  * not worse than average we return one with smallest directory count.
+  * Otherwise we simply return a random group.
+  *
+@@ -411,7 +411,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+  * It's OK to put directory into a group unless
+  * it has too many directories already (max_dirs) or
+  * it has too few free inodes left (min_inodes) or
+- * it has too few free blocks left (min_blocks) or
++ * it has too few free clusters left (min_clusters) or
+  * Parent's group is preferred, if it doesn't satisfy these
+  * conditions we search cyclically through the rest. If none
+  * of the groups look good we just look for a group with more
+@@ -427,7 +427,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
+ 	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
+ 	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
+ 	unsigned int freei, avefreei, grp_free;
+-	ext4_fsblk_t freeb, avefreec;
++	ext4_fsblk_t freec, avefreec;
+ 	unsigned int ndirs;
+ 	int max_dirs, min_inodes;
+ 	ext4_grpblk_t min_clusters;
+@@ -446,9 +446,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
+ 
+ 	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
+ 	avefreei = freei / ngroups;
+-	freeb = EXT4_C2B(sbi,
+-		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
+-	avefreec = freeb;
++	freec = percpu_counter_read_positive(&sbi->s_freeclusters_counter);
++	avefreec = freec;
+ 	do_div(avefreec, ngroups);
+ 	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
+ 
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 0948a43f1b3df..7cebbb2d2e34d 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3420,7 +3420,7 @@ retry:
+ 	 * i_disksize out to i_size. This could be beyond where direct I/O is
+ 	 * happening and thus expose allocated blocks to direct I/O reads.
+ 	 */
+-	else if ((map->m_lblk * (1 << blkbits)) >= i_size_read(inode))
++	else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
+ 		m_flags = EXT4_GET_BLOCKS_CREATE;
+ 	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ 		m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index d24cb3dc79fff..c51fa945424e1 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1574,10 +1574,11 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
+ 	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
+ 		/* Should never happen! (but apparently sometimes does?!?) */
+ 		WARN_ON(1);
+-		ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
+-			   "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
+-			   block, order, needed, ex->fe_group, ex->fe_start,
+-			   ex->fe_len, ex->fe_logical);
++		ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
++			"corruption or bug in mb_find_extent "
++			"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
++			block, order, needed, ex->fe_group, ex->fe_start,
++			ex->fe_len, ex->fe_logical);
+ 		ex->fe_len = 0;
+ 		ex->fe_start = 0;
+ 		ex->fe_group = 0;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 0e3a847b5d279..4a869bc5271b9 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3084,8 +3084,15 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ 			inode_lock(inode);
+ 			truncate_inode_pages(inode->i_mapping, inode->i_size);
+ 			ret = ext4_truncate(inode);
+-			if (ret)
++			if (ret) {
++				/*
++				 * We need to clean up the in-core orphan list
++				 * manually if ext4_truncate() failed to get a
++				 * transaction handle.
++				 */
++				ext4_orphan_del(NULL, inode);
+ 				ext4_std_error(inode->i_sb, ret);
++			}
+ 			inode_unlock(inode);
+ 			nr_truncates++;
+ 		} else {
+@@ -5032,6 +5039,7 @@ no_journal:
+ 			ext4_msg(sb, KERN_ERR,
+ 			       "unable to initialize "
+ 			       "flex_bg meta info!");
++			ret = -ENOMEM;
+ 			goto failed_mount6;
+ 		}
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 8804a5d513801..3c8a003ee6cf7 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -3971,6 +3971,12 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
+ 		return -EROFS;
+ 
++	if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
++		f2fs_err(F2FS_I_SB(inode),
++			"Swapfile not supported in LFS mode");
++		return -EINVAL;
++	}
++
+ 	ret = f2fs_convert_inline_inode(inode);
+ 	if (ret)
+ 		return ret;
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index e38a7f6921dd6..31eaac5b70c95 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -525,6 +525,7 @@ enum feat_id {
+ 	FEAT_CASEFOLD,
+ 	FEAT_COMPRESSION,
+ 	FEAT_TEST_DUMMY_ENCRYPTION_V2,
++	FEAT_ENCRYPTED_CASEFOLD,
+ };
+ 
+ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
+@@ -546,6 +547,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
+ 	case FEAT_CASEFOLD:
+ 	case FEAT_COMPRESSION:
+ 	case FEAT_TEST_DUMMY_ENCRYPTION_V2:
++	case FEAT_ENCRYPTED_CASEFOLD:
+ 		return sprintf(buf, "supported\n");
+ 	}
+ 	return 0;
+@@ -649,7 +651,10 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks);
+ #ifdef CONFIG_FS_ENCRYPTION
+ F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
+ F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2, FEAT_TEST_DUMMY_ENCRYPTION_V2);
++#ifdef CONFIG_UNICODE
++F2FS_FEATURE_RO_ATTR(encrypted_casefold, FEAT_ENCRYPTED_CASEFOLD);
+ #endif
++#endif /* CONFIG_FS_ENCRYPTION */
+ #ifdef CONFIG_BLK_DEV_ZONED
+ F2FS_FEATURE_RO_ATTR(block_zoned, FEAT_BLKZONED);
+ #endif
+@@ -739,7 +744,10 @@ static struct attribute *f2fs_feat_attrs[] = {
+ #ifdef CONFIG_FS_ENCRYPTION
+ 	ATTR_LIST(encryption),
+ 	ATTR_LIST(test_dummy_encryption_v2),
++#ifdef CONFIG_UNICODE
++	ATTR_LIST(encrypted_casefold),
+ #endif
++#endif /* CONFIG_FS_ENCRYPTION */
+ #ifdef CONFIG_BLK_DEV_ZONED
+ 	ATTR_LIST(block_zoned),
+ #endif
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index e91980f493884..8d4130b01423b 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -505,12 +505,19 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
+ 	if (!isw)
+ 		return;
+ 
++	atomic_inc(&isw_nr_in_flight);
++
+ 	/* find and pin the new wb */
+ 	rcu_read_lock();
+ 	memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
+-	if (memcg_css)
+-		isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
++	if (memcg_css && !css_tryget(memcg_css))
++		memcg_css = NULL;
+ 	rcu_read_unlock();
++	if (!memcg_css)
++		goto out_free;
++
++	isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
++	css_put(memcg_css);
+ 	if (!isw->new_wb)
+ 		goto out_free;
+ 
+@@ -535,11 +542,10 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
+ 	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
+ 	 */
+ 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
+-
+-	atomic_inc(&isw_nr_in_flight);
+ 	return;
+ 
+ out_free:
++	atomic_dec(&isw_nr_in_flight);
+ 	if (isw->new_wb)
+ 		wb_put(isw->new_wb);
+ 	kfree(isw);
+@@ -2205,28 +2211,6 @@ int dirtytime_interval_handler(struct ctl_table *table, int write,
+ 	return ret;
+ }
+ 
+-static noinline void block_dump___mark_inode_dirty(struct inode *inode)
+-{
+-	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
+-		struct dentry *dentry;
+-		const char *name = "?";
+-
+-		dentry = d_find_alias(inode);
+-		if (dentry) {
+-			spin_lock(&dentry->d_lock);
+-			name = (const char *) dentry->d_name.name;
+-		}
+-		printk(KERN_DEBUG
+-		       "%s(%d): dirtied inode %lu (%s) on %s\n",
+-		       current->comm, task_pid_nr(current), inode->i_ino,
+-		       name, inode->i_sb->s_id);
+-		if (dentry) {
+-			spin_unlock(&dentry->d_lock);
+-			dput(dentry);
+-		}
+-	}
+-}
+-
+ /**
+  * __mark_inode_dirty -	internal function to mark an inode dirty
+  *
+@@ -2296,9 +2280,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ 	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
+ 		return;
+ 
+-	if (unlikely(block_dump))
+-		block_dump___mark_inode_dirty(inode);
+-
+ 	spin_lock(&inode->i_lock);
+ 	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
+ 		goto out_unlock_inode;
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index a5ceccc5ef00f..b8d58aa082062 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -783,6 +783,7 @@ static int fuse_check_page(struct page *page)
+ 	       1 << PG_uptodate |
+ 	       1 << PG_lru |
+ 	       1 << PG_active |
++	       1 << PG_workingset |
+ 	       1 << PG_reclaim |
+ 	       1 << PG_waiters))) {
+ 		dump_page(page, "fuse: trying to steal weird page");
+@@ -1271,6 +1272,15 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
+ 		goto restart;
+ 	}
+ 	spin_lock(&fpq->lock);
++	/*
++	 * Must not put the request on the fpq->io queue after the queue has
++	 * been shut down by fuse_abort_conn().
++	 */
++	if (!fpq->connected) {
++		req->out.h.error = err = -ECONNABORTED;
++		goto out_end;
++
++	}
+ 	list_add(&req->list, &fpq->io);
+ 	spin_unlock(&fpq->lock);
+ 	cs->req = req;
+@@ -1857,7 +1867,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
+ 	}
+ 
+ 	err = -EINVAL;
+-	if (oh.error <= -1000 || oh.error > 0)
++	if (oh.error <= -512 || oh.error > 0)
+ 		goto copy_finish;
+ 
+ 	spin_lock(&fpq->lock);
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 06a18700a8455..5e09741282bfd 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -339,18 +339,33 @@ static struct vfsmount *fuse_dentry_automount(struct path *path)
+ 
+ 	/* Initialize superblock, making @mp_fi its root */
+ 	err = fuse_fill_super_submount(sb, mp_fi);
+-	if (err)
++	if (err) {
++		fuse_conn_put(fc);
++		kfree(fm);
++		sb->s_fs_info = NULL;
+ 		goto out_put_sb;
++	}
++
++	down_write(&fc->killsb);
++	list_add_tail(&fm->fc_entry, &fc->mounts);
++	up_write(&fc->killsb);
+ 
+ 	sb->s_flags |= SB_ACTIVE;
+ 	fsc->root = dget(sb->s_root);
++
++	/*
++	 * FIXME: setting SB_BORN requires a write barrier for
++	 *        super_cache_count(). We should actually come
++	 *        up with a proper ->get_tree() implementation
++	 *        for submounts and call vfs_get_tree() to take
++	 *        care of the write barrier.
++	 */
++	smp_wmb();
++	sb->s_flags |= SB_BORN;
++
+ 	/* We are done configuring the superblock, so unlock it */
+ 	up_write(&sb->s_umount);
+ 
+-	down_write(&fc->killsb);
+-	list_add_tail(&fm->fc_entry, &fc->mounts);
+-	up_write(&fc->killsb);
+-
+ 	/* Create the submount */
+ 	mnt = vfs_create_mount(fsc);
+ 	if (IS_ERR(mnt)) {
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index a86e6810237a1..d7e477ecb973d 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -474,8 +474,8 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
+ 	file_update_time(vmf->vma->vm_file);
+ 
+ 	/* page is wholly or partially inside EOF */
+-	if (offset > size - PAGE_SIZE)
+-		length = offset_in_page(size);
++	if (size - offset < PAGE_SIZE)
++		length = size - offset;
+ 	else
+ 		length = PAGE_SIZE;
+ 
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index aa4136055a83c..e3bd47454e49b 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -689,6 +689,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
+ 	}
+ 
+ 	iput(pn);
++	pn = NULL;
+ 	ip = GFS2_I(sdp->sd_sc_inode);
+ 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
+ 				   &sdp->sd_sc_gh);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 359d1abb089c4..58ac04cca587f 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2683,7 +2683,7 @@ static bool io_file_supports_async(struct file *file, int rw)
+ 			return true;
+ 		return false;
+ 	}
+-	if (S_ISCHR(mode) || S_ISSOCK(mode))
++	if (S_ISSOCK(mode))
+ 		return true;
+ 	if (S_ISREG(mode)) {
+ 		if (IS_ENABLED(CONFIG_BLOCK) &&
+@@ -3497,6 +3497,10 @@ static int io_renameat_prep(struct io_kiocb *req,
+ 	struct io_rename *ren = &req->rename;
+ 	const char __user *oldf, *newf;
+ 
++	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
++		return -EINVAL;
++	if (sqe->ioprio || sqe->buf_index)
++		return -EINVAL;
+ 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
+ 		return -EBADF;
+ 
+@@ -3544,6 +3548,10 @@ static int io_unlinkat_prep(struct io_kiocb *req,
+ 	struct io_unlink *un = &req->unlink;
+ 	const char __user *fname;
+ 
++	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
++		return -EINVAL;
++	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
++		return -EINVAL;
+ 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
+ 		return -EBADF;
+ 
+diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
+index f5c058b3192ce..4474adb393ca8 100644
+--- a/fs/ntfs/inode.c
++++ b/fs/ntfs/inode.c
+@@ -477,7 +477,7 @@ err_corrupt_attr:
+ 		}
+ 		file_name_attr = (FILE_NAME_ATTR*)((u8*)attr +
+ 				le16_to_cpu(attr->data.resident.value_offset));
+-		p2 = (u8*)attr + le32_to_cpu(attr->data.resident.value_length);
++		p2 = (u8 *)file_name_attr + le32_to_cpu(attr->data.resident.value_length);
+ 		if (p2 < (u8*)attr || p2 > p)
+ 			goto err_corrupt_attr;
+ 		/* This attribute is ok, but is it in the $Extend directory? */
+diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
+index 50f11bfdc8c2d..82a3edc4aea4b 100644
+--- a/fs/ocfs2/filecheck.c
++++ b/fs/ocfs2/filecheck.c
+@@ -328,11 +328,7 @@ static ssize_t ocfs2_filecheck_attr_show(struct kobject *kobj,
+ 		ret = snprintf(buf + total, remain, "%lu\t\t%u\t%s\n",
+ 			       p->fe_ino, p->fe_done,
+ 			       ocfs2_filecheck_error(p->fe_status));
+-		if (ret < 0) {
+-			total = ret;
+-			break;
+-		}
+-		if (ret == remain) {
++		if (ret >= remain) {
+ 			/* snprintf() didn't fit */
+ 			total = -E2BIG;
+ 			break;
+diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
+index a191094694c61..03eacb249f379 100644
+--- a/fs/ocfs2/stackglue.c
++++ b/fs/ocfs2/stackglue.c
+@@ -502,11 +502,7 @@ static ssize_t ocfs2_loaded_cluster_plugins_show(struct kobject *kobj,
+ 	list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
+ 		ret = snprintf(buf, remain, "%s\n",
+ 			       p->sp_name);
+-		if (ret < 0) {
+-			total = ret;
+-			break;
+-		}
+-		if (ret == remain) {
++		if (ret >= remain) {
+ 			/* snprintf() didn't fit */
+ 			total = -E2BIG;
+ 			break;
+@@ -533,7 +529,7 @@ static ssize_t ocfs2_active_cluster_plugin_show(struct kobject *kobj,
+ 	if (active_stack) {
+ 		ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ 			       active_stack->sp_name);
+-		if (ret == PAGE_SIZE)
++		if (ret >= PAGE_SIZE)
+ 			ret = -E2BIG;
+ 	}
+ 	spin_unlock(&ocfs2_stack_lock);
+diff --git a/fs/open.c b/fs/open.c
+index e53af13b5835f..53bc0573c0eca 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -1002,12 +1002,20 @@ inline struct open_how build_open_how(int flags, umode_t mode)
+ 
+ inline int build_open_flags(const struct open_how *how, struct open_flags *op)
+ {
+-	int flags = how->flags;
++	u64 flags = how->flags;
++	u64 strip = FMODE_NONOTIFY | O_CLOEXEC;
+ 	int lookup_flags = 0;
+ 	int acc_mode = ACC_MODE(flags);
+ 
+-	/* Must never be set by userspace */
+-	flags &= ~(FMODE_NONOTIFY | O_CLOEXEC);
++	BUILD_BUG_ON_MSG(upper_32_bits(VALID_OPEN_FLAGS),
++			 "struct open_flags doesn't yet handle flags > 32 bits");
++
++	/*
++	 * Strip flags that either shouldn't be set by userspace like
++	 * FMODE_NONOTIFY or that aren't relevant in determining struct
++	 * open_flags like O_CLOEXEC.
++	 */
++	flags &= ~strip;
+ 
+ 	/*
+ 	 * Older syscalls implicitly clear all of the invalid flags or argument
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index e862cab695838..a3f27ccec742b 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -829,7 +829,7 @@ static int show_smap(struct seq_file *m, void *v)
+ 	__show_smap(m, &mss, false);
+ 
+ 	seq_printf(m, "THPeligible:    %d\n",
+-		   transparent_hugepage_enabled(vma));
++		   transparent_hugepage_active(vma));
+ 
+ 	if (arch_pkeys_enabled())
+ 		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
+diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
+index 8adabde685f13..328da35da3908 100644
+--- a/fs/pstore/Kconfig
++++ b/fs/pstore/Kconfig
+@@ -173,6 +173,7 @@ config PSTORE_BLK
+ 	tristate "Log panic/oops to a block device"
+ 	depends on PSTORE
+ 	depends on BLOCK
++	depends on BROKEN
+ 	select PSTORE_ZONE
+ 	default n
+ 	help
+diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
+index ce2cbb3c380ff..2f6b1befb1292 100644
+--- a/include/asm-generic/pgtable-nop4d.h
++++ b/include/asm-generic/pgtable-nop4d.h
+@@ -9,7 +9,6 @@
+ typedef struct { pgd_t pgd; } p4d_t;
+ 
+ #define P4D_SHIFT		PGDIR_SHIFT
+-#define MAX_PTRS_PER_P4D	1
+ #define PTRS_PER_P4D		1
+ #define P4D_SIZE		(1UL << P4D_SHIFT)
+ #define P4D_MASK		(~(P4D_SIZE-1))
+diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
+index d683f5e6d7913..b4d43a4af5f79 100644
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc)
+ } while (0)
+ 
+ #define init_idle_preempt_count(p, cpu) do { \
+-	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
++	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+ } while (0)
+ 
+ static __always_inline void set_preempt_need_resched(void)
+diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
+index 4c61dade8835f..f6da8a1326398 100644
+--- a/include/clocksource/timer-ti-dm.h
++++ b/include/clocksource/timer-ti-dm.h
+@@ -74,6 +74,7 @@
+ #define OMAP_TIMER_ERRATA_I103_I767			0x80000000
+ 
+ struct timer_regs {
++	u32 ocp_cfg;
+ 	u32 tidr;
+ 	u32 tier;
+ 	u32 twer;
+diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
+index 0a288dddcf5be..25806141db591 100644
+--- a/include/crypto/internal/hash.h
++++ b/include/crypto/internal/hash.h
+@@ -75,13 +75,7 @@ void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
+ int ahash_register_instance(struct crypto_template *tmpl,
+ 			    struct ahash_instance *inst);
+ 
+-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+-		    unsigned int keylen);
+-
+-static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+-{
+-	return alg->setkey != shash_no_setkey;
+-}
++bool crypto_shash_alg_has_setkey(struct shash_alg *alg);
+ 
+ static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
+ {
+diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
+index 82e907ce7bdd3..afa74d7ba1009 100644
+--- a/include/dt-bindings/clock/imx8mq-clock.h
++++ b/include/dt-bindings/clock/imx8mq-clock.h
+@@ -405,25 +405,6 @@
+ 
+ #define IMX8MQ_VIDEO2_PLL1_REF_SEL		266
+ 
+-#define IMX8MQ_SYS1_PLL_40M_CG			267
+-#define IMX8MQ_SYS1_PLL_80M_CG			268
+-#define IMX8MQ_SYS1_PLL_100M_CG			269
+-#define IMX8MQ_SYS1_PLL_133M_CG			270
+-#define IMX8MQ_SYS1_PLL_160M_CG			271
+-#define IMX8MQ_SYS1_PLL_200M_CG			272
+-#define IMX8MQ_SYS1_PLL_266M_CG			273
+-#define IMX8MQ_SYS1_PLL_400M_CG			274
+-#define IMX8MQ_SYS1_PLL_800M_CG			275
+-#define IMX8MQ_SYS2_PLL_50M_CG			276
+-#define IMX8MQ_SYS2_PLL_100M_CG			277
+-#define IMX8MQ_SYS2_PLL_125M_CG			278
+-#define IMX8MQ_SYS2_PLL_166M_CG			279
+-#define IMX8MQ_SYS2_PLL_200M_CG			280
+-#define IMX8MQ_SYS2_PLL_250M_CG			281
+-#define IMX8MQ_SYS2_PLL_333M_CG			282
+-#define IMX8MQ_SYS2_PLL_500M_CG			283
+-#define IMX8MQ_SYS2_PLL_1000M_CG		284
+-
+ #define IMX8MQ_CLK_GPU_CORE			285
+ #define IMX8MQ_CLK_GPU_SHADER			286
+ #define IMX8MQ_CLK_M4_CORE			287
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index d0246c92a6e86..460c96da27ccf 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -44,9 +44,6 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs)
+ #define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
+ #define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)
+ 
+-#define bio_multiple_segments(bio)				\
+-	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
+-
+ #define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
+ #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
+ 
+@@ -271,7 +268,7 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
+ 
+ static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+ {
+-	*bv = bio_iovec(bio);
++	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ }
+ 
+ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+@@ -279,10 +276,9 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+ 	struct bvec_iter iter = bio->bi_iter;
+ 	int idx;
+ 
+-	if (unlikely(!bio_multiple_segments(bio))) {
+-		*bv = bio_iovec(bio);
+-		return;
+-	}
++	bio_get_first_bvec(bio, bv);
++	if (bv->bv_len == bio->bi_iter.bi_size)
++		return;		/* this bio only has a single bvec */
+ 
+ 	bio_advance_iter(bio, &iter, iter.bi_size);
+ 
+diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
+index 86d143db65231..83a3ebff74560 100644
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -131,7 +131,7 @@ struct clocksource {
+ #define CLOCK_SOURCE_UNSTABLE			0x40
+ #define CLOCK_SOURCE_SUSPEND_NONSTOP		0x80
+ #define CLOCK_SOURCE_RESELECT			0x100
+-
++#define CLOCK_SOURCE_VERIFY_PERCPU		0x200
+ /* simplify initialization of mask field */
+ #define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
+ 
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 4c63505036977..66436e6550328 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -144,6 +144,7 @@ struct cred {
+ #endif
+ 	struct user_struct *user;	/* real user ID subscription */
+ 	struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
++	struct ucounts *ucounts;
+ 	struct group_info *group_info;	/* supplementary groups for euid/fsgid */
+ 	/* RCU deletion */
+ 	union {
+@@ -170,6 +171,7 @@ extern int set_security_override_from_ctx(struct cred *, const char *);
+ extern int set_create_files_as(struct cred *, struct inode *);
+ extern int cred_fscmp(const struct cred *, const struct cred *);
+ extern void __init cred_init(void);
++extern int set_cred_ucounts(struct cred *);
+ 
+ /*
+  * check for validity of credentials
+diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
+index 6686a0baa91d3..e72787731a5b2 100644
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -118,9 +118,34 @@ extern struct kobj_attribute shmem_enabled_attr;
+ 
+ extern unsigned long transparent_hugepage_flags;
+ 
++static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
++		unsigned long haddr)
++{
++	/* Don't have to check pgoff for anonymous vma */
++	if (!vma_is_anonymous(vma)) {
++		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
++				HPAGE_PMD_NR))
++			return false;
++	}
++
++	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
++		return false;
++	return true;
++}
++
++static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
++					  unsigned long vm_flags)
++{
++	/* Explicitly disabled through madvise. */
++	if ((vm_flags & VM_NOHUGEPAGE) ||
++	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
++		return false;
++	return true;
++}
++
+ /*
+  * to be used on vmas which are known to support THP.
+- * Use transparent_hugepage_enabled otherwise
++ * Use transparent_hugepage_active otherwise
+  */
+ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+ {
+@@ -131,15 +156,12 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+ 	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
+ 		return false;
+ 
+-	if (vma->vm_flags & VM_NOHUGEPAGE)
++	if (!transhuge_vma_enabled(vma, vma->vm_flags))
+ 		return false;
+ 
+ 	if (vma_is_temporary_stack(vma))
+ 		return false;
+ 
+-	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+-		return false;
+-
+ 	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
+ 		return true;
+ 
+@@ -153,24 +175,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+ 	return false;
+ }
+ 
+-bool transparent_hugepage_enabled(struct vm_area_struct *vma);
+-
+-#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
+-
+-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+-		unsigned long haddr)
+-{
+-	/* Don't have to check pgoff for anonymous vma */
+-	if (!vma_is_anonymous(vma)) {
+-		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
+-			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
+-			return false;
+-	}
+-
+-	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+-		return false;
+-	return true;
+-}
++bool transparent_hugepage_active(struct vm_area_struct *vma);
+ 
+ #define transparent_hugepage_use_zero_page()				\
+ 	(transparent_hugepage_flags &					\
+@@ -357,7 +362,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+ 	return false;
+ }
+ 
+-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
++static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
+ {
+ 	return false;
+ }
+@@ -368,6 +373,12 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+ 	return false;
+ }
+ 
++static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
++					  unsigned long vm_flags)
++{
++	return false;
++}
++
+ static inline void prep_transhuge_page(struct page *page) {}
+ 
+ static inline bool is_transparent_hugepage(struct page *page)
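
The consolidated transhuge_vma_suitable() above swaps the masked
comparison for IS_ALIGNED(). For a power-of-two HPAGE_PMD_NR the two
forms are equivalent, since (a - b) mod N == 0 exactly when
a mod N == b mod N. A quick self-contained check of that identity:

	#include <assert.h>

	#define N 512UL	/* HPAGE_PMD_NR on x86-64: 2 MiB / 4 KiB */

	static int old_check(unsigned long vpfn, unsigned long pgoff)
	{
		return (vpfn & (N - 1)) == (pgoff & (N - 1));
	}

	static int new_check(unsigned long vpfn, unsigned long pgoff)
	{
		/* IS_ALIGNED(vpfn - pgoff, N) */
		return ((vpfn - pgoff) & (N - 1)) == 0;
	}

	int main(void)
	{
		for (unsigned long vpfn = 0; vpfn < 2 * N; vpfn += 7)
			for (unsigned long pgoff = 0; pgoff < 2 * N; pgoff += 5)
				assert(old_check(vpfn, pgoff) ==
				       new_check(vpfn, pgoff));
		return 0;
	}
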
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 28fa3f9bbbfdd..7bbef3f195ae7 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -862,6 +862,11 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ #else	/* CONFIG_HUGETLB_PAGE */
+ struct hstate {};
+ 
++static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
++{
++	return NULL;
++}
++
+ static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
+ 					   unsigned long addr,
+ 					   int avoid_reserve)
+diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
+index c9b80be82440f..f82857bd693fd 100644
+--- a/include/linux/iio/common/cros_ec_sensors_core.h
++++ b/include/linux/iio/common/cros_ec_sensors_core.h
+@@ -77,7 +77,7 @@ struct cros_ec_sensors_core_state {
+ 		u16 scale;
+ 	} calib[CROS_EC_SENSOR_MAX_AXIS];
+ 	s8 sign[CROS_EC_SENSOR_MAX_AXIS];
+-	u8 samples[CROS_EC_SAMPLE_SIZE];
++	u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8);
+ 
+ 	int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
+ 				    unsigned long scan_mask, s16 *data);
+diff --git a/include/linux/kthread.h b/include/linux/kthread.h
+index 2484ed97e72f5..d9133d6db3084 100644
+--- a/include/linux/kthread.h
++++ b/include/linux/kthread.h
+@@ -33,6 +33,8 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+ 					  unsigned int cpu,
+ 					  const char *namefmt);
+ 
++void set_kthread_struct(struct task_struct *p);
++
+ void kthread_set_per_cpu(struct task_struct *k, int cpu);
+ bool kthread_is_per_cpu(struct task_struct *k);
+ 
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index cfb0842a7fb96..18b8373b1474a 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2435,7 +2435,6 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
+ extern void memmap_init_range(unsigned long, int, unsigned long,
+ 		unsigned long, unsigned long, enum meminit_context,
+ 		struct vmem_altmap *, int migratetype);
+-extern void memmap_init_zone(struct zone *zone);
+ extern void setup_per_zone_wmarks(void);
+ extern int __meminit init_per_zone_wmark_min(void);
+ extern void mem_init(void);
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index 136b1d996075c..6fbbd620f6db3 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -1580,4 +1580,26 @@ typedef unsigned int pgtbl_mod_mask;
+ #define pte_leaf_size(x) PAGE_SIZE
+ #endif
+ 
++/*
++ * Some architectures have MMUs that are configurable or selectable at boot
++ * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
++ * helps to have a static maximum value.
++ */
++
++#ifndef MAX_PTRS_PER_PTE
++#define MAX_PTRS_PER_PTE PTRS_PER_PTE
++#endif
++
++#ifndef MAX_PTRS_PER_PMD
++#define MAX_PTRS_PER_PMD PTRS_PER_PMD
++#endif
++
++#ifndef MAX_PTRS_PER_PUD
++#define MAX_PTRS_PER_PUD PTRS_PER_PUD
++#endif
++
++#ifndef MAX_PTRS_PER_P4D
++#define MAX_PTRS_PER_P4D PTRS_PER_P4D
++#endif
++
+ #endif /* _LINUX_PGTABLE_H */
+diff --git a/include/linux/prandom.h b/include/linux/prandom.h
+index bbf4b4ad61dfd..056d31317e499 100644
+--- a/include/linux/prandom.h
++++ b/include/linux/prandom.h
+@@ -111,7 +111,7 @@ static inline u32 __seed(u32 x, u32 m)
+  */
+ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+ {
+-	u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
++	u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;
+ 
+ 	state->s1 = __seed(i,   2U);
+ 	state->s2 = __seed(i,   8U);
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 4cc6ec3bf0abb..7482f8b968ea9 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -504,6 +504,15 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+ 	return NULL;
+ }
+ 
++static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
++{
++	return NULL;
++}
++
++static inline void put_swap_device(struct swap_info_struct *si)
++{
++}
++
+ #define swap_address_space(entry)		(NULL)
+ #define get_nr_swap_pages()			0L
+ #define total_swap_pages			0L
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index 9cfb099da58f6..752840d45e24d 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -41,7 +41,17 @@ extern int
+ tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
+ 			       int prio);
+ extern int
++tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data,
++					 int prio);
++extern int
+ tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
++static inline int
++tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe,
++				    void *data)
++{
++	return tracepoint_probe_register_prio_may_exist(tp, probe, data,
++							TRACEPOINT_DEFAULT_PRIO);
++}
+ extern void
+ for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
+ 		void *priv);
+diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
+index f6c5f784be5ab..604cf6a5dc2d0 100644
+--- a/include/linux/user_namespace.h
++++ b/include/linux/user_namespace.h
+@@ -100,11 +100,15 @@ struct ucounts {
+ };
+ 
+ extern struct user_namespace init_user_ns;
++extern struct ucounts init_ucounts;
+ 
+ bool setup_userns_sysctls(struct user_namespace *ns);
+ void retire_userns_sysctls(struct user_namespace *ns);
+ struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
+ void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
++struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
++struct ucounts *get_ucounts(struct ucounts *ucounts);
++void put_ucounts(struct ucounts *ucounts);
+ 
+ #ifdef CONFIG_USER_NS
+ 
+diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h
+index b4cb2ef02f171..226fcfa0e0261 100644
+--- a/include/media/hevc-ctrls.h
++++ b/include/media/hevc-ctrls.h
+@@ -81,7 +81,7 @@ struct v4l2_ctrl_hevc_sps {
+ 	__u64	flags;
+ };
+ 
+-#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT		(1ULL << 0)
++#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED	(1ULL << 0)
+ #define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT			(1ULL << 1)
+ #define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED		(1ULL << 2)
+ #define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT			(1ULL << 3)
+@@ -160,6 +160,7 @@ struct v4l2_hevc_pred_weight_table {
+ #define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV		(1ULL << 6)
+ #define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
+ #define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
++#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT	(1ULL << 9)
+ 
+ struct v4l2_ctrl_hevc_slice_params {
+ 	__u32	bit_size;
+diff --git a/include/media/media-dev-allocator.h b/include/media/media-dev-allocator.h
+index b35ea6062596b..2ab54d426c644 100644
+--- a/include/media/media-dev-allocator.h
++++ b/include/media/media-dev-allocator.h
+@@ -19,7 +19,7 @@
+ 
+ struct usb_device;
+ 
+-#if defined(CONFIG_MEDIA_CONTROLLER) && defined(CONFIG_USB)
++#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB)
+ /**
+  * media_device_usb_allocate() - Allocate and return struct &media device
+  *
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index ba2f439bc04d3..46d99c2778c3e 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -1773,13 +1773,15 @@ struct hci_cp_ext_adv_set {
+ 	__u8  max_events;
+ } __packed;
+ 
++#define HCI_MAX_EXT_AD_LENGTH	251
++
+ #define HCI_OP_LE_SET_EXT_ADV_DATA		0x2037
+ struct hci_cp_le_set_ext_adv_data {
+ 	__u8  handle;
+ 	__u8  operation;
+ 	__u8  frag_pref;
+ 	__u8  length;
+-	__u8  data[HCI_MAX_AD_LENGTH];
++	__u8  data[];
+ } __packed;
+ 
+ #define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA		0x2038
+@@ -1788,7 +1790,7 @@ struct hci_cp_le_set_ext_scan_rsp_data {
+ 	__u8  operation;
+ 	__u8  frag_pref;
+ 	__u8  length;
+-	__u8  data[HCI_MAX_AD_LENGTH];
++	__u8  data[];
+ } __packed;
+ 
+ #define LE_SET_ADV_DATA_OP_COMPLETE	0x03
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index ca4ac6603b9a0..8674141337b73 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -228,9 +228,9 @@ struct adv_info {
+ 	__u16	remaining_time;
+ 	__u16	duration;
+ 	__u16	adv_data_len;
+-	__u8	adv_data[HCI_MAX_AD_LENGTH];
++	__u8	adv_data[HCI_MAX_EXT_AD_LENGTH];
+ 	__u16	scan_rsp_len;
+-	__u8	scan_rsp_data[HCI_MAX_AD_LENGTH];
++	__u8	scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
+ 	__s8	tx_power;
+ 	__u32   min_interval;
+ 	__u32   max_interval;
+@@ -550,9 +550,9 @@ struct hci_dev {
+ 	DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
+ 
+ 	__s8			adv_tx_power;
+-	__u8			adv_data[HCI_MAX_AD_LENGTH];
++	__u8			adv_data[HCI_MAX_EXT_AD_LENGTH];
+ 	__u8			adv_data_len;
+-	__u8			scan_rsp_data[HCI_MAX_AD_LENGTH];
++	__u8			scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
+ 	__u8			scan_rsp_data_len;
+ 
+ 	struct list_head	adv_instances;
+diff --git a/include/net/ip.h b/include/net/ip.h
+index e20874059f826..d9683bef86840 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -31,6 +31,7 @@
+ #include <net/flow.h>
+ #include <net/flow_dissector.h>
+ #include <net/netns/hash.h>
++#include <net/lwtunnel.h>
+ 
+ #define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
+ #define IPV4_MIN_MTU		68			/* RFC 791 */
+@@ -445,22 +446,25 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ 
+ 	/* 'forwarding = true' case should always honour route mtu */
+ 	mtu = dst_metric_raw(dst, RTAX_MTU);
+-	if (mtu)
+-		return mtu;
++	if (!mtu)
++		mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ 
+-	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
++	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+ }
+ 
+ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+ 					  const struct sk_buff *skb)
+ {
++	unsigned int mtu;
++
+ 	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
+ 		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
+ 
+ 		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
+ 	}
+ 
+-	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
++	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
++	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
+ }
+ 
+ struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index f51a118bfce8b..f14149df5a654 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -265,11 +265,18 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ 
+ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+ {
++	int mtu;
++
+ 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+ 				inet6_sk(skb->sk) : NULL;
+ 
+-	return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
+-	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
++	if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
++		mtu = READ_ONCE(skb_dst(skb)->dev->mtu);
++		mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
++	} else
++		mtu = dst_mtu(skb_dst(skb));
++
++	return mtu;
+ }
+ 
+ static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
+@@ -317,7 +324,7 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
+ 	if (dst_metric_locked(dst, RTAX_MTU)) {
+ 		mtu = dst_metric_raw(dst, RTAX_MTU);
+ 		if (mtu)
+-			return mtu;
++			goto out;
+ 	}
+ 
+ 	mtu = IPV6_MIN_MTU;
+@@ -327,7 +334,8 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
+ 		mtu = idev->cnf.mtu6;
+ 	rcu_read_unlock();
+ 
+-	return mtu;
++out:
++	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+ }
+ 
+ u32 ip6_mtu_from_fib6(const struct fib6_result *res,
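
Both the IPv4 and IPv6 hunks above now subtract the lightweight-tunnel
encapsulation headroom from the path MTU, so callers size packets for
the payload that actually fits after encapsulation. A minimal sketch of
the computation, with encap_headroom() as a hypothetical stand-in for
lwtunnel_headroom() and purely illustrative numbers:

	#include <stdio.h>

	/* pretend the route encapsulates with an 8-byte tunnel header */
	static unsigned int encap_headroom(unsigned int mtu)
	{
		(void)mtu;
		return 8;
	}

	static unsigned int usable_mtu(unsigned int route_mtu,
				       unsigned int dev_mtu)
	{
		unsigned int mtu = route_mtu ? route_mtu : dev_mtu;

		return mtu - encap_headroom(mtu);
	}

	int main(void)
	{
		/* no route MTU: fall back to the device, then strip encap */
		printf("%u\n", usable_mtu(0, 1500));	/* prints 1492 */
		return 0;
	}
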
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 52874cdfe2260..d6fa6b97f6efa 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -241,7 +241,7 @@ struct macsec_context {
+ 	struct macsec_rx_sc *rx_sc;
+ 	struct {
+ 		unsigned char assoc_num;
+-		u8 key[MACSEC_KEYID_LEN];
++		u8 key[MACSEC_MAX_KEY_LEN];
+ 		union {
+ 			struct macsec_rx_sa *rx_sa;
+ 			struct macsec_tx_sa *tx_sa;
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 2c4f3527cc098..b070e99c412d1 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -163,6 +163,12 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ 		if (spin_trylock(&qdisc->seqlock))
+ 			goto nolock_empty;
+ 
++		/* Paired with smp_mb__after_atomic() to make sure
++		 * STATE_MISSED checking is synchronized with clearing
++		 * in pfifo_fast_dequeue().
++		 */
++		smp_mb__before_atomic();
++
+ 		/* If the MISSED flag is set, it means other thread has
+ 		 * set the MISSED flag before second spin_trylock(), so
+ 		 * we can return false here to avoid multi cpus doing
+@@ -180,6 +186,12 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ 		 */
+ 		set_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ 
++		/* spin_trylock() only has load-acquire semantics, so use
++		 * smp_mb__after_atomic() to ensure STATE_MISSED is set
++		 * before doing the second spin_trylock().
++		 */
++		smp_mb__after_atomic();
++
+ 		/* Retry again in case other CPU may not see the new flag
+ 		 * after it releases the lock at the end of qdisc_run_end().
+ 		 */
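
The two barriers above pair with each other across CPUs: a writer must
publish STATE_MISSED before its second spin_trylock(), and a reader must
order a failed spin_trylock() before re-checking the flag. A rough
userspace model of the same pattern using C11 atomics (illustrative
only; the toy_* names are hypothetical and this is not the kernel
implementation):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_qdisc {
        atomic_flag lock;    /* stands in for qdisc->seqlock */
        atomic_bool missed;  /* stands in for __QDISC_STATE_MISSED */
    };

    static bool toy_run_begin(struct toy_qdisc *q)
    {
        if (!atomic_flag_test_and_set(&q->lock))
            return true; /* fast path: lock acquired */

        /* Full fence before re-checking 'missed', mirroring
         * smp_mb__before_atomic() above. */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&q->missed))
            return false; /* the lock holder will rerun the qdisc */

        atomic_store(&q->missed, true);

        /* Publish 'missed' before retrying the lock, mirroring
         * smp_mb__after_atomic() above. */
        atomic_thread_fence(memory_order_seq_cst);

        return !atomic_flag_test_and_set(&q->lock);
    }
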
+diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
+index f051046ba0344..f94b8bc26f9ec 100644
+--- a/include/net/tc_act/tc_vlan.h
++++ b/include/net/tc_act/tc_vlan.h
+@@ -16,6 +16,7 @@ struct tcf_vlan_params {
+ 	u16               tcfv_push_vid;
+ 	__be16            tcfv_push_proto;
+ 	u8                tcfv_push_prio;
++	bool              tcfv_push_prio_exists;
+ 	struct rcu_head   rcu;
+ };
+ 
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index c58a6d4eb6103..6232a5f048bde 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1546,6 +1546,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
+ u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
+ int xfrm_init_replay(struct xfrm_state *x);
++u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
+ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
+ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
+ int xfrm_init_state(struct xfrm_state *x);
+diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
+index eaa8386dbc630..7a9a23e7a604a 100644
+--- a/include/net/xsk_buff_pool.h
++++ b/include/net/xsk_buff_pool.h
+@@ -147,11 +147,16 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
+ {
+ 	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
+ 
+-	if (pool->dma_pages_cnt && cross_pg) {
++	if (likely(!cross_pg))
++		return false;
++
++	if (pool->dma_pages_cnt) {
+ 		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
+ 			 XSK_NEXT_PG_CONTIG_MASK);
+ 	}
+-	return false;
++
++	/* skb path */
++	return addr + len > pool->addrs_cnt;
+ }
+ 
+ static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
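
The reworked xp_desc_crosses_non_contig_pg() above is, at its core,
page-offset arithmetic. A tiny self-contained check of the predicate,
assuming 4 KiB pages (values hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096ULL

    /* true when [addr, addr + len) spills past a page boundary */
    static bool crosses_page(uint64_t addr, uint32_t len)
    {
        return (addr & (TOY_PAGE_SIZE - 1)) + len > TOY_PAGE_SIZE;
    }

    int main(void)
    {
        printf("%d %d\n", crosses_page(4000, 200), crosses_page(100, 200));
        /* prints "1 0": 4000 % 4096 + 200 = 4200 > 4096, but 300 fits */
        return 0;
    }
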
+diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h
+index 9e273fed0a85f..800d53dc94705 100644
+--- a/include/scsi/fc/fc_ms.h
++++ b/include/scsi/fc/fc_ms.h
+@@ -63,8 +63,8 @@ enum fc_fdmi_hba_attr_type {
+  * HBA Attribute Length
+  */
+ #define FC_FDMI_HBA_ATTR_NODENAME_LEN		8
+-#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN	80
+-#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN	80
++#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN	64
++#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN	64
+ #define FC_FDMI_HBA_ATTR_MODEL_LEN		256
+ #define FC_FDMI_HBA_ATTR_MODELDESCR_LEN		256
+ #define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN	256
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 02f966e9358f6..091f284bd6e93 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -424,6 +424,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ 			   int);
++extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active);
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern void iscsi_session_failure(struct iscsi_session *session,
+ 				  enum iscsi_err err);
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index fc5a39839b4b0..3974329d4d023 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -82,6 +82,7 @@ struct iscsi_transport {
+ 	void (*destroy_session) (struct iscsi_cls_session *session);
+ 	struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ 				uint32_t cid);
++	void (*unbind_conn) (struct iscsi_cls_conn *conn, bool is_active);
+ 	int (*bind_conn) (struct iscsi_cls_session *session,
+ 			  struct iscsi_cls_conn *cls_conn,
+ 			  uint64_t transport_eph, int is_leading);
+@@ -196,15 +197,23 @@ enum iscsi_connection_state {
+ 	ISCSI_CONN_BOUND,
+ };
+ 
++#define ISCSI_CLS_CONN_BIT_CLEANUP	1
++
+ struct iscsi_cls_conn {
+ 	struct list_head conn_list;	/* item in connlist */
+-	struct list_head conn_list_err;	/* item in connlist_err */
+ 	void *dd_data;			/* LLD private data */
+ 	struct iscsi_transport *transport;
+ 	uint32_t cid;			/* connection id */
++	/*
++	 * This protects the conn startup and binding/unbinding of the ep to
++	 * the conn. Unbinding includes ep_disconnect and stop_conn.
++	 */
+ 	struct mutex ep_mutex;
+ 	struct iscsi_endpoint *ep;
+ 
++	unsigned long flags;
++	struct work_struct cleanup_work;
++
+ 	struct device dev;		/* sysfs transport/container device */
+ 	enum iscsi_connection_state state;
+ };
+@@ -441,6 +450,7 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+ extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+ extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+ extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
++extern void iscsi_put_endpoint(struct iscsi_endpoint *ep);
+ extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd);
+ extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
+ 					      struct iscsi_transport *t,
+diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
+index 039c0d7add1b1..c7fe032df185d 100644
+--- a/include/uapi/linux/v4l2-controls.h
++++ b/include/uapi/linux/v4l2-controls.h
+@@ -50,6 +50,7 @@
+ #ifndef __LINUX_V4L2_CONTROLS_H
+ #define __LINUX_V4L2_CONTROLS_H
+ 
++#include <linux/const.h>
+ #include <linux/types.h>
+ 
+ /* Control classes */
+@@ -1593,30 +1594,30 @@ struct v4l2_ctrl_h264_decode_params {
+ #define V4L2_FWHT_VERSION			3
+ 
+ /* Set if this is an interlaced format */
+-#define V4L2_FWHT_FL_IS_INTERLACED		BIT(0)
++#define V4L2_FWHT_FL_IS_INTERLACED		_BITUL(0)
+ /* Set if this is a bottom-first (NTSC) interlaced format */
+-#define V4L2_FWHT_FL_IS_BOTTOM_FIRST		BIT(1)
++#define V4L2_FWHT_FL_IS_BOTTOM_FIRST		_BITUL(1)
+ /* Set if each 'frame' contains just one field */
+-#define V4L2_FWHT_FL_IS_ALTERNATE		BIT(2)
++#define V4L2_FWHT_FL_IS_ALTERNATE		_BITUL(2)
+ /*
+  * If V4L2_FWHT_FL_IS_ALTERNATE was set, then this is set if this
+  * 'frame' is the bottom field, else it is the top field.
+  */
+-#define V4L2_FWHT_FL_IS_BOTTOM_FIELD		BIT(3)
++#define V4L2_FWHT_FL_IS_BOTTOM_FIELD		_BITUL(3)
+ /* Set if the Y' plane is uncompressed */
+-#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED	BIT(4)
++#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED	_BITUL(4)
+ /* Set if the Cb plane is uncompressed */
+-#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED		BIT(5)
++#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED		_BITUL(5)
+ /* Set if the Cr plane is uncompressed */
+-#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED		BIT(6)
++#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED		_BITUL(6)
+ /* Set if the chroma plane is full height, if cleared it is half height */
+-#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT		BIT(7)
++#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT		_BITUL(7)
+ /* Set if the chroma plane is full width, if cleared it is half width */
+-#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH		BIT(8)
++#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH		_BITUL(8)
+ /* Set if the alpha plane is uncompressed */
+-#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED	BIT(9)
++#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED	_BITUL(9)
+ /* Set if this is an I Frame */
+-#define V4L2_FWHT_FL_I_FRAME			BIT(10)
++#define V4L2_FWHT_FL_I_FRAME			_BITUL(10)
+ 
+ /* A 4-values flag - the number of components - 1 */
+ #define V4L2_FWHT_FL_COMPONENTS_NUM_MSK		GENMASK(18, 16)
+diff --git a/init/main.c b/init/main.c
+index 5bd1a25f1d6f5..c97d3c0247a1d 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -918,11 +918,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+ 	 * time - but meanwhile we still have a functioning scheduler.
+ 	 */
+ 	sched_init();
+-	/*
+-	 * Disable preemption - early bootup scheduling is extremely
+-	 * fragile until we cpu_idle() for the first time.
+-	 */
+-	preempt_disable();
++
+ 	if (WARN(!irqs_disabled(),
+ 		 "Interrupts were enabled *very* early, fixing it\n"))
+ 		local_irq_disable();
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 85d9d1b72a33a..b0ab5b915e6d1 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -92,7 +92,7 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries,
+ 	int i;
+ 	struct hlist_head *hash;
+ 
+-	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
++	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
+ 	if (hash != NULL)
+ 		for (i = 0; i < entries; i++)
+ 			INIT_HLIST_HEAD(&hash[i]);
+@@ -143,7 +143,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
+ 
+ 		spin_lock_init(&dtab->index_lock);
+ 	} else {
+-		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
++		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
+ 						      sizeof(struct bpf_dtab_netdev *),
+ 						      dtab->map.numa_node);
+ 		if (!dtab->netdev_map)
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index d2de2abec35b6..dc56237d69600 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -543,7 +543,7 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
+ 		return PTR_ERR(raw);
+ 
+ 	if (type == BPF_TYPE_PROG)
+-		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_prog_new_fd(raw);
++		ret = bpf_prog_new_fd(raw);
+ 	else if (type == BPF_TYPE_MAP)
+ 		ret = bpf_map_new_fd(raw, f_flags);
+ 	else if (type == BPF_TYPE_LINK)
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 2423b4e918b90..87c4ea3b3cb78 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -10877,7 +10877,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
+ 	}
+ }
+ 
+-static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
++static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
+ {
+ 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
+ 	int i, sz = prog->aux->size_poke_tab;
+@@ -10885,6 +10885,8 @@ static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
+ 
+ 	for (i = 0; i < sz; i++) {
+ 		desc = &tab[i];
++		if (desc->insn_idx <= off)
++			continue;
+ 		desc->insn_idx += len - 1;
+ 	}
+ }
+@@ -10905,7 +10907,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
+ 	if (adjust_insn_aux_data(env, new_prog, off, len))
+ 		return NULL;
+ 	adjust_subprog_starts(env, off, len);
+-	adjust_poke_descs(new_prog, len);
++	adjust_poke_descs(new_prog, off, len);
+ 	return new_prog;
+ }
+ 
+diff --git a/kernel/cred.c b/kernel/cred.c
+index 421b1149c6516..098213d4a39c3 100644
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -60,6 +60,7 @@ struct cred init_cred = {
+ 	.user			= INIT_USER,
+ 	.user_ns		= &init_user_ns,
+ 	.group_info		= &init_groups,
++	.ucounts		= &init_ucounts,
+ };
+ 
+ static inline void set_cred_subscribers(struct cred *cred, int n)
+@@ -119,6 +120,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
+ 	if (cred->group_info)
+ 		put_group_info(cred->group_info);
+ 	free_uid(cred->user);
++	if (cred->ucounts)
++		put_ucounts(cred->ucounts);
+ 	put_user_ns(cred->user_ns);
+ 	kmem_cache_free(cred_jar, cred);
+ }
+@@ -222,6 +225,7 @@ struct cred *cred_alloc_blank(void)
+ #ifdef CONFIG_DEBUG_CREDENTIALS
+ 	new->magic = CRED_MAGIC;
+ #endif
++	new->ucounts = get_ucounts(&init_ucounts);
+ 
+ 	if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
+ 		goto error;
+@@ -284,6 +288,11 @@ struct cred *prepare_creds(void)
+ 
+ 	if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+ 		goto error;
++
++	new->ucounts = get_ucounts(new->ucounts);
++	if (!new->ucounts)
++		goto error;
++
+ 	validate_creds(new);
+ 	return new;
+ 
+@@ -363,6 +372,9 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
+ 		ret = create_user_ns(new);
+ 		if (ret < 0)
+ 			goto error_put;
++		ret = set_cred_ucounts(new);
++		if (ret < 0)
++			goto error_put;
+ 	}
+ 
+ #ifdef CONFIG_KEYS
+@@ -653,6 +665,31 @@ int cred_fscmp(const struct cred *a, const struct cred *b)
+ }
+ EXPORT_SYMBOL(cred_fscmp);
+ 
++int set_cred_ucounts(struct cred *new)
++{
++	struct task_struct *task = current;
++	const struct cred *old = task->real_cred;
++	struct ucounts *old_ucounts = new->ucounts;
++
++	if (new->user == old->user && new->user_ns == old->user_ns)
++		return 0;
++
++	/*
++	 * This optimization is needed because alloc_ucounts() uses locks
++	 * for table lookups.
++	 */
++	if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
++		return 0;
++
++	if (!(new->ucounts = alloc_ucounts(new->user_ns, new->euid)))
++		return -EAGAIN;
++
++	if (old_ucounts)
++		put_ucounts(old_ucounts);
++
++	return 0;
++}
++
+ /*
+  * initialise the credentials stuff
+  */
+@@ -719,6 +756,10 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
+ 	if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+ 		goto error;
+ 
++	new->ucounts = get_ucounts(new->ucounts);
++	if (!new->ucounts)
++		goto error;
++
+ 	put_cred(old);
+ 	validate_creds(new);
+ 	return new;
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 426cd0c51f9eb..0c1d935521376 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2000,7 +2000,7 @@ static __latent_entropy struct task_struct *copy_process(
+ 		goto bad_fork_cleanup_count;
+ 
+ 	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
+-	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
++	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
+ 	p->flags |= PF_FORKNOEXEC;
+ 	INIT_LIST_HEAD(&p->children);
+ 	INIT_LIST_HEAD(&p->sibling);
+@@ -2405,7 +2405,7 @@ static inline void init_idle_pids(struct task_struct *idle)
+ 	}
+ }
+ 
+-struct task_struct *fork_idle(int cpu)
++struct task_struct * __init fork_idle(int cpu)
+ {
+ 	struct task_struct *task;
+ 	struct kernel_clone_args args = {
+@@ -2995,6 +2995,12 @@ int ksys_unshare(unsigned long unshare_flags)
+ 	if (err)
+ 		goto bad_unshare_cleanup_cred;
+ 
++	if (new_cred) {
++		err = set_cred_ucounts(new_cred);
++		if (err)
++			goto bad_unshare_cleanup_cred;
++	}
++
+ 	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
+ 		if (do_sysvsem) {
+ 			/*
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 4fdf2bd9b5589..44f89a602b001 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -68,16 +68,6 @@ enum KTHREAD_BITS {
+ 	KTHREAD_SHOULD_PARK,
+ };
+ 
+-static inline void set_kthread_struct(void *kthread)
+-{
+-	/*
+-	 * We abuse ->set_child_tid to avoid the new member and because it
+-	 * can't be wrongly copied by copy_process(). We also rely on fact
+-	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
+-	 */
+-	current->set_child_tid = (__force void __user *)kthread;
+-}
+-
+ static inline struct kthread *to_kthread(struct task_struct *k)
+ {
+ 	WARN_ON(!(k->flags & PF_KTHREAD));
+@@ -103,6 +93,22 @@ static inline struct kthread *__to_kthread(struct task_struct *p)
+ 	return kthread;
+ }
+ 
++void set_kthread_struct(struct task_struct *p)
++{
++	struct kthread *kthread;
++
++	if (__to_kthread(p))
++		return;
++
++	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
++	/*
++	 * We abuse ->set_child_tid to avoid the new member and because it
++	 * can't be wrongly copied by copy_process(). We also rely on the fact
++	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
++	 */
++	p->set_child_tid = (__force void __user *)kthread;
++}
++
+ void free_kthread_struct(struct task_struct *k)
+ {
+ 	struct kthread *kthread;
+@@ -272,8 +278,8 @@ static int kthread(void *_create)
+ 	struct kthread *self;
+ 	int ret;
+ 
+-	self = kzalloc(sizeof(*self), GFP_KERNEL);
+-	set_kthread_struct(self);
++	set_kthread_struct(current);
++	self = to_kthread(current);
+ 
+ 	/* If user was SIGKILLed, I release the structure. */
+ 	done = xchg(&create->done, NULL);
+@@ -1155,14 +1161,14 @@ static bool __kthread_cancel_work(struct kthread_work *work)
+  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
+  * @work is guaranteed to be queued immediately.
+  *
+- * Return: %true if @dwork was pending and its timer was modified,
+- * %false otherwise.
++ * Return: %false if @dwork was idle and queued, %true otherwise.
+  *
+  * A special case is when the work is being canceled in parallel.
+  * It might be caused either by the real kthread_cancel_delayed_work_sync()
+  * or yet another kthread_mod_delayed_work() call. We let the other command
+- * win and return %false here. The caller is supposed to synchronize these
+- * operations a reasonable way.
++ * win and return %true here. The return value can be used for reference
++ * counting and the number of queued works stays the same. Anyway, the caller
++ * is supposed to synchronize these operations in a reasonable way.
+  *
+  * This function is safe to call from any context including IRQ handler.
+  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
+@@ -1174,13 +1180,15 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
+ {
+ 	struct kthread_work *work = &dwork->work;
+ 	unsigned long flags;
+-	int ret = false;
++	int ret;
+ 
+ 	raw_spin_lock_irqsave(&worker->lock, flags);
+ 
+ 	/* Do not bother with canceling when never queued. */
+-	if (!work->worker)
++	if (!work->worker) {
++		ret = false;
+ 		goto fast_queue;
++	}
+ 
+ 	/* Work must not be used with >1 worker, see kthread_queue_work() */
+ 	WARN_ON_ONCE(work->worker != worker);
+@@ -1198,8 +1206,11 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
+ 	 * be used for reference counting.
+ 	 */
+ 	kthread_cancel_delayed_work_timer(work, &flags);
+-	if (work->canceling)
++	if (work->canceling) {
++		/* The number of works in the queue does not change. */
++		ret = true;
+ 		goto out;
++	}
+ 	ret = __kthread_cancel_work(work);
+ 
+ fast_queue:
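
The updated kerneldoc above changes the return-value contract of
kthread_mod_delayed_work() so it can drive reference counting: %false
exactly when an idle work item became queued. A toy userspace model of
a caller relying on that contract (hypothetical names, not the kernel
API itself):

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns false only when an idle work item becomes queued;
     * true means the number of queued works did not change. */
    static bool toy_mod_delayed_work(bool *queued)
    {
        if (!*queued) {
            *queued = true;
            return false; /* idle -> queued */
        }
        return true; /* already pending (or canceling) */
    }

    int main(void)
    {
        bool queued = false;
        int refs = 0;

        if (!toy_mod_delayed_work(&queued))
            refs++; /* newly queued: take a reference */
        if (!toy_mod_delayed_work(&queued))
            refs++; /* already pending: refcount unchanged */

        printf("refs=%d\n", refs); /* prints refs=1 */
        return 0;
    }
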
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 5bf6b1659215d..8f8cd43ec2a04 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -2305,7 +2305,56 @@ static void print_lock_class_header(struct lock_class *class, int depth)
+ }
+ 
+ /*
+- * printk the shortest lock dependencies from @start to @end in reverse order:
++ * Dependency path printing:
++ *
++ * After BFS we get a lock dependency path (linked via ->parent of lock_list);
++ * printing out each lock in the dependency path helps in understanding how
++ * the deadlock could happen. Here are some details about dependency path
++ * printing:
++ *
++ * 1)	A lock_list can be either forwards or backwards for a lock dependency,
++ * 	for a lock dependency A -> B, there are two lock_lists:
++ *
++ * 	a)	lock_list in the ->locks_after list of A, whose ->class is B and
++ * 		->links_to is A. In this case, we can say the lock_list is
++ * 		"A -> B" (forwards case).
++ *
++ * 	b)	lock_list in the ->locks_before list of B, whose ->class is A
++ * 		and ->links_to is B. In this case, we can say the lock_list is
++ * 		"B <- A" (bacwards case).
++ *
++ * 	The ->trace of both a) and b) point to the call trace where B was
++ * 	acquired with A held.
++ *
++ * 2)	A "helper" lock_list is introduced during BFS, this lock_list doesn't
++ * 	represent a certain lock dependency, it only provides an initial entry
++ * 	for BFS. For example, BFS may introduce a "helper" lock_list whose
++ * 	->class is A, as a result BFS will search all dependencies starting with
++ * 	A, e.g. A -> B or A -> C.
++ *
++ * 	The notation of a forwards helper lock_list is like "-> A", which means
++ * 	we should search the forwards dependencies starting with "A", e.g A -> B
++ * 	or A -> C.
++ *
++ * 	The notation of a backwards helper lock_list is like "<- B", which means
++ * 	we should search the backwards dependencies ending with "B", e.g.
++ * 	B <- A or B <- C.
++ */
++
++/*
++ * printk the shortest lock dependencies from @root to @leaf in reverse order.
++ *
++ * We have a lock dependency path as follows:
++ *
++ *    @root                                                                 @leaf
++ *      |                                                                     |
++ *      V                                                                     V
++ *	          ->parent                                   ->parent
++ * | lock_list | <--------- | lock_list | ... | lock_list  | <--------- | lock_list |
++ * |    -> L1  |            | L1 -> L2  | ... |Ln-2 -> Ln-1|            | Ln-1 -> Ln|
++ *
++ * , so it's natural that we start from @leaf and print every ->class and
++ * ->trace until we reach the @root.
+  */
+ static void __used
+ print_shortest_lock_dependencies(struct lock_list *leaf,
+@@ -2333,6 +2382,61 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
+ 	} while (entry && (depth >= 0));
+ }
+ 
++/*
++ * printk the shortest lock dependencies from @leaf to @root.
++ *
++ * We have a lock dependency path (from a backwards search) as follows:
++ *
++ *    @leaf                                                                 @root
++ *      |                                                                     |
++ *      V                                                                     V
++ *	          ->parent                                   ->parent
++ * | lock_list | ---------> | lock_list | ... | lock_list  | ---------> | lock_list |
++ * | L2 <- L1  |            | L3 <- L2  | ... | Ln <- Ln-1 |            |    <- Ln  |
++ *
++ * , so when we iterate from @leaf to @root, we actually print the lock
++ * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
++ *
++ * Another thing to notice here is that ->class of L2 <- L1 is L1, while the
++ * ->trace of L2 <- L1 is the call trace of L2, in fact we don't have the call
++ * trace of L1 in the dependency path, which is alright, because most of the
++ * time we can figure out where L1 is held from the call trace of L2.
++ */
++static void __used
++print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
++					   struct lock_list *root)
++{
++	struct lock_list *entry = leaf;
++	const struct lock_trace *trace = NULL;
++	int depth;
++
++	/* compute depth from generated tree by BFS */
++	depth = get_lock_depth(leaf);
++
++	do {
++		print_lock_class_header(entry->class, depth);
++		if (trace) {
++			printk("%*s ... acquired at:\n", depth, "");
++			print_lock_trace(trace, 2);
++			printk("\n");
++		}
++
++		/*
++		 * Record the pointer to the trace for the next lock_list
++		 * entry, see the comments for the function.
++		 */
++		trace = entry->trace;
++
++		if (depth == 0 && (entry != root)) {
++			printk("lockdep:%s bad path found in chain graph\n", __func__);
++			break;
++		}
++
++		entry = get_lock_parent(entry);
++		depth--;
++	} while (entry && (depth >= 0));
++}
++
+ static void
+ print_irq_lock_scenario(struct lock_list *safe_entry,
+ 			struct lock_list *unsafe_entry,
+@@ -2450,7 +2554,7 @@ print_bad_irq_dependency(struct task_struct *curr,
+ 	prev_root->trace = save_trace();
+ 	if (!prev_root->trace)
+ 		return;
+-	print_shortest_lock_dependencies(backwards_entry, prev_root);
++	print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
+ 
+ 	pr_warn("\nthe dependencies between the lock to be acquired");
+ 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
+@@ -2668,8 +2772,18 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
+ 	 * Step 3: we found a bad match! Now retrieve a lock from the backward
+ 	 * list whose usage mask matches the exclusive usage mask from the
+ 	 * lock found on the forward list.
++	 *
++	 * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
++	 * the following case:
++	 *
++	 * When trying to add A -> B to the graph, we find that there is a
++	 * hardirq-safe L such that L -> ... -> A, and another hardirq-unsafe M
++	 * such that B -> ... -> M. However, M is **softirq-safe**; if we used
++	 * the exact inverted bits of M's usage_mask, we would find another
++	 * lock N that is **softirq-unsafe** with N -> ... -> A, yet
++	 * N -> ... -> M will not cause an inversion deadlock.
+ 	 */
+-	backward_mask = original_mask(target_entry1->class->usage_mask);
++	backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
+ 
+ 	ret = find_usage_backwards(&this, backward_mask, &target_entry);
+ 	if (bfs_error(ret)) {
+@@ -4578,7 +4692,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
+ 	u8 curr_inner;
+ 	int depth;
+ 
+-	if (!curr->lockdep_depth || !next_inner || next->trylock)
++	if (!next_inner || next->trylock)
+ 		return 0;
+ 
+ 	if (!next_outer)
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 7356764e49a0b..a274622ed6fa2 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -2911,7 +2911,6 @@ static int __init rcu_spawn_core_kthreads(void)
+ 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
+ 	return 0;
+ }
+-early_initcall(rcu_spawn_core_kthreads);
+ 
+ /*
+  * Handle any core-RCU processing required by a call_rcu() invocation.
+@@ -4392,6 +4391,7 @@ static int __init rcu_spawn_gp_kthread(void)
+ 	wake_up_process(t);
+ 	rcu_spawn_nocb_kthreads();
+ 	rcu_spawn_boost_kthreads();
++	rcu_spawn_core_kthreads();
+ 	return 0;
+ }
+ early_initcall(rcu_spawn_gp_kthread);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 814200541f8f5..2b66c9a16cbe8 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1055,9 +1055,10 @@ static void uclamp_sync_util_min_rt_default(void)
+ static inline struct uclamp_se
+ uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
+ {
++	/* Copy by value as we could modify it */
+ 	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
+ #ifdef CONFIG_UCLAMP_TASK_GROUP
+-	struct uclamp_se uc_max;
++	unsigned int tg_min, tg_max, value;
+ 
+ 	/*
+ 	 * Tasks in autogroups or root task group will be
+@@ -1068,9 +1069,11 @@ uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
+ 	if (task_group(p) == &root_task_group)
+ 		return uc_req;
+ 
+-	uc_max = task_group(p)->uclamp[clamp_id];
+-	if (uc_req.value > uc_max.value || !uc_req.user_defined)
+-		return uc_max;
++	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
++	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
++	value = uc_req.value;
++	value = clamp(value, tg_min, tg_max);
++	uclamp_se_set(&uc_req, value, false);
+ #endif
+ 
+ 	return uc_req;
+@@ -1269,8 +1272,9 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
+ }
+ 
+ static inline void
+-uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
++uclamp_update_active(struct task_struct *p)
+ {
++	enum uclamp_id clamp_id;
+ 	struct rq_flags rf;
+ 	struct rq *rq;
+ 
+@@ -1290,9 +1294,11 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
+ 	 * affecting a valid clamp bucket, the next time it's enqueued,
+ 	 * it will already see the updated clamp bucket value.
+ 	 */
+-	if (p->uclamp[clamp_id].active) {
+-		uclamp_rq_dec_id(rq, p, clamp_id);
+-		uclamp_rq_inc_id(rq, p, clamp_id);
++	for_each_clamp_id(clamp_id) {
++		if (p->uclamp[clamp_id].active) {
++			uclamp_rq_dec_id(rq, p, clamp_id);
++			uclamp_rq_inc_id(rq, p, clamp_id);
++		}
+ 	}
+ 
+ 	task_rq_unlock(rq, p, &rf);
+@@ -1300,20 +1306,14 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
+ 
+ #ifdef CONFIG_UCLAMP_TASK_GROUP
+ static inline void
+-uclamp_update_active_tasks(struct cgroup_subsys_state *css,
+-			   unsigned int clamps)
++uclamp_update_active_tasks(struct cgroup_subsys_state *css)
+ {
+-	enum uclamp_id clamp_id;
+ 	struct css_task_iter it;
+ 	struct task_struct *p;
+ 
+ 	css_task_iter_start(css, 0, &it);
+-	while ((p = css_task_iter_next(&it))) {
+-		for_each_clamp_id(clamp_id) {
+-			if ((0x1 << clamp_id) & clamps)
+-				uclamp_update_active(p, clamp_id);
+-		}
+-	}
++	while ((p = css_task_iter_next(&it)))
++		uclamp_update_active(p);
+ 	css_task_iter_end(&it);
+ }
+ 
+@@ -1906,7 +1906,6 @@ static int migration_cpu_stop(void *data)
+ 	struct migration_arg *arg = data;
+ 	struct set_affinity_pending *pending = arg->pending;
+ 	struct task_struct *p = arg->task;
+-	int dest_cpu = arg->dest_cpu;
+ 	struct rq *rq = this_rq();
+ 	bool complete = false;
+ 	struct rq_flags rf;
+@@ -1939,19 +1938,15 @@ static int migration_cpu_stop(void *data)
+ 			if (p->migration_pending == pending)
+ 				p->migration_pending = NULL;
+ 			complete = true;
+-		}
+ 
+-		if (dest_cpu < 0) {
+ 			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
+ 				goto out;
+-
+-			dest_cpu = cpumask_any_distribute(&p->cpus_mask);
+ 		}
+ 
+ 		if (task_on_rq_queued(p))
+-			rq = __migrate_task(rq, &rf, p, dest_cpu);
++			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
+ 		else
+-			p->wake_cpu = dest_cpu;
++			p->wake_cpu = arg->dest_cpu;
+ 
+ 		/*
+ 		 * XXX __migrate_task() can fail, at which point we might end
+@@ -2230,7 +2225,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 			init_completion(&my_pending.done);
+ 			my_pending.arg = (struct migration_arg) {
+ 				.task = p,
+-				.dest_cpu = -1,		/* any */
++				.dest_cpu = dest_cpu,
+ 				.pending = &my_pending,
+ 			};
+ 
+@@ -2238,6 +2233,15 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 		} else {
+ 			pending = p->migration_pending;
+ 			refcount_inc(&pending->refs);
++			/*
++			 * Affinity has changed, but we've already installed a
++			 * pending. migration_cpu_stop() *must* see this, else
++			 * we risk a completion of the pending despite having a
++			 * task on a disallowed CPU.
++			 *
++			 * Serialized by p->pi_lock, so this is safe.
++			 */
++			pending->arg.dest_cpu = dest_cpu;
+ 		}
+ 	}
+ 	pending = p->migration_pending;
+@@ -7428,19 +7432,32 @@ void show_state_filter(unsigned long state_filter)
+  * NOTE: this function does not set the idle thread's NEED_RESCHED
+  * flag, to make booting more robust.
+  */
+-void init_idle(struct task_struct *idle, int cpu)
++void __init init_idle(struct task_struct *idle, int cpu)
+ {
+ 	struct rq *rq = cpu_rq(cpu);
+ 	unsigned long flags;
+ 
+ 	__sched_fork(0, idle);
+ 
++	/*
++	 * The idle task doesn't need the kthread struct to function, but it
++	 * is dressed up as a per-CPU kthread and thus needs to play the part
++	 * if we want to avoid special-casing it in code that deals with per-CPU
++	 * kthreads.
++	 */
++	set_kthread_struct(idle);
++
+ 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
+ 	raw_spin_lock(&rq->lock);
+ 
+ 	idle->state = TASK_RUNNING;
+ 	idle->se.exec_start = sched_clock();
+-	idle->flags |= PF_IDLE;
++	/*
++	 * PF_KTHREAD should already be set at this point; regardless, make it
++	 * look like a proper per-CPU kthread.
++	 */
++	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
++	kthread_set_per_cpu(idle, cpu);
+ 
+ 	scs_task_reset(idle);
+ 	kasan_unpoison_task_stack(idle);
+@@ -7647,12 +7664,8 @@ static void balance_push(struct rq *rq)
+ 	/*
+ 	 * Both the cpu-hotplug and stop task are in this case and are
+ 	 * required to complete the hotplug process.
+-	 *
+-	 * XXX: the idle task does not match kthread_is_per_cpu() due to
+-	 * histerical raisins.
+ 	 */
+-	if (rq->idle == push_task ||
+-	    kthread_is_per_cpu(push_task) ||
++	if (kthread_is_per_cpu(push_task) ||
+ 	    is_migration_disabled(push_task)) {
+ 
+ 		/*
+@@ -8671,7 +8684,11 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+ 
+ #ifdef CONFIG_UCLAMP_TASK_GROUP
+ 	/* Propagate the effective uclamp value for the new group */
++	mutex_lock(&uclamp_mutex);
++	rcu_read_lock();
+ 	cpu_util_update_eff(css);
++	rcu_read_unlock();
++	mutex_unlock(&uclamp_mutex);
+ #endif
+ 
+ 	return 0;
+@@ -8761,6 +8778,9 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
+ 	enum uclamp_id clamp_id;
+ 	unsigned int clamps;
+ 
++	lockdep_assert_held(&uclamp_mutex);
++	SCHED_WARN_ON(!rcu_read_lock_held());
++
+ 	css_for_each_descendant_pre(css, top_css) {
+ 		uc_parent = css_tg(css)->parent
+ 			? css_tg(css)->parent->uclamp : NULL;
+@@ -8793,7 +8813,7 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
+ 		}
+ 
+ 		/* Immediately update descendants RUNNABLE tasks */
+-		uclamp_update_active_tasks(css, clamps);
++		uclamp_update_active_tasks(css);
+ 	}
+ }
+ 
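
With the uclamp hunks above, a task's clamp request is restricted into
its task group's [min, max] window instead of being replaced wholesale.
A worked standalone example of that restriction (values hypothetical):

    #include <stdio.h>

    /* clamp the per-task request into the group window, as the new
     * uclamp_tg_restrict() does */
    static unsigned int tg_restrict(unsigned int req,
                                    unsigned int tg_min, unsigned int tg_max)
    {
        if (req < tg_min)
            return tg_min;
        if (req > tg_max)
            return tg_max;
        return req;
    }

    int main(void)
    {
        unsigned int reqs[] = { 0, 40, 100 };

        for (int i = 0; i < 3; i++)
            printf("req=%u -> eff=%u\n", reqs[i],
                   tg_restrict(reqs[i], 20, 60)); /* 20, 40, 60 */
        return 0;
    }
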
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index aac3539aa0fee..78b3bdcb84c1a 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2486,6 +2486,8 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
+ 			check_preempt_curr_dl(rq, p, 0);
+ 		else
+ 			resched_curr(rq);
++	} else {
++		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+ 	}
+ }
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 47fcc3fe9dc5a..20ac5dff9a0ce 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3134,7 +3134,7 @@ void reweight_task(struct task_struct *p, int prio)
+  *
+  *                     tg->weight * grq->load.weight
+  *   ge->load.weight = -----------------------------               (1)
+- *			  \Sum grq->load.weight
++ *                       \Sum grq->load.weight
+  *
+  * Now, because computing that sum is prohibitively expensive to compute (been
+  * there, done that) we approximate it with this average stuff. The average
+@@ -3148,7 +3148,7 @@ void reweight_task(struct task_struct *p, int prio)
+  *
+  *                     tg->weight * grq->avg.load_avg
+  *   ge->load.weight = ------------------------------              (3)
+- *				tg->load_avg
++ *                             tg->load_avg
+  *
+  * Where: tg->load_avg ~= \Sum grq->avg.load_avg
+  *
+@@ -3164,7 +3164,7 @@ void reweight_task(struct task_struct *p, int prio)
+  *
+  *                     tg->weight * grq->load.weight
+  *   ge->load.weight = ----------------------------- = tg->weight   (4)
+- *			    grp->load.weight
++ *                         grp->load.weight
+  *
+  * That is, the sum collapses because all other CPUs are idle; the UP scenario.
+  *
+@@ -3183,7 +3183,7 @@ void reweight_task(struct task_struct *p, int prio)
+  *
+  *                     tg->weight * grq->load.weight
+  *   ge->load.weight = -----------------------------		   (6)
+- *				tg_load_avg'
++ *                             tg_load_avg'
+  *
+  * Where:
+  *
+@@ -6564,8 +6564,11 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
+ 	struct cpumask *pd_mask = perf_domain_span(pd);
+ 	unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
+ 	unsigned long max_util = 0, sum_util = 0;
++	unsigned long _cpu_cap = cpu_cap;
+ 	int cpu;
+ 
++	_cpu_cap -= arch_scale_thermal_pressure(cpumask_first(pd_mask));
++
+ 	/*
+ 	 * The capacity state of CPUs of the current rd can be driven by CPUs
+ 	 * of another rd if they belong to the same pd. So, account for the
+@@ -6601,8 +6604,10 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
+ 		 * is already enough to scale the EM reported power
+ 		 * consumption at the (eventually clamped) cpu_capacity.
+ 		 */
+-		sum_util += effective_cpu_util(cpu, util_running, cpu_cap,
+-					       ENERGY_UTIL, NULL);
++		cpu_util = effective_cpu_util(cpu, util_running, cpu_cap,
++					      ENERGY_UTIL, NULL);
++
++		sum_util += min(cpu_util, _cpu_cap);
+ 
+ 		/*
+ 		 * Performance domain frequency: utilization clamping
+@@ -6613,7 +6618,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
+ 		 */
+ 		cpu_util = effective_cpu_util(cpu, util_freq, cpu_cap,
+ 					      FREQUENCY_UTIL, tsk);
+-		max_util = max(max_util, cpu_util);
++		max_util = max(max_util, min(cpu_util, _cpu_cap));
+ 	}
+ 
+ 	return em_cpu_energy(pd->em_pd, max_util, sum_util);
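
The compute_energy() change above caps both the summed and the maximum
utilization by the thermally reduced capacity, since demand above that
level cannot actually be served and would only inflate the energy
estimate. A standalone sketch of the clamp (values hypothetical):

    #include <stdio.h>

    static unsigned long capped_util(unsigned long util,
                                     unsigned long cpu_cap,
                                     unsigned long thermal_pressure)
    {
        unsigned long eff_cap = cpu_cap - thermal_pressure;

        return util < eff_cap ? util : eff_cap;
    }

    int main(void)
    {
        /* a 1024-capacity CPU throttled by 200, asked for util 950 */
        printf("%lu\n", capped_util(950, 1024, 200)); /* prints 824 */
        return 0;
    }
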
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index ef37acd28e4ac..37f02fdbb35a3 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -179,6 +179,8 @@ struct psi_group psi_system = {
+ 
+ static void psi_avgs_work(struct work_struct *work);
+ 
++static void poll_timer_fn(struct timer_list *t);
++
+ static void group_init(struct psi_group *group)
+ {
+ 	int cpu;
+@@ -198,6 +200,8 @@ static void group_init(struct psi_group *group)
+ 	memset(group->polling_total, 0, sizeof(group->polling_total));
+ 	group->polling_next_update = ULLONG_MAX;
+ 	group->polling_until = 0;
++	init_waitqueue_head(&group->poll_wait);
++	timer_setup(&group->poll_timer, poll_timer_fn, 0);
+ 	rcu_assign_pointer(group->poll_task, NULL);
+ }
+ 
+@@ -1142,9 +1146,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
+ 			return ERR_CAST(task);
+ 		}
+ 		atomic_set(&group->poll_wakeup, 0);
+-		init_waitqueue_head(&group->poll_wait);
+ 		wake_up_process(task);
+-		timer_setup(&group->poll_timer, poll_timer_fn, 0);
+ 		rcu_assign_pointer(group->poll_task, task);
+ 	}
+ 
+@@ -1196,6 +1198,7 @@ static void psi_trigger_destroy(struct kref *ref)
+ 					group->poll_task,
+ 					lockdep_is_held(&group->trigger_lock));
+ 			rcu_assign_pointer(group->poll_task, NULL);
++			del_timer(&group->poll_timer);
+ 		}
+ 	}
+ 
+@@ -1208,17 +1211,14 @@ static void psi_trigger_destroy(struct kref *ref)
+ 	 */
+ 	synchronize_rcu();
+ 	/*
+-	 * Destroy the kworker after releasing trigger_lock to prevent a
++	 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
+ 	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
+ 	 */
+ 	if (task_to_destroy) {
+ 		/*
+ 		 * After the RCU grace period has expired, the worker
+ 		 * can no longer be found through group->poll_task.
+-		 * But it might have been already scheduled before
+-		 * that - deschedule it cleanly before destroying it.
+ 		 */
+-		del_timer_sync(&group->poll_timer);
+ 		kthread_stop(task_to_destroy);
+ 	}
+ 	kfree(t);
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 8f720b71d13dd..e617287052d5c 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2331,13 +2331,20 @@ void __init init_sched_rt_class(void)
+ static void switched_to_rt(struct rq *rq, struct task_struct *p)
+ {
+ 	/*
+-	 * If we are already running, then there's nothing
+-	 * that needs to be done. But if we are not running
+-	 * we may need to preempt the current running task.
+-	 * If that current running task is also an RT task
++	 * If we are running, update the avg_rt tracking, as the running time
++	 * will from now on be accounted into the latter.
++	 */
++	if (task_current(rq, p)) {
++		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
++		return;
++	}
++
++	/*
++	 * If we are not running we may need to preempt the current
++	 * running task. If that current running task is also an RT task
+ 	 * then see if we can move to another run queue.
+ 	 */
+-	if (task_on_rq_queued(p) && rq->curr != p) {
++	if (task_on_rq_queued(p)) {
+ #ifdef CONFIG_SMP
+ 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+ 			rt_queue_push_tasks(rq);
+diff --git a/kernel/smpboot.c b/kernel/smpboot.c
+index f25208e8df836..e4163042c4d66 100644
+--- a/kernel/smpboot.c
++++ b/kernel/smpboot.c
+@@ -33,7 +33,6 @@ struct task_struct *idle_thread_get(unsigned int cpu)
+ 
+ 	if (!tsk)
+ 		return ERR_PTR(-ENOMEM);
+-	init_idle(tsk, cpu);
+ 	return tsk;
+ }
+ 
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 2e2e3f378d97f..cabfc5b861754 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -552,6 +552,10 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
+ 	if (retval < 0)
+ 		goto error;
+ 
++	retval = set_cred_ucounts(new);
++	if (retval < 0)
++		goto error;
++
+ 	return commit_creds(new);
+ 
+ error:
+@@ -610,6 +614,10 @@ long __sys_setuid(uid_t uid)
+ 	if (retval < 0)
+ 		goto error;
+ 
++	retval = set_cred_ucounts(new);
++	if (retval < 0)
++		goto error;
++
+ 	return commit_creds(new);
+ 
+ error:
+@@ -685,6 +693,10 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+ 	if (retval < 0)
+ 		goto error;
+ 
++	retval = set_cred_ucounts(new);
++	if (retval < 0)
++		goto error;
++
+ 	return commit_creds(new);
+ 
+ error:
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index cce484a2cc7ca..242997b71f2d1 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -124,6 +124,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating);
+ #define WATCHDOG_INTERVAL (HZ >> 1)
+ #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
+ 
++/*
++ * Maximum permissible delay between two readouts of the watchdog
++ * clocksource surrounding a read of the clocksource being validated.
++ * This delay could be due to SMIs, NMIs, or VCPU preemptions.
++ */
++#define WATCHDOG_MAX_SKEW (100 * NSEC_PER_USEC)
++
+ static void clocksource_watchdog_work(struct work_struct *work)
+ {
+ 	/*
+@@ -184,12 +191,99 @@ void clocksource_mark_unstable(struct clocksource *cs)
+ 	spin_unlock_irqrestore(&watchdog_lock, flags);
+ }
+ 
++static ulong max_cswd_read_retries = 3;
++module_param(max_cswd_read_retries, ulong, 0644);
++
++static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
++{
++	unsigned int nretries;
++	u64 wd_end, wd_delta;
++	int64_t wd_delay;
++
++	for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
++		local_irq_disable();
++		*wdnow = watchdog->read(watchdog);
++		*csnow = cs->read(cs);
++		wd_end = watchdog->read(watchdog);
++		local_irq_enable();
++
++		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
++		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
++					      watchdog->shift);
++		if (wd_delay <= WATCHDOG_MAX_SKEW) {
++			if (nretries > 1 || nretries >= max_cswd_read_retries) {
++				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
++					smp_processor_id(), watchdog->name, nretries);
++			}
++			return true;
++		}
++	}
++
++	pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
++		smp_processor_id(), watchdog->name, wd_delay, nretries);
++	return false;
++}
++
++static u64 csnow_mid;
++static cpumask_t cpus_ahead;
++static cpumask_t cpus_behind;
++
++static void clocksource_verify_one_cpu(void *csin)
++{
++	struct clocksource *cs = (struct clocksource *)csin;
++
++	csnow_mid = cs->read(cs);
++}
++
++static void clocksource_verify_percpu(struct clocksource *cs)
++{
++	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
++	u64 csnow_begin, csnow_end;
++	int cpu, testcpu;
++	s64 delta;
++
++	cpumask_clear(&cpus_ahead);
++	cpumask_clear(&cpus_behind);
++	preempt_disable();
++	testcpu = smp_processor_id();
++	pr_warn("Checking clocksource %s synchronization from CPU %d.\n", cs->name, testcpu);
++	for_each_online_cpu(cpu) {
++		if (cpu == testcpu)
++			continue;
++		csnow_begin = cs->read(cs);
++		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
++		csnow_end = cs->read(cs);
++		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
++		if (delta < 0)
++			cpumask_set_cpu(cpu, &cpus_behind);
++		delta = (csnow_end - csnow_mid) & cs->mask;
++		if (delta < 0)
++			cpumask_set_cpu(cpu, &cpus_ahead);
++		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
++		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
++		if (cs_nsec > cs_nsec_max)
++			cs_nsec_max = cs_nsec;
++		if (cs_nsec < cs_nsec_min)
++			cs_nsec_min = cs_nsec;
++	}
++	preempt_enable();
++	if (!cpumask_empty(&cpus_ahead))
++		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
++			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
++	if (!cpumask_empty(&cpus_behind))
++		pr_warn("        CPUs %*pbl behind CPU %d for clocksource %s.\n",
++			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
++	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
++		pr_warn("        CPU %d check durations %lldns - %lldns for clocksource %s.\n",
++			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
++}
++
+ static void clocksource_watchdog(struct timer_list *unused)
+ {
+-	struct clocksource *cs;
+ 	u64 csnow, wdnow, cslast, wdlast, delta;
+-	int64_t wd_nsec, cs_nsec;
+ 	int next_cpu, reset_pending;
++	int64_t wd_nsec, cs_nsec;
++	struct clocksource *cs;
+ 
+ 	spin_lock(&watchdog_lock);
+ 	if (!watchdog_running)
+@@ -206,10 +300,11 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 			continue;
+ 		}
+ 
+-		local_irq_disable();
+-		csnow = cs->read(cs);
+-		wdnow = watchdog->read(watchdog);
+-		local_irq_enable();
++		if (!cs_watchdog_read(cs, &csnow, &wdnow)) {
++			/* Clock readout unreliable, so give it up. */
++			__clocksource_unstable(cs);
++			continue;
++		}
+ 
+ 		/* Clocksource initialized ? */
+ 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
+@@ -407,6 +502,12 @@ static int __clocksource_watchdog_kthread(void)
+ 	unsigned long flags;
+ 	int select = 0;
+ 
++	/* Do any required per-CPU skew verification. */
++	if (curr_clocksource &&
++	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
++	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
++		clocksource_verify_percpu(curr_clocksource);
++
+ 	spin_lock_irqsave(&watchdog_lock, flags);
+ 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
+ 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
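
cs_watchdog_read() above brackets each clocksource read between two
watchdog reads and retries when the bracket itself took too long, since
a delay inside the bracket (SMI, NMI, vCPU preemption) makes the sample
meaningless. The same pattern in plain POSIX userspace, with
CLOCK_MONOTONIC and CLOCK_REALTIME merely standing in for the watchdog
and the clocksource under test:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define MAX_SKEW_NS 100000 /* 100 us, like WATCHDOG_MAX_SKEW */
    #define MAX_RETRIES 3

    static int64_t now_ns(clockid_t id)
    {
        struct timespec ts;

        clock_gettime(id, &ts);
        return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
    }

    static bool sample_clock(int64_t *cs, int64_t *wd)
    {
        for (int i = 0; i <= MAX_RETRIES; i++) {
            *wd = now_ns(CLOCK_MONOTONIC);
            *cs = now_ns(CLOCK_REALTIME);
            int64_t wd_end = now_ns(CLOCK_MONOTONIC);

            if (wd_end - *wd <= MAX_SKEW_NS)
                return true; /* tight bracket: sample is usable */
        }
        return false; /* persistently delayed: treat as unreliable */
    }

    int main(void)
    {
        int64_t cs, wd;

        puts(sample_clock(&cs, &wd) ? "ok" : "unreliable");
        return 0;
    }
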
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 9bb3d2823f442..80fbee47194ba 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -2143,7 +2143,8 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
+ 	if (prog->aux->max_tp_access > btp->writable_size)
+ 		return -EINVAL;
+ 
+-	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
++	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
++						   prog);
+ }
+ 
+ int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 39ebe1826fc38..a696120b9f1ee 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1539,6 +1539,13 @@ static int contains_operator(char *str)
+ 
+ 	switch (*op) {
+ 	case '-':
++		/*
++		 * Unfortunately, the modifier ".sym-offset"
++		 * can confuse things.
++		 */
++		if (op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11))
++			return FIELD_OP_NONE;
++
+ 		if (*str == '-')
+ 			field_op = FIELD_OP_UNARY_MINUS;
+ 		else
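
The new guard above recognizes when a '-' found while scanning for
operators is really part of the ".sym-offset" field modifier. A
standalone demonstration of the pointer check, using strrchr() purely
for illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *str = "next_pid.sym-offset";
        const char *op = strrchr(str, '-');

        /* op - 4 points at ".sym", so the whole modifier matches */
        if (op && op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11))
            printf("modifier, not a subtraction\n");
        return 0;
    }
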
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index 9f478d29b9264..976bf8ce80396 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -273,7 +273,8 @@ static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func
+  * Add the probe function to a tracepoint.
+  */
+ static int tracepoint_add_func(struct tracepoint *tp,
+-			       struct tracepoint_func *func, int prio)
++			       struct tracepoint_func *func, int prio,
++			       bool warn)
+ {
+ 	struct tracepoint_func *old, *tp_funcs;
+ 	int ret;
+@@ -288,7 +289,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
+ 			lockdep_is_held(&tracepoints_mutex));
+ 	old = func_add(&tp_funcs, func, prio);
+ 	if (IS_ERR(old)) {
+-		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
++		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
+ 		return PTR_ERR(old);
+ 	}
+ 
+@@ -343,6 +344,32 @@ static int tracepoint_remove_func(struct tracepoint *tp,
+ 	return 0;
+ }
+ 
++/**
++ * tracepoint_probe_register_prio_may_exist -  Connect a probe to a tracepoint with priority
++ * @tp: tracepoint
++ * @probe: probe handler
++ * @data: tracepoint data
++ * @prio: priority of this function over other registered functions
++ *
++ * Same as tracepoint_probe_register_prio() except that it will not warn
++ * if the tracepoint is already registered.
++ */
++int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
++					     void *data, int prio)
++{
++	struct tracepoint_func tp_func;
++	int ret;
++
++	mutex_lock(&tracepoints_mutex);
++	tp_func.func = probe;
++	tp_func.data = data;
++	tp_func.prio = prio;
++	ret = tracepoint_add_func(tp, &tp_func, prio, false);
++	mutex_unlock(&tracepoints_mutex);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
++
+ /**
+  * tracepoint_probe_register_prio -  Connect a probe to a tracepoint with priority
+  * @tp: tracepoint
+@@ -366,7 +393,7 @@ int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
+ 	tp_func.func = probe;
+ 	tp_func.data = data;
+ 	tp_func.prio = prio;
+-	ret = tracepoint_add_func(tp, &tp_func, prio);
++	ret = tracepoint_add_func(tp, &tp_func, prio, true);
+ 	mutex_unlock(&tracepoints_mutex);
+ 	return ret;
+ }
+diff --git a/kernel/ucount.c b/kernel/ucount.c
+index 11b1596e2542a..9894795043c42 100644
+--- a/kernel/ucount.c
++++ b/kernel/ucount.c
+@@ -8,6 +8,12 @@
+ #include <linux/kmemleak.h>
+ #include <linux/user_namespace.h>
+ 
++struct ucounts init_ucounts = {
++	.ns    = &init_user_ns,
++	.uid   = GLOBAL_ROOT_UID,
++	.count = 1,
++};
++
+ #define UCOUNTS_HASHTABLE_BITS 10
+ static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
+ static DEFINE_SPINLOCK(ucounts_lock);
+@@ -125,7 +131,15 @@ static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struc
+ 	return NULL;
+ }
+ 
+-static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
++static void hlist_add_ucounts(struct ucounts *ucounts)
++{
++	struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);
++	spin_lock_irq(&ucounts_lock);
++	hlist_add_head(&ucounts->node, hashent);
++	spin_unlock_irq(&ucounts_lock);
++}
++
++struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
+ {
+ 	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
+ 	struct ucounts *ucounts, *new;
+@@ -160,7 +174,26 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
+ 	return ucounts;
+ }
+ 
+-static void put_ucounts(struct ucounts *ucounts)
++struct ucounts *get_ucounts(struct ucounts *ucounts)
++{
++	unsigned long flags;
++
++	if (!ucounts)
++		return NULL;
++
++	spin_lock_irqsave(&ucounts_lock, flags);
++	if (ucounts->count == INT_MAX) {
++		WARN_ONCE(1, "ucounts: counter has reached its maximum value");
++		ucounts = NULL;
++	} else {
++		ucounts->count += 1;
++	}
++	spin_unlock_irqrestore(&ucounts_lock, flags);
++
++	return ucounts;
++}
++
++void put_ucounts(struct ucounts *ucounts)
+ {
+ 	unsigned long flags;
+ 
+@@ -194,7 +227,7 @@ struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
+ {
+ 	struct ucounts *ucounts, *iter, *bad;
+ 	struct user_namespace *tns;
+-	ucounts = get_ucounts(ns, uid);
++	ucounts = alloc_ucounts(ns, uid);
+ 	for (iter = ucounts; iter; iter = tns->ucounts) {
+ 		int max;
+ 		tns = iter->ns;
+@@ -237,6 +270,7 @@ static __init int user_namespace_sysctl_init(void)
+ 	BUG_ON(!user_header);
+ 	BUG_ON(!setup_userns_sysctls(&init_user_ns));
+ #endif
++	hlist_add_ucounts(&init_ucounts);
+ 	return 0;
+ }
+ subsys_initcall(user_namespace_sysctl_init);
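
The exported get_ucounts()/put_ucounts() pair above turns the bare
counter into a lock-protected, saturating reference count: a get fails
rather than wrapping past INT_MAX. A compact userspace analogue, with a
pthread mutex standing in for the irq-safe spinlock (names
hypothetical):

    #include <limits.h>
    #include <pthread.h>
    #include <stdio.h>

    struct toy_ucounts {
        pthread_mutex_t lock;
        int count;
    };

    static struct toy_ucounts *toy_get(struct toy_ucounts *uc)
    {
        struct toy_ucounts *ret = uc;

        pthread_mutex_lock(&uc->lock);
        if (uc->count == INT_MAX)
            ret = NULL; /* saturated: refuse the reference, never wrap */
        else
            uc->count++;
        pthread_mutex_unlock(&uc->lock);
        return ret;
    }

    static void toy_put(struct toy_ucounts *uc)
    {
        pthread_mutex_lock(&uc->lock);
        uc->count--;
        pthread_mutex_unlock(&uc->lock);
    }

    int main(void)
    {
        struct toy_ucounts uc = { PTHREAD_MUTEX_INITIALIZER, 1 };

        if (toy_get(&uc))
            toy_put(&uc);
        printf("count=%d\n", uc.count); /* prints count=1 */
        return 0;
    }
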
+diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
+index 9a4b980d695b8..f1b7b4b8ffa25 100644
+--- a/kernel/user_namespace.c
++++ b/kernel/user_namespace.c
+@@ -1340,6 +1340,9 @@ static int userns_install(struct nsset *nsset, struct ns_common *ns)
+ 	put_user_ns(cred->user_ns);
+ 	set_cred_user_ns(cred, get_user_ns(user_ns));
+ 
++	if (set_cred_ucounts(cred) < 0)
++		return -EINVAL;
++
+ 	return 0;
+ }
+ 
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 417c3d3e521bf..5c9f528dd46d1 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1363,7 +1363,6 @@ config LOCKDEP
+ 	bool
+ 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ 	select STACKTRACE
+-	depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
+ 	select KALLSYMS
+ 	select KALLSYMS_ALL
+ 
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index f66c62aa7154d..9a76f676d2482 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -432,7 +432,7 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+ 	int err;
+ 	struct iovec v;
+ 
+-	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
++	if (iter_is_iovec(i)) {
+ 		iterate_iovec(i, bytes, v, iov, skip, ({
+ 			err = fault_in_pages_readable(v.iov_base, v.iov_len);
+ 			if (unlikely(err))
+@@ -906,9 +906,12 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ 		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
+ 		kunmap_atomic(kaddr);
+ 		return wanted;
+-	} else if (unlikely(iov_iter_is_discard(i)))
++	} else if (unlikely(iov_iter_is_discard(i))) {
++		if (unlikely(i->count < bytes))
++			bytes = i->count;
++		i->count -= bytes;
+ 		return bytes;
+-	else if (likely(!iov_iter_is_pipe(i)))
++	} else if (likely(!iov_iter_is_pipe(i)))
+ 		return copy_page_to_iter_iovec(page, offset, bytes, i);
+ 	else
+ 		return copy_page_to_iter_pipe(page, offset, bytes, i);
+diff --git a/lib/kstrtox.c b/lib/kstrtox.c
+index a118b0b1e9b2c..0b5fe8b411732 100644
+--- a/lib/kstrtox.c
++++ b/lib/kstrtox.c
+@@ -39,20 +39,22 @@ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
+ 
+ /*
+  * Convert non-negative integer string representation in explicitly given radix
+- * to an integer.
++ * to an integer. A maximum of max_chars characters will be converted.
++ *
+  * Return number of characters consumed maybe or-ed with overflow bit.
+  * If overflow occurs, result integer (incorrect) is still returned.
+  *
+  * Don't you dare use this function.
+  */
+-unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
++unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p,
++				  size_t max_chars)
+ {
+ 	unsigned long long res;
+ 	unsigned int rv;
+ 
+ 	res = 0;
+ 	rv = 0;
+-	while (1) {
++	while (max_chars--) {
+ 		unsigned int c = *s;
+ 		unsigned int lc = c | 0x20; /* don't tolower() this line */
+ 		unsigned int val;
+@@ -82,6 +84,11 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
+ 	return rv;
+ }
+ 
++unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
++{
++	return _parse_integer_limit(s, base, p, INT_MAX);
++}
++
+ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
+ {
+ 	unsigned long long _res;
+diff --git a/lib/kstrtox.h b/lib/kstrtox.h
+index 3b4637bcd2540..158c400ca8658 100644
+--- a/lib/kstrtox.h
++++ b/lib/kstrtox.h
+@@ -4,6 +4,8 @@
+ 
+ #define KSTRTOX_OVERFLOW	(1U << 31)
+ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base);
++unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *res,
++				  size_t max_chars);
+ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res);
+ 
+ #endif
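
_parse_integer_limit() bounds how many characters the parser may
consume, which is what makes fixed-width conversions possible. A toy
decimal-only model of the max_chars behaviour (not the kernel routine):

    #include <stddef.h>
    #include <stdio.h>

    static unsigned int toy_parse_limit(const char *s, unsigned long long *p,
                                        size_t max_chars)
    {
        unsigned int used = 0;

        *p = 0;
        while (max_chars-- && s[used] >= '0' && s[used] <= '9') {
            *p = *p * 10 + (unsigned)(s[used] - '0');
            used++;
        }
        return used; /* characters consumed */
    }

    int main(void)
    {
        unsigned long long v;
        unsigned int n = toy_parse_limit("123456", &v, 4);

        printf("consumed=%u value=%llu\n", n, v); /* consumed=4 value=1234 */
        return 0;
    }
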
+diff --git a/lib/kunit/test.c b/lib/kunit/test.c
+index ec9494e914ef3..c2b7248ebc9eb 100644
+--- a/lib/kunit/test.c
++++ b/lib/kunit/test.c
+@@ -343,7 +343,7 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
+ 	context.test_case = test_case;
+ 	kunit_try_catch_run(try_catch, &context);
+ 
+-	test_case->success = test->success;
++	test_case->success &= test->success;
+ }
+ 
+ int kunit_run_tests(struct kunit_suite *suite)
+@@ -355,7 +355,7 @@ int kunit_run_tests(struct kunit_suite *suite)
+ 
+ 	kunit_suite_for_each_test_case(suite, test_case) {
+ 		struct kunit test = { .param_value = NULL, .param_index = 0 };
+-		bool test_success = true;
++		test_case->success = true;
+ 
+ 		if (test_case->generate_params) {
+ 			/* Get initial param. */
+@@ -365,7 +365,6 @@ int kunit_run_tests(struct kunit_suite *suite)
+ 
+ 		do {
+ 			kunit_run_case_catch_errors(suite, test_case, &test);
+-			test_success &= test_case->success;
+ 
+ 			if (test_case->generate_params) {
+ 				if (param_desc[0] == '\0') {
+@@ -387,7 +386,7 @@ int kunit_run_tests(struct kunit_suite *suite)
+ 			}
+ 		} while (test.param_value);
+ 
+-		kunit_print_ok_not_ok(&test, true, test_success,
++		kunit_print_ok_not_ok(&test, true, test_case->success,
+ 				      kunit_test_case_num(suite, test_case),
+ 				      test_case->name);
+ 	}
+diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
+index 2d85abac17448..0f6b262e09648 100644
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -194,6 +194,7 @@ static void init_shared_classes(void)
+ #define HARDIRQ_ENTER()				\
+ 	local_irq_disable();			\
+ 	__irq_enter();				\
++	lockdep_hardirq_threaded();		\
+ 	WARN_ON(!in_irq());
+ 
+ #define HARDIRQ_EXIT()				\
+diff --git a/lib/math/rational.c b/lib/math/rational.c
+index 9781d521963d1..c0ab51d8fbb98 100644
+--- a/lib/math/rational.c
++++ b/lib/math/rational.c
+@@ -12,6 +12,7 @@
+ #include <linux/compiler.h>
+ #include <linux/export.h>
+ #include <linux/minmax.h>
++#include <linux/limits.h>
+ 
+ /*
+  * calculate best rational approximation for a given fraction
+@@ -78,13 +79,18 @@ void rational_best_approximation(
+ 		 * found below as 't'.
+ 		 */
+ 		if ((n2 > max_numerator) || (d2 > max_denominator)) {
+-			unsigned long t = min((max_numerator - n0) / n1,
+-					      (max_denominator - d0) / d1);
++			unsigned long t = ULONG_MAX;
+ 
+-			/* This tests if the semi-convergent is closer
+-			 * than the previous convergent.
++			if (d1)
++				t = (max_denominator - d0) / d1;
++			if (n1)
++				t = min(t, (max_numerator - n0) / n1);
++
++			/* This tests if the semi-convergent is closer than the previous
++			 * convergent.  If d1 is zero there is no previous convergent as this
++			 * is the 1st iteration, so always choose the semi-convergent.
+ 			 */
+-			if (2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
++			if (!d1 || 2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
+ 				n1 = n0 + t * n1;
+ 				d1 = d0 + t * d1;
+ 			}
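
A worked example of the divide-by-zero fixed above, to the best of my
reading of lib/math/rational.c: the walk is seeded with n0 = 0, n1 = 1,
d0 = 1, d1 = 0, so on the first iteration the old min() expression divided
by d1 == 0 whenever the bounds were already exceeded.

	unsigned long n, d;

	/*
	 * 5/1 with both bounds at 4: n2 = 5 exceeds max_numerator on the
	 * first iteration, where d1 is still 0.  The old code evaluated
	 * (max_denominator - d0) / d1 and faulted; the guarded version
	 * takes the semi-convergent and returns n = 4, d = 1.
	 */
	rational_best_approximation(5, 1, 4, 4, &n, &d);
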
+diff --git a/lib/seq_buf.c b/lib/seq_buf.c
+index 707453f5d58ee..89c26c393bdba 100644
+--- a/lib/seq_buf.c
++++ b/lib/seq_buf.c
+@@ -243,12 +243,14 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
+ 			break;
+ 
+ 		/* j increments twice per loop */
+-		len -= j / 2;
+ 		hex[j++] = ' ';
+ 
+ 		seq_buf_putmem(s, hex, j);
+ 		if (seq_buf_has_overflowed(s))
+ 			return -1;
++
++		len -= start_len;
++		data += start_len;
+ 	}
+ 	return 0;
+ }
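
A minimal user-space sketch of the chunking shape the seq_buf fix above
establishes; hexdump_rows is a hypothetical name. The remaining length and
the source pointer must advance in lock step, whereas the old code
decremented len but left data in place, so every row re-dumped the first
bytes of the buffer.

#include <stdio.h>

/* row must be > 0 */
static void hexdump_rows(const unsigned char *data, size_t len, size_t row)
{
	while (len) {
		size_t start_len = len < row ? len : row;
		size_t i;

		for (i = 0; i < start_len; i++)
			printf("%02x", data[i]);
		putchar('\n');

		len -= start_len;	/* advance both ...        */
		data += start_len;	/* ... by the same amount  */
	}
}
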
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 39ef2e314da5e..9d6722199390f 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -53,6 +53,31 @@
+ #include <linux/string_helpers.h>
+ #include "kstrtox.h"
+ 
++static unsigned long long simple_strntoull(const char *startp, size_t max_chars,
++					   char **endp, unsigned int base)
++{
++	const char *cp;
++	unsigned long long result = 0ULL;
++	size_t prefix_chars;
++	unsigned int rv;
++
++	cp = _parse_integer_fixup_radix(startp, &base);
++	prefix_chars = cp - startp;
++	if (prefix_chars < max_chars) {
++		rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
++		/* FIXME */
++		cp += (rv & ~KSTRTOX_OVERFLOW);
++	} else {
++		/* Field too short for prefix + digit, skip over without converting */
++		cp = startp + max_chars;
++	}
++
++	if (endp)
++		*endp = (char *)cp;
++
++	return result;
++}
++
+ /**
+  * simple_strtoull - convert a string to an unsigned long long
+  * @cp: The start of the string
+@@ -63,18 +88,7 @@
+  */
+ unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
+ {
+-	unsigned long long result;
+-	unsigned int rv;
+-
+-	cp = _parse_integer_fixup_radix(cp, &base);
+-	rv = _parse_integer(cp, base, &result);
+-	/* FIXME */
+-	cp += (rv & ~KSTRTOX_OVERFLOW);
+-
+-	if (endp)
+-		*endp = (char *)cp;
+-
+-	return result;
++	return simple_strntoull(cp, INT_MAX, endp, base);
+ }
+ EXPORT_SYMBOL(simple_strtoull);
+ 
+@@ -109,6 +123,21 @@ long simple_strtol(const char *cp, char **endp, unsigned int base)
+ }
+ EXPORT_SYMBOL(simple_strtol);
+ 
++static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
++				 unsigned int base)
++{
++	/*
++	 * If cp[0] == '-' && max_chars == 1, the recursion below passes
++	 * max_chars == 0, which simple_strntoull() handles safely.
++	 * If max_chars == 0 on entry, we fall through to simple_strntoull(),
++	 * which parses nothing, so the content of *cp is irrelevant.
++	 */
++	if (*cp == '-' && max_chars > 0)
++		return -simple_strntoull(cp + 1, max_chars - 1, endp, base);
++
++	return simple_strntoull(cp, max_chars, endp, base);
++}
++
+ /**
+  * simple_strtoll - convert a string to a signed long long
+  * @cp: The start of the string
+@@ -119,10 +148,7 @@ EXPORT_SYMBOL(simple_strtol);
+  */
+ long long simple_strtoll(const char *cp, char **endp, unsigned int base)
+ {
+-	if (*cp == '-')
+-		return -simple_strtoull(cp + 1, endp, base);
+-
+-	return simple_strtoull(cp, endp, base);
++	return simple_strntoll(cp, INT_MAX, endp, base);
+ }
+ EXPORT_SYMBOL(simple_strtoll);
+ 
+@@ -3475,25 +3501,13 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
+ 			break;
+ 
+ 		if (is_sign)
+-			val.s = qualifier != 'L' ?
+-				simple_strtol(str, &next, base) :
+-				simple_strtoll(str, &next, base);
++			val.s = simple_strntoll(str,
++						field_width >= 0 ? field_width : INT_MAX,
++						&next, base);
+ 		else
+-			val.u = qualifier != 'L' ?
+-				simple_strtoul(str, &next, base) :
+-				simple_strtoull(str, &next, base);
+-
+-		if (field_width > 0 && next - str > field_width) {
+-			if (base == 0)
+-				_parse_integer_fixup_radix(str, &base);
+-			while (next - str > field_width) {
+-				if (is_sign)
+-					val.s = div_s64(val.s, base);
+-				else
+-					val.u = div_u64(val.u, base);
+-				--next;
+-			}
+-		}
++			val.u = simple_strntoull(str,
++						 field_width >= 0 ? field_width : INT_MAX,
++						 &next, base);
+ 
+ 		switch (qualifier) {
+ 		case 'H':	/* that's 'hh' in format */
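
An illustrative demo of the field-width semantics the vsscanf rewrite above
adopts: bounded parsing consumes at most field_width characters up front,
instead of parsing the whole digit run and dividing the value back down,
which went wrong once the intermediate value had already overflowed.

#include <stdio.h>

static void field_width_demo(void)
{
	int a = 0, b = 0;

	/* Each %3d conversion consumes at most three characters. */
	if (sscanf("123456", "%3d%3d", &a, &b) == 2)
		printf("a=%d b=%d\n", a, b);	/* prints: a=123 b=456 */
}
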
+diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
+index 726fd2030f645..12ebc97e8b435 100644
+--- a/mm/debug_vm_pgtable.c
++++ b/mm/debug_vm_pgtable.c
+@@ -146,13 +146,14 @@ static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
+ static void __init pmd_basic_tests(unsigned long pfn, int idx)
+ {
+ 	pgprot_t prot = protection_map[idx];
+-	pmd_t pmd = pfn_pmd(pfn, prot);
+ 	unsigned long val = idx, *ptr = &val;
++	pmd_t pmd;
+ 
+ 	if (!has_transparent_hugepage())
+ 		return;
+ 
+ 	pr_debug("Validating PMD basic (%pGv)\n", ptr);
++	pmd = pfn_pmd(pfn, prot);
+ 
+ 	/*
+ 	 * This test needs to be executed after the given page table entry
+@@ -185,7 +186,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
+ 				      unsigned long pfn, unsigned long vaddr,
+ 				      pgprot_t prot, pgtable_t pgtable)
+ {
+-	pmd_t pmd = pfn_pmd(pfn, prot);
++	pmd_t pmd;
+ 
+ 	if (!has_transparent_hugepage())
+ 		return;
+@@ -232,9 +233,14 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
+ 
+ static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
+ {
+-	pmd_t pmd = pfn_pmd(pfn, prot);
++	pmd_t pmd;
++
++	if (!has_transparent_hugepage())
++		return;
+ 
+ 	pr_debug("Validating PMD leaf\n");
++	pmd = pfn_pmd(pfn, prot);
++
+ 	/*
+ 	 * PMD based THP is a leaf entry.
+ 	 */
+@@ -267,12 +273,16 @@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
+ 
+ static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
+ {
+-	pmd_t pmd = pfn_pmd(pfn, prot);
++	pmd_t pmd;
+ 
+ 	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
+ 		return;
+ 
++	if (!has_transparent_hugepage())
++		return;
++
+ 	pr_debug("Validating PMD saved write\n");
++	pmd = pfn_pmd(pfn, prot);
+ 	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
+ 	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
+ }
+@@ -281,13 +291,14 @@ static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
+ static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
+ {
+ 	pgprot_t prot = protection_map[idx];
+-	pud_t pud = pfn_pud(pfn, prot);
+ 	unsigned long val = idx, *ptr = &val;
++	pud_t pud;
+ 
+ 	if (!has_transparent_hugepage())
+ 		return;
+ 
+ 	pr_debug("Validating PUD basic (%pGv)\n", ptr);
++	pud = pfn_pud(pfn, prot);
+ 
+ 	/*
+ 	 * This test needs to be executed after the given page table entry
+@@ -323,7 +334,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
+ 				      unsigned long pfn, unsigned long vaddr,
+ 				      pgprot_t prot)
+ {
+-	pud_t pud = pfn_pud(pfn, prot);
++	pud_t pud;
+ 
+ 	if (!has_transparent_hugepage())
+ 		return;
+@@ -332,6 +343,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
+ 	/* Align the address wrt HPAGE_PUD_SIZE */
+ 	vaddr &= HPAGE_PUD_MASK;
+ 
++	pud = pfn_pud(pfn, prot);
+ 	set_pud_at(mm, vaddr, pudp, pud);
+ 	pudp_set_wrprotect(mm, vaddr, pudp);
+ 	pud = READ_ONCE(*pudp);
+@@ -370,9 +382,13 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
+ 
+ static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
+ {
+-	pud_t pud = pfn_pud(pfn, prot);
++	pud_t pud;
++
++	if (!has_transparent_hugepage())
++		return;
+ 
+ 	pr_debug("Validating PUD leaf\n");
++	pud = pfn_pud(pfn, prot);
+ 	/*
+ 	 * PUD based THP is a leaf entry.
+ 	 */
+@@ -654,12 +670,16 @@ static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
+ {
+-	pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
++	pmd_t pmd;
+ 
+ 	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
+ 		return;
+ 
++	if (!has_transparent_hugepage())
++		return;
++
+ 	pr_debug("Validating PMD protnone\n");
++	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
+ 	WARN_ON(!pmd_protnone(pmd));
+ 	WARN_ON(!pmd_present(pmd));
+ }
+@@ -679,18 +699,26 @@ static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
+ {
+-	pmd_t pmd = pfn_pmd(pfn, prot);
++	pmd_t pmd;
++
++	if (!has_transparent_hugepage())
++		return;
+ 
+ 	pr_debug("Validating PMD devmap\n");
++	pmd = pfn_pmd(pfn, prot);
+ 	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
+ }
+ 
+ #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
+ {
+-	pud_t pud = pfn_pud(pfn, prot);
++	pud_t pud;
++
++	if (!has_transparent_hugepage())
++		return;
+ 
+ 	pr_debug("Validating PUD devmap\n");
++	pud = pfn_pud(pfn, prot);
+ 	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
+ }
+ #else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+@@ -733,25 +761,33 @@ static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
+ {
+-	pmd_t pmd = pfn_pmd(pfn, prot);
++	pmd_t pmd;
+ 
+ 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
+ 		return;
+ 
++	if (!has_transparent_hugepage())
++		return;
++
+ 	pr_debug("Validating PMD soft dirty\n");
++	pmd = pfn_pmd(pfn, prot);
+ 	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
+ 	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
+ }
+ 
+ static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
+ {
+-	pmd_t pmd = pfn_pmd(pfn, prot);
++	pmd_t pmd;
+ 
+ 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
+ 		!IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
+ 		return;
+ 
++	if (!has_transparent_hugepage())
++		return;
++
+ 	pr_debug("Validating PMD swap soft dirty\n");
++	pmd = pfn_pmd(pfn, prot);
+ 	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
+ 	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
+ }
+@@ -780,6 +816,9 @@ static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
+ 	swp_entry_t swp;
+ 	pmd_t pmd;
+ 
++	if (!has_transparent_hugepage())
++		return;
++
+ 	pr_debug("Validating PMD swap\n");
+ 	pmd = pfn_pmd(pfn, prot);
+ 	swp = __pmd_to_swp_entry(pmd);
+diff --git a/mm/gup.c b/mm/gup.c
+index 4164a70160e31..4ad276859bbda 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -44,6 +44,23 @@ static void hpage_pincount_sub(struct page *page, int refs)
+ 	atomic_sub(refs, compound_pincount_ptr(page));
+ }
+ 
++/* Equivalent to calling put_page() @refs times. */
++static void put_page_refs(struct page *page, int refs)
++{
++#ifdef CONFIG_DEBUG_VM
++	if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
++		return;
++#endif
++
++	/*
++	 * Calling put_page() for each ref is unnecessarily slow. Only the last
++	 * ref needs a put_page().
++	 */
++	if (refs > 1)
++		page_ref_sub(page, refs - 1);
++	put_page(page);
++}
++
+ /*
+  * Return the compound head page with ref appropriately incremented,
+  * or NULL if that failed.
+@@ -56,6 +73,21 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
+ 		return NULL;
+ 	if (unlikely(!page_cache_add_speculative(head, refs)))
+ 		return NULL;
++
++	/*
++	 * At this point we have a stable reference to the head page; but it
++	 * could be that between the compound_head() lookup and the refcount
++	 * increment, the compound page was split, in which case we'd end up
++	 * holding a reference on a page that has nothing to do with the page
++	 * we were given anymore.
++	 * So now that the head page is stable, recheck that the pages still
++	 * belong together.
++	 */
++	if (unlikely(compound_head(page) != head)) {
++		put_page_refs(head, refs);
++		return NULL;
++	}
++
+ 	return head;
+ }
+ 
+@@ -94,6 +126,14 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
+ 				is_migrate_cma_page(page))
+ 			return NULL;
+ 
++		/*
++		 * CAUTION: Don't use compound_head() on the page before this
++		 * point, the result won't be stable.
++		 */
++		page = try_get_compound_head(page, refs);
++		if (!page)
++			return NULL;
++
+ 		/*
+ 		 * When pinning a compound page of order > 1 (which is what
+ 		 * hpage_pincount_available() checks for), use an exact count to
+@@ -102,15 +142,10 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
+ 		 * However, be sure to *also* increment the normal page refcount
+ 		 * field at least once, so that the page really is pinned.
+ 		 */
+-		if (!hpage_pincount_available(page))
+-			refs *= GUP_PIN_COUNTING_BIAS;
+-
+-		page = try_get_compound_head(page, refs);
+-		if (!page)
+-			return NULL;
+-
+ 		if (hpage_pincount_available(page))
+ 			hpage_pincount_add(page, refs);
++		else
++			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
+ 
+ 		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
+ 				    orig_refs);
+@@ -134,14 +169,7 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags)
+ 			refs *= GUP_PIN_COUNTING_BIAS;
+ 	}
+ 
+-	VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
+-	/*
+-	 * Calling put_page() for each ref is unnecessarily slow. Only the last
+-	 * ref needs a put_page().
+-	 */
+-	if (refs > 1)
+-		page_ref_sub(page, refs - 1);
+-	put_page(page);
++	put_page_refs(page, refs);
+ }
+ 
+ /**
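
The pin accounting after the gup.c reorder above, as illustrative
arithmetic rather than authoritative documentation:

/*
 * For a FOLL_PIN of a compound page without a dedicated pincount field:
 *
 *	try_get_compound_head(page, refs)       adds refs
 *	page_ref_add(page, refs * (BIAS - 1))   adds refs * (BIAS - 1)
 *	                                        ------------------------
 *	                                        refs * BIAS in total
 *
 * where BIAS is GUP_PIN_COUNTING_BIAS; put_compound_head() multiplies
 * refs by the same BIAS before put_page_refs(), so the two sides balance.
 */
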
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 44c455dbbd637..bc642923e0c97 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -63,7 +63,14 @@ static atomic_t huge_zero_refcount;
+ struct page *huge_zero_page __read_mostly;
+ unsigned long huge_zero_pfn __read_mostly = ~0UL;
+ 
+-bool transparent_hugepage_enabled(struct vm_area_struct *vma)
++static inline bool file_thp_enabled(struct vm_area_struct *vma)
++{
++	return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file &&
++	       !inode_is_open_for_write(vma->vm_file->f_inode) &&
++	       (vma->vm_flags & VM_EXEC);
++}
++
++bool transparent_hugepage_active(struct vm_area_struct *vma)
+ {
+ 	/* The addr is used to check if the vma size fits */
+ 	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;
+@@ -74,6 +81,8 @@ bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+ 		return __transparent_hugepage_enabled(vma);
+ 	if (vma_is_shmem(vma))
+ 		return shmem_huge_enabled(vma);
++	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
++		return file_thp_enabled(vma);
+ 
+ 	return false;
+ }
+@@ -1606,7 +1615,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 	 * If other processes are mapping this page, we couldn't discard
+ 	 * the page unless they all do MADV_FREE so let's skip the page.
+ 	 */
+-	if (page_mapcount(page) != 1)
++	if (total_mapcount(page) != 1)
+ 		goto out;
+ 
+ 	if (!trylock_page(page))
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 7ba7d9b20494a..dbf44b92651b7 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1313,8 +1313,6 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+ 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
+ }
+ 
+-static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
+-static void prep_compound_gigantic_page(struct page *page, unsigned int order);
+ #else /* !CONFIG_CONTIG_ALLOC */
+ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+ 					int nid, nodemask_t *nodemask)
+@@ -2504,16 +2502,10 @@ found:
+ 	return 1;
+ }
+ 
+-static void __init prep_compound_huge_page(struct page *page,
+-		unsigned int order)
+-{
+-	if (unlikely(order > (MAX_ORDER - 1)))
+-		prep_compound_gigantic_page(page, order);
+-	else
+-		prep_compound_page(page, order);
+-}
+-
+-/* Put bootmem huge pages into the standard lists after mem_map is up */
++/*
++ * Put bootmem huge pages into the standard lists after mem_map is up.
++ * Note: This only applies to gigantic (order >= MAX_ORDER) pages.
++ */
+ static void __init gather_bootmem_prealloc(void)
+ {
+ 	struct huge_bootmem_page *m;
+@@ -2522,20 +2514,19 @@ static void __init gather_bootmem_prealloc(void)
+ 		struct page *page = virt_to_page(m);
+ 		struct hstate *h = m->hstate;
+ 
++		VM_BUG_ON(!hstate_is_gigantic(h));
+ 		WARN_ON(page_count(page) != 1);
+-		prep_compound_huge_page(page, huge_page_order(h));
++		prep_compound_gigantic_page(page, huge_page_order(h));
+ 		WARN_ON(PageReserved(page));
+ 		prep_new_huge_page(h, page, page_to_nid(page));
+ 		put_page(page); /* free it into the hugepage allocator */
+ 
+ 		/*
+-		 * If we had gigantic hugepages allocated at boot time, we need
+-		 * to restore the 'stolen' pages to totalram_pages in order to
+-		 * fix confusing memory reports from free(1) and another
+-		 * side-effects, like CommitLimit going negative.
++		 * We need to restore the 'stolen' pages to totalram_pages
++		 * in order to fix confusing memory reports from free(1) and
++		 * other side-effects, like CommitLimit going negative.
+ 		 */
+-		if (hstate_is_gigantic(h))
+-			adjust_managed_page_count(page, pages_per_huge_page(h));
++		adjust_managed_page_count(page, pages_per_huge_page(h));
+ 		cond_resched();
+ 	}
+ }
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 2680d5ffee7f2..1259efcd94cac 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -442,9 +442,7 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
+ static bool hugepage_vma_check(struct vm_area_struct *vma,
+ 			       unsigned long vm_flags)
+ {
+-	/* Explicitly disabled through madvise. */
+-	if ((vm_flags & VM_NOHUGEPAGE) ||
+-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
++	if (!transhuge_vma_enabled(vma, vm_flags))
+ 		return false;
+ 
+ 	/* Enabled via shmem mount options or sysfs settings. */
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index e876ba693998d..769b73151f05b 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2906,6 +2906,13 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg)
+ }
+ 
+ #ifdef CONFIG_MEMCG_KMEM
++/*
++ * The allocated objcg pointers array is not accounted directly.
++ * Moreover, it should not come from a DMA buffer and is not readily
++ * reclaimable. So those GFP bits should be masked off.
++ */
++#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
++
+ int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
+ 				 gfp_t gfp, bool new_page)
+ {
+@@ -2913,6 +2920,7 @@ int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
+ 	unsigned long memcg_data;
+ 	void *vec;
+ 
++	gfp &= ~OBJCGS_CLEAR_MASK;
+ 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
+ 			   page_to_nid(page));
+ 	if (!vec)
+diff --git a/mm/memory.c b/mm/memory.c
+index 36624986130be..e0073089bc9f8 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3310,6 +3310,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ {
+ 	struct vm_area_struct *vma = vmf->vma;
+ 	struct page *page = NULL, *swapcache;
++	struct swap_info_struct *si = NULL;
+ 	swp_entry_t entry;
+ 	pte_t pte;
+ 	int locked;
+@@ -3337,14 +3338,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ 		goto out;
+ 	}
+ 
++	/* Prevent swapoff from happening to us. */
++	si = get_swap_device(entry);
++	if (unlikely(!si))
++		goto out;
+ 
+ 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
+ 	page = lookup_swap_cache(entry, vma, vmf->address);
+ 	swapcache = page;
+ 
+ 	if (!page) {
+-		struct swap_info_struct *si = swp_swap_info(entry);
+-
+ 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
+ 		    __swap_count(entry) == 1) {
+ 			/* skip swapcache */
+@@ -3515,6 +3518,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ unlock:
+ 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+ out:
++	if (si)
++		put_swap_device(si);
+ 	return ret;
+ out_nomap:
+ 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+@@ -3526,6 +3531,8 @@ out_release:
+ 		unlock_page(swapcache);
+ 		put_page(swapcache);
+ 	}
++	if (si)
++		put_swap_device(si);
+ 	return ret;
+ }
+ 
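
The generic shape of the do_swap_page()/shmem fix above, as a hedged
sketch with error handling elided: any path that dereferences a
swap_info_struct across a potentially blocking operation must pin it so a
concurrent swapoff cannot free it underneath us.

	struct swap_info_struct *si;

	si = get_swap_device(entry);	/* pin the device across the fault */
	if (unlikely(!si))
		goto out;		/* raced with swapoff: entry is stale */

	/* ... swap cache lookup and possibly blocking readahead ... */

	put_swap_device(si);		/* drop the pin on every exit path */
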
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 40455e753c5b4..3138600cf435f 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1315,7 +1315,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
+ 	 * page_mapping() set, hugetlbfs specific move page routine will not
+ 	 * be called and we could leak usage counts for subpools.
+ 	 */
+-	if (page_private(hpage) && !page_mapping(hpage)) {
++	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
+ 		rc = -EBUSY;
+ 		goto out_unlock;
+ 	}
+diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
+index dcdde4f722a40..2ae3f33b85b16 100644
+--- a/mm/mmap_lock.c
++++ b/mm/mmap_lock.c
+@@ -11,6 +11,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/smp.h>
+ #include <linux/trace_events.h>
++#include <linux/local_lock.h>
+ 
+ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
+ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
+@@ -39,21 +40,30 @@ static int reg_refcount; /* Protected by reg_lock. */
+  */
+ #define CONTEXT_COUNT 4
+ 
+-static DEFINE_PER_CPU(char __rcu *, memcg_path_buf);
++struct memcg_path {
++	local_lock_t lock;
++	char __rcu *buf;
++	local_t buf_idx;
++};
++static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
++	.lock = INIT_LOCAL_LOCK(lock),
++	.buf_idx = LOCAL_INIT(0),
++};
++
+ static char **tmp_bufs;
+-static DEFINE_PER_CPU(int, memcg_path_buf_idx);
+ 
+ /* Called with reg_lock held. */
+ static void free_memcg_path_bufs(void)
+ {
++	struct memcg_path *memcg_path;
+ 	int cpu;
+ 	char **old = tmp_bufs;
+ 
+ 	for_each_possible_cpu(cpu) {
+-		*(old++) = rcu_dereference_protected(
+-			per_cpu(memcg_path_buf, cpu),
++		memcg_path = per_cpu_ptr(&memcg_paths, cpu);
++		*(old++) = rcu_dereference_protected(memcg_path->buf,
+ 			lockdep_is_held(&reg_lock));
+-		rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), NULL);
++		rcu_assign_pointer(memcg_path->buf, NULL);
+ 	}
+ 
+ 	/* Wait for inflight memcg_path_buf users to finish. */
+@@ -88,7 +98,7 @@ int trace_mmap_lock_reg(void)
+ 		new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
+ 		if (new == NULL)
+ 			goto out_fail_free;
+-		rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), new);
++		rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
+ 		/* Don't need to wait for inflights, they'd have gotten NULL. */
+ 	}
+ 
+@@ -122,23 +132,24 @@ out:
+ 
+ static inline char *get_memcg_path_buf(void)
+ {
++	struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
+ 	char *buf;
+ 	int idx;
+ 
+ 	rcu_read_lock();
+-	buf = rcu_dereference(*this_cpu_ptr(&memcg_path_buf));
++	buf = rcu_dereference(memcg_path->buf);
+ 	if (buf == NULL) {
+ 		rcu_read_unlock();
+ 		return NULL;
+ 	}
+-	idx = this_cpu_add_return(memcg_path_buf_idx, MEMCG_PATH_BUF_SIZE) -
++	idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
+ 	      MEMCG_PATH_BUF_SIZE;
+ 	return &buf[idx];
+ }
+ 
+ static inline void put_memcg_path_buf(void)
+ {
+-	this_cpu_sub(memcg_path_buf_idx, MEMCG_PATH_BUF_SIZE);
++	local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
+ 	rcu_read_unlock();
+ }
+ 
+@@ -179,14 +190,14 @@ out:
+ #define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
+ 	do {                                                                   \
+ 		const char *memcg_path;                                        \
+-		preempt_disable();                                             \
++		local_lock(&memcg_paths.lock);				       \
+ 		memcg_path = get_mm_memcg_path(mm);                            \
+ 		trace_mmap_lock_##type(mm,                                     \
+ 				       memcg_path != NULL ? memcg_path : "",   \
+ 				       ##__VA_ARGS__);                         \
+ 		if (likely(memcg_path != NULL))                                \
+ 			put_memcg_path_buf();                                  \
+-		preempt_enable();                                              \
++		local_unlock(&memcg_paths.lock);			       \
+ 	} while (0)
+ 
+ #else /* !CONFIG_MEMCG */
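
A minimal sketch of the local_lock pattern adopted above, under the
assumption that the per-CPU data is only touched from contexts that take
the lock. On PREEMPT_RT, local_lock_t is a real per-CPU lock, which is
what makes this safer than a bare preempt_disable().

struct scratch {
	local_lock_t lock;
	char buf[64];
};
static DEFINE_PER_CPU(struct scratch, scratch) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void use_scratch(void)
{
	struct scratch *s;

	local_lock(&scratch.lock);	/* serializes users of this CPU's data */
	s = this_cpu_ptr(&scratch);
	s->buf[0] = '\0';
	local_unlock(&scratch.lock);
}
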
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index d9dbf45f7590e..382af53772747 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6207,7 +6207,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
+ 		return;
+ 
+ 	/*
+-	 * The call to memmap_init_zone should have already taken care
++	 * The call to memmap_init should have already taken care
+ 	 * of the pages reserved for the memmap, so we can just jump to
+ 	 * the end of that region and start processing the device pages.
+ 	 */
+@@ -6272,7 +6272,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
+ /*
+  * Only struct pages that correspond to ranges defined by memblock.memory
+  * are zeroed and initialized by going through __init_single_page() during
+- * memmap_init_zone().
++ * memmap_init_zone_range().
+  *
+  * But, there could be struct pages that correspond to holes in
+  * memblock.memory. This can happen because of the following reasons:
+@@ -6291,9 +6291,9 @@ static void __meminit zone_init_free_lists(struct zone *zone)
+  *   zone/node above the hole except for the trailing pages in the last
+  *   section that will be appended to the zone/node below.
+  */
+-static u64 __meminit init_unavailable_range(unsigned long spfn,
+-					    unsigned long epfn,
+-					    int zone, int node)
++static void __init init_unavailable_range(unsigned long spfn,
++					  unsigned long epfn,
++					  int zone, int node)
+ {
+ 	unsigned long pfn;
+ 	u64 pgcnt = 0;
+@@ -6309,56 +6309,77 @@ static u64 __meminit init_unavailable_range(unsigned long spfn,
+ 		pgcnt++;
+ 	}
+ 
+-	return pgcnt;
++	if (pgcnt)
++		pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
++			node, zone_names[zone], pgcnt);
+ }
+ #else
+-static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
+-					 int zone, int node)
++static inline void init_unavailable_range(unsigned long spfn,
++					  unsigned long epfn,
++					  int zone, int node)
+ {
+-	return 0;
+ }
+ #endif
+ 
+-void __meminit __weak memmap_init_zone(struct zone *zone)
++static void __init memmap_init_zone_range(struct zone *zone,
++					  unsigned long start_pfn,
++					  unsigned long end_pfn,
++					  unsigned long *hole_pfn)
+ {
+ 	unsigned long zone_start_pfn = zone->zone_start_pfn;
+ 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
+-	int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone);
+-	static unsigned long hole_pfn;
++	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
++
++	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
++	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
++
++	if (start_pfn >= end_pfn)
++		return;
++
++	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
++			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
++
++	if (*hole_pfn < start_pfn)
++		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
++
++	*hole_pfn = end_pfn;
++}
++
++static void __init memmap_init(void)
++{
+ 	unsigned long start_pfn, end_pfn;
+-	u64 pgcnt = 0;
++	unsigned long hole_pfn = 0;
++	int i, j, zone_id, nid;
+ 
+-	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+-		start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
+-		end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
++	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
++		struct pglist_data *node = NODE_DATA(nid);
++
++		for (j = 0; j < MAX_NR_ZONES; j++) {
++			struct zone *zone = node->node_zones + j;
+ 
+-		if (end_pfn > start_pfn)
+-			memmap_init_range(end_pfn - start_pfn, nid,
+-					zone_id, start_pfn, zone_end_pfn,
+-					MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
++			if (!populated_zone(zone))
++				continue;
+ 
+-		if (hole_pfn < start_pfn)
+-			pgcnt += init_unavailable_range(hole_pfn, start_pfn,
+-							zone_id, nid);
+-		hole_pfn = end_pfn;
++			memmap_init_zone_range(zone, start_pfn, end_pfn,
++					       &hole_pfn);
++			zone_id = j;
++		}
+ 	}
+ 
+ #ifdef CONFIG_SPARSEMEM
+ 	/*
+-	 * Initialize the hole in the range [zone_end_pfn, section_end].
+-	 * If zone boundary falls in the middle of a section, this hole
+-	 * will be re-initialized during the call to this function for the
+-	 * higher zone.
++	 * Initialize the memory map for hole in the range [memory_end,
++	 * section_end].
++	 * Append the pages in this hole to the highest zone in the last
++	 * node.
++	 * The call to init_unavailable_range() is outside the ifdef to
++	 * silence the compiler warning about zone_id set but not used;
++	 * for FLATMEM it is a nop anyway.
+ 	 */
+-	end_pfn = round_up(zone_end_pfn, PAGES_PER_SECTION);
++	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
+ 	if (hole_pfn < end_pfn)
+-		pgcnt += init_unavailable_range(hole_pfn, end_pfn,
+-						zone_id, nid);
+ #endif
+-
+-	if (pgcnt)
+-		pr_info("  %s zone: %llu pages in unavailable ranges\n",
+-			zone->name, pgcnt);
++		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
+ }
+ 
+ static int zone_batchsize(struct zone *zone)
+@@ -7061,7 +7082,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
+ 		set_pageblock_order();
+ 		setup_usemap(zone);
+ 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
+-		memmap_init_zone(zone);
+ 	}
+ }
+ 
+@@ -7587,6 +7607,8 @@ void __init free_area_init(unsigned long *max_zone_pfn)
+ 			node_set_state(nid, N_MEMORY);
+ 		check_for_memory(pgdat, nid);
+ 	}
++
++	memmap_init();
+ }
+ 
+ static int __init cmdline_parse_core(char *p, unsigned long *core,
+@@ -7872,14 +7894,14 @@ static void setup_per_zone_lowmem_reserve(void)
+ 			unsigned long managed_pages = 0;
+ 
+ 			for (j = i + 1; j < MAX_NR_ZONES; j++) {
+-				if (clear) {
+-					zone->lowmem_reserve[j] = 0;
+-				} else {
+-					struct zone *upper_zone = &pgdat->node_zones[j];
++				struct zone *upper_zone = &pgdat->node_zones[j];
+ 
+-					managed_pages += zone_managed_pages(upper_zone);
++				managed_pages += zone_managed_pages(upper_zone);
++
++				if (clear)
++					zone->lowmem_reserve[j] = 0;
++				else
+ 					zone->lowmem_reserve[j] = managed_pages / ratio;
+-				}
+ 			}
+ 		}
+ 	}
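
An illustrative helper (hypothetical name) distilling the clamp() pair used
by memmap_init_zone_range() above: it intersects a pfn range with a zone
span, and an empty intersection shows up as start >= end.

static bool pfn_range_intersect(unsigned long *start, unsigned long *end,
				unsigned long lo, unsigned long hi)
{
	*start = clamp(*start, lo, hi);
	*end = clamp(*end, lo, hi);
	return *start < *end;
}
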
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 6e99a4ad6e1f3..3c39116fd0711 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1696,7 +1696,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
+ 	struct address_space *mapping = inode->i_mapping;
+ 	struct shmem_inode_info *info = SHMEM_I(inode);
+ 	struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
+-	struct page *page;
++	struct swap_info_struct *si;
++	struct page *page = NULL;
+ 	swp_entry_t swap;
+ 	int error;
+ 
+@@ -1704,6 +1705,12 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
+ 	swap = radix_to_swp_entry(*pagep);
+ 	*pagep = NULL;
+ 
++	/* Prevent swapoff from happening to us. */
++	si = get_swap_device(swap);
++	if (!si) {
++		error = -EINVAL;
++		goto failed;
++	}
+ 	/* Look it up and read it in.. */
+ 	page = lookup_swap_cache(swap, NULL, 0);
+ 	if (!page) {
+@@ -1765,6 +1772,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
+ 	swap_free(swap);
+ 
+ 	*pagep = page;
++	if (si)
++		put_swap_device(si);
+ 	return 0;
+ failed:
+ 	if (!shmem_confirm_swap(mapping, index, swap))
+@@ -1775,6 +1784,9 @@ unlock:
+ 		put_page(page);
+ 	}
+ 
++	if (si)
++		put_swap_device(si);
++
+ 	return error;
+ }
+ 
+@@ -4025,8 +4037,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
+ 	loff_t i_size;
+ 	pgoff_t off;
+ 
+-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
+-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
++	if (!transhuge_vma_enabled(vma, vma->vm_flags))
+ 		return false;
+ 	if (shmem_huge == SHMEM_HUGE_FORCE)
+ 		return true;
+diff --git a/mm/slab.h b/mm/slab.h
+index 076582f58f687..440133f93a53b 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -309,7 +309,6 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ 	if (!memcg_kmem_enabled() || !objcg)
+ 		return;
+ 
+-	flags &= ~__GFP_ACCOUNT;
+ 	for (i = 0; i < size; i++) {
+ 		if (likely(p[i])) {
+ 			page = virt_to_head_page(p[i]);
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index 9d889ad2bb869..7c417fb8404aa 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -1059,6 +1059,7 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
+ 	destroy_workqueue(pool->compact_wq);
+ 	destroy_workqueue(pool->release_wq);
+ 	z3fold_unregister_migration(pool);
++	free_percpu(pool->unbuddied);
+ 	kfree(pool);
+ }
+ 
+@@ -1382,7 +1383,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
+ 			if (zhdr->foreign_handles ||
+ 			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+ 				if (kref_put(&zhdr->refcount,
+-						release_z3fold_page))
++						release_z3fold_page_locked))
+ 					atomic64_dec(&pool->pages_nr);
+ 				else
+ 					z3fold_page_unlock(zhdr);
+diff --git a/mm/zswap.c b/mm/zswap.c
+index 578d9f2569200..91f7439fde7fb 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -967,6 +967,13 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
+ 	spin_unlock(&tree->lock);
+ 	BUG_ON(offset != entry->offset);
+ 
++	src = (u8 *)zhdr + sizeof(struct zswap_header);
++	if (!zpool_can_sleep_mapped(pool)) {
++		memcpy(tmp, src, entry->length);
++		src = tmp;
++		zpool_unmap_handle(pool, handle);
++	}
++
+ 	/* try to allocate swap cache page */
+ 	switch (zswap_get_swap_cache_page(swpentry, &page)) {
+ 	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
+@@ -982,17 +989,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
+ 	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
+ 		/* decompress */
+ 		acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+-
+ 		dlen = PAGE_SIZE;
+-		src = (u8 *)zhdr + sizeof(struct zswap_header);
+-
+-		if (!zpool_can_sleep_mapped(pool)) {
+-
+-			memcpy(tmp, src, entry->length);
+-			src = tmp;
+-
+-			zpool_unmap_handle(pool, handle);
+-		}
+ 
+ 		mutex_lock(acomp_ctx->mutex);
+ 		sg_init_one(&input, src, entry->length);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 82f4973a011d9..c6f400b108d94 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5271,8 +5271,19 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ 
+ 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ 
+-	if (ev->status)
++	if (ev->status) {
++		struct adv_info *adv;
++
++		adv = hci_find_adv_instance(hdev, ev->handle);
++		if (!adv)
++			return;
++
++		/* Remove advertising as it has been terminated */
++		hci_remove_adv_instance(hdev, ev->handle);
++		mgmt_advertising_removed(NULL, hdev, ev->handle);
++
+ 		return;
++	}
+ 
+ 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
+ 	if (conn) {
+@@ -5416,7 +5427,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+ 	struct hci_conn *conn;
+ 	bool match;
+ 	u32 flags;
+-	u8 *ptr, real_len;
++	u8 *ptr;
+ 
+ 	switch (type) {
+ 	case LE_ADV_IND:
+@@ -5447,14 +5458,10 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+ 			break;
+ 	}
+ 
+-	real_len = ptr - data;
+-
+-	/* Adjust for actual length */
+-	if (len != real_len) {
+-		bt_dev_err_ratelimited(hdev, "advertising data len corrected %u -> %u",
+-				       len, real_len);
+-		len = real_len;
+-	}
++	/* Adjust for actual length. This handles the case when a remote
++	 * device advertises with an incorrect data length.
++	 */
++	len = ptr - data;
+ 
+ 	/* If the direct address is present, then this report is from
+ 	 * a LE Direct Advertising Report event. In that case it is
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index 805ce546b8133..e5d6b1d127645 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -1685,30 +1685,33 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
+ 		return;
+ 
+ 	if (ext_adv_capable(hdev)) {
+-		struct hci_cp_le_set_ext_scan_rsp_data cp;
++		struct {
++			struct hci_cp_le_set_ext_scan_rsp_data cp;
++			u8 data[HCI_MAX_EXT_AD_LENGTH];
++		} pdu;
+ 
+-		memset(&cp, 0, sizeof(cp));
++		memset(&pdu, 0, sizeof(pdu));
+ 
+ 		if (instance)
+ 			len = create_instance_scan_rsp_data(hdev, instance,
+-							    cp.data);
++							    pdu.data);
+ 		else
+-			len = create_default_scan_rsp_data(hdev, cp.data);
++			len = create_default_scan_rsp_data(hdev, pdu.data);
+ 
+ 		if (hdev->scan_rsp_data_len == len &&
+-		    !memcmp(cp.data, hdev->scan_rsp_data, len))
++		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
+ 			return;
+ 
+-		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
++		memcpy(hdev->scan_rsp_data, pdu.data, len);
+ 		hdev->scan_rsp_data_len = len;
+ 
+-		cp.handle = instance;
+-		cp.length = len;
+-		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+-		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
++		pdu.cp.handle = instance;
++		pdu.cp.length = len;
++		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
++		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ 
+-		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
+-			    &cp);
++		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
++			    sizeof(pdu.cp) + len, &pdu.cp);
+ 	} else {
+ 		struct hci_cp_le_set_scan_rsp_data cp;
+ 
+@@ -1831,26 +1834,30 @@ void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
+ 		return;
+ 
+ 	if (ext_adv_capable(hdev)) {
+-		struct hci_cp_le_set_ext_adv_data cp;
++		struct {
++			struct hci_cp_le_set_ext_adv_data cp;
++			u8 data[HCI_MAX_EXT_AD_LENGTH];
++		} pdu;
+ 
+-		memset(&cp, 0, sizeof(cp));
++		memset(&pdu, 0, sizeof(pdu));
+ 
+-		len = create_instance_adv_data(hdev, instance, cp.data);
++		len = create_instance_adv_data(hdev, instance, pdu.data);
+ 
+ 		/* There's nothing to do if the data hasn't changed */
+ 		if (hdev->adv_data_len == len &&
+-		    memcmp(cp.data, hdev->adv_data, len) == 0)
++		    memcmp(pdu.data, hdev->adv_data, len) == 0)
+ 			return;
+ 
+-		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
++		memcpy(hdev->adv_data, pdu.data, len);
+ 		hdev->adv_data_len = len;
+ 
+-		cp.length = len;
+-		cp.handle = instance;
+-		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+-		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
++		pdu.cp.length = len;
++		pdu.cp.handle = instance;
++		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
++		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ 
+-		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
++		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
++			    sizeof(pdu.cp) + len, &pdu.cp);
+ 	} else {
+ 		struct hci_cp_le_set_adv_data cp;
+ 
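
A self-contained sketch of the header-plus-payload pattern the hci_request
changes above adopt; cmd_hdr, send_cmd() and the zero opcode are stand-ins,
not Bluetooth API. The point is to transmit sizeof(header) plus the actual
payload length rather than the full worst-case array.

#include <stdint.h>
#include <string.h>

struct cmd_hdr {
	uint8_t handle, operation, frag_pref, length;
};

#define MAX_PAYLOAD 251

static void send_cmd(uint16_t opcode, size_t plen, const void *param)
{
	/* Stand-in transmit hook; a real stack would enqueue the command. */
	(void)opcode; (void)plen; (void)param;
}

static void queue_variable_cmd(const uint8_t *payload, uint8_t len)
{
	struct {
		struct cmd_hdr cp;
		uint8_t data[MAX_PAYLOAD];
	} pdu;

	memset(&pdu, 0, sizeof(pdu));
	memcpy(pdu.data, payload, len);
	pdu.cp.length = len;

	/* Only the bytes actually used go on the wire. */
	send_cmd(0x0000, sizeof(pdu.cp) + len, &pdu.cp);
}
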
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 939c6f77fecc2..71de147f55584 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -7579,6 +7579,9 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
+ 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
+ 		cur_len = data[i];
+ 
++		if (!cur_len)
++			continue;
++
+ 		if (data[i + 1] == EIR_FLAGS &&
+ 		    (!is_adv_data || flags_managed(adv_flags)))
+ 			return false;
+diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c
+index 05e1cfc1e5cd1..291a925462463 100644
+--- a/net/bpfilter/main.c
++++ b/net/bpfilter/main.c
+@@ -57,7 +57,7 @@ int main(void)
+ {
+ 	debug_f = fopen("/dev/kmsg", "w");
+ 	setvbuf(debug_f, 0, _IOLBF, 0);
+-	fprintf(debug_f, "Started bpfilter\n");
++	fprintf(debug_f, "<5>Started bpfilter\n");
+ 	loop();
+ 	fclose(debug_f);
+ 	return 0;
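
Background for the one-line change above, to the best of my knowledge of
the printk interface: a "<N>" prefix on a line written to /dev/kmsg selects
the log level, 0 = KERN_EMERG through 7 = KERN_DEBUG, so "<5>" logs at
KERN_NOTICE instead of the default level used for unprefixed writes.

	FILE *kmsg = fopen("/dev/kmsg", "w");

	if (kmsg) {
		setvbuf(kmsg, NULL, _IOLBF, 0);		/* one write per line */
		fprintf(kmsg, "<5>hello from userspace\n");	/* NOTICE */
		fclose(kmsg);
	}
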
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index f3e4d9528fa38..0928a39c4423b 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -785,6 +785,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
+ 						  bcm_rx_handler, op);
+ 
+ 			list_del(&op->list);
++			synchronize_rcu();
+ 			bcm_remove_op(op);
+ 			return 1; /* done */
+ 		}
+@@ -1533,9 +1534,13 @@ static int bcm_release(struct socket *sock)
+ 					  REGMASK(op->can_id),
+ 					  bcm_rx_handler, op);
+ 
+-		bcm_remove_op(op);
+ 	}
+ 
++	synchronize_rcu();
++
++	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
++		bcm_remove_op(op);
++
+ #if IS_ENABLED(CONFIG_PROC_FS)
+ 	/* remove procfs entry */
+ 	if (net->can.bcmproc_dir && bo->bcm_proc_read)
+diff --git a/net/can/gw.c b/net/can/gw.c
+index ba41248056029..d8861e862f157 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -596,6 +596,7 @@ static int cgw_notifier(struct notifier_block *nb,
+ 			if (gwj->src.dev == dev || gwj->dst.dev == dev) {
+ 				hlist_del(&gwj->list);
+ 				cgw_unregister_filter(net, gwj);
++				synchronize_rcu();
+ 				kmem_cache_free(cgw_cache, gwj);
+ 			}
+ 		}
+@@ -1154,6 +1155,7 @@ static void cgw_remove_all_jobs(struct net *net)
+ 	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
+ 		hlist_del(&gwj->list);
+ 		cgw_unregister_filter(net, gwj);
++		synchronize_rcu();
+ 		kmem_cache_free(cgw_cache, gwj);
+ 	}
+ }
+@@ -1222,6 +1224,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 
+ 		hlist_del(&gwj->list);
+ 		cgw_unregister_filter(net, gwj);
++		synchronize_rcu();
+ 		kmem_cache_free(cgw_cache, gwj);
+ 		err = 0;
+ 		break;
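
The unlink / wait / free shape that the synchronize_rcu() insertions above
repeat, as a generic sketch; unregister_handler() is a hypothetical
stand-in for can_rx_unregister()/cgw_unregister_filter().

	hlist_del(&obj->list);		/* new RCU readers can no longer find obj */
	unregister_handler(obj);	/* stop new callbacks */
	synchronize_rcu();		/* wait for readers already inside an
					 * rcu_read_lock() section to finish */
	kmem_cache_free(cache, obj);	/* now nothing can still dereference obj */
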
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index be6183f8ca110..234cc4ad179a2 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -1028,9 +1028,6 @@ static int isotp_release(struct socket *sock)
+ 
+ 	lock_sock(sk);
+ 
+-	hrtimer_cancel(&so->txtimer);
+-	hrtimer_cancel(&so->rxtimer);
+-
+ 	/* remove current filters & unregister */
+ 	if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
+ 		if (so->ifindex) {
+@@ -1042,10 +1039,14 @@ static int isotp_release(struct socket *sock)
+ 						  SINGLE_MASK(so->rxid),
+ 						  isotp_rcv, sk);
+ 				dev_put(dev);
++				synchronize_rcu();
+ 			}
+ 		}
+ 	}
+ 
++	hrtimer_cancel(&so->txtimer);
++	hrtimer_cancel(&so->rxtimer);
++
+ 	so->ifindex = 0;
+ 	so->bound = 0;
+ 
+diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
+index da3a7a7bcff2b..08c8606cfd9c7 100644
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -193,6 +193,10 @@ static void j1939_can_rx_unregister(struct j1939_priv *priv)
+ 	can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
+ 			  j1939_can_recv, priv);
+ 
++	/* The last reference of priv is dropped by the RCU-deferred
++	 * j1939_sk_sock_destruct() of the last socket, so we can
++	 * safely drop this reference here.
++	 */
+ 	j1939_priv_put(priv);
+ }
+ 
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 56aa66147d5ac..e1a399821238f 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -398,6 +398,9 @@ static int j1939_sk_init(struct sock *sk)
+ 	atomic_set(&jsk->skb_pending, 0);
+ 	spin_lock_init(&jsk->sk_session_queue_lock);
+ 	INIT_LIST_HEAD(&jsk->sk_session_queue);
++
++	/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
++	sock_set_flag(sk, SOCK_RCU_FREE);
+ 	sk->sk_destruct = j1939_sk_sock_destruct;
+ 	sk->sk_protocol = CAN_J1939;
+ 
+@@ -673,7 +676,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 	switch (optname) {
+ 	case SO_J1939_FILTER:
+-		if (!sockptr_is_null(optval)) {
++		if (!sockptr_is_null(optval) && optlen != 0) {
+ 			struct j1939_filter *f;
+ 			int c;
+ 
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 52f4359efbd2b..0d1273d40fcf3 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3266,8 +3266,6 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
+ 			shinfo->gso_type |=  SKB_GSO_TCPV6;
+ 		}
+ 
+-		/* Due to IPv6 header, MSS needs to be downgraded. */
+-		skb_decrease_gso_size(shinfo, len_diff);
+ 		/* Header must be checked, and gso_segs recomputed. */
+ 		shinfo->gso_type |= SKB_GSO_DODGY;
+ 		shinfo->gso_segs = 0;
+@@ -3307,8 +3305,6 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
+ 			shinfo->gso_type |=  SKB_GSO_TCPV4;
+ 		}
+ 
+-		/* Due to IPv4 header, MSS can be upgraded. */
+-		skb_increase_gso_size(shinfo, len_diff);
+ 		/* Header must be checked, and gso_segs recomputed. */
+ 		shinfo->gso_type |= SKB_GSO_DODGY;
+ 		shinfo->gso_segs = 0;
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index d758fb83c8841..ae62e6f96a95c 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -44,7 +44,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
+ 	bpf_map_init_from_attr(&stab->map, attr);
+ 	raw_spin_lock_init(&stab->lock);
+ 
+-	stab->sks = bpf_map_area_alloc(stab->map.max_entries *
++	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
+ 				       sizeof(struct sock *),
+ 				       stab->map.numa_node);
+ 	if (!stab->sks) {
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 4b834bbf95e07..ed9857b2875dc 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -673,7 +673,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+ 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+ 		u32 padto;
+ 
+-		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
++		padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
+ 		if (skb->len < padto)
+ 			esp.tfclen = padto - skb->len;
+ 	}
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 84bb707bd88d8..647bceab56c2d 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -371,6 +371,8 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+ 		fl4.flowi4_proto = 0;
+ 		fl4.fl4_sport = 0;
+ 		fl4.fl4_dport = 0;
++	} else {
++		swap(fl4.fl4_sport, fl4.fl4_dport);
+ 	}
+ 
+ 	if (fib_lookup(net, &fl4, &res, 0))
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 09506203156d1..484064daa95a4 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1331,7 +1331,7 @@ INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
+ 		mtu = dst_metric_raw(dst, RTAX_MTU);
+ 
+ 	if (mtu)
+-		return mtu;
++		goto out;
+ 
+ 	mtu = READ_ONCE(dst->dev->mtu);
+ 
+@@ -1340,6 +1340,7 @@ INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
+ 			mtu = 576;
+ 	}
+ 
++out:
+ 	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
+ 
+ 	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index 727d791ed5e67..9d1327b36bd3b 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -708,7 +708,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
+ 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+ 		u32 padto;
+ 
+-		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
++		padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
+ 		if (skb->len < padto)
+ 			esp.tfclen = padto - skb->len;
+ 	}
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index 6126f8bf94b39..6ffa05298cc0e 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -135,18 +135,23 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
+ 	len -= 2;
+ 
+ 	while (len > 0) {
+-		int optlen = nh[off + 1] + 2;
+-		int i;
++		int optlen, i;
+ 
+-		switch (nh[off]) {
+-		case IPV6_TLV_PAD1:
+-			optlen = 1;
++		if (nh[off] == IPV6_TLV_PAD1) {
+ 			padlen++;
+ 			if (padlen > 7)
+ 				goto bad;
+-			break;
++			off++;
++			len--;
++			continue;
++		}
++		if (len < 2)
++			goto bad;
++		optlen = nh[off + 1] + 2;
++		if (optlen > len)
++			goto bad;
+ 
+-		case IPV6_TLV_PADN:
++		if (nh[off] == IPV6_TLV_PADN) {
+ 			/* RFC 2460 states that the purpose of PadN is
+ 			 * to align the containing header to multiples
+ 			 * of 8. 7 is therefore the highest valid value.
+@@ -163,12 +168,7 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
+ 				if (nh[off + i] != 0)
+ 					goto bad;
+ 			}
+-			break;
+-
+-		default: /* Other TLV code so scan list */
+-			if (optlen > len)
+-				goto bad;
+-
++		} else {
+ 			tlv_count++;
+ 			if (tlv_count > max_count)
+ 				goto bad;
+@@ -188,7 +188,6 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
+ 				return false;
+ 
+ 			padlen = 0;
+-			break;
+ 		}
+ 		off += optlen;
+ 		len -= optlen;
+@@ -306,7 +305,7 @@ fail_and_free:
+ #endif
+ 
+ 	if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
+-			  init_net.ipv6.sysctl.max_dst_opts_cnt)) {
++			  net->ipv6.sysctl.max_dst_opts_cnt)) {
+ 		skb->transport_header += extlen;
+ 		opt = IP6CB(skb);
+ #if IS_ENABLED(CONFIG_IPV6_MIP6)
+@@ -1036,7 +1035,7 @@ fail_and_free:
+ 
+ 	opt->flags |= IP6SKB_HOPBYHOP;
+ 	if (ip6_parse_tlv(tlvprochopopt_lst, skb,
+-			  init_net.ipv6.sysctl.max_hbh_opts_cnt)) {
++			  net->ipv6.sysctl.max_hbh_opts_cnt)) {
+ 		skb->transport_header += extlen;
+ 		opt = IP6CB(skb);
+ 		opt->nhoff = sizeof(struct ipv6hdr);
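
The validation order the ip6_parse_tlv() rewrite above establishes,
condensed as a sketch: Pad1 is a single byte and must be handled before any
length octet is read, and the length octet must itself be proven in-bounds
before optlen is trusted.

	while (len > 0) {
		int optlen;

		if (nh[off] == IPV6_TLV_PAD1) {	/* one byte, no length field */
			off++;
			len--;
			continue;
		}
		if (len < 2)			/* no room for the length octet */
			goto bad;
		optlen = nh[off + 1] + 2;
		if (optlen > len)		/* option overruns the header */
			goto bad;
		/* ... PadN checks or per-option handler ... */
		off += optlen;
		len -= optlen;
	}
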
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index d42f471b0d655..adbc9bf65d30e 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1239,8 +1239,6 @@ route_lookup:
+ 	if (max_headroom > dev->needed_headroom)
+ 		dev->needed_headroom = max_headroom;
+ 
+-	skb_set_inner_ipproto(skb, proto);
+-
+ 	err = ip6_tnl_encap(skb, t, &proto, fl6);
+ 	if (err)
+ 		return err;
+@@ -1377,6 +1375,8 @@ ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
+ 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
+ 		return -1;
+ 
++	skb_set_inner_ipproto(skb, protocol);
++
+ 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+ 			   protocol);
+ 	if (err != 0) {
+diff --git a/net/mac80211/he.c b/net/mac80211/he.c
+index 0c0b970835ceb..a87421c8637d6 100644
+--- a/net/mac80211/he.c
++++ b/net/mac80211/he.c
+@@ -111,7 +111,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
+ 				  struct sta_info *sta)
+ {
+ 	struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
+-	struct ieee80211_sta_he_cap own_he_cap = sband->iftype_data->he_cap;
++	struct ieee80211_sta_he_cap own_he_cap;
+ 	struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
+ 	u8 he_ppe_size;
+ 	u8 mcs_nss_size;
+@@ -123,6 +123,8 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
+ 	if (!he_cap_ie || !ieee80211_get_he_sta_cap(sband))
+ 		return;
+ 
++	own_he_cap = sband->iftype_data->he_cap;
++
+ 	/* Make sure size is OK */
+ 	mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+ 	he_ppe_size =
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 437d88822d8f8..3d915b9752a88 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1094,11 +1094,6 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
+ 	struct ieee80211_hdr_3addr *nullfunc;
+ 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ 
+-	/* Don't send NDPs when STA is connected HE */
+-	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+-	    !(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
+-		return;
+-
+ 	skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
+ 		!ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
+ 	if (!skb)
+@@ -1130,10 +1125,6 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+ 	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
+ 		return;
+ 
+-	/* Don't send NDPs when connected HE */
+-	if (!(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
+-		return;
+-
+ 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
+ 	if (!skb)
+ 		return;
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index f2fb69da9b6e1..13250cadb4202 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1398,11 +1398,6 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid,
+ 	struct ieee80211_tx_info *info;
+ 	struct ieee80211_chanctx_conf *chanctx_conf;
+ 
+-	/* Don't send NDPs when STA is connected HE */
+-	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+-	    !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
+-		return;
+-
+ 	if (qos) {
+ 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ 				 IEEE80211_STYPE_QOS_NULLFUNC |
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index d6d8ad4f918e7..189139b8d401c 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -409,15 +409,15 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 			goto do_reset;
+ 		}
+ 
++		if (!mptcp_finish_join(sk))
++			goto do_reset;
++
+ 		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
+ 				      subflow->local_nonce,
+ 				      subflow->remote_nonce,
+ 				      hmac);
+ 		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
+ 
+-		if (!mptcp_finish_join(sk))
+-			goto do_reset;
+-
+ 		subflow->mp_join = 1;
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
+ 
+diff --git a/net/mptcp/token.c b/net/mptcp/token.c
+index feb4b9ffd4625..0691a4883f3ab 100644
+--- a/net/mptcp/token.c
++++ b/net/mptcp/token.c
+@@ -156,9 +156,6 @@ int mptcp_token_new_connect(struct sock *sk)
+ 	int retries = TOKEN_MAX_RETRIES;
+ 	struct token_bucket *bucket;
+ 
+-	pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
+-		 sk, subflow->local_key, subflow->token, subflow->idsn);
+-
+ again:
+ 	mptcp_crypto_key_gen_sha(&subflow->local_key, &subflow->token,
+ 				 &subflow->idsn);
+@@ -172,6 +169,9 @@ again:
+ 		goto again;
+ 	}
+ 
++	pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
++		 sk, subflow->local_key, subflow->token, subflow->idsn);
++
+ 	WRITE_ONCE(msk->token, subflow->token);
+ 	__sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain);
+ 	bucket->chain_len++;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 9d5ea23529657..6b79fa357bfea 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -521,7 +521,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
+ 		    table->family == family &&
+ 		    nft_active_genmask(table, genmask)) {
+ 			if (nft_table_has_owner(table) &&
+-			    table->nlpid != nlpid)
++			    nlpid && table->nlpid != nlpid)
+ 				return ERR_PTR(-EPERM);
+ 
+ 			return table;
+@@ -533,14 +533,19 @@ static struct nft_table *nft_table_lookup(const struct net *net,
+ 
+ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
+ 						   const struct nlattr *nla,
+-						   u8 genmask)
++						   u8 genmask, u32 nlpid)
+ {
+ 	struct nft_table *table;
+ 
+ 	list_for_each_entry(table, &net->nft.tables, list) {
+ 		if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
+-		    nft_active_genmask(table, genmask))
++		    nft_active_genmask(table, genmask)) {
++			if (nft_table_has_owner(table) &&
++			    nlpid && table->nlpid != nlpid)
++				return ERR_PTR(-EPERM);
++
+ 			return table;
++		}
+ 	}
+ 
+ 	return ERR_PTR(-ENOENT);
+@@ -1213,7 +1218,8 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
+ 
+ 	if (nla[NFTA_TABLE_HANDLE]) {
+ 		attr = nla[NFTA_TABLE_HANDLE];
+-		table = nft_table_lookup_byhandle(net, attr, genmask);
++		table = nft_table_lookup_byhandle(net, attr, genmask,
++						  NETLINK_CB(skb).portid);
+ 	} else {
+ 		attr = nla[NFTA_TABLE_NAME];
+ 		table = nft_table_lookup(net, attr, family, genmask,
+diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
+index 2b00f7f47693b..9ce776175214c 100644
+--- a/net/netfilter/nf_tables_offload.c
++++ b/net/netfilter/nf_tables_offload.c
+@@ -54,15 +54,10 @@ static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
+ 					struct nft_flow_rule *flow)
+ {
+ 	struct nft_flow_match *match = &flow->match;
+-	struct nft_offload_ethertype ethertype;
+-
+-	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
+-	    match->key.basic.n_proto != htons(ETH_P_8021Q) &&
+-	    match->key.basic.n_proto != htons(ETH_P_8021AD))
+-		return;
+-
+-	ethertype.value = match->key.basic.n_proto;
+-	ethertype.mask = match->mask.basic.n_proto;
++	struct nft_offload_ethertype ethertype = {
++		.value	= match->key.basic.n_proto,
++		.mask	= match->mask.basic.n_proto,
++	};
+ 
+ 	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
+ 	    (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
+@@ -76,7 +71,9 @@ static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
+ 		match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
+ 			offsetof(struct nft_flow_key, cvlan);
+ 		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
+-	} else {
++	} else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) &&
++		   (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
++		    match->key.basic.n_proto == htons(ETH_P_8021AD))) {
+ 		match->key.basic.n_proto = match->key.vlan.vlan_tpid;
+ 		match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
+ 		match->key.vlan.vlan_tpid = ethertype.value;
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index f64f0017e9a53..670dd146fb2b1 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -42,6 +42,9 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
+ 	unsigned int offset = 0;
+ 	int err;
+ 
++	if (pkt->skb->protocol != htons(ETH_P_IPV6))
++		goto err;
++
+ 	err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
+ 	if (priv->flags & NFT_EXTHDR_F_PRESENT) {
+ 		nft_reg_store8(dest, err >= 0);
+diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
+index ac61f708b82d2..d82677e83400b 100644
+--- a/net/netfilter/nft_osf.c
++++ b/net/netfilter/nft_osf.c
+@@ -28,6 +28,11 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 	struct nf_osf_data data;
+ 	struct tcphdr _tcph;
+ 
++	if (pkt->tprot != IPPROTO_TCP) {
++		regs->verdict.code = NFT_BREAK;
++		return;
++	}
++
+ 	tcp = skb_header_pointer(skb, ip_hdrlen(skb),
+ 				 sizeof(struct tcphdr), &_tcph);
+ 	if (!tcp) {
+diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
+index 43a5a780a6d3b..37c728bdad41c 100644
+--- a/net/netfilter/nft_tproxy.c
++++ b/net/netfilter/nft_tproxy.c
+@@ -30,6 +30,12 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
+ 	__be16 tport = 0;
+ 	struct sock *sk;
+ 
++	if (pkt->tprot != IPPROTO_TCP &&
++	    pkt->tprot != IPPROTO_UDP) {
++		regs->verdict.code = NFT_BREAK;
++		return;
++	}
++
+ 	hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
+ 	if (!hp) {
+ 		regs->verdict.code = NFT_BREAK;
+@@ -91,7 +97,8 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
+ 
+ 	memset(&taddr, 0, sizeof(taddr));
+ 
+-	if (!pkt->tprot_set) {
++	if (pkt->tprot != IPPROTO_TCP &&
++	    pkt->tprot != IPPROTO_UDP) {
+ 		regs->verdict.code = NFT_BREAK;
+ 		return;
+ 	}
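The nft_exthdr, nft_osf and nft_tproxy fixes above share one pattern: validate the packet's transport protocol before parsing any transport header, and break out of rule evaluation otherwise. A minimal sketch of that guard clause, using simplified stand-ins for the kernel's packet structure and verdict codes:

#include <stdio.h>

#define IPPROTO_TCP	6
#define IPPROTO_UDP	17
#define NFT_BREAK	0
#define NFT_CONTINUE	1

struct pkt { int tprot; };

static int eval_tproxy(const struct pkt *pkt)
{
	/* Guard first: anything that is not TCP or UDP cannot carry the
	 * ports we are about to read, so stop before parsing headers. */
	if (pkt->tprot != IPPROTO_TCP && pkt->tprot != IPPROTO_UDP)
		return NFT_BREAK;

	/* ... only now is it safe to read the transport header ... */
	return NFT_CONTINUE;
}

int main(void)
{
	struct pkt icmp = { .tprot = 1 }, tcp = { .tprot = IPPROTO_TCP };

	printf("icmp -> %d, tcp -> %d\n",
	       eval_tproxy(&icmp), eval_tproxy(&tcp));
	return 0;
}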
+diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
+index df1b41ed73fd9..19e4fffccf783 100644
+--- a/net/netlabel/netlabel_mgmt.c
++++ b/net/netlabel/netlabel_mgmt.c
+@@ -76,6 +76,7 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = {
+ static int netlbl_mgmt_add_common(struct genl_info *info,
+ 				  struct netlbl_audit *audit_info)
+ {
++	void *pmap = NULL;
+ 	int ret_val = -EINVAL;
+ 	struct netlbl_domaddr_map *addrmap = NULL;
+ 	struct cipso_v4_doi *cipsov4 = NULL;
+@@ -175,6 +176,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
+ 			ret_val = -ENOMEM;
+ 			goto add_free_addrmap;
+ 		}
++		pmap = map;
+ 		map->list.addr = addr->s_addr & mask->s_addr;
+ 		map->list.mask = mask->s_addr;
+ 		map->list.valid = 1;
+@@ -183,10 +185,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
+ 			map->def.cipso = cipsov4;
+ 
+ 		ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
+-		if (ret_val != 0) {
+-			kfree(map);
+-			goto add_free_addrmap;
+-		}
++		if (ret_val != 0)
++			goto add_free_map;
+ 
+ 		entry->family = AF_INET;
+ 		entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
+@@ -223,6 +223,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
+ 			ret_val = -ENOMEM;
+ 			goto add_free_addrmap;
+ 		}
++		pmap = map;
+ 		map->list.addr = *addr;
+ 		map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
+ 		map->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
+@@ -235,10 +236,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
+ 			map->def.calipso = calipso;
+ 
+ 		ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
+-		if (ret_val != 0) {
+-			kfree(map);
+-			goto add_free_addrmap;
+-		}
++		if (ret_val != 0)
++			goto add_free_map;
+ 
+ 		entry->family = AF_INET6;
+ 		entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
+@@ -248,10 +247,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
+ 
+ 	ret_val = netlbl_domhsh_add(entry, audit_info);
+ 	if (ret_val != 0)
+-		goto add_free_addrmap;
++		goto add_free_map;
+ 
+ 	return 0;
+ 
++add_free_map:
++	kfree(pmap);
+ add_free_addrmap:
+ 	kfree(addrmap);
+ add_doi_put_def:
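The netlabel fix above introduces a pmap pointer that always tracks the most recently allocated map, so one add_free_map label can release it on every later error path instead of duplicating kfree() at each failure site. A sketch of that unwind pattern with illustrative names:

#include <stdlib.h>

struct map { int dummy; };
struct addrmap { int dummy; };

static int add_common(int fail_late)
{
	void *pmap = NULL;	/* newest allocation, freed on unwind */
	struct addrmap *addrmap;
	struct map *map;

	addrmap = calloc(1, sizeof(*addrmap));
	if (!addrmap)
		return -1;

	map = calloc(1, sizeof(*map));
	if (!map)
		goto free_addrmap;
	pmap = map;		/* remember it before anything can fail */

	if (fail_late)		/* e.g. a list-add helper failing */
		goto free_map;

	return 0;

free_map:
	free(pmap);		/* frees whichever map was last allocated */
free_addrmap:
	free(addrmap);
	return -1;
}

int main(void) { return add_common(1) == -1 ? 0 : 1; }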
+diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
+index 8d00dfe8139e8..1990d496fcfc0 100644
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -775,8 +775,10 @@ int qrtr_ns_init(void)
+ 	}
+ 
+ 	qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
+-	if (!qrtr_ns.workqueue)
++	if (!qrtr_ns.workqueue) {
++		ret = -ENOMEM;
+ 		goto err_sock;
++	}
+ 
+ 	qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
+ 
+diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
+index 1cac3c6fbb49c..a108469c664f7 100644
+--- a/net/sched/act_vlan.c
++++ b/net/sched/act_vlan.c
+@@ -70,7 +70,7 @@ static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
+ 		/* replace the vid */
+ 		tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
+ 		/* replace prio bits, if tcfv_push_prio specified */
+-		if (p->tcfv_push_prio) {
++		if (p->tcfv_push_prio_exists) {
+ 			tci &= ~VLAN_PRIO_MASK;
+ 			tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
+ 		}
+@@ -121,6 +121,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ 	struct tc_action_net *tn = net_generic(net, vlan_net_id);
+ 	struct nlattr *tb[TCA_VLAN_MAX + 1];
+ 	struct tcf_chain *goto_ch = NULL;
++	bool push_prio_exists = false;
+ 	struct tcf_vlan_params *p;
+ 	struct tc_vlan *parm;
+ 	struct tcf_vlan *v;
+@@ -189,7 +190,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ 			push_proto = htons(ETH_P_8021Q);
+ 		}
+ 
+-		if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
++		push_prio_exists = !!tb[TCA_VLAN_PUSH_VLAN_PRIORITY];
++		if (push_prio_exists)
+ 			push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
+ 		break;
+ 	case TCA_VLAN_ACT_POP_ETH:
+@@ -241,6 +243,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ 	p->tcfv_action = action;
+ 	p->tcfv_push_vid = push_vid;
+ 	p->tcfv_push_prio = push_prio;
++	p->tcfv_push_prio_exists = push_prio_exists || action == TCA_VLAN_ACT_PUSH;
+ 	p->tcfv_push_proto = push_proto;
+ 
+ 	if (action == TCA_VLAN_ACT_PUSH_ETH) {
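The act_vlan change above fixes a "presence versus value" confusion: VLAN priority 0 is a perfectly legal value, so the code must track whether the netlink attribute was supplied at all, not whether its value is nonzero. A small sketch of the distinction (mask and shift match the VLAN TCI layout; the struct is illustrative):

#include <stdbool.h>
#include <stdio.h>

struct vlan_params {
	unsigned char push_prio;
	bool push_prio_exists;
};

static unsigned short rewrite_tci(unsigned short tci,
				  const struct vlan_params *p)
{
	if (p->push_prio_exists) {	/* not: if (p->push_prio) */
		tci &= ~0xe000;		/* VLAN_PRIO_MASK */
		tci |= p->push_prio << 13;
	}
	return tci;
}

int main(void)
{
	/* The user explicitly asked for priority 0, so the old priority
	 * bits must be cleared even though the new value is "falsy". */
	struct vlan_params p = { .push_prio = 0, .push_prio_exists = true };

	printf("0x%04x\n", rewrite_tci(0x6005, &p));	/* -> 0x0005 */
	return 0;
}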
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
+index c4007b9cd16d6..5b274534264c2 100644
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -304,7 +304,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
+ 	int i, err = 0;
+ 
+ 	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
+-			      GFP_KERNEL);
++			      GFP_KERNEL | __GFP_NOWARN);
+ 	if (!cp->perfect)
+ 		return -ENOMEM;
+ 
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 1db9d4a2ef5ef..b692a0de1ad5e 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -485,11 +485,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 
+ 	if (cl->qdisc != &noop_qdisc)
+ 		qdisc_hash_add(cl->qdisc, true);
+-	sch_tree_lock(sch);
+-	qdisc_class_hash_insert(&q->clhash, &cl->common);
+-	sch_tree_unlock(sch);
+-
+-	qdisc_class_hash_grow(sch, &q->clhash);
+ 
+ set_change_agg:
+ 	sch_tree_lock(sch);
+@@ -507,8 +502,11 @@ set_change_agg:
+ 	}
+ 	if (existing)
+ 		qfq_deact_rm_from_agg(q, cl);
++	else
++		qdisc_class_hash_insert(&q->clhash, &cl->common);
+ 	qfq_add_to_agg(q, new_agg, cl);
+ 	sch_tree_unlock(sch);
++	qdisc_class_hash_grow(sch, &q->clhash);
+ 
+ 	*arg = (unsigned long)cl;
+ 	return 0;
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 39ed0e0afe6d9..c045f63d11fa6 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -591,11 +591,21 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
+ 	struct list_head *q;
+ 	struct rpc_task *task;
+ 
++	/*
++	 * Service the privileged queue.
++	 */
++	q = &queue->tasks[RPC_NR_PRIORITY - 1];
++	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
++		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
++		goto out;
++	}
++
+ 	/*
+ 	 * Service a batch of tasks from a single owner.
+ 	 */
+ 	q = &queue->tasks[queue->priority];
+-	if (!list_empty(q) && --queue->nr) {
++	if (!list_empty(q) && queue->nr) {
++		queue->nr--;
+ 		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
+ 		goto out;
+ 	}
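The sunrpc change above fixes a side effect hidden in the old condition: with --queue->nr inside the &&, the last slot of a batch was consumed while the test evaluated false, so no task was dispatched for it. The fix tests the counter first and decrements only when a task is actually taken. A tiny demonstration of the difference:

#include <stdio.h>

static int take_old(int *nr, int nonempty)
{
	return nonempty && --*nr;	/* nr==1: slot burned, no task */
}

static int take_new(int *nr, int nonempty)
{
	if (nonempty && *nr) {
		(*nr)--;		/* decrement only when dispatching */
		return 1;
	}
	return 0;
}

int main(void)
{
	int a = 1, b = 1;

	printf("old: took=%d nr=%d\n", take_old(&a, 1), a); /* took=0 nr=0 */
	printf("new: took=%d nr=%d\n", take_new(&b, 1), b); /* took=1 nr=0 */
	return 0;
}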
+diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
+index d4beca895992d..593846d252143 100644
+--- a/net/tipc/bcast.c
++++ b/net/tipc/bcast.c
+@@ -699,7 +699,7 @@ int tipc_bcast_init(struct net *net)
+ 	spin_lock_init(&tipc_net(net)->bclock);
+ 
+ 	if (!tipc_link_bc_create(net, 0, 0, NULL,
+-				 FB_MTU,
++				 one_page_mtu,
+ 				 BCLINK_WIN_DEFAULT,
+ 				 BCLINK_WIN_DEFAULT,
+ 				 0,
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index d0fc5fadbc680..b7943da9d0950 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -44,12 +44,15 @@
+ #define MAX_FORWARD_SIZE 1024
+ #ifdef CONFIG_TIPC_CRYPTO
+ #define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
+-#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
++#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
+ #else
+ #define BUF_HEADROOM (LL_MAX_HEADER + 48)
+-#define BUF_TAILROOM 16
++#define BUF_OVERHEAD BUF_HEADROOM
+ #endif
+ 
++const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
++			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++
+ static unsigned int align(unsigned int i)
+ {
+ 	return (i + 3) & ~3u;
+@@ -69,13 +72,8 @@ static unsigned int align(unsigned int i)
+ struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
+ {
+ 	struct sk_buff *skb;
+-#ifdef CONFIG_TIPC_CRYPTO
+-	unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
+-#else
+-	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+-#endif
+ 
+-	skb = alloc_skb_fclone(buf_size, gfp);
++	skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
+ 	if (skb) {
+ 		skb_reserve(skb, BUF_HEADROOM);
+ 		skb_put(skb, size);
+@@ -395,7 +393,8 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+ 		if (unlikely(!skb)) {
+ 			if (pktmax != MAX_MSG_SIZE)
+ 				return -ENOMEM;
+-			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
++			rc = tipc_msg_build(mhdr, m, offset, dsz,
++					    one_page_mtu, list);
+ 			if (rc != dsz)
+ 				return rc;
+ 			if (tipc_msg_assemble(list))
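The TIPC change above replaces the hard-coded FB_MTU of 3744 with a computed one_page_mtu, sized so that headroom, payload and the trailing skb_shared_info all fit in a single page. A back-of-envelope model of the arithmetic; the PAGE_SIZE, cache-line and structure sizes below are assumptions for a typical x86-64 build, purely to show the computation:

#include <stdio.h>

#define PAGE_SIZE	4096u
#define SMP_CACHE_BYTES	64u
#define SHINFO_SIZE	320u	/* assumed sizeof(struct skb_shared_info) */
#define BUF_OVERHEAD	176u	/* assumed headroom plus tag tailroom */

#define SKB_DATA_ALIGN(x) (((x) + SMP_CACHE_BYTES - 1) & ~(SMP_CACHE_BYTES - 1))

int main(void)
{
	unsigned int mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD)
				     - SKB_DATA_ALIGN(SHINFO_SIZE);

	/* The fallback message plus all skb overhead now fits one page. */
	printf("one_page_mtu = %u\n", mtu);
	return 0;
}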
+diff --git a/net/tipc/msg.h b/net/tipc/msg.h
+index 5d64596ba9877..64ae4c4c44f8c 100644
+--- a/net/tipc/msg.h
++++ b/net/tipc/msg.h
+@@ -99,9 +99,10 @@ struct plist;
+ #define MAX_H_SIZE                60	/* Largest possible TIPC header size */
+ 
+ #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
+-#define FB_MTU                  3744
+ #define TIPC_MEDIA_INFO_OFFSET	5
+ 
++extern const int one_page_mtu;
++
+ struct tipc_skb_cb {
+ 	union {
+ 		struct {
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 6086cf4f10a7c..60d2ff13fa9ee 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1153,7 +1153,7 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
+ 	int ret = 0;
+ 	bool eor;
+ 
+-	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
++	eor = !(flags & MSG_SENDPAGE_NOTLAST);
+ 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+ 
+ 	/* Call the sk_stream functions to manage the sndbuf mem. */
+diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
+index 40f359bf20440..35938dfa784de 100644
+--- a/net/xdp/xsk_queue.h
++++ b/net/xdp/xsk_queue.h
+@@ -128,12 +128,15 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
+ static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
+ 					    struct xdp_desc *desc)
+ {
+-	u64 chunk;
+-
+-	if (desc->len > pool->chunk_size)
+-		return false;
++	u64 chunk, chunk_end;
+ 
+ 	chunk = xp_aligned_extract_addr(pool, desc->addr);
++	if (likely(desc->len)) {
++		chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
++		if (chunk != chunk_end)
++			return false;
++	}
++
+ 	if (chunk >= pool->addrs_cnt)
+ 		return false;
+ 
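The xsk_queue.h change above tightens aligned-mode validation: a descriptor must not span two chunks, so the chunk of its first byte and of its last byte must match. A sketch of the check with an illustrative chunk size (the kernel derives the mask from the pool configuration):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long u64;

static u64 extract_chunk(u64 addr, u64 chunk_size)
{
	return addr & ~(chunk_size - 1);	/* power-of-two chunks */
}

static bool desc_ok(u64 addr, u64 len, u64 chunk_size)
{
	if (!len)
		return true;
	return extract_chunk(addr, chunk_size) ==
	       extract_chunk(addr + len - 1, chunk_size);
}

int main(void)
{
	/* 2048-byte chunks: a 100-byte packet at offset 2000 crosses
	 * into the next chunk and must be rejected. */
	printf("%d %d\n",
	       desc_ok(0, 100, 2048), desc_ok(2000, 100, 2048));
	return 0;
}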
+diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
+index 6d6917b68856f..e843b0d9e2a61 100644
+--- a/net/xfrm/xfrm_device.c
++++ b/net/xfrm/xfrm_device.c
+@@ -268,6 +268,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
+ 		xso->num_exthdrs = 0;
+ 		xso->flags = 0;
+ 		xso->dev = NULL;
++		xso->real_dev = NULL;
+ 		dev_put(dev);
+ 
+ 		if (err != -EOPNOTSUPP)
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index e4cb0ff4dcf41..ac907b9d32d1e 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -711,15 +711,8 @@ out:
+ static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ #if IS_ENABLED(CONFIG_IPV6)
+-	unsigned int ptr = 0;
+ 	int err;
+ 
+-	if (x->outer_mode.encap == XFRM_MODE_BEET &&
+-	    ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
+-		net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
+-		return -EAFNOSUPPORT;
+-	}
+-
+ 	err = xfrm6_tunnel_check_size(skb);
+ 	if (err)
+ 		return err;
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 4496f7efa2200..c25586156c6a7 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -2518,7 +2518,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
+ }
+ EXPORT_SYMBOL(xfrm_state_delete_tunnel);
+ 
+-u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
++u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
+ {
+ 	const struct xfrm_type *type = READ_ONCE(x->type);
+ 	struct crypto_aead *aead;
+@@ -2549,7 +2549,17 @@ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
+ 	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
+ 		 net_adj) & ~(blksize - 1)) + net_adj - 2;
+ }
+-EXPORT_SYMBOL_GPL(xfrm_state_mtu);
++EXPORT_SYMBOL_GPL(__xfrm_state_mtu);
++
++u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
++{
++	mtu = __xfrm_state_mtu(x, mtu);
++
++	if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU)
++		return IPV6_MIN_MTU;
++
++	return mtu;
++}
+ 
+ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
+ {
+diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c
+index 41d705c3a1f7f..93854e135134c 100644
+--- a/samples/bpf/xdp_redirect_user.c
++++ b/samples/bpf/xdp_redirect_user.c
+@@ -130,7 +130,7 @@ int main(int argc, char **argv)
+ 	if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
+ 		xdp_flags |= XDP_FLAGS_DRV_MODE;
+ 
+-	if (optind == argc) {
++	if (optind + 2 != argc) {
+ 		printf("usage: %s <IFNAME|IFINDEX>_IN <IFNAME|IFINDEX>_OUT\n", argv[0]);
+ 		return 1;
+ 	}
+@@ -213,5 +213,5 @@ int main(int argc, char **argv)
+ 	poll_stats(2, ifindex_out);
+ 
+ out:
+-	return 0;
++	return ret;
+ }
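The xdp_redirect_user change above corrects the argument check: the old optind == argc test only caught the zero-argument case, while the tool needs exactly two positional arguments after getopt() has consumed the options. A standalone sketch of the corrected check (the option string is illustrative):

#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int opt;

	while ((opt = getopt(argc, argv, "SN")) != -1)
		;	/* option handling elided */

	if (optind + 2 != argc) {	/* not: optind == argc */
		fprintf(stderr, "usage: %s [-S|-N] IFIN IFOUT\n", argv[0]);
		return 1;
	}
	printf("in=%s out=%s\n", argv[optind], argv[optind + 1]);
	return 0;
}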
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 1b6094a130346..73701d637ed56 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -267,7 +267,8 @@ define rule_as_o_S
+ endef
+ 
+ # Built-in and composite module parts
+-$(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE
++.SECONDEXPANSION:
++$(obj)/%.o: $(src)/%.c $(recordmcount_source) $$(objtool_dep) FORCE
+ 	$(call if_changed_rule,cc_o_c)
+ 	$(call cmd,force_checksrc)
+ 
+@@ -348,7 +349,7 @@ cmd_modversions_S =								\
+ 	fi
+ endif
+ 
+-$(obj)/%.o: $(src)/%.S $(objtool_dep) FORCE
++$(obj)/%.o: $(src)/%.S $$(objtool_dep) FORCE
+ 	$(call if_changed_rule,as_o_S)
+ 
+ targets += $(filter-out $(subdir-builtin), $(real-obj-y))
+diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh
+index 45e8aa360b457..cb55878bd5b81 100755
+--- a/scripts/tools-support-relr.sh
++++ b/scripts/tools-support-relr.sh
+@@ -7,7 +7,8 @@ trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
+ cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
+ void *p = &p;
+ END
+-$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
++$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr \
++  --use-android-relr-tags -o $tmp_file
+ 
+ # Despite printing an error message, GNU nm still exits with exit code 0 if it
+ # sees a relr section. So we need to check that nothing is printed to stderr.
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 0de367aaa2d31..7ac5204c8d1f2 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -521,7 +521,7 @@ void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+ }
+ 
+ /*
+- * evm_inode_init_security - initializes security.evm
++ * evm_inode_init_security - initializes security.evm HMAC value
+  */
+ int evm_inode_init_security(struct inode *inode,
+ 				 const struct xattr *lsm_xattr,
+@@ -530,7 +530,8 @@ int evm_inode_init_security(struct inode *inode,
+ 	struct evm_xattr *xattr_data;
+ 	int rc;
+ 
+-	if (!evm_key_loaded() || !evm_protected_xattr(lsm_xattr->name))
++	if (!(evm_initialized & EVM_INIT_HMAC) ||
++	    !evm_protected_xattr(lsm_xattr->name))
+ 		return 0;
+ 
+ 	xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
+diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
+index bbc85637e18b2..5f0da41bccd07 100644
+--- a/security/integrity/evm/evm_secfs.c
++++ b/security/integrity/evm/evm_secfs.c
+@@ -66,12 +66,13 @@ static ssize_t evm_read_key(struct file *filp, char __user *buf,
+ static ssize_t evm_write_key(struct file *file, const char __user *buf,
+ 			     size_t count, loff_t *ppos)
+ {
+-	int i, ret;
++	unsigned int i;
++	int ret;
+ 
+ 	if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP_COMPLETE))
+ 		return -EPERM;
+ 
+-	ret = kstrtoint_from_user(buf, count, 0, &i);
++	ret = kstrtouint_from_user(buf, count, 0, &i);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -80,12 +81,12 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
+ 	if (!i || (i & ~EVM_INIT_MASK) != 0)
+ 		return -EINVAL;
+ 
+-	/* Don't allow a request to freshly enable metadata writes if
+-	 * keys are loaded.
++	/*
++	 * Don't allow a request to enable metadata writes if
++	 * an HMAC key is loaded.
+ 	 */
+ 	if ((i & EVM_ALLOW_METADATA_WRITES) &&
+-	    ((evm_initialized & EVM_KEY_MASK) != 0) &&
+-	    !(evm_initialized & EVM_ALLOW_METADATA_WRITES))
++	    (evm_initialized & EVM_INIT_HMAC) != 0)
+ 		return -EPERM;
+ 
+ 	if (i & EVM_INIT_HMAC) {
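The evm_secfs change above also swaps the parse from signed to unsigned. A flag word whose top bit may be set (EVM_SETUP_COMPLETE appears to be 0x80000000) does not fit in a 32-bit signed int, so the signed conversion fails with a range error before the flag checks ever run. A userspace model of the difference, using strtol/strtoul as stand-ins for the kstrto helpers:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *input = "0x80000000";
	long sval;
	unsigned long uval;

	errno = 0;
	sval = strtol(input, NULL, 0);
	if (errno == ERANGE || sval > INT_MAX)
		printf("signed parse rejects %s\n", input);

	errno = 0;
	uval = strtoul(input, NULL, 0);
	if (errno == 0 && uval <= UINT_MAX)
		printf("unsigned parse accepts %s -> %#lx\n", input, uval);

	return 0;
}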
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 565e33ff19d0d..d7cc6f8977461 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -522,8 +522,6 @@ void ima_inode_post_setattr(struct user_namespace *mnt_userns,
+ 		return;
+ 
+ 	action = ima_must_appraise(mnt_userns, inode, MAY_ACCESS, POST_SETATTR);
+-	if (!action)
+-		__vfs_removexattr(&init_user_ns, dentry, XATTR_NAME_IMA);
+ 	iint = integrity_iint_find(inode);
+ 	if (iint) {
+ 		set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags);
+diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
+index 5805c5de39fbf..7a282d8e71485 100644
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -1404,14 +1404,17 @@ int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
+ 	unsigned int queue_size;
+ 	struct amdtp_stream *s;
+ 	int cycle;
++	bool found = false;
+ 	int err;
+ 
+ 	// Select an IT context as IRQ target.
+ 	list_for_each_entry(s, &d->streams, list) {
+-		if (s->direction == AMDTP_OUT_STREAM)
++		if (s->direction == AMDTP_OUT_STREAM) {
++			found = true;
+ 			break;
++		}
+ 	}
+-	if (!s)
++	if (!found)
+ 		return -ENXIO;
+ 	d->irq_target = s;
+ 
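The amdtp-stream fix above addresses a classic list_for_each_entry pitfall: when the loop runs to completion without a break, the cursor is never NULL; it points at a bogus container computed from the list head itself, so a post-loop NULL test can never detect the miss. The fix carries an explicit found flag instead. A self-contained demonstration with a minimal reimplementation of the kernel macros:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct stream { int direction; struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct stream *s;
	int found = 0;

	list_for_each_entry(s, &head, list)
		if (s->direction == 1) { found = 1; break; }

	/* After an exhausted loop, s is NOT NULL: it is derived from
	 * &head itself, so "if (!s)" can never catch the miss. */
	printf("s = %p, found = %d\n", (void *)s, found);
	return found ? 0 : 1;
}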
+diff --git a/sound/firewire/motu/motu-protocol-v2.c b/sound/firewire/motu/motu-protocol-v2.c
+index e59e69ab1538b..784073aa10265 100644
+--- a/sound/firewire/motu/motu-protocol-v2.c
++++ b/sound/firewire/motu/motu-protocol-v2.c
+@@ -353,6 +353,7 @@ const struct snd_motu_spec snd_motu_spec_8pre = {
+ 	.protocol_version = SND_MOTU_PROTOCOL_V2,
+ 	.flags = SND_MOTU_SPEC_RX_MIDI_2ND_Q |
+ 		 SND_MOTU_SPEC_TX_MIDI_2ND_Q,
+-	.tx_fixed_pcm_chunks = {10, 6, 0},
+-	.rx_fixed_pcm_chunks = {10, 6, 0},
++	// Two dummy chunks are always at the end of each data block.
++	.tx_fixed_pcm_chunks = {10, 10, 0},
++	.rx_fixed_pcm_chunks = {6, 6, 0},
+ };
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e46e43dac6bfd..1cc83344c2ecf 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -385,6 +385,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 		alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+ 		fallthrough;
+ 	case 0x10ec0215:
++	case 0x10ec0230:
+ 	case 0x10ec0233:
+ 	case 0x10ec0235:
+ 	case 0x10ec0236:
+@@ -3153,6 +3154,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
+ 		alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
+ 		alc_update_coef_idx(codec, 0x44, 0x0045 << 8, 0x0);
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_write_coef_idx(codec, 0x48, 0x0);
+@@ -3180,6 +3182,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
+ 		alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
+ 		alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8);
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_write_coef_idx(codec, 0x48, 0xd011);
+@@ -4737,6 +4740,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0256);
+@@ -4851,6 +4855,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
+ 		alc_process_coef_fw(codec, coef0255);
+ 		snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_write_coef_idx(codec, 0x45, 0xc489);
+@@ -5000,6 +5005,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_write_coef_idx(codec, 0x1b, 0x0e4b);
+@@ -5098,6 +5104,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0256);
+@@ -5211,6 +5218,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0256);
+@@ -5311,6 +5319,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ 		val = alc_read_coef_idx(codec, 0x46);
+ 		is_ctia = (val & 0x0070) == 0x0070;
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_write_coef_idx(codec, 0x1b, 0x0e4b);
+@@ -5604,6 +5613,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, alc255fw);
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, alc256fw);
+@@ -6204,6 +6214,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
+ 		alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
+ 		alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0235:
+ 	case 0x10ec0236:
+ 	case 0x10ec0255:
+@@ -6336,6 +6347,24 @@ static void alc_fixup_no_int_mic(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
++					  const struct hda_fixup *fix, int action)
++{
++	static const hda_nid_t conn[] = { 0x02 };
++	static const struct hda_pintbl pincfgs[] = {
++		{ 0x14, 0x90170110 },  /* rear speaker */
++		{ }
++	};
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		snd_hda_apply_pincfgs(codec, pincfgs);
++		/* force front speaker to DAC1 */
++		snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
++		break;
++	}
++}
++
+ /* for hda_fixup_thinkpad_acpi() */
+ #include "thinkpad_helper.c"
+ 
+@@ -7802,6 +7831,8 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b },
+ 			{ }
+ 		},
++		.chained = true,
++		.chain_id = ALC289_FIXUP_ASUS_GA401,
+ 	},
+ 	[ALC285_FIXUP_HP_GPIO_LED] = {
+ 		.type = HDA_FIXUP_FUNC,
+@@ -8113,13 +8144,8 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
+ 	},
+ 	[ALC285_FIXUP_HP_SPECTRE_X360] = {
+-		.type = HDA_FIXUP_PINS,
+-		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x14, 0x90170110 }, /* enable top speaker */
+-			{}
+-		},
+-		.chained = true,
+-		.chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_hp_spectre_x360,
+ 	},
+ 	[ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
+ 		.type = HDA_FIXUP_FUNC,
+@@ -8305,6 +8331,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
++	SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+@@ -8322,13 +8349,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 		      ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
++	SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+@@ -9326,6 +9359,7 @@ static int patch_alc269(struct hda_codec *codec)
+ 		spec->shutup = alc256_shutup;
+ 		spec->init_hook = alc256_init;
+ 		break;
++	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		spec->codec_variant = ALC269_TYPE_ALC256;
+@@ -10617,6 +10651,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+ 	HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
++	HDA_CODEC_ENTRY(0x10ec0230, "ALC236", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index 5b124c4ad5725..11b398be0954f 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -692,7 +692,7 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
+ 	int status, civ, i, step;
+ 	int ack = 0;
+ 
+-	if (!ichdev->prepared || ichdev->suspended)
++	if (!(ichdev->prepared || chip->in_measurement) || ichdev->suspended)
+ 		return;
+ 
+ 	spin_lock_irqsave(&chip->reg_lock, flags);
+diff --git a/sound/soc/atmel/atmel-i2s.c b/sound/soc/atmel/atmel-i2s.c
+index 7c6187e41f2b9..a383c6bef8e09 100644
+--- a/sound/soc/atmel/atmel-i2s.c
++++ b/sound/soc/atmel/atmel-i2s.c
+@@ -200,6 +200,7 @@ struct atmel_i2s_dev {
+ 	unsigned int				fmt;
+ 	const struct atmel_i2s_gck_param	*gck_param;
+ 	const struct atmel_i2s_caps		*caps;
++	int					clk_use_no;
+ };
+ 
+ static irqreturn_t atmel_i2s_interrupt(int irq, void *dev_id)
+@@ -321,9 +322,16 @@ static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
+ {
+ 	struct atmel_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+ 	bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+-	unsigned int mr = 0;
++	unsigned int mr = 0, mr_mask;
+ 	int ret;
+ 
++	mr_mask = ATMEL_I2SC_MR_FORMAT_MASK | ATMEL_I2SC_MR_MODE_MASK |
++		ATMEL_I2SC_MR_DATALENGTH_MASK;
++	if (is_playback)
++		mr_mask |= ATMEL_I2SC_MR_TXMONO;
++	else
++		mr_mask |= ATMEL_I2SC_MR_RXMONO;
++
+ 	switch (dev->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ 	case SND_SOC_DAIFMT_I2S:
+ 		mr |= ATMEL_I2SC_MR_FORMAT_I2S;
+@@ -402,7 +410,7 @@ static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 
+-	return regmap_write(dev->regmap, ATMEL_I2SC_MR, mr);
++	return regmap_update_bits(dev->regmap, ATMEL_I2SC_MR, mr_mask, mr);
+ }
+ 
+ static int atmel_i2s_switch_mck_generator(struct atmel_i2s_dev *dev,
+@@ -495,18 +503,28 @@ static int atmel_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ 	is_master = (mr & ATMEL_I2SC_MR_MODE_MASK) == ATMEL_I2SC_MR_MODE_MASTER;
+ 
+ 	/* If master starts, enable the audio clock. */
+-	if (is_master && mck_enabled)
+-		err = atmel_i2s_switch_mck_generator(dev, true);
+-	if (err)
+-		return err;
++	if (is_master && mck_enabled) {
++		if (!dev->clk_use_no) {
++			err = atmel_i2s_switch_mck_generator(dev, true);
++			if (err)
++				return err;
++		}
++		dev->clk_use_no++;
++	}
+ 
+ 	err = regmap_write(dev->regmap, ATMEL_I2SC_CR, cr);
+ 	if (err)
+ 		return err;
+ 
+ 	/* If master stops, disable the audio clock. */
+-	if (is_master && !mck_enabled)
+-		err = atmel_i2s_switch_mck_generator(dev, false);
++	if (is_master && !mck_enabled) {
++		if (dev->clk_use_no == 1) {
++			err = atmel_i2s_switch_mck_generator(dev, false);
++			if (err)
++				return err;
++		}
++		dev->clk_use_no--;
++	}
+ 
+ 	return err;
+ }
+@@ -542,6 +560,7 @@ static struct snd_soc_dai_driver atmel_i2s_dai = {
+ 	},
+ 	.ops = &atmel_i2s_dai_ops,
+ 	.symmetric_rate = 1,
++	.symmetric_sample_bits = 1,
+ };
+ 
+ static const struct snd_soc_component_driver atmel_i2s_component = {
+diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
+index 866d7c873e3c9..ca2019732013e 100644
+--- a/sound/soc/codecs/cs42l42.h
++++ b/sound/soc/codecs/cs42l42.h
+@@ -77,7 +77,7 @@
+ #define CS42L42_HP_PDN_SHIFT		3
+ #define CS42L42_HP_PDN_MASK		(1 << CS42L42_HP_PDN_SHIFT)
+ #define CS42L42_ADC_PDN_SHIFT		2
+-#define CS42L42_ADC_PDN_MASK		(1 << CS42L42_HP_PDN_SHIFT)
++#define CS42L42_ADC_PDN_MASK		(1 << CS42L42_ADC_PDN_SHIFT)
+ #define CS42L42_PDN_ALL_SHIFT		0
+ #define CS42L42_PDN_ALL_MASK		(1 << CS42L42_PDN_ALL_SHIFT)
+ 
+diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
+index f3a12205cd484..dc520effc61cb 100644
+--- a/sound/soc/codecs/max98373-sdw.c
++++ b/sound/soc/codecs/max98373-sdw.c
+@@ -271,7 +271,7 @@ static __maybe_unused int max98373_resume(struct device *dev)
+ 	struct max98373_priv *max98373 = dev_get_drvdata(dev);
+ 	unsigned long time;
+ 
+-	if (!max98373->hw_init)
++	if (!max98373->first_hw_init)
+ 		return 0;
+ 
+ 	if (!slave->unattach_request)
+@@ -362,7 +362,7 @@ static int max98373_io_init(struct sdw_slave *slave)
+ 	struct device *dev = &slave->dev;
+ 	struct max98373_priv *max98373 = dev_get_drvdata(dev);
+ 
+-	if (max98373->pm_init_once) {
++	if (max98373->first_hw_init) {
+ 		regcache_cache_only(max98373->regmap, false);
+ 		regcache_cache_bypass(max98373->regmap, true);
+ 	}
+@@ -370,7 +370,7 @@ static int max98373_io_init(struct sdw_slave *slave)
+ 	/*
+ 	 * PM runtime is only enabled when a Slave reports as Attached
+ 	 */
+-	if (!max98373->pm_init_once) {
++	if (!max98373->first_hw_init) {
+ 		/* set autosuspend parameters */
+ 		pm_runtime_set_autosuspend_delay(dev, 3000);
+ 		pm_runtime_use_autosuspend(dev);
+@@ -462,12 +462,12 @@ static int max98373_io_init(struct sdw_slave *slave)
+ 	regmap_write(max98373->regmap, MAX98373_R20B5_BDE_EN, 1);
+ 	regmap_write(max98373->regmap, MAX98373_R20E2_LIMITER_EN, 1);
+ 
+-	if (max98373->pm_init_once) {
++	if (max98373->first_hw_init) {
+ 		regcache_cache_bypass(max98373->regmap, false);
+ 		regcache_mark_dirty(max98373->regmap);
+ 	}
+ 
+-	max98373->pm_init_once = true;
++	max98373->first_hw_init = true;
+ 	max98373->hw_init = true;
+ 
+ 	pm_runtime_mark_last_busy(dev);
+@@ -787,6 +787,8 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
+ 	max98373->cache = devm_kcalloc(dev, max98373->cache_num,
+ 				       sizeof(*max98373->cache),
+ 				       GFP_KERNEL);
++	if (!max98373->cache)
++		return -ENOMEM;
+ 
+ 	for (i = 0; i < max98373->cache_num; i++)
+ 		max98373->cache[i].reg = max98373_sdw_cache_reg[i];
+@@ -795,7 +797,7 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
+ 	max98373_slot_config(dev, max98373);
+ 
+ 	max98373->hw_init = false;
+-	max98373->pm_init_once = false;
++	max98373->first_hw_init = false;
+ 
+ 	/* codec registration  */
+ 	ret = devm_snd_soc_register_component(dev, &soc_codec_dev_max98373_sdw,
+diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
+index 71f5a5228f34b..c09c73678a9a5 100644
+--- a/sound/soc/codecs/max98373.h
++++ b/sound/soc/codecs/max98373.h
+@@ -223,7 +223,7 @@ struct max98373_priv {
+ 	/* variables to support soundwire */
+ 	struct sdw_slave *slave;
+ 	bool hw_init;
+-	bool pm_init_once;
++	bool first_hw_init;
+ 	int slot;
+ 	unsigned int rx_mask;
+ };
+diff --git a/sound/soc/codecs/rk3328_codec.c b/sound/soc/codecs/rk3328_codec.c
+index bfefefcc76d81..758d439e8c7a5 100644
+--- a/sound/soc/codecs/rk3328_codec.c
++++ b/sound/soc/codecs/rk3328_codec.c
+@@ -474,7 +474,8 @@ static int rk3328_platform_probe(struct platform_device *pdev)
+ 	rk3328->pclk = devm_clk_get(&pdev->dev, "pclk");
+ 	if (IS_ERR(rk3328->pclk)) {
+ 		dev_err(&pdev->dev, "can't get acodec pclk\n");
+-		return PTR_ERR(rk3328->pclk);
++		ret = PTR_ERR(rk3328->pclk);
++		goto err_unprepare_mclk;
+ 	}
+ 
+ 	ret = clk_prepare_enable(rk3328->pclk);
+@@ -484,19 +485,34 @@ static int rk3328_platform_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(base))
+-		return PTR_ERR(base);
++	if (IS_ERR(base)) {
++		ret = PTR_ERR(base);
++		goto err_unprepare_pclk;
++	}
+ 
+ 	rk3328->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ 					       &rk3328_codec_regmap_config);
+-	if (IS_ERR(rk3328->regmap))
+-		return PTR_ERR(rk3328->regmap);
++	if (IS_ERR(rk3328->regmap)) {
++		ret = PTR_ERR(rk3328->regmap);
++		goto err_unprepare_pclk;
++	}
+ 
+ 	platform_set_drvdata(pdev, rk3328);
+ 
+-	return devm_snd_soc_register_component(&pdev->dev, &soc_codec_rk3328,
++	ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_rk3328,
+ 					       rk3328_dai,
+ 					       ARRAY_SIZE(rk3328_dai));
++	if (ret)
++		goto err_unprepare_pclk;
++
++	return 0;
++
++err_unprepare_pclk:
++	clk_disable_unprepare(rk3328->pclk);
++
++err_unprepare_mclk:
++	clk_disable_unprepare(rk3328->mclk);
++	return ret;
+ }
+ 
+ static const struct of_device_id rk3328_codec_of_match[] __maybe_unused = {
+diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
+index afd2c3b687ccb..0ec741cf70fce 100644
+--- a/sound/soc/codecs/rt1308-sdw.c
++++ b/sound/soc/codecs/rt1308-sdw.c
+@@ -709,7 +709,7 @@ static int __maybe_unused rt1308_dev_resume(struct device *dev)
+ 	struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev);
+ 	unsigned long time;
+ 
+-	if (!rt1308->hw_init)
++	if (!rt1308->first_hw_init)
+ 		return 0;
+ 
+ 	if (!slave->unattach_request)
+diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
+index 93c1603b42f10..8265b537ff4f3 100644
+--- a/sound/soc/codecs/rt5682-i2c.c
++++ b/sound/soc/codecs/rt5682-i2c.c
+@@ -273,6 +273,7 @@ static void rt5682_i2c_shutdown(struct i2c_client *client)
+ {
+ 	struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
+ 
++	disable_irq(client->irq);
+ 	cancel_delayed_work_sync(&rt5682->jack_detect_work);
+ 	cancel_delayed_work_sync(&rt5682->jd_check_work);
+ 
+diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
+index d1dd7f720ba48..b4649b599eaa9 100644
+--- a/sound/soc/codecs/rt5682-sdw.c
++++ b/sound/soc/codecs/rt5682-sdw.c
+@@ -400,6 +400,11 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
+ 
+ 	pm_runtime_get_noresume(&slave->dev);
+ 
++	if (rt5682->first_hw_init) {
++		regcache_cache_only(rt5682->regmap, false);
++		regcache_cache_bypass(rt5682->regmap, true);
++	}
++
+ 	while (loop > 0) {
+ 		regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
+ 		if (val == DEVICE_ID)
+@@ -408,14 +413,11 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
+ 		usleep_range(30000, 30005);
+ 		loop--;
+ 	}
++
+ 	if (val != DEVICE_ID) {
+ 		dev_err(dev, "Device with ID register %x is not rt5682\n", val);
+-		return -ENODEV;
+-	}
+-
+-	if (rt5682->first_hw_init) {
+-		regcache_cache_only(rt5682->regmap, false);
+-		regcache_cache_bypass(rt5682->regmap, true);
++		ret = -ENODEV;
++		goto err_nodev;
+ 	}
+ 
+ 	rt5682_calibrate(rt5682);
+@@ -486,10 +488,11 @@ reinit:
+ 	rt5682->hw_init = true;
+ 	rt5682->first_hw_init = true;
+ 
++err_nodev:
+ 	pm_runtime_mark_last_busy(&slave->dev);
+ 	pm_runtime_put_autosuspend(&slave->dev);
+ 
+-	dev_dbg(&slave->dev, "%s hw_init complete\n", __func__);
++	dev_dbg(&slave->dev, "%s hw_init complete: %d\n", __func__, ret);
+ 
+ 	return ret;
+ }
+@@ -743,7 +746,7 @@ static int __maybe_unused rt5682_dev_resume(struct device *dev)
+ 	struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+ 	unsigned long time;
+ 
+-	if (!rt5682->hw_init)
++	if (!rt5682->first_hw_init)
+ 		return 0;
+ 
+ 	if (!slave->unattach_request)
+diff --git a/sound/soc/codecs/rt700-sdw.c b/sound/soc/codecs/rt700-sdw.c
+index 4001612dfd737..fc6299a6022d6 100644
+--- a/sound/soc/codecs/rt700-sdw.c
++++ b/sound/soc/codecs/rt700-sdw.c
+@@ -498,7 +498,7 @@ static int __maybe_unused rt700_dev_resume(struct device *dev)
+ 	struct rt700_priv *rt700 = dev_get_drvdata(dev);
+ 	unsigned long time;
+ 
+-	if (!rt700->hw_init)
++	if (!rt700->first_hw_init)
+ 		return 0;
+ 
+ 	if (!slave->unattach_request)
+diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
+index 2beb4286d997b..bfa9fede7f908 100644
+--- a/sound/soc/codecs/rt711-sdw.c
++++ b/sound/soc/codecs/rt711-sdw.c
+@@ -501,7 +501,7 @@ static int __maybe_unused rt711_dev_resume(struct device *dev)
+ 	struct rt711_priv *rt711 = dev_get_drvdata(dev);
+ 	unsigned long time;
+ 
+-	if (!rt711->hw_init)
++	if (!rt711->first_hw_init)
+ 		return 0;
+ 
+ 	if (!slave->unattach_request)
+diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
+index 71dd3b97a4590..157a97acc6c28 100644
+--- a/sound/soc/codecs/rt715-sdw.c
++++ b/sound/soc/codecs/rt715-sdw.c
+@@ -541,7 +541,7 @@ static int __maybe_unused rt715_dev_resume(struct device *dev)
+ 	struct rt715_priv *rt715 = dev_get_drvdata(dev);
+ 	unsigned long time;
+ 
+-	if (!rt715->hw_init)
++	if (!rt715->first_hw_init)
+ 		return 0;
+ 
+ 	if (!slave->unattach_request)
+diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
+index 174e558224d8c..6d5e9c0acdb4f 100644
+--- a/sound/soc/fsl/fsl_spdif.c
++++ b/sound/soc/fsl/fsl_spdif.c
+@@ -1400,14 +1400,27 @@ static int fsl_spdif_probe(struct platform_device *pdev)
+ 					      &spdif_priv->cpu_dai_drv, 1);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+-		return ret;
++		goto err_pm_disable;
+ 	}
+ 
+ 	ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
+-	if (ret && ret != -EPROBE_DEFER)
+-		dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
++	if (ret) {
++		dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
++		goto err_pm_disable;
++	}
+ 
+ 	return ret;
++
++err_pm_disable:
++	pm_runtime_disable(&pdev->dev);
++	return ret;
++}
++
++static int fsl_spdif_remove(struct platform_device *pdev)
++{
++	pm_runtime_disable(&pdev->dev);
++
++	return 0;
+ }
+ 
+ #ifdef CONFIG_PM
+@@ -1416,6 +1429,9 @@ static int fsl_spdif_runtime_suspend(struct device *dev)
+ 	struct fsl_spdif_priv *spdif_priv = dev_get_drvdata(dev);
+ 	int i;
+ 
++	/* Disable all the interrupts */
++	regmap_update_bits(spdif_priv->regmap, REG_SPDIF_SIE, 0xffffff, 0);
++
+ 	regmap_read(spdif_priv->regmap, REG_SPDIF_SRPC,
+ 			&spdif_priv->regcache_srpc);
+ 	regcache_cache_only(spdif_priv->regmap, true);
+@@ -1512,6 +1528,7 @@ static struct platform_driver fsl_spdif_driver = {
+ 		.pm = &fsl_spdif_pm,
+ 	},
+ 	.probe = fsl_spdif_probe,
++	.remove = fsl_spdif_remove,
+ };
+ 
+ module_platform_driver(fsl_spdif_driver);
+diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
+index 6dd0a5fcd4556..070e3f32859fa 100644
+--- a/sound/soc/fsl/fsl_xcvr.c
++++ b/sound/soc/fsl/fsl_xcvr.c
+@@ -1236,6 +1236,16 @@ static __maybe_unused int fsl_xcvr_runtime_suspend(struct device *dev)
+ 	struct fsl_xcvr *xcvr = dev_get_drvdata(dev);
+ 	int ret;
+ 
++	/*
++	 * Clear interrupts. When a stream starts or resumes after
++	 * suspend, interrupts are enabled in prepare(), so there is
++	 * no need to enable them again in resume().
++	 */
++	ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_IER0,
++				 FSL_XCVR_IRQ_EARC_ALL, 0);
++	if (ret < 0)
++		dev_err(dev, "Failed to clear IER0: %d\n", ret);
++
+ 	/* Assert M0+ reset */
+ 	ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
+ 				 FSL_XCVR_EXT_CTRL_CORE_RESET,
+diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
+index 907f5f1f7b445..ff05b9779e4be 100644
+--- a/sound/soc/hisilicon/hi6210-i2s.c
++++ b/sound/soc/hisilicon/hi6210-i2s.c
+@@ -102,18 +102,15 @@ static int hi6210_i2s_startup(struct snd_pcm_substream *substream,
+ 
+ 	for (n = 0; n < i2s->clocks; n++) {
+ 		ret = clk_prepare_enable(i2s->clk[n]);
+-		if (ret) {
+-			while (n--)
+-				clk_disable_unprepare(i2s->clk[n]);
+-			return ret;
+-		}
++		if (ret)
++			goto err_unprepare_clk;
+ 	}
+ 
+ 	ret = clk_set_rate(i2s->clk[CLK_I2S_BASE], 49152000);
+ 	if (ret) {
+ 		dev_err(i2s->dev, "%s: setting 49.152MHz base rate failed %d\n",
+ 			__func__, ret);
+-		return ret;
++		goto err_unprepare_clk;
+ 	}
+ 
+ 	/* enable clock before frequency division */
+@@ -165,6 +162,11 @@ static int hi6210_i2s_startup(struct snd_pcm_substream *substream,
+ 	hi6210_write_reg(i2s, HII2S_SW_RST_N, val);
+ 
+ 	return 0;
++
++err_unprepare_clk:
++	while (n--)
++		clk_disable_unprepare(i2s->clk[n]);
++	return ret;
+ }
+ 
+ static void hi6210_i2s_shutdown(struct snd_pcm_substream *substream,
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index ecd3f90f4bbea..dfad2ad129abb 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -196,6 +196,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
+ 					SOF_SDW_TGL_HDMI |
++					SOF_RT715_DAI_ID_FIX |
+ 					SOF_SDW_PCH_DMIC),
+ 	},
+ 	{}
+diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
+index a554c57b64605..6299dee9a6deb 100644
+--- a/sound/soc/mediatek/common/mtk-btcvsd.c
++++ b/sound/soc/mediatek/common/mtk-btcvsd.c
+@@ -1281,7 +1281,7 @@ static const struct snd_soc_component_driver mtk_btcvsd_snd_platform = {
+ 
+ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
+ {
+-	int ret = 0;
++	int ret;
+ 	int irq_id;
+ 	u32 offset[5] = {0, 0, 0, 0, 0};
+ 	struct mtk_btcvsd_snd *btcvsd;
+@@ -1337,7 +1337,8 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
+ 	btcvsd->bt_sram_bank2_base = of_iomap(dev->of_node, 1);
+ 	if (!btcvsd->bt_sram_bank2_base) {
+ 		dev_err(dev, "iomap bt_sram_bank2_base fail\n");
+-		return -EIO;
++		ret = -EIO;
++		goto unmap_pkv_err;
+ 	}
+ 
+ 	btcvsd->infra = syscon_regmap_lookup_by_phandle(dev->of_node,
+@@ -1345,7 +1346,8 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
+ 	if (IS_ERR(btcvsd->infra)) {
+ 		dev_err(dev, "cannot find infra controller: %ld\n",
+ 			PTR_ERR(btcvsd->infra));
+-		return PTR_ERR(btcvsd->infra);
++		ret = PTR_ERR(btcvsd->infra);
++		goto unmap_bank2_err;
+ 	}
+ 
+ 	/* get offset */
+@@ -1354,7 +1356,7 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
+ 					 ARRAY_SIZE(offset));
+ 	if (ret) {
+ 		dev_warn(dev, "%s(), get offset fail, ret %d\n", __func__, ret);
+-		return ret;
++		goto unmap_bank2_err;
+ 	}
+ 	btcvsd->infra_misc_offset = offset[0];
+ 	btcvsd->conn_bt_cvsd_mask = offset[1];
+@@ -1373,8 +1375,18 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
+ 	mtk_btcvsd_snd_set_state(btcvsd, btcvsd->tx, BT_SCO_STATE_IDLE);
+ 	mtk_btcvsd_snd_set_state(btcvsd, btcvsd->rx, BT_SCO_STATE_IDLE);
+ 
+-	return devm_snd_soc_register_component(dev, &mtk_btcvsd_snd_platform,
+-					       NULL, 0);
++	ret = devm_snd_soc_register_component(dev, &mtk_btcvsd_snd_platform,
++					      NULL, 0);
++	if (ret)
++		goto unmap_bank2_err;
++
++	return 0;
++
++unmap_bank2_err:
++	iounmap(btcvsd->bt_sram_bank2_base);
++unmap_pkv_err:
++	iounmap(btcvsd->bt_pkv_base);
++	return ret;
+ }
+ 
+ static int mtk_btcvsd_snd_remove(struct platform_device *pdev)
+diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
+index abdfd9cf91e2a..19c604b2e2486 100644
+--- a/sound/soc/sh/rcar/adg.c
++++ b/sound/soc/sh/rcar/adg.c
+@@ -289,7 +289,6 @@ static void rsnd_adg_set_ssi_clk(struct rsnd_mod *ssi_mod, u32 val)
+ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
+ {
+ 	struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
+-	struct clk *clk;
+ 	int i;
+ 	int sel_table[] = {
+ 		[CLKA] = 0x1,
+@@ -302,10 +301,9 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
+ 	 * find suitable clock from
+ 	 * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
+ 	 */
+-	for_each_rsnd_clk(clk, adg, i) {
++	for (i = 0; i < CLKMAX; i++)
+ 		if (rate == adg->clk_rate[i])
+ 			return sel_table[i];
+-	}
+ 
+ 	/*
+ 	 * find divided clock from BRGA/BRGB
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 2287f8c653150..eb216fef4ba75 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -223,9 +223,11 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
+ 				continue;
+ 			/* C-Media CM6501 mislabels its 96 kHz altsetting */
+ 			/* Terratec Aureon 7.1 USB C-Media 6206, too */
++			/* Ozone Z90 USB C-Media, too */
+ 			if (rate == 48000 && nr_rates == 1 &&
+ 			    (chip->usb_id == USB_ID(0x0d8c, 0x0201) ||
+ 			     chip->usb_id == USB_ID(0x0d8c, 0x0102) ||
++			     chip->usb_id == USB_ID(0x0d8c, 0x0078) ||
+ 			     chip->usb_id == USB_ID(0x0ccd, 0x00b1)) &&
+ 			    fp->altsetting == 5 && fp->maxpacksize == 392)
+ 				rate = 96000;
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index b004b2e63a5d8..6a7415cdef6c1 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -3279,8 +3279,9 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
+ 				    struct usb_mixer_elem_list *list)
+ {
+ 	struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
+-	static const char * const val_types[] = {"BOOLEAN", "INV_BOOLEAN",
+-				    "S8", "U8", "S16", "U16"};
++	static const char * const val_types[] = {
++		"BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32",
++	};
+ 	snd_iprintf(buffer, "    Info: id=%i, control=%i, cmask=0x%x, "
+ 			    "channels=%i, type=\"%s\"\n", cval->head.id,
+ 			    cval->control, cval->cmask, cval->channels,
+@@ -3590,6 +3591,9 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
+ 	struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
+ 	int c, err, idx;
+ 
++	if (cval->val_type == USB_MIXER_BESPOKEN)
++		return 0;
++
+ 	if (cval->cmask) {
+ 		idx = 0;
+ 		for (c = 0; c < MAX_CHANNELS; c++) {
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index c29e27ac43a7a..6d20ba7ee88fd 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -55,6 +55,7 @@ enum {
+ 	USB_MIXER_U16,
+ 	USB_MIXER_S32,
+ 	USB_MIXER_U32,
++	USB_MIXER_BESPOKEN,	/* non-standard type */
+ };
+ 
+ typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *buffer,
+diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
+index 4caf379d5b991..bca3e7fe27df6 100644
+--- a/sound/usb/mixer_scarlett_gen2.c
++++ b/sound/usb/mixer_scarlett_gen2.c
+@@ -949,10 +949,15 @@ static int scarlett2_add_new_ctl(struct usb_mixer_interface *mixer,
+ 	if (!elem)
+ 		return -ENOMEM;
+ 
++	/* We set the USB_MIXER_BESPOKEN type so that the core USB mixer
++	 * code ignores these controls for resume and other operations.
++	 * Also, the head.id field is set to 0, as we don't use this field.
++	 */
+ 	elem->head.mixer = mixer;
+ 	elem->control = index;
+-	elem->head.id = index;
++	elem->head.id = 0;
+ 	elem->channels = channels;
++	elem->val_type = USB_MIXER_BESPOKEN;
+ 
+ 	kctl = snd_ctl_new1(ncontrol, elem);
+ 	if (!kctl) {
+diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
+index d9afb730136a4..0f36b9edd3f55 100644
+--- a/tools/bpf/bpftool/main.c
++++ b/tools/bpf/bpftool/main.c
+@@ -340,8 +340,10 @@ static int do_batch(int argc, char **argv)
+ 		n_argc = make_args(buf, n_argv, BATCH_ARG_NB_MAX, lines);
+ 		if (!n_argc)
+ 			continue;
+-		if (n_argc < 0)
++		if (n_argc < 0) {
++			err = n_argc;
+ 			goto err_close;
++		}
+ 
+ 		if (json_output) {
+ 			jsonw_start_object(json_wtr);
+diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
+index 80d966cfcaa14..7ce4558a932fd 100644
+--- a/tools/bpf/resolve_btfids/main.c
++++ b/tools/bpf/resolve_btfids/main.c
+@@ -656,6 +656,9 @@ static int symbols_patch(struct object *obj)
+ 	if (sets_patch(obj))
+ 		return -1;
+ 
++	/* Set type to ensure endian translation occurs. */
++	obj->efile.idlist->d_type = ELF_T_WORD;
++
+ 	elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY);
+ 
+ 	err = elf_update(obj->efile.elf, ELF_C_WRITE);
+diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
+index dbdffb6673feb..0bf6b4d4c90a7 100644
+--- a/tools/perf/util/llvm-utils.c
++++ b/tools/perf/util/llvm-utils.c
+@@ -504,6 +504,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
+ 			goto errout;
+ 		}
+ 
++		err = -ENOMEM;
+ 		if (asprintf(&pipe_template, "%s -emit-llvm | %s -march=bpf %s -filetype=obj -o -",
+ 			      template, llc_path, opts) < 0) {
+ 			pr_err("ERROR:\tnot enough memory to setup command line\n");
+@@ -524,6 +525,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
+ 
+ 	pr_debug("llvm compiling command template: %s\n", template);
+ 
++	err = -ENOMEM;
+ 	if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0)
+ 		goto errout;
+ 
+diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
+index c83c2c6564e01..23dc5014e7119 100644
+--- a/tools/perf/util/scripting-engines/trace-event-python.c
++++ b/tools/perf/util/scripting-engines/trace-event-python.c
+@@ -934,7 +934,7 @@ static PyObject *tuple_new(unsigned int sz)
+ 	return t;
+ }
+ 
+-static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
++static int tuple_set_s64(PyObject *t, unsigned int pos, s64 val)
+ {
+ #if BITS_PER_LONG == 64
+ 	return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
+@@ -944,6 +944,22 @@ static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
+ #endif
+ }
+ 
++/*
++ * Databases support only signed 64-bit numbers, so even though we are
++ * exporting a u64, it must be as s64.
++ */
++#define tuple_set_d64 tuple_set_s64
++
++static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
++{
++#if BITS_PER_LONG == 64
++	return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLong(val));
++#endif
++#if BITS_PER_LONG == 32
++	return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLongLong(val));
++#endif
++}
++
+ static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val)
+ {
+ 	return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
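As the new comment in the hunk above explains, the database export path stores only signed 64-bit numbers, so a u64 with the top bit set has to be handed over as its s64 bit pattern (tuple_set_d64), while tuple_set_u64 becomes a genuine unsigned conversion. A small demonstration of what the signed reinterpretation does to such a value:

#include <stdio.h>

typedef unsigned long long u64;
typedef long long s64;

int main(void)
{
	u64 big = 0x8000000000000000ULL;

	/* Read back through a signed 64-bit column, the same bits come
	 * out negative: exactly what a database storing s64 will do. */
	printf("u64: %llu\n", big);
	printf("s64: %lld\n", (s64)big);
	return 0;
}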
+@@ -967,7 +983,7 @@ static int python_export_evsel(struct db_export *dbe, struct evsel *evsel)
+ 
+ 	t = tuple_new(2);
+ 
+-	tuple_set_u64(t, 0, evsel->db_id);
++	tuple_set_d64(t, 0, evsel->db_id);
+ 	tuple_set_string(t, 1, evsel__name(evsel));
+ 
+ 	call_object(tables->evsel_handler, t, "evsel_table");
+@@ -985,7 +1001,7 @@ static int python_export_machine(struct db_export *dbe,
+ 
+ 	t = tuple_new(3);
+ 
+-	tuple_set_u64(t, 0, machine->db_id);
++	tuple_set_d64(t, 0, machine->db_id);
+ 	tuple_set_s32(t, 1, machine->pid);
+ 	tuple_set_string(t, 2, machine->root_dir ? machine->root_dir : "");
+ 
+@@ -1004,9 +1020,9 @@ static int python_export_thread(struct db_export *dbe, struct thread *thread,
+ 
+ 	t = tuple_new(5);
+ 
+-	tuple_set_u64(t, 0, thread->db_id);
+-	tuple_set_u64(t, 1, machine->db_id);
+-	tuple_set_u64(t, 2, main_thread_db_id);
++	tuple_set_d64(t, 0, thread->db_id);
++	tuple_set_d64(t, 1, machine->db_id);
++	tuple_set_d64(t, 2, main_thread_db_id);
+ 	tuple_set_s32(t, 3, thread->pid_);
+ 	tuple_set_s32(t, 4, thread->tid);
+ 
+@@ -1025,10 +1041,10 @@ static int python_export_comm(struct db_export *dbe, struct comm *comm,
+ 
+ 	t = tuple_new(5);
+ 
+-	tuple_set_u64(t, 0, comm->db_id);
++	tuple_set_d64(t, 0, comm->db_id);
+ 	tuple_set_string(t, 1, comm__str(comm));
+-	tuple_set_u64(t, 2, thread->db_id);
+-	tuple_set_u64(t, 3, comm->start);
++	tuple_set_d64(t, 2, thread->db_id);
++	tuple_set_d64(t, 3, comm->start);
+ 	tuple_set_s32(t, 4, comm->exec);
+ 
+ 	call_object(tables->comm_handler, t, "comm_table");
+@@ -1046,9 +1062,9 @@ static int python_export_comm_thread(struct db_export *dbe, u64 db_id,
+ 
+ 	t = tuple_new(3);
+ 
+-	tuple_set_u64(t, 0, db_id);
+-	tuple_set_u64(t, 1, comm->db_id);
+-	tuple_set_u64(t, 2, thread->db_id);
++	tuple_set_d64(t, 0, db_id);
++	tuple_set_d64(t, 1, comm->db_id);
++	tuple_set_d64(t, 2, thread->db_id);
+ 
+ 	call_object(tables->comm_thread_handler, t, "comm_thread_table");
+ 
+@@ -1068,8 +1084,8 @@ static int python_export_dso(struct db_export *dbe, struct dso *dso,
+ 
+ 	t = tuple_new(5);
+ 
+-	tuple_set_u64(t, 0, dso->db_id);
+-	tuple_set_u64(t, 1, machine->db_id);
++	tuple_set_d64(t, 0, dso->db_id);
++	tuple_set_d64(t, 1, machine->db_id);
+ 	tuple_set_string(t, 2, dso->short_name);
+ 	tuple_set_string(t, 3, dso->long_name);
+ 	tuple_set_string(t, 4, sbuild_id);
+@@ -1090,10 +1106,10 @@ static int python_export_symbol(struct db_export *dbe, struct symbol *sym,
+ 
+ 	t = tuple_new(6);
+ 
+-	tuple_set_u64(t, 0, *sym_db_id);
+-	tuple_set_u64(t, 1, dso->db_id);
+-	tuple_set_u64(t, 2, sym->start);
+-	tuple_set_u64(t, 3, sym->end);
++	tuple_set_d64(t, 0, *sym_db_id);
++	tuple_set_d64(t, 1, dso->db_id);
++	tuple_set_d64(t, 2, sym->start);
++	tuple_set_d64(t, 3, sym->end);
+ 	tuple_set_s32(t, 4, sym->binding);
+ 	tuple_set_string(t, 5, sym->name);
+ 
+@@ -1130,30 +1146,30 @@ static void python_export_sample_table(struct db_export *dbe,
+ 
+ 	t = tuple_new(24);
+ 
+-	tuple_set_u64(t, 0, es->db_id);
+-	tuple_set_u64(t, 1, es->evsel->db_id);
+-	tuple_set_u64(t, 2, es->al->maps->machine->db_id);
+-	tuple_set_u64(t, 3, es->al->thread->db_id);
+-	tuple_set_u64(t, 4, es->comm_db_id);
+-	tuple_set_u64(t, 5, es->dso_db_id);
+-	tuple_set_u64(t, 6, es->sym_db_id);
+-	tuple_set_u64(t, 7, es->offset);
+-	tuple_set_u64(t, 8, es->sample->ip);
+-	tuple_set_u64(t, 9, es->sample->time);
++	tuple_set_d64(t, 0, es->db_id);
++	tuple_set_d64(t, 1, es->evsel->db_id);
++	tuple_set_d64(t, 2, es->al->maps->machine->db_id);
++	tuple_set_d64(t, 3, es->al->thread->db_id);
++	tuple_set_d64(t, 4, es->comm_db_id);
++	tuple_set_d64(t, 5, es->dso_db_id);
++	tuple_set_d64(t, 6, es->sym_db_id);
++	tuple_set_d64(t, 7, es->offset);
++	tuple_set_d64(t, 8, es->sample->ip);
++	tuple_set_d64(t, 9, es->sample->time);
+ 	tuple_set_s32(t, 10, es->sample->cpu);
+-	tuple_set_u64(t, 11, es->addr_dso_db_id);
+-	tuple_set_u64(t, 12, es->addr_sym_db_id);
+-	tuple_set_u64(t, 13, es->addr_offset);
+-	tuple_set_u64(t, 14, es->sample->addr);
+-	tuple_set_u64(t, 15, es->sample->period);
+-	tuple_set_u64(t, 16, es->sample->weight);
+-	tuple_set_u64(t, 17, es->sample->transaction);
+-	tuple_set_u64(t, 18, es->sample->data_src);
++	tuple_set_d64(t, 11, es->addr_dso_db_id);
++	tuple_set_d64(t, 12, es->addr_sym_db_id);
++	tuple_set_d64(t, 13, es->addr_offset);
++	tuple_set_d64(t, 14, es->sample->addr);
++	tuple_set_d64(t, 15, es->sample->period);
++	tuple_set_d64(t, 16, es->sample->weight);
++	tuple_set_d64(t, 17, es->sample->transaction);
++	tuple_set_d64(t, 18, es->sample->data_src);
+ 	tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
+ 	tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
+-	tuple_set_u64(t, 21, es->call_path_id);
+-	tuple_set_u64(t, 22, es->sample->insn_cnt);
+-	tuple_set_u64(t, 23, es->sample->cyc_cnt);
++	tuple_set_d64(t, 21, es->call_path_id);
++	tuple_set_d64(t, 22, es->sample->insn_cnt);
++	tuple_set_d64(t, 23, es->sample->cyc_cnt);
+ 
+ 	call_object(tables->sample_handler, t, "sample_table");
+ 
+@@ -1167,8 +1183,8 @@ static void python_export_synth(struct db_export *dbe, struct export_sample *es)
+ 
+ 	t = tuple_new(3);
+ 
+-	tuple_set_u64(t, 0, es->db_id);
+-	tuple_set_u64(t, 1, es->evsel->core.attr.config);
++	tuple_set_d64(t, 0, es->db_id);
++	tuple_set_d64(t, 1, es->evsel->core.attr.config);
+ 	tuple_set_bytes(t, 2, es->sample->raw_data, es->sample->raw_size);
+ 
+ 	call_object(tables->synth_handler, t, "synth_data");
+@@ -1200,10 +1216,10 @@ static int python_export_call_path(struct db_export *dbe, struct call_path *cp)
+ 
+ 	t = tuple_new(4);
+ 
+-	tuple_set_u64(t, 0, cp->db_id);
+-	tuple_set_u64(t, 1, parent_db_id);
+-	tuple_set_u64(t, 2, sym_db_id);
+-	tuple_set_u64(t, 3, cp->ip);
++	tuple_set_d64(t, 0, cp->db_id);
++	tuple_set_d64(t, 1, parent_db_id);
++	tuple_set_d64(t, 2, sym_db_id);
++	tuple_set_d64(t, 3, cp->ip);
+ 
+ 	call_object(tables->call_path_handler, t, "call_path_table");
+ 
+@@ -1221,20 +1237,20 @@ static int python_export_call_return(struct db_export *dbe,
+ 
+ 	t = tuple_new(14);
+ 
+-	tuple_set_u64(t, 0, cr->db_id);
+-	tuple_set_u64(t, 1, cr->thread->db_id);
+-	tuple_set_u64(t, 2, comm_db_id);
+-	tuple_set_u64(t, 3, cr->cp->db_id);
+-	tuple_set_u64(t, 4, cr->call_time);
+-	tuple_set_u64(t, 5, cr->return_time);
+-	tuple_set_u64(t, 6, cr->branch_count);
+-	tuple_set_u64(t, 7, cr->call_ref);
+-	tuple_set_u64(t, 8, cr->return_ref);
+-	tuple_set_u64(t, 9, cr->cp->parent->db_id);
++	tuple_set_d64(t, 0, cr->db_id);
++	tuple_set_d64(t, 1, cr->thread->db_id);
++	tuple_set_d64(t, 2, comm_db_id);
++	tuple_set_d64(t, 3, cr->cp->db_id);
++	tuple_set_d64(t, 4, cr->call_time);
++	tuple_set_d64(t, 5, cr->return_time);
++	tuple_set_d64(t, 6, cr->branch_count);
++	tuple_set_d64(t, 7, cr->call_ref);
++	tuple_set_d64(t, 8, cr->return_ref);
++	tuple_set_d64(t, 9, cr->cp->parent->db_id);
+ 	tuple_set_s32(t, 10, cr->flags);
+-	tuple_set_u64(t, 11, cr->parent_db_id);
+-	tuple_set_u64(t, 12, cr->insn_count);
+-	tuple_set_u64(t, 13, cr->cyc_count);
++	tuple_set_d64(t, 11, cr->parent_db_id);
++	tuple_set_d64(t, 12, cr->insn_count);
++	tuple_set_d64(t, 13, cr->cyc_count);
+ 
+ 	call_object(tables->call_return_handler, t, "call_return_table");
+ 
+@@ -1254,14 +1270,14 @@ static int python_export_context_switch(struct db_export *dbe, u64 db_id,
+ 
+ 	t = tuple_new(9);
+ 
+-	tuple_set_u64(t, 0, db_id);
+-	tuple_set_u64(t, 1, machine->db_id);
+-	tuple_set_u64(t, 2, sample->time);
++	tuple_set_d64(t, 0, db_id);
++	tuple_set_d64(t, 1, machine->db_id);
++	tuple_set_d64(t, 2, sample->time);
+ 	tuple_set_s32(t, 3, sample->cpu);
+-	tuple_set_u64(t, 4, th_out_id);
+-	tuple_set_u64(t, 5, comm_out_id);
+-	tuple_set_u64(t, 6, th_in_id);
+-	tuple_set_u64(t, 7, comm_in_id);
++	tuple_set_d64(t, 4, th_out_id);
++	tuple_set_d64(t, 5, comm_out_id);
++	tuple_set_d64(t, 6, th_in_id);
++	tuple_set_d64(t, 7, comm_in_id);
+ 	tuple_set_s32(t, 8, flags);
+ 
+ 	call_object(tables->context_switch_handler, t, "context_switch");
+diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
+index 582feb88eca34..3ff8d64369d71 100644
+--- a/tools/power/x86/intel-speed-select/isst-config.c
++++ b/tools/power/x86/intel-speed-select/isst-config.c
+@@ -106,6 +106,22 @@ int is_skx_based_platform(void)
+ 	return 0;
+ }
+ 
++int is_spr_platform(void)
++{
++	if (cpu_model == 0x8F)
++		return 1;
++
++	return 0;
++}
++
++int is_icx_platform(void)
++{
++	if (cpu_model == 0x6A || cpu_model == 0x6C)
++		return 1;
++
++	return 0;
++}
++
+ static int update_cpu_model(void)
+ {
+ 	unsigned int ebx, ecx, edx;
+diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
+index 6a26d57699845..4431c8a0d40ae 100644
+--- a/tools/power/x86/intel-speed-select/isst-core.c
++++ b/tools/power/x86/intel-speed-select/isst-core.c
+@@ -201,6 +201,7 @@ void isst_get_uncore_mem_freq(int cpu, int config_index,
+ {
+ 	unsigned int resp;
+ 	int ret;
++
+ 	ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_MEM_FREQ,
+ 				     0, config_index, &resp);
+ 	if (ret) {
+@@ -209,6 +210,20 @@ void isst_get_uncore_mem_freq(int cpu, int config_index,
+ 	}
+ 
+ 	ctdp_level->mem_freq = resp & GENMASK(7, 0);
++	if (is_spr_platform()) {
++		ctdp_level->mem_freq *= 200;
++	} else if (is_icx_platform()) {
++		if (ctdp_level->mem_freq < 7) {
++			ctdp_level->mem_freq = (12 - ctdp_level->mem_freq) * 133.33 * 2 * 10;
++			ctdp_level->mem_freq /= 10;
++			if (ctdp_level->mem_freq % 10 > 5)
++				ctdp_level->mem_freq++;
++		} else {
++			ctdp_level->mem_freq = 0;
++		}
++	} else {
++		ctdp_level->mem_freq = 0;
++	}
+ 	debug_printf(
+ 		"cpu:%d ctdp:%d CONFIG_TDP_GET_MEM_FREQ resp:%x uncore mem_freq:%d\n",
+ 		cpu, config_index, resp, ctdp_level->mem_freq);
+diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
+index 3bf1820c0da11..f97d8859ada72 100644
+--- a/tools/power/x86/intel-speed-select/isst-display.c
++++ b/tools/power/x86/intel-speed-select/isst-display.c
+@@ -446,7 +446,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
+ 		if (ctdp_level->mem_freq) {
+ 			snprintf(header, sizeof(header), "mem-frequency(MHz)");
+ 			snprintf(value, sizeof(value), "%d",
+-				 ctdp_level->mem_freq * DISP_FREQ_MULTIPLIER);
++				 ctdp_level->mem_freq);
+ 			format_and_print(outf, level + 2, header, value);
+ 		}
+ 
+diff --git a/tools/power/x86/intel-speed-select/isst.h b/tools/power/x86/intel-speed-select/isst.h
+index 0cac6c54be873..1aa15d5ea57ce 100644
+--- a/tools/power/x86/intel-speed-select/isst.h
++++ b/tools/power/x86/intel-speed-select/isst.h
+@@ -257,5 +257,7 @@ extern int get_cpufreq_base_freq(int cpu);
+ extern int isst_read_pm_config(int cpu, int *cp_state, int *cp_cap);
+ extern void isst_display_error_info_message(int error, char *msg, int arg_valid, int arg);
+ extern int is_skx_based_platform(void);
++extern int is_spr_platform(void);
++extern int is_icx_platform(void);
+ extern void isst_trl_display_information(int cpu, FILE *outf, unsigned long long trl);
+ #endif
+diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
+index c0c48fdb9ac1d..76d495fe3a175 100644
+--- a/tools/testing/selftests/bpf/.gitignore
++++ b/tools/testing/selftests/bpf/.gitignore
+@@ -8,6 +8,7 @@ FEATURE-DUMP.libbpf
+ fixdep
+ test_dev_cgroup
+ /test_progs*
++!test_progs.h
+ test_verifier_log
+ feature
+ test_sock
+diff --git a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
+index e6eb78f0b9545..9933ed24f9012 100644
+--- a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
++++ b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
+@@ -57,6 +57,10 @@ enable_events() {
+     echo 1 > tracing_on
+ }
+ 
++other_task() {
++    sleep .001 || usleep 1 || sleep 1
++}
++
+ echo 0 > options/event-fork
+ 
+ do_reset
+@@ -94,6 +98,9 @@ child=$!
+ echo "child = $child"
+ wait $child
+ 
++# Be sure some other events will happen for small systems (e.g. 1 core)
++other_task
++
+ echo 0 > tracing_on
+ 
+ cnt=`count_pid $mypid`
+diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
+index 81edbd23d371c..b4d24f50aca62 100644
+--- a/tools/testing/selftests/kvm/dirty_log_test.c
++++ b/tools/testing/selftests/kvm/dirty_log_test.c
+@@ -16,7 +16,6 @@
+ #include <errno.h>
+ #include <linux/bitmap.h>
+ #include <linux/bitops.h>
+-#include <asm/barrier.h>
+ #include <linux/atomic.h>
+ 
+ #include "kvm_util.h"
+diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
+index 8b90256bca96d..03e13938fd079 100644
+--- a/tools/testing/selftests/kvm/lib/kvm_util.c
++++ b/tools/testing/selftests/kvm/lib/kvm_util.c
+@@ -310,10 +310,6 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+ 		uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
+ 
+ 		vm_vcpu_add_default(vm, vcpuid, guest_code);
+-
+-#ifdef __x86_64__
+-		vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
+-#endif
+ 	}
+ 
+ 	return vm;
+diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
+index a8906e60a1081..09cc685599a21 100644
+--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
++++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
+@@ -600,6 +600,9 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+ 	/* Setup the MP state */
+ 	mp_state.mp_state = 0;
+ 	vcpu_set_mp_state(vm, vcpuid, &mp_state);
++
++	/* Setup supported CPUIDs */
++	vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
+ }
+ 
+ /*
+diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
+index fcc840088c919..a6fe75cb9a6eb 100644
+--- a/tools/testing/selftests/kvm/steal_time.c
++++ b/tools/testing/selftests/kvm/steal_time.c
+@@ -73,8 +73,6 @@ static void steal_time_init(struct kvm_vm *vm)
+ 	for (i = 0; i < NR_VCPUS; ++i) {
+ 		int ret;
+ 
+-		vcpu_set_cpuid(vm, i, kvm_get_supported_cpuid());
+-
+ 		/* ST_GPA_BASE is identity mapped */
+ 		st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
+ 		sync_global_to_guest(vm, st_gva[i]);
+diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
+index 12c558fc8074a..c8d2bbe202d0e 100644
+--- a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
++++ b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
+@@ -106,8 +106,6 @@ static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code)
+ 		vm_vcpu_add_default(vm, vcpuid, guest_bsp_vcpu);
+ 	else
+ 		vm_vcpu_add_default(vm, vcpuid, guest_not_bsp_vcpu);
+-
+-	vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
+ }
+ 
+ static void run_vm_bsp(uint32_t bsp_vcpu)
+diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh
+index bb7a1775307b8..e95e79bd31268 100755
+--- a/tools/testing/selftests/lkdtm/run.sh
++++ b/tools/testing/selftests/lkdtm/run.sh
+@@ -76,10 +76,14 @@ fi
+ # Save existing dmesg so we can detect new content below
+ dmesg > "$DMESG"
+ 
+-# Most shells yell about signals and we're expecting the "cat" process
+-# to usually be killed by the kernel. So we have to run it in a sub-shell
+-# and silence errors.
+-($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
++# Since the kernel is likely killing the process writing to the trigger
++# file, it must not be the script's shell itself. i.e. we cannot do:
++#     echo "$test" >"$TRIGGER"
++# Instead, use "cat" to take the signal. Since the shell will yell about
++# the signal that killed the subprocess, we must ignore the failure and
++# continue. However we don't silence stderr since there might be other
++# useful details reported there in the case of other unexpected conditions.
++echo "$test" | cat >"$TRIGGER" || true
+ 
+ # Record and dump the results
+ dmesg | comm --nocheck-order -13 "$DMESG" - > "$LOG" || true
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index 426d07875a48e..112d41d01b12d 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -25,6 +25,47 @@
+ #define TLS_PAYLOAD_MAX_LEN 16384
+ #define SOL_TLS 282
+ 
++struct tls_crypto_info_keys {
++	union {
++		struct tls12_crypto_info_aes_gcm_128 aes128;
++		struct tls12_crypto_info_chacha20_poly1305 chacha20;
++	};
++	size_t len;
++};
++
++static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
++				 struct tls_crypto_info_keys *tls12)
++{
++	memset(tls12, 0, sizeof(*tls12));
++
++	switch (cipher_type) {
++	case TLS_CIPHER_CHACHA20_POLY1305:
++		tls12->len = sizeof(struct tls12_crypto_info_chacha20_poly1305);
++		tls12->chacha20.info.version = tls_version;
++		tls12->chacha20.info.cipher_type = cipher_type;
++		break;
++	case TLS_CIPHER_AES_GCM_128:
++		tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_128);
++		tls12->aes128.info.version = tls_version;
++		tls12->aes128.info.cipher_type = cipher_type;
++		break;
++	default:
++		break;
++	}
++}
++
++static void memrnd(void *s, size_t n)
++{
++	int *dword = s;
++	char *byte;
++
++	for (; n >= 4; n -= 4)
++		*dword++ = rand();
++	byte = (void *)dword;
++	while (n--)
++		*byte++ = rand();
++}
++
+ FIXTURE(tls_basic)
+ {
+ 	int fd, cfd;
+@@ -133,33 +174,16 @@ FIXTURE_VARIANT_ADD(tls, 13_chacha)
+ 
+ FIXTURE_SETUP(tls)
+ {
+-	union {
+-		struct tls12_crypto_info_aes_gcm_128 aes128;
+-		struct tls12_crypto_info_chacha20_poly1305 chacha20;
+-	} tls12;
++	struct tls_crypto_info_keys tls12;
+ 	struct sockaddr_in addr;
+ 	socklen_t len;
+ 	int sfd, ret;
+-	size_t tls12_sz;
+ 
+ 	self->notls = false;
+ 	len = sizeof(addr);
+ 
+-	memset(&tls12, 0, sizeof(tls12));
+-	switch (variant->cipher_type) {
+-	case TLS_CIPHER_CHACHA20_POLY1305:
+-		tls12_sz = sizeof(struct tls12_crypto_info_chacha20_poly1305);
+-		tls12.chacha20.info.version = variant->tls_version;
+-		tls12.chacha20.info.cipher_type = variant->cipher_type;
+-		break;
+-	case TLS_CIPHER_AES_GCM_128:
+-		tls12_sz = sizeof(struct tls12_crypto_info_aes_gcm_128);
+-		tls12.aes128.info.version = variant->tls_version;
+-		tls12.aes128.info.cipher_type = variant->cipher_type;
+-		break;
+-	default:
+-		tls12_sz = 0;
+-	}
++	tls_crypto_info_init(variant->tls_version, variant->cipher_type,
++			     &tls12);
+ 
+ 	addr.sin_family = AF_INET;
+ 	addr.sin_addr.s_addr = htonl(INADDR_ANY);
+@@ -187,7 +211,7 @@ FIXTURE_SETUP(tls)
+ 
+ 	if (!self->notls) {
+ 		ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12,
+-				 tls12_sz);
++				 tls12.len);
+ 		ASSERT_EQ(ret, 0);
+ 	}
+ 
+@@ -200,7 +224,7 @@ FIXTURE_SETUP(tls)
+ 		ASSERT_EQ(ret, 0);
+ 
+ 		ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12,
+-				 tls12_sz);
++				 tls12.len);
+ 		ASSERT_EQ(ret, 0);
+ 	}
+ 
+@@ -308,6 +332,8 @@ TEST_F(tls, recv_max)
+ 	char recv_mem[TLS_PAYLOAD_MAX_LEN];
+ 	char buf[TLS_PAYLOAD_MAX_LEN];
+ 
++	memrnd(buf, sizeof(buf));
++
+ 	EXPECT_GE(send(self->fd, buf, send_len, 0), 0);
+ 	EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
+ 	EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0);
+@@ -588,6 +614,8 @@ TEST_F(tls, recvmsg_single_max)
+ 	struct iovec vec;
+ 	struct msghdr hdr;
+ 
++	memrnd(send_mem, sizeof(send_mem));
++
+ 	EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len);
+ 	vec.iov_base = (char *)recv_mem;
+ 	vec.iov_len = TLS_PAYLOAD_MAX_LEN;
+@@ -610,6 +638,8 @@ TEST_F(tls, recvmsg_multiple)
+ 	struct msghdr hdr;
+ 	int i;
+ 
++	memrnd(buf, sizeof(buf));
++
+ 	EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len);
+ 	for (i = 0; i < msg_iovlen; i++) {
+ 		iov_base[i] = (char *)malloc(iov_len);
+@@ -634,6 +664,8 @@ TEST_F(tls, single_send_multiple_recv)
+ 	char send_mem[TLS_PAYLOAD_MAX_LEN * 2];
+ 	char recv_mem[TLS_PAYLOAD_MAX_LEN * 2];
+ 
++	memrnd(send_mem, sizeof(send_mem));
++
+ 	EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
+ 	memset(recv_mem, 0, total_len);
+ 
+@@ -834,18 +866,17 @@ TEST_F(tls, bidir)
+ 	int ret;
+ 
+ 	if (!self->notls) {
+-		struct tls12_crypto_info_aes_gcm_128 tls12;
++		struct tls_crypto_info_keys tls12;
+ 
+-		memset(&tls12, 0, sizeof(tls12));
+-		tls12.info.version = variant->tls_version;
+-		tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
++		tls_crypto_info_init(variant->tls_version, variant->cipher_type,
++				     &tls12);
+ 
+ 		ret = setsockopt(self->fd, SOL_TLS, TLS_RX, &tls12,
+-				 sizeof(tls12));
++				 tls12.len);
+ 		ASSERT_EQ(ret, 0);
+ 
+ 		ret = setsockopt(self->cfd, SOL_TLS, TLS_TX, &tls12,
+-				 sizeof(tls12));
++				 tls12.len);
+ 		ASSERT_EQ(ret, 0);
+ 	}
+ 
+diff --git a/tools/testing/selftests/splice/short_splice_read.sh b/tools/testing/selftests/splice/short_splice_read.sh
+index 7810d3589d9ab..22b6c8910b182 100755
+--- a/tools/testing/selftests/splice/short_splice_read.sh
++++ b/tools/testing/selftests/splice/short_splice_read.sh
+@@ -1,21 +1,87 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
++#
++# Test for mishandling of splice() on pseudofilesystems, which should catch
++# bugs like 11990a5bd7e5 ("module: Correctly truncate sysfs sections output")
++#
++# Since splice fallback was removed as part of the set_fs() rework, many of these
++# tests expect to fail now. See https://lore.kernel.org/lkml/202009181443.C2179FB@keescook/
+ set -e
+ 
++DIR=$(dirname "$0")
++
+ ret=0
+ 
++expect_success()
++{
++	title="$1"
++	shift
++
++	echo "" >&2
++	echo "$title ..." >&2
++
++	set +e
++	"$@"
++	rc=$?
++	set -e
++
++	case "$rc" in
++	0)
++		echo "ok: $title succeeded" >&2
++		;;
++	1)
++		echo "FAIL: $title should work" >&2
++		ret=$(( ret + 1 ))
++		;;
++	*)
++		echo "FAIL: something else went wrong" >&2
++		ret=$(( ret + 1 ))
++		;;
++	esac
++}
++
++expect_failure()
++{
++	title="$1"
++	shift
++
++	echo "" >&2
++	echo "$title ..." >&2
++
++	set +e
++	"$@"
++	rc=$?
++	set -e
++
++	case "$rc" in
++	0)
++		echo "FAIL: $title unexpectedly worked" >&2
++		ret=$(( ret + 1 ))
++		;;
++	1)
++		echo "ok: $title correctly failed" >&2
++		;;
++	*)
++		echo "FAIL: something else went wrong" >&2
++		ret=$(( ret + 1 ))
++		;;
++	esac
++}
++
+ do_splice()
+ {
+ 	filename="$1"
+ 	bytes="$2"
+ 	expected="$3"
++	report="$4"
+ 
+-	out=$(./splice_read "$filename" "$bytes" | cat)
++	out=$("$DIR"/splice_read "$filename" "$bytes" | cat)
+ 	if [ "$out" = "$expected" ] ; then
+-		echo "ok: $filename $bytes"
++		echo "      matched $report" >&2
++		return 0
+ 	else
+-		echo "FAIL: $filename $bytes"
+-		ret=1
++		echo "      no match: '$out' vs $report" >&2
++		return 1
+ 	fi
+ }
+ 
+@@ -23,34 +89,45 @@ test_splice()
+ {
+ 	filename="$1"
+ 
++	echo "  checking $filename ..." >&2
++
+ 	full=$(cat "$filename")
++	rc=$?
++	if [ $rc -ne 0 ] ; then
++		return 2
++	fi
++
+ 	two=$(echo "$full" | grep -m1 . | cut -c-2)
+ 
+ 	# Make sure full splice has the same contents as a standard read.
+-	do_splice "$filename" 4096 "$full"
++	echo "    splicing 4096 bytes ..." >&2
++	if ! do_splice "$filename" 4096 "$full" "full read" ; then
++		return 1
++	fi
+ 
+ 	# Make sure a partial splice sees the first two characters.
+-	do_splice "$filename" 2 "$two"
++	echo "    splicing 2 bytes ..." >&2
++	if ! do_splice "$filename" 2 "$two" "'$two'" ; then
++		return 1
++	fi
++
++	return 0
+ }
+ 
+-# proc_single_open(), seq_read()
+-test_splice /proc/$$/limits
+-# special open, seq_read()
+-test_splice /proc/$$/comm
++### /proc/$pid/ has no splice interface; these should all fail.
++expect_failure "proc_single_open(), seq_read() splice" test_splice /proc/$$/limits
++expect_failure "special open(), seq_read() splice" test_splice /proc/$$/comm
+ 
+-# proc_handler, proc_dointvec_minmax
+-test_splice /proc/sys/fs/nr_open
+-# proc_handler, proc_dostring
+-test_splice /proc/sys/kernel/modprobe
+-# proc_handler, special read
+-test_splice /proc/sys/kernel/version
++### /proc/sys/ has a splice interface; these should all succeed.
++expect_success "proc_handler: proc_dointvec_minmax() splice" test_splice /proc/sys/fs/nr_open
++expect_success "proc_handler: proc_dostring() splice" test_splice /proc/sys/kernel/modprobe
++expect_success "proc_handler: special read splice" test_splice /proc/sys/kernel/version
+ 
++### /sys/ has no splice interface; these should all fail.
+ if ! [ -d /sys/module/test_module/sections ] ; then
+-	modprobe test_module
++	expect_success "test_module kernel module load" modprobe test_module
+ fi
+-# kernfs, attr
+-test_splice /sys/module/test_module/coresize
+-# kernfs, binattr
+-test_splice /sys/module/test_module/sections/.init.text
++expect_failure "kernfs attr splice" test_splice /sys/module/test_module/coresize
++expect_failure "kernfs binattr splice" test_splice /sys/module/test_module/sections/.init.text
+ 
+ exit $ret
+diff --git a/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
+index 229ee185b27e1..a7b21658af9b4 100644
+--- a/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
++++ b/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
+@@ -36,7 +36,7 @@ class SubPlugin(TdcPlugin):
+         for k in scapy_keys:
+             if k not in scapyinfo:
+                 keyfail = True
+-                missing_keys.add(k)
++                missing_keys.append(k)
+         if keyfail:
+             print('{}: Scapy block present in the test, but is missing info:'
+                 .format(self.sub_class))
+diff --git a/tools/testing/selftests/vm/protection_keys.c b/tools/testing/selftests/vm/protection_keys.c
+index fdbb602ecf325..87eecd5ba577b 100644
+--- a/tools/testing/selftests/vm/protection_keys.c
++++ b/tools/testing/selftests/vm/protection_keys.c
+@@ -510,7 +510,7 @@ int alloc_pkey(void)
+ 			" shadow: 0x%016llx\n",
+ 			__func__, __LINE__, ret, __read_pkey_reg(),
+ 			shadow_pkey_reg);
+-	if (ret) {
++	if (ret > 0) {
+ 		/* clear both the bits: */
+ 		shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, ret,
+ 						~PKEY_MASK);
+@@ -561,7 +561,6 @@ int alloc_random_pkey(void)
+ 	int nr_alloced = 0;
+ 	int random_index;
+ 	memset(alloced_pkeys, 0, sizeof(alloced_pkeys));
+-	srand((unsigned int)time(NULL));
+ 
+ 	/* allocate every possible key and make a note of which ones we got */
+ 	max_nr_pkey_allocs = NR_PKEYS;
+@@ -1449,6 +1448,13 @@ void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
+ 	ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
+ 	pkey_assert(!ret);
+ 
++	/*
++	 * Reset the shadow, assuming that the above mprotect()
++	 * correctly changed PKRU, but to an unknown value since
++	 * the actual allocated pkey is unknown.
++	 */
++	shadow_pkey_reg = __read_pkey_reg();
++
+ 	dprintf2("pkey_reg: %016llx\n", read_pkey_reg());
+ 
+ 	/* Make sure this is an *instruction* fault */
+@@ -1552,6 +1558,8 @@ int main(void)
+ 	int nr_iterations = 22;
+ 	int pkeys_supported = is_pkeys_supported();
+ 
++	srand((unsigned int)time(NULL));
++
+ 	setup_handlers();
+ 
+ 	printf("has pkeys: %d\n", pkeys_supported);
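
A note on the tuple_set_d64 change near the top of the patch above: the rename works because a 64-bit value survives a round trip through a signed 64-bit database column bit-for-bit. The following standalone C sketch (illustrative only; it is not part of the patch) demonstrates that round trip:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* A db_id above INT64_MAX, the case the patch's comment is
	 * about: it cannot be stored as a positive signed value. */
	uint64_t db_id = 0xfffffffffffffff0ULL;

	/* What the database effectively stores: the same 64 bits read
	 * as signed. Strictly, this conversion is implementation-
	 * defined in C before C23, but it wraps on the two's-
	 * complement targets perf runs on. */
	int64_t stored = (int64_t)db_id;

	/* Reading the column back as unsigned recovers the value. */
	uint64_t recovered = (uint64_t)stored;

	printf("stored=%" PRId64 " recovered=%" PRIu64 " ok=%d\n",
	       stored, recovered, recovered == db_id);
	return 0;
}

Exporting through tuple_set_s64 therefore loses nothing; consumers simply reinterpret the sign bit.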

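The ICX branch added to isst_get_uncore_mem_freq() above compresses a scale-and-adjust computation into a few lines. As a worked value: an encoded field of 4 yields (12 - 4) * 133.33 * 2 * 10 = 21332 after integer truncation, then 21332 / 10 = 2133, and since 2133 % 10 is 3 the final adjustment does not fire, giving 2133 MHz (the unit the isst-display.c hunk now prints directly). Below is a standalone sketch mirroring that arithmetic; the function name is illustrative, not part of the tool:

static unsigned int icx_decode_mem_freq(unsigned int encoded)
{
	unsigned int mem_freq;

	/* The patch only decodes encodings below 7; anything else is
	 * reported as 0 (unknown). */
	if (encoded >= 7)
		return 0;

	/* Scale in tenths: each step is 133.33, doubled, times 10 to
	 * carry one extra digit through the integer math. */
	mem_freq = (12 - encoded) * 133.33 * 2 * 10;

	/* Drop back to whole MHz and apply the patch's last-digit
	 * adjustment. */
	mem_freq /= 10;
	if (mem_freq % 10 > 5)
		mem_freq++;

	return mem_freq;
}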

* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-07-13 12:36 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-07-13 12:36 UTC (permalink / raw
  To: gentoo-commits

commit:     beccc056611dad943a45161a8c1e6784f597f3d6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jul 13 12:35:57 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jul 13 12:35:57 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=beccc056

Update Homepage for CPU Optimization patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index 279298d..010c87c 100644
--- a/0000_README
+++ b/0000_README
@@ -132,7 +132,7 @@ From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
 
 Patch:  5010_enable-cpu-optimizations-universal.patch
-From:   https://github.com/graysky2/kernel_gcc_patch/
+From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.8 patch enables gcc = v9+ optimizations for additional CPUs.
 
 Patch:  5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-07-11 14:42 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-07-11 14:42 UTC (permalink / raw
  To: gentoo-commits

commit:     f258dc76d3adedc729b5b395b42dbbd8a4cb6a71
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 11 14:42:03 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 11 14:42:03 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f258dc76

Linux patch 5.12.16

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1015_linux-5.12.16.patch | 1016 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1020 insertions(+)

diff --git a/0000_README b/0000_README
index 7954680..279298d 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  1014_linux-5.12.15.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.15
 
+Patch:  1015_linux-5.12.16.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.16
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-5.12.16.patch b/1015_linux-5.12.16.patch
new file mode 100644
index 0000000..5f03442
--- /dev/null
+++ b/1015_linux-5.12.16.patch
@@ -0,0 +1,1016 @@
+diff --git a/Makefile b/Makefile
+index 09e1a0967bab7..bf6accb2328c3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile
+index c168c6980d050..74b644ea8a00a 100644
+--- a/arch/hexagon/Makefile
++++ b/arch/hexagon/Makefile
+@@ -10,6 +10,9 @@ LDFLAGS_vmlinux += -G0
+ # Do not use single-byte enums; these will overflow.
+ KBUILD_CFLAGS += -fno-short-enums
+ 
++# We must use long-calls:
++KBUILD_CFLAGS += -mlong-calls
++
+ # Modules must use either long-calls, or use pic/plt.
+ # Use long-calls for now, it's easier.  And faster.
+ # KBUILD_CFLAGS_MODULE += -fPIC
+@@ -30,9 +33,6 @@ TIR_NAME := r19
+ KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
+ KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
+ 
+-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name 2>/dev/null)
+-libs-y += $(LIBGCC)
+-
+ head-y := arch/hexagon/kernel/head.o
+ 
+ core-y += arch/hexagon/kernel/ \
+diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
+index 6b9c554aee78e..9fb00a0ae89f7 100644
+--- a/arch/hexagon/include/asm/futex.h
++++ b/arch/hexagon/include/asm/futex.h
+@@ -21,7 +21,7 @@
+ 	"3:\n" \
+ 	".section .fixup,\"ax\"\n" \
+ 	"4: %1 = #%5;\n" \
+-	"   jump 3b\n" \
++	"   jump ##3b\n" \
+ 	".previous\n" \
+ 	".section __ex_table,\"a\"\n" \
+ 	".long 1b,4b,2b,4b\n" \
+@@ -90,7 +90,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+ 	"3:\n"
+ 	".section .fixup,\"ax\"\n"
+ 	"4: %0 = #%6\n"
+-	"   jump 3b\n"
++	"   jump ##3b\n"
+ 	".previous\n"
+ 	".section __ex_table,\"a\"\n"
+ 	".long 1b,4b,2b,4b\n"
+diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h
+index 78338d8ada83f..8d4ec76fceb45 100644
+--- a/arch/hexagon/include/asm/timex.h
++++ b/arch/hexagon/include/asm/timex.h
+@@ -8,6 +8,7 @@
+ 
+ #include <asm-generic/timex.h>
+ #include <asm/timer-regs.h>
++#include <asm/hexagon_vm.h>
+ 
+ /* Using TCX0 as our clock.  CLOCK_TICK_RATE scheduled to be removed. */
+ #define CLOCK_TICK_RATE              TCX0_CLK_RATE
+@@ -16,7 +17,7 @@
+ 
+ static inline int read_current_timer(unsigned long *timer_val)
+ {
+-	*timer_val = (unsigned long) __vmgettime();
++	*timer_val = __vmgettime();
+ 	return 0;
+ }
+ 
+diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
+index 6fb1aaab1c298..35545a7386a06 100644
+--- a/arch/hexagon/kernel/hexagon_ksyms.c
++++ b/arch/hexagon/kernel/hexagon_ksyms.c
+@@ -35,8 +35,8 @@ EXPORT_SYMBOL(_dflt_cache_att);
+ DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes);
+ 
+ /* Additional functions */
+-DECLARE_EXPORT(__divsi3);
+-DECLARE_EXPORT(__modsi3);
+-DECLARE_EXPORT(__udivsi3);
+-DECLARE_EXPORT(__umodsi3);
++DECLARE_EXPORT(__hexagon_divsi3);
++DECLARE_EXPORT(__hexagon_modsi3);
++DECLARE_EXPORT(__hexagon_udivsi3);
++DECLARE_EXPORT(__hexagon_umodsi3);
+ DECLARE_EXPORT(csum_tcpudp_magic);
+diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c
+index a5a89e944257c..8975f9b4cedf0 100644
+--- a/arch/hexagon/kernel/ptrace.c
++++ b/arch/hexagon/kernel/ptrace.c
+@@ -35,7 +35,7 @@ void user_disable_single_step(struct task_struct *child)
+ 
+ static int genregs_get(struct task_struct *target,
+ 		   const struct user_regset *regset,
+-		   srtuct membuf to)
++		   struct membuf to)
+ {
+ 	struct pt_regs *regs = task_pt_regs(target);
+ 
+@@ -54,7 +54,7 @@ static int genregs_get(struct task_struct *target,
+ 	membuf_store(&to, regs->m0);
+ 	membuf_store(&to, regs->m1);
+ 	membuf_store(&to, regs->usr);
+-	membuf_store(&to, regs->p3_0);
++	membuf_store(&to, regs->preds);
+ 	membuf_store(&to, regs->gp);
+ 	membuf_store(&to, regs->ugp);
+ 	membuf_store(&to, pt_elr(regs)); // pc
+diff --git a/arch/hexagon/lib/Makefile b/arch/hexagon/lib/Makefile
+index 54be529d17a25..a64641e89d5fe 100644
+--- a/arch/hexagon/lib/Makefile
++++ b/arch/hexagon/lib/Makefile
+@@ -2,4 +2,5 @@
+ #
+ # Makefile for hexagon-specific library files.
+ #
+-obj-y = checksum.o io.o memcpy.o memset.o
++obj-y = checksum.o io.o memcpy.o memset.o memcpy_likely_aligned.o \
++         divsi3.o modsi3.o udivsi3.o  umodsi3.o
+diff --git a/arch/hexagon/lib/divsi3.S b/arch/hexagon/lib/divsi3.S
+new file mode 100644
+index 0000000000000..783e09424c2c8
+--- /dev/null
++++ b/arch/hexagon/lib/divsi3.S
+@@ -0,0 +1,67 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
++ */
++
++#include <linux/linkage.h>
++
++SYM_FUNC_START(__hexagon_divsi3)
++        {
++                p0 = cmp.gt(r0,#-1)
++                p1 = cmp.gt(r1,#-1)
++                r3:2 = vabsw(r1:0)
++        }
++        {
++                p3 = xor(p0,p1)
++                r4 = sub(r2,r3)
++                r6 = cl0(r2)
++                p0 = cmp.gtu(r3,r2)
++        }
++        {
++                r0 = mux(p3,#-1,#1)
++                r7 = cl0(r3)
++                p1 = cmp.gtu(r3,r4)
++        }
++        {
++                r0 = mux(p0,#0,r0)
++                p0 = or(p0,p1)
++                if (p0.new) jumpr:nt r31
++                r6 = sub(r7,r6)
++        }
++        {
++                r7 = r6
++                r5:4 = combine(#1,r3)
++                r6 = add(#1,lsr(r6,#1))
++                p0 = cmp.gtu(r6,#4)
++        }
++        {
++                r5:4 = vaslw(r5:4,r7)
++                if (!p0) r6 = #3
++        }
++        {
++                loop0(1f,r6)
++                r7:6 = vlsrw(r5:4,#1)
++                r1:0 = #0
++        }
++        .falign
++1:
++        {
++                r5:4 = vlsrw(r5:4,#2)
++                if (!p0.new) r0 = add(r0,r5)
++                if (!p0.new) r2 = sub(r2,r4)
++                p0 = cmp.gtu(r4,r2)
++        }
++        {
++                r7:6 = vlsrw(r7:6,#2)
++                if (!p0.new) r0 = add(r0,r7)
++                if (!p0.new) r2 = sub(r2,r6)
++                p0 = cmp.gtu(r6,r2)
++        }:endloop0
++        {
++                if (!p0) r0 = add(r0,r7)
++        }
++        {
++                if (p3) r0 = sub(r1,r0)
++                jumpr r31
++        }
++SYM_FUNC_END(__hexagon_divsi3)
+diff --git a/arch/hexagon/lib/memcpy_likely_aligned.S b/arch/hexagon/lib/memcpy_likely_aligned.S
+new file mode 100644
+index 0000000000000..6a541fb90a540
+--- /dev/null
++++ b/arch/hexagon/lib/memcpy_likely_aligned.S
+@@ -0,0 +1,56 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
++ */
++
++#include <linux/linkage.h>
++
++SYM_FUNC_START(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes)
++        {
++                p0 = bitsclr(r1,#7)
++                p0 = bitsclr(r0,#7)
++                if (p0.new) r5:4 = memd(r1)
++                if (p0.new) r7:6 = memd(r1+#8)
++        }
++        {
++                if (!p0) jump:nt .Lmemcpy_call
++                if (p0) r9:8 = memd(r1+#16)
++                if (p0) r11:10 = memd(r1+#24)
++                p0 = cmp.gtu(r2,#64)
++        }
++        {
++                if (p0) jump:nt .Lmemcpy_call
++                if (!p0) memd(r0) = r5:4
++                if (!p0) memd(r0+#8) = r7:6
++                p0 = cmp.gtu(r2,#32)
++        }
++        {
++                p1 = cmp.gtu(r2,#40)
++                p2 = cmp.gtu(r2,#48)
++                if (p0) r13:12 = memd(r1+#32)
++                if (p1.new) r15:14 = memd(r1+#40)
++        }
++        {
++                memd(r0+#16) = r9:8
++                memd(r0+#24) = r11:10
++        }
++        {
++                if (p0) memd(r0+#32) = r13:12
++                if (p1) memd(r0+#40) = r15:14
++                if (!p2) jumpr:t r31
++        }
++        {
++                p0 = cmp.gtu(r2,#56)
++                r5:4 = memd(r1+#48)
++                if (p0.new) r7:6 = memd(r1+#56)
++        }
++        {
++                memd(r0+#48) = r5:4
++                if (p0) memd(r0+#56) = r7:6
++                jumpr r31
++        }
++
++.Lmemcpy_call:
++        jump memcpy
++
++SYM_FUNC_END(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes)
+diff --git a/arch/hexagon/lib/modsi3.S b/arch/hexagon/lib/modsi3.S
+new file mode 100644
+index 0000000000000..9ea1c86efac2b
+--- /dev/null
++++ b/arch/hexagon/lib/modsi3.S
+@@ -0,0 +1,46 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
++ */
++
++#include <linux/linkage.h>
++
++SYM_FUNC_START(__hexagon_modsi3)
++        {
++                p2 = cmp.ge(r0,#0)
++                r2 = abs(r0)
++                r1 = abs(r1)
++        }
++        {
++                r3 = cl0(r2)
++                r4 = cl0(r1)
++                p0 = cmp.gtu(r1,r2)
++        }
++        {
++                r3 = sub(r4,r3)
++                if (p0) jumpr r31
++        }
++        {
++                p1 = cmp.eq(r3,#0)
++                loop0(1f,r3)
++                r0 = r2
++                r2 = lsl(r1,r3)
++        }
++        .falign
++1:
++        {
++                p0 = cmp.gtu(r2,r0)
++                if (!p0.new) r0 = sub(r0,r2)
++                r2 = lsr(r2,#1)
++                if (p1) r1 = #0
++        }:endloop0
++        {
++                p0 = cmp.gtu(r2,r0)
++                if (!p0.new) r0 = sub(r0,r1)
++                if (p2) jumpr r31
++        }
++        {
++                r0 = neg(r0)
++                jumpr r31
++        }
++SYM_FUNC_END(__hexagon_modsi3)
+diff --git a/arch/hexagon/lib/udivsi3.S b/arch/hexagon/lib/udivsi3.S
+new file mode 100644
+index 0000000000000..477f27b9311cd
+--- /dev/null
++++ b/arch/hexagon/lib/udivsi3.S
+@@ -0,0 +1,38 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
++ */
++
++#include <linux/linkage.h>
++
++SYM_FUNC_START(__hexagon_udivsi3)
++        {
++                r2 = cl0(r0)
++                r3 = cl0(r1)
++                r5:4 = combine(#1,#0)
++                p0 = cmp.gtu(r1,r0)
++        }
++        {
++                r6 = sub(r3,r2)
++                r4 = r1
++                r1:0 = combine(r0,r4)
++                if (p0) jumpr r31
++        }
++        {
++                r3:2 = vlslw(r5:4,r6)
++                loop0(1f,r6)
++        }
++        .falign
++1:
++        {
++                p0 = cmp.gtu(r2,r1)
++                if (!p0.new) r1 = sub(r1,r2)
++                if (!p0.new) r0 = add(r0,r3)
++                r3:2 = vlsrw(r3:2,#1)
++        }:endloop0
++        {
++                p0 = cmp.gtu(r2,r1)
++                if (!p0.new) r0 = add(r0,r3)
++                jumpr r31
++        }
++SYM_FUNC_END(__hexagon_udivsi3)
+diff --git a/arch/hexagon/lib/umodsi3.S b/arch/hexagon/lib/umodsi3.S
+new file mode 100644
+index 0000000000000..280bf06a55e7d
+--- /dev/null
++++ b/arch/hexagon/lib/umodsi3.S
+@@ -0,0 +1,36 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
++ */
++
++#include <linux/linkage.h>
++
++SYM_FUNC_START(__hexagon_umodsi3)
++        {
++                r2 = cl0(r0)
++                r3 = cl0(r1)
++                p0 = cmp.gtu(r1,r0)
++        }
++        {
++                r2 = sub(r3,r2)
++                if (p0) jumpr r31
++        }
++        {
++                loop0(1f,r2)
++                p1 = cmp.eq(r2,#0)
++                r2 = lsl(r1,r2)
++        }
++        .falign
++1:
++        {
++                p0 = cmp.gtu(r2,r0)
++                if (!p0.new) r0 = sub(r0,r2)
++                r2 = lsr(r2,#1)
++                if (p1) r1 = #0
++        }:endloop0
++        {
++                p0 = cmp.gtu(r2,r0)
++                if (!p0.new) r0 = sub(r0,r1)
++                jumpr r31
++        }
++SYM_FUNC_END(__hexagon_umodsi3)
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 7196fa9047e6d..426787f4b2ae4 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -79,13 +79,38 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
+ 	local_bh_enable();
+ }
+ 
++static void
++mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
++{
++	writel(q->desc_dma, &q->regs->desc_base);
++	writel(q->ndesc, &q->regs->ring_size);
++	q->head = readl(&q->regs->dma_idx);
++	q->tail = q->head;
++}
++
++static void
++mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
++{
++	int i;
++
++	if (!q)
++		return;
++
++	/* clear descriptors */
++	for (i = 0; i < q->ndesc; i++)
++		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
++
++	writel(0, &q->regs->cpu_idx);
++	writel(0, &q->regs->dma_idx);
++	mt76_dma_sync_idx(dev, q);
++}
++
+ static int
+ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ 		     int idx, int n_desc, int bufsize,
+ 		     u32 ring_base)
+ {
+ 	int size;
+-	int i;
+ 
+ 	spin_lock_init(&q->lock);
+ 	spin_lock_init(&q->cleanup_lock);
+@@ -105,14 +130,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ 	if (!q->entry)
+ 		return -ENOMEM;
+ 
+-	/* clear descriptors */
+-	for (i = 0; i < q->ndesc; i++)
+-		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+-
+-	writel(q->desc_dma, &q->regs->desc_base);
+-	writel(0, &q->regs->cpu_idx);
+-	writel(0, &q->regs->dma_idx);
+-	writel(q->ndesc, &q->regs->ring_size);
++	mt76_dma_queue_reset(dev, q);
+ 
+ 	return 0;
+ }
+@@ -201,15 +219,6 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ 	memset(e, 0, sizeof(*e));
+ }
+ 
+-static void
+-mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
+-{
+-	writel(q->desc_dma, &q->regs->desc_base);
+-	writel(q->ndesc, &q->regs->ring_size);
+-	q->head = readl(&q->regs->dma_idx);
+-	q->tail = q->head;
+-}
+-
+ static void
+ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ {
+@@ -640,9 +649,11 @@ mt76_dma_init(struct mt76_dev *dev)
+ static const struct mt76_queue_ops mt76_dma_ops = {
+ 	.init = mt76_dma_init,
+ 	.alloc = mt76_dma_alloc_queue,
++	.reset_q = mt76_dma_queue_reset,
+ 	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
+ 	.tx_queue_skb = mt76_dma_tx_queue_skb,
+ 	.tx_cleanup = mt76_dma_tx_cleanup,
++	.rx_cleanup = mt76_dma_rx_cleanup,
+ 	.rx_reset = mt76_dma_rx_reset,
+ 	.kick = mt76_dma_kick_queue,
+ };
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 36a430f09f64c..967fe3e11d94e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -190,7 +190,11 @@ struct mt76_queue_ops {
+ 	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
+ 			   bool flush);
+ 
++	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
++
+ 	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
++
++	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
+ };
+ 
+ enum mt76_wcid_flags {
+@@ -784,8 +788,10 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
+ #define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
+ #define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
+ #define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
+-#define mt76_queue_tx_cleanup(dev, ...)        (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
++#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
++#define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
+ #define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
++#define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
+ 
+ #define mt76_for_each_q_rx(dev, i)	\
+ 	for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index c0001e38fcce9..a0797cec136e9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -142,7 +142,7 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
+ 	mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
+ }
+ 
+-static void mt7921_mac_init(struct mt7921_dev *dev)
++void mt7921_mac_init(struct mt7921_dev *dev)
+ {
+ 	int i;
+ 
+@@ -232,7 +232,6 @@ int mt7921_register_device(struct mt7921_dev *dev)
+ 	INIT_LIST_HEAD(&dev->sta_poll_list);
+ 	spin_lock_init(&dev->sta_poll_lock);
+ 
+-	init_waitqueue_head(&dev->reset_wait);
+ 	INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
+ 
+ 	ret = mt7921_init_hardware(dev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index a6d2a25b3495f..ce4eae7f1e448 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -1184,43 +1184,77 @@ void mt7921_update_channel(struct mt76_dev *mdev)
+ 	mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+ }
+ 
+-static bool
+-mt7921_wait_reset_state(struct mt7921_dev *dev, u32 state)
++static int
++mt7921_wfsys_reset(struct mt7921_dev *dev)
+ {
+-	bool ret;
++	mt76_set(dev, 0x70002600, BIT(0));
++	msleep(200);
++	mt76_clear(dev, 0x70002600, BIT(0));
+ 
+-	ret = wait_event_timeout(dev->reset_wait,
+-				 (READ_ONCE(dev->reset_state) & state),
+-				 MT7921_RESET_TIMEOUT);
+-
+-	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+-	return ret;
++	return __mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
++				WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500);
+ }
+ 
+ static void
+-mt7921_dma_reset(struct mt7921_phy *phy)
++mt7921_dma_reset(struct mt7921_dev *dev)
+ {
+-	struct mt7921_dev *dev = phy->dev;
+ 	int i;
+ 
++	/* reset */
++	mt76_clear(dev, MT_WFDMA0_RST,
++		   MT_WFDMA0_RST_DMASHDL_ALL_RST | MT_WFDMA0_RST_LOGIC_RST);
++
++	mt76_set(dev, MT_WFDMA0_RST,
++		 MT_WFDMA0_RST_DMASHDL_ALL_RST | MT_WFDMA0_RST_LOGIC_RST);
++
++	/* disable WFDMA0 */
+ 	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+-		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
++		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
++		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
++		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
++		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
++		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ 
+-	usleep_range(1000, 2000);
++	mt76_poll(dev, MT_WFDMA0_GLO_CFG,
++		  MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
++		  MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000);
+ 
+-	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
++	/* reset hw queues */
+ 	for (i = 0; i < __MT_TXQ_MAX; i++)
+-		mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
++		mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ 
+-	mt76_for_each_q_rx(&dev->mt76, i) {
+-		mt76_queue_rx_reset(dev, i);
+-	}
++	for (i = 0; i < __MT_MCUQ_MAX; i++)
++		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ 
+-	/* re-init prefetch settings after reset */
++	mt76_for_each_q_rx(&dev->mt76, i)
++		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
++
++	/* configure prefetch settings */
+ 	mt7921_dma_prefetch(dev);
+ 
++	/* reset dma idx */
++	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
++
++	/* configure delay interrupt */
++	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
++
++	mt76_set(dev, MT_WFDMA0_GLO_CFG,
++		 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
++		 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
++		 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
++		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
++		 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
++		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
++
+ 	mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ 		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
++
++	mt76_set(dev, 0x54000120, BIT(1));
++
++	/* enable interrupts for TX/RX rings */
++	mt7921_irq_enable(dev,
++			  MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
++			  MT_INT_MCU_CMD);
+ }
+ 
+ void mt7921_tx_token_put(struct mt7921_dev *dev)
+@@ -1244,71 +1278,133 @@ void mt7921_tx_token_put(struct mt7921_dev *dev)
+ 	idr_destroy(&dev->token);
+ }
+ 
+-/* system error recovery */
+-void mt7921_mac_reset_work(struct work_struct *work)
++static void
++mt7921_vif_connect_iter(void *priv, u8 *mac,
++			struct ieee80211_vif *vif)
+ {
+-	struct mt7921_dev *dev;
++	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
++	struct mt7921_dev *dev = mvif->phy->dev;
+ 
+-	dev = container_of(work, struct mt7921_dev, reset_work);
++	ieee80211_disconnect(vif, true);
+ 
+-	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
+-		return;
++	mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
++	mt7921_mcu_set_tx(dev, vif);
++}
+ 
+-	ieee80211_stop_queues(mt76_hw(dev));
++static int
++mt7921_mac_reset(struct mt7921_dev *dev)
++{
++	int i, err;
++
++	mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
++
++	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
++	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+ 
+-	set_bit(MT76_RESET, &dev->mphy.state);
+ 	set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ 	wake_up(&dev->mt76.mcu.wait);
+-	cancel_delayed_work_sync(&dev->mphy.mac_work);
++	skb_queue_purge(&dev->mt76.mcu.res_q);
+ 
+-	/* lock/unlock all queues to ensure that no tx is pending */
+ 	mt76_txq_schedule_all(&dev->mphy);
+ 
+ 	mt76_worker_disable(&dev->mt76.tx_worker);
+-	napi_disable(&dev->mt76.napi[0]);
+-	napi_disable(&dev->mt76.napi[1]);
+-	napi_disable(&dev->mt76.napi[2]);
++	napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
++	napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
++	napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+ 	napi_disable(&dev->mt76.tx_napi);
+ 
+-	mt7921_mutex_acquire(dev);
+-
+-	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
+-
+ 	mt7921_tx_token_put(dev);
+ 	idr_init(&dev->token);
+ 
+-	if (mt7921_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+-		mt7921_dma_reset(&dev->phy);
++	/* clean up hw queues */
++	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
++		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ 
+-		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
+-		mt7921_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+-	}
++	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
++		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
+ 
+-	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+-	clear_bit(MT76_RESET, &dev->mphy.state);
++	mt76_for_each_q_rx(&dev->mt76, i)
++		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);
++
++	mt7921_wfsys_reset(dev);
++	mt7921_dma_reset(dev);
++
++	mt76_for_each_q_rx(&dev->mt76, i) {
++		mt76_queue_rx_reset(dev, i);
++		napi_enable(&dev->mt76.napi[i]);
++		napi_schedule(&dev->mt76.napi[i]);
++	}
+ 
+-	mt76_worker_enable(&dev->mt76.tx_worker);
+ 	napi_enable(&dev->mt76.tx_napi);
+ 	napi_schedule(&dev->mt76.tx_napi);
++	mt76_worker_enable(&dev->mt76.tx_worker);
+ 
+-	napi_enable(&dev->mt76.napi[0]);
+-	napi_schedule(&dev->mt76.napi[0]);
++	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ 
+-	napi_enable(&dev->mt76.napi[1]);
+-	napi_schedule(&dev->mt76.napi[1]);
++	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
++	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
++	mt7921_irq_enable(dev,
++			  MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
++			  MT_INT_MCU_CMD);
+ 
+-	napi_enable(&dev->mt76.napi[2]);
+-	napi_schedule(&dev->mt76.napi[2]);
++	err = mt7921_run_firmware(dev);
++	if (err)
++		return err;
+ 
+-	ieee80211_wake_queues(mt76_hw(dev));
++	err = mt7921_mcu_set_eeprom(dev);
++	if (err)
++		return err;
+ 
+-	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+-	mt7921_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
++	mt7921_mac_init(dev);
++	return __mt7921_start(&dev->phy);
++}
+ 
+-	mt7921_mutex_release(dev);
++/* system error recovery */
++void mt7921_mac_reset_work(struct work_struct *work)
++{
++	struct ieee80211_hw *hw;
++	struct mt7921_dev *dev;
++	int i;
+ 
+-	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
+-				     MT7921_WATCHDOG_TIME);
++	dev = container_of(work, struct mt7921_dev, reset_work);
++	hw = mt76_hw(dev);
++
++	dev_err(dev->mt76.dev, "chip reset\n");
++	ieee80211_stop_queues(hw);
++
++	cancel_delayed_work_sync(&dev->mphy.mac_work);
++	cancel_delayed_work_sync(&dev->pm.ps_work);
++	cancel_work_sync(&dev->pm.wake_work);
++
++	mutex_lock(&dev->mt76.mutex);
++	for (i = 0; i < 10; i++) {
++		if (!mt7921_mac_reset(dev))
++			break;
++	}
++	mutex_unlock(&dev->mt76.mutex);
++
++	if (i == 10)
++		dev_err(dev->mt76.dev, "chip reset failed\n");
++
++	if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
++		struct cfg80211_scan_info info = {
++			.aborted = true,
++		};
++
++		ieee80211_scan_completed(dev->mphy.hw, &info);
++	}
++
++	ieee80211_wake_queues(hw);
++	ieee80211_iterate_active_interfaces(hw,
++					    IEEE80211_IFACE_ITER_RESUME_ALL,
++					    mt7921_vif_connect_iter, 0);
++}
++
++void mt7921_reset(struct mt76_dev *mdev)
++{
++	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
++
++	queue_work(dev->mt76.wq, &dev->reset_work);
+ }
+ 
+ static void
+@@ -1505,4 +1601,5 @@ void mt7921_coredump_work(struct work_struct *work)
+ 	}
+ 	dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
+ 		      GFP_KERNEL);
++	mt7921_reset(&dev->mt76);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 2c781b6f89e53..c6e8857067a3a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -168,28 +168,44 @@ void mt7921_set_stream_he_caps(struct mt7921_phy *phy)
+ 	}
+ }
+ 
+-static int mt7921_start(struct ieee80211_hw *hw)
++int __mt7921_start(struct mt7921_phy *phy)
+ {
+-	struct mt7921_dev *dev = mt7921_hw_dev(hw);
+-	struct mt7921_phy *phy = mt7921_hw_phy(hw);
++	struct mt76_phy *mphy = phy->mt76;
++	int err;
+ 
+-	mt7921_mutex_acquire(dev);
++	err = mt76_connac_mcu_set_mac_enable(mphy->dev, 0, true, false);
++	if (err)
++		return err;
+ 
+-	mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false);
+-	mt76_connac_mcu_set_channel_domain(phy->mt76);
++	err = mt76_connac_mcu_set_channel_domain(mphy);
++	if (err)
++		return err;
++
++	err = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
++	if (err)
++		return err;
+ 
+-	mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+ 	mt7921_mac_reset_counters(phy);
+-	set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
++	set_bit(MT76_STATE_RUNNING, &mphy->state);
+ 
+-	ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
++	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ 				     MT7921_WATCHDOG_TIME);
+ 
+-	mt7921_mutex_release(dev);
+-
+ 	return 0;
+ }
+ 
++static int mt7921_start(struct ieee80211_hw *hw)
++{
++	struct mt7921_phy *phy = mt7921_hw_phy(hw);
++	int err;
++
++	mt7921_mutex_acquire(phy->dev);
++	err = __mt7921_start(phy);
++	mt7921_mutex_release(phy->dev);
++
++	return err;
++}
++
+ static void mt7921_stop(struct ieee80211_hw *hw)
+ {
+ 	struct mt7921_dev *dev = mt7921_hw_dev(hw);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index be88c9f5637a5..a4f070cd78fda 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -161,6 +161,8 @@ mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ 	if (!skb) {
+ 		dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
+ 			cmd, seq);
++		mt7921_reset(mdev);
++
+ 		return -ETIMEDOUT;
+ 	}
+ 
+@@ -927,6 +929,24 @@ int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl)
+ 				 sizeof(data), false);
+ }
+ 
++int mt7921_run_firmware(struct mt7921_dev *dev)
++{
++	int err;
++
++	err = mt7921_driver_own(dev);
++	if (err)
++		return err;
++
++	err = mt7921_load_firmware(dev);
++	if (err)
++		return err;
++
++	set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
++	mt7921_mcu_fw_log_2_host(dev, 1);
++
++	return 0;
++}
++
+ int mt7921_mcu_init(struct mt7921_dev *dev)
+ {
+ 	static const struct mt76_mcu_ops mt7921_mcu_ops = {
+@@ -935,22 +955,10 @@ int mt7921_mcu_init(struct mt7921_dev *dev)
+ 		.mcu_parse_response = mt7921_mcu_parse_response,
+ 		.mcu_restart = mt7921_mcu_restart,
+ 	};
+-	int ret;
+ 
+ 	dev->mt76.mcu_ops = &mt7921_mcu_ops;
+ 
+-	ret = mt7921_driver_own(dev);
+-	if (ret)
+-		return ret;
+-
+-	ret = mt7921_load_firmware(dev);
+-	if (ret)
+-		return ret;
+-
+-	set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+-	mt7921_mcu_fw_log_2_host(dev, 1);
+-
+-	return 0;
++	return mt7921_run_firmware(dev);
+ }
+ 
+ void mt7921_mcu_exit(struct mt7921_dev *dev)
+@@ -1263,6 +1271,7 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
+ 
+ 	if (i == MT7921_DRV_OWN_RETRY_COUNT) {
+ 		dev_err(dev->mt76.dev, "driver own failed\n");
++		mt7921_reset(&dev->mt76);
+ 		return -EIO;
+ 	}
+ 
+@@ -1289,6 +1298,7 @@ int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
+ 
+ 	if (i == MT7921_DRV_OWN_RETRY_COUNT) {
+ 		dev_err(dev->mt76.dev, "firmware own failed\n");
++		mt7921_reset(&dev->mt76);
+ 		return -EIO;
+ 	}
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+index 25a1a6acb6baf..bc26639695bd4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+@@ -151,8 +151,6 @@ struct mt7921_dev {
+ 
+ 	struct work_struct init_work;
+ 	struct work_struct reset_work;
+-	wait_queue_head_t reset_wait;
+-	u32 reset_state;
+ 
+ 	struct list_head sta_poll_list;
+ 	spinlock_t sta_poll_lock;
+@@ -209,6 +207,7 @@ extern struct pci_driver mt7921_pci_driver;
+ 
+ u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr);
+ 
++int __mt7921_start(struct mt7921_phy *phy);
+ int mt7921_register_device(struct mt7921_dev *dev);
+ void mt7921_unregister_device(struct mt7921_dev *dev);
+ int mt7921_eeprom_init(struct mt7921_dev *dev);
+@@ -220,6 +219,7 @@ void mt7921_eeprom_init_sku(struct mt7921_dev *dev);
+ int mt7921_dma_init(struct mt7921_dev *dev);
+ void mt7921_dma_prefetch(struct mt7921_dev *dev);
+ void mt7921_dma_cleanup(struct mt7921_dev *dev);
++int mt7921_run_firmware(struct mt7921_dev *dev);
+ int mt7921_mcu_init(struct mt7921_dev *dev);
+ int mt7921_mcu_add_bss_info(struct mt7921_phy *phy,
+ 			    struct ieee80211_vif *vif, int enable);
+@@ -281,6 +281,7 @@ mt7921_l1_rmw(struct mt7921_dev *dev, u32 addr, u32 mask, u32 val)
+ #define mt7921_l1_set(dev, addr, val)	mt7921_l1_rmw(dev, addr, 0, val)
+ #define mt7921_l1_clear(dev, addr, val)	mt7921_l1_rmw(dev, addr, val, 0)
+ 
++void mt7921_mac_init(struct mt7921_dev *dev);
+ bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
+ void mt7921_mac_reset_counters(struct mt7921_phy *phy);
+ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
+@@ -296,6 +297,7 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ 			   struct ieee80211_sta *sta);
+ void mt7921_mac_work(struct work_struct *work);
+ void mt7921_mac_reset_work(struct work_struct *work);
++void mt7921_reset(struct mt76_dev *mdev);
+ int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ 			  struct ieee80211_sta *sta,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+index 73878d3e24951..50249e9571a66 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+@@ -418,6 +418,10 @@
+ #define PCIE_LPCR_HOST_CLR_OWN		BIT(1)
+ #define PCIE_LPCR_HOST_SET_OWN		BIT(0)
+ 
++#define MT_WFSYS_SW_RST_B		0x18000140
++#define WFSYS_SW_RST_B			BIT(0)
++#define WFSYS_SW_INIT_DONE		BIT(4)
++
+ #define MT_CONN_ON_MISC			0x7c0600f0
+ #define MT_TOP_MISC2_FW_N9_RDY		GENMASK(1, 0)
+ 
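
A note on the mt7921 hunks above: the bring-up sequence now lives in
__mt7921_start(), which expects the device mutex to be held, while the
new mt7921_start() is a thin wrapper that takes and drops the lock
around it. That split is what lets the chip-reset path added elsewhere
in this patch reuse the same sequence under its own locking, and it is
why the mt7921.h hunk above exports the helper rather than the wrapper.
A minimal userspace sketch of the locked/unlocked split, using pthreads
instead of the driver's mutex helpers (all names are illustrative, not
taken from the driver):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors __mt7921_start(): the caller must already hold dev_mutex. */
static int __do_start(void)
{
	printf("bring-up steps run with the lock held\n");
	return 0;
}

/* Mirrors mt7921_start(): take the lock, run the helper, drop it. */
static int do_start(void)
{
	int err;

	pthread_mutex_lock(&dev_mutex);
	err = __do_start();
	pthread_mutex_unlock(&dev_mutex);
	return err;
}

int main(void)
{
	return do_start();
}

The double-underscore prefix is the usual kernel convention for the
"caller already holds the lock" variant of a function.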


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-07-07 13:12 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-07-07 13:12 UTC
  To: gentoo-commits

commit:     1543a4ff033001d8fec13c3222bdb05883fb220d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul  7 13:12:02 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul  7 13:12:02 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1543a4ff

Linux patch 5.12.15

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 ++
 1014_linux-5.12.15.patch | 126 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 130 insertions(+)

diff --git a/0000_README b/0000_README
index 96f1ac7..7954680 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-5.12.14.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.14
 
+Patch:  1014_linux-5.12.15.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-5.12.15.patch b/1014_linux-5.12.15.patch
new file mode 100644
index 0000000..a027bec
--- /dev/null
+++ b/1014_linux-5.12.15.patch
@@ -0,0 +1,126 @@
+diff --git a/Makefile b/Makefile
+index 433f164f9ee0f..09e1a0967bab7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index eec2dcca2f390..ac7c786fa09f9 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -307,6 +307,7 @@ union kvm_mmu_extended_role {
+ 		unsigned int cr4_pke:1;
+ 		unsigned int cr4_smap:1;
+ 		unsigned int cr4_smep:1;
++		unsigned int cr4_la57:1;
+ 		unsigned int maxphyaddr:6;
+ 	};
+ };
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 676ec0d1e6be4..fb2231cf19b5d 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4463,6 +4463,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+ 	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+ 	ext.cr4_pse = !!is_pse(vcpu);
+ 	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
++	ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+ 	ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
+ 
+ 	ext.valid = 1;
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index e3607ec4c2e83..fb365aef336b5 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -1361,6 +1361,7 @@ config GPIO_TPS68470
+ config GPIO_TQMX86
+ 	tristate "TQ-Systems QTMX86 GPIO"
+ 	depends on MFD_TQMX86 || COMPILE_TEST
++	depends on HAS_IOPORT_MAP
+ 	select GPIOLIB_IRQCHIP
+ 	help
+ 	  This driver supports GPIO on the TQMX86 IO controller.
+@@ -1428,6 +1429,7 @@ menu "PCI GPIO expanders"
+ config GPIO_AMD8111
+ 	tristate "AMD 8111 GPIO driver"
+ 	depends on X86 || COMPILE_TEST
++	depends on HAS_IOPORT_MAP
+ 	help
+ 	  The AMD 8111 south bridge contains 32 GPIO pins which can be used.
+ 
+diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
+index 157106e1e4381..b9fdf05d76694 100644
+--- a/drivers/gpio/gpio-mxc.c
++++ b/drivers/gpio/gpio-mxc.c
+@@ -334,7 +334,7 @@ static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
+ 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ 	ct->chip.irq_set_type = gpio_set_irq_type;
+ 	ct->chip.irq_set_wake = gpio_set_wake_irq;
+-	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
++	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
+ 	ct->regs.ack = GPIO_ISR;
+ 	ct->regs.mask = GPIO_IMR;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index f2720a0061993..0a47a2a5553d1 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -549,7 +549,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+ 	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
+ 	int i, j;
+ 
+-	if (!ttm_dma)
++	if (!ttm_dma || !ttm_dma->dma_address)
+ 		return;
+ 	if (!ttm_dma->pages) {
+ 		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
+@@ -585,7 +585,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+ 	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
+ 	int i, j;
+ 
+-	if (!ttm_dma)
++	if (!ttm_dma || !ttm_dma->dma_address)
+ 		return;
+ 	if (!ttm_dma->pages) {
+ 		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 6946a7e26eff7..ef5e792c665fd 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -366,16 +366,6 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+ 	mutex_lock(&matrix_dev->lock);
+-
+-	/*
+-	 * If the KVM pointer is in flux or the guest is running, disallow
+-	 * un-assignment of control domain.
+-	 */
+-	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+-		mutex_unlock(&matrix_dev->lock);
+-		return -EBUSY;
+-	}
+-
+ 	vfio_ap_mdev_reset_queues(mdev);
+ 	list_del(&matrix_mdev->node);
+ 	kfree(matrix_mdev);
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index e4633b84c556a..7815ed642d434 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -220,6 +220,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
+ 		return DISK_EVENT_EJECT_REQUEST;
+ 	else if (med->media_event_code == 2)
+ 		return DISK_EVENT_MEDIA_CHANGE;
++	else if (med->media_event_code == 3)
++		return DISK_EVENT_EJECT_REQUEST;
+ 	return 0;
+ }
+ 
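
Among the 5.12.15 changes above, the KVM hunk deserves a word: the
kvm_mmu_extended_role union packs the paging-relevant CR4 bits into a
single word so that one comparison tells KVM whether a cached MMU role
is still valid, and without a cr4_la57 field a guest toggling CR4.LA57
(switching between 4-level and 5-level paging) produced an identical
role word, letting a stale role be reused. A simplified standalone
illustration of the mechanism (the cr4_* field names follow the diff;
everything else is a sketch, not KVM code):

#include <stdint.h>
#include <stdio.h>

union mmu_role {
	uint32_t word;
	struct {
		uint32_t cr4_pse  : 1;
		uint32_t cr4_pke  : 1;
		uint32_t cr4_smap : 1;
		uint32_t cr4_smep : 1;
		uint32_t cr4_la57 : 1;	/* the field 5.12.15 adds */
	};
};

int main(void)
{
	union mmu_role before = { .word = 0 };
	union mmu_role after  = { .word = 0 };

	after.cr4_la57 = 1;	/* guest enabled 5-level paging */

	/* Without the cr4_la57 field both words would compare equal and
	 * the stale 4-level role would be kept. */
	printf("role changed: %s\n",
	       before.word != after.word ? "yes" : "no");
	return 0;
}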


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-07-04 15:43 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-07-04 15:43 UTC
  To: gentoo-commits

commit:     37672d711eae0b754a6a900065ecdec7ade118b9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul  4 15:42:47 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul  4 15:42:47 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=37672d71

KSPP: Fix DEVMEM select and move help text

Thanks to Peter for reporting

Bug: https://bugs.gentoo.org/798315

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index b671313..c063c6d 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -6,8 +6,8 @@
  source "Documentation/Kconfig"
 +
 +source "distro/Kconfig"
---- /dev/null	2021-06-08 16:56:49.698138501 -0400
-+++ b/distro/Kconfig	2021-06-08 17:11:33.377999003 -0400
+--- /dev/null	2021-07-04 10:53:51.006624416 -0400
++++ b/distro/Kconfig	2021-07-04 11:07:33.534248860 -0400
 @@ -0,0 +1,263 @@
 +menu "Gentoo Linux"
 +
@@ -172,15 +172,6 @@
 +config GENTOO_KERNEL_SELF_PROTECTION
 +	bool "Architecture Independant Kernel Self Protection Project Recommendations"
 +
-+	help
-+  Recommended Kernel settings based on the suggestions from the Kernel Self Protection Project
-+	See: https://kernsec.org/wiki/index.php/Kernel_Self_Protection_Project/Recommended_Settings
-+	Note, there may be additional settings for which the CONFIG_ setting is invisible in menuconfig due 
-+	to unmet dependencies. Search for GENTOO_KERNEL_SELF_PROTECTION_{X86_64, ARM64, X86_32, ARM} for 
-+	dependency information on your specific architecture.
-+	Note 2: Please see the URL above for numeric settings, e.g. CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 
-+	for X86_64
-+
 +	depends on GENTOO_LINUX && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !DEVKMEM && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !X86_X32 && !MODIFY_LDT_SYSCALL
 +
 +	select BUG
@@ -188,8 +179,8 @@
 +	select DEBUG_WX
 +	select STACKPROTECTOR
 +	select STACKPROTECTOR_STRONG
-+	select STRICT_DEVMEM
-+	select IO_STRICT_DEVMEM
++	select STRICT_DEVMEM if DEVMEM=y
++	select IO_STRICT_DEVMEM if DEVMEM=y
 +	select SYN_COOKIES
 +	select DEBUG_CREDENTIALS
 +	select DEBUG_NOTIFIERS
@@ -222,6 +213,15 @@
 +	select GCC_PLUGIN_RANDSTRUCT
 +	select GCC_PLUGIN_RANDSTRUCT_PERFORMANCE
 +
++	help
++  		Recommended Kernel settings based on the suggestions from the Kernel Self Protection Project
++		See: https://kernsec.org/wiki/index.php/Kernel_Self_Protection_Project/Recommended_Settings
++		Note, there may be additional settings for which the CONFIG_ setting is invisible in menuconfig due 
++		to unmet dependencies. Search for GENTOO_KERNEL_SELF_PROTECTION_{X86_64, ARM64, X86_32, ARM} for 
++		dependency information on your specific architecture.
++		Note 2: Please see the URL above for numeric settings, e.g. CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 
++		for X86_64
++
 +menu "Architecture Specific Self Protection Project Recommendations"
 +
 +config GENTOO_KERNEL_SELF_PROTECTION_X86_64


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-07-01 14:28 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-07-01 14:28 UTC
  To: gentoo-commits

commit:     2454e4a6ae16783c690c3ffb3ee8b688b4f8659d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul  1 14:28:08 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul  1 14:28:08 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2454e4a6

Update CPU optimization patch (2021-06-06)

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5010_enable-cpu-optimizations-universal.patch | 74 +++++++++++++--------------
 1 file changed, 36 insertions(+), 38 deletions(-)

diff --git a/5010_enable-cpu-optimizations-universal.patch b/5010_enable-cpu-optimizations-universal.patch
index 1868f23..c45d13b 100644
--- a/5010_enable-cpu-optimizations-universal.patch
+++ b/5010_enable-cpu-optimizations-universal.patch
@@ -1,23 +1,18 @@
-From 59db769ad69e080c512b3890e1d27d6120f4a1a4 Mon Sep 17 00:00:00 2001
+From 4af44fbc97bc51eb742f0d6555bde23cf580d4e3 Mon Sep 17 00:00:00 2001
 From: graysky <graysky@archlinux.us>
-Date: Mon, 12 Apr 2021 07:09:27 -0400
+Date: Sun, 6 Jun 2021 09:41:36 -0400
 Subject: [PATCH] more uarches for kernel 5.8+
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
 
-WARNING
-This patch works with all gcc versions 9.0+ and with kernel version 5.8+ and should
-NOT be applied when compiling on older versions of gcc due to key name changes
-of the march flags introduced with the version 4.9 release of gcc.[1]
-
 FEATURES
 This patch adds additional CPU options to the Linux kernel accessible under:
  Processor type and features  --->
   Processor family --->
 
-With the release of gcc 11.0, several generic 64-bit levels are offered which
-are good for supported Intel or AMD CPUs:
+With the release of gcc 11.1 and clang 12.0, several generic 64-bit levels are
+offered which are good for supported Intel or AMD CPUs:
 • x86-64-v2
 • x86-64-v3
 • x86-64-v4
@@ -26,7 +21,7 @@ Users of glibc 2.33 and above can see which level is supported by current
 hardware by running:
   /lib/ld-linux-x86-64.so.2 --help | grep supported
 
-Alternatively, compare the flags from /proc/cpuinfo to this list.[2]
+Alternatively, compare the flags from /proc/cpuinfo to this list.[1]
 
 CPU-specific microarchitectures include:
 • AMD Improved K8-family
@@ -62,13 +57,15 @@ CPU-specific microarchitectures include:
 • Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡
 
 Notes: If not otherwise noted, gcc >=9.1 is required for support.
-       *Requires gcc >=10.1  †Required gcc >=10.3  ‡Required gcc >=11.0
+       *Requires gcc >=10.1 or clang >=10.0
+       †Requires gcc >=10.3 or clang >=12.0
+       ‡Requires gcc >=11.1 or clang >=12.0
 
 It also offers to compile passing the 'native' option which, "selects the CPU
 to generate code for at compilation time by determining the processor type of
 the compiling machine. Using -march=native enables all instruction subsets
 supported by the local machine and will produce code optimized for the local
-machine under the constraints of the selected instruction set."[3]
+machine under the constraints of the selected instruction set."[2]
 
 Users of Intel CPUs should select the 'Intel-Native' option and users of AMD
 CPUs should select the 'AMD-Native' option.
@@ -76,9 +73,9 @@ CPUs should select the 'AMD-Native' option.
 MINOR NOTES RELATING TO INTEL ATOM PROCESSORS
 This patch also changes -march=atom to -march=bonnell in accordance with the
 gcc v4.9 changes. Upstream is using the deprecated -march=atom flags when I
-believe it should use the newer -march=bonnell flag for atom processors.[4]
+believe it should use the newer -march=bonnell flag for atom processors.[3]
 
-It is not recommended to compile on Atom-CPUs with the 'native' option.[5] The
+It is not recommended to compile on Atom-CPUs with the 'native' option.[4] The
 recommendation is to use the 'atom' option instead.
 
 BENEFITS
@@ -90,18 +87,19 @@ https://github.com/graysky2/kernel_gcc_patch
 
 REQUIREMENTS
 linux version >=5.8
-gcc version >=9.0
+gcc version >=9.0 or clang version >=9.0
 
 ACKNOWLEDGMENTS
-This patch builds on the seminal work by Jeroen.[6]
+This patch builds on the seminal work by Jeroen.[5]
 
 REFERENCES
-1.  https://gcc.gnu.org/gcc-4.9/changes.html
-2.  https://gitlab.com/x86-psABIs/x86-64-ABI/-/commit/77566eb03bc6a326811cb7e9
-3.  https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html#index-x86-Options
-4.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
-5.  https://github.com/graysky2/kernel_gcc_patch/issues/15
-6.  http://www.linuxforge.net/docs/linux/linux-gcc.php
+1.  https://gitlab.com/x86-psABIs/x86-64-ABI/-/commit/77566eb03bc6a326811cb7e9
+2.  https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html#index-x86-Options
+3.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
+4.  https://github.com/graysky2/kernel_gcc_patch/issues/15
+5.  http://www.linuxforge.net/docs/linux/linux-gcc.php
+
+Signed-off-by: graysky <graysky@archlinux.us>
 ---
  arch/x86/Kconfig.cpu            | 332 ++++++++++++++++++++++++++++++--
  arch/x86/Makefile               |  47 ++++-
@@ -109,7 +107,7 @@ REFERENCES
  3 files changed, 428 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 814fe0d349b0..872b9cf598e3 100644
+index 814fe0d349b0..8acf6519d279 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
 @@ -157,7 +157,7 @@ config MPENTIUM4
@@ -221,7 +219,7 @@ index 814fe0d349b0..872b9cf598e3 100644
 +
 +config MZEN3
 +	bool "AMD Zen 3"
-+	depends on GCC_VERSION > 100300
++	depends on ( CC_IS_GCC && GCC_VERSION >= 100300 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
 +	help
 +	  Select this for AMD Family 19h Zen 3 processors.
 +
@@ -380,7 +378,7 @@ index 814fe0d349b0..872b9cf598e3 100644
 +
 +config MCOOPERLAKE
 +	bool "Intel Cooper Lake"
-+	depends on GCC_VERSION > 100100
++	depends on ( CC_IS_GCC && GCC_VERSION > 100100 ) || ( CC_IS_CLANG && CLANG_VERSION >= 100000 )
 +	select X86_P6_NOP
 +	help
 +
@@ -390,7 +388,7 @@ index 814fe0d349b0..872b9cf598e3 100644
 +
 +config MTIGERLAKE
 +	bool "Intel Tiger Lake"
-+	depends on GCC_VERSION > 100100
++	depends on  ( CC_IS_GCC && GCC_VERSION > 100100 ) || ( CC_IS_CLANG && CLANG_VERSION >= 100000 )
 +	select X86_P6_NOP
 +	help
 +
@@ -400,7 +398,7 @@ index 814fe0d349b0..872b9cf598e3 100644
 +
 +config MSAPPHIRERAPIDS
 +	bool "Intel Sapphire Rapids"
-+	depends on GCC_VERSION > 110000
++	depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
 +	select X86_P6_NOP
 +	help
 +
@@ -410,7 +408,7 @@ index 814fe0d349b0..872b9cf598e3 100644
 +
 +config MROCKETLAKE
 +	bool "Intel Rocket Lake"
-+	depends on GCC_VERSION > 110000
++	depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
 +	select X86_P6_NOP
 +	help
 +
@@ -420,7 +418,7 @@ index 814fe0d349b0..872b9cf598e3 100644
 +
 +config MALDERLAKE
 +	bool "Intel Alder Lake"
-+	depends on GCC_VERSION > 110000
++	depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
 +	select X86_P6_NOP
 +	help
 +
@@ -437,7 +435,7 @@ index 814fe0d349b0..872b9cf598e3 100644
  
 +config GENERIC_CPU2
 +	bool "Generic-x86-64-v2"
-+	depends on GCC_VERSION > 110000
++	depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
 +	depends on X86_64
 +	help
 +	  Generic x86-64 CPU.
@@ -445,7 +443,7 @@ index 814fe0d349b0..872b9cf598e3 100644
 +
 +config GENERIC_CPU3
 +	bool "Generic-x86-64-v3"
-+	depends on GCC_VERSION > 110000
++	depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
 +	depends on X86_64
 +	help
 +	  Generic x86-64-v3 CPU with v3 instructions.
@@ -453,27 +451,27 @@ index 814fe0d349b0..872b9cf598e3 100644
 +
 +config GENERIC_CPU4
 +	bool "Generic-x86-64-v4"
-+	depends on GCC_VERSION > 110000
++	depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
 +	depends on X86_64
 +	help
 +	  Generic x86-64 CPU with v4 instructions.
 +	  Run equally well on all x86-64 CPUs with min support of x86-64-v4.
 +
 +config MNATIVE_INTEL
-+	bool "Intel-Native optimizations autodetected by GCC"
++	bool "Intel-Native optimizations autodetected by the compiler"
 +	help
 +
-+	  GCC 4.2 and above support -march=native, which automatically detects
++	  Clang 3.8, GCC 4.2 and above support -march=native, which automatically detects
 +	  the optimum settings to use based on your processor. Do NOT use this
 +	  for AMD CPUs.  Intel Only!
 +
 +	  Enables -march=native
 +
 +config MNATIVE_AMD
-+	bool "AMD-Native optimizations autodetected by GCC"
++	bool "AMD-Native optimizations autodetected by the compiler"
 +	help
 +
-+	  GCC 4.2 and above support -march=native, which automatically detects
++	  Clang 3.8, GCC 4.2 and above support -march=native, which automatically detects
 +	  the optimum settings to use based on your processor. Do NOT use this
 +	  for Intel CPUs.  AMD Only!
 +
@@ -538,10 +536,10 @@ index 814fe0d349b0..872b9cf598e3 100644
  	default "4"
  
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 9a85eae37b17..facf9a278fe3 100644
+index 78faf9c7e3ae..ee0cd507af8b 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -113,11 +113,48 @@ else
+@@ -114,11 +114,48 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
          cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
          cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
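
For readers who want to check the x86-64 feature levels described in
the patch text above without glibc 2.33: GCC and Clang both provide
__builtin_cpu_supports(), so a subset of each level's required flags
can be probed directly. This is only a rough sketch; the authoritative
per-level feature lists are in the psABI commit cited as reference 1:

#include <stdio.h>

int main(void)
{
	__builtin_cpu_init();

	/* x86-64-v2 requires, among other features, SSE4.2 and POPCNT. */
	int v2 = __builtin_cpu_supports("sse4.2") &&
		 __builtin_cpu_supports("popcnt");

	/* x86-64-v3 additionally requires AVX2, BMI2 and FMA. */
	int v3 = v2 && __builtin_cpu_supports("avx2") &&
		       __builtin_cpu_supports("bmi2") &&
		       __builtin_cpu_supports("fma");

	printf("x86-64-v2 feature subset: %s\n", v2 ? "present" : "absent");
	printf("x86-64-v3 feature subset: %s\n", v3 ? "present" : "absent");
	return 0;
}

On a machine whose dynamic loader reports x86-64-v3 via the ld.so
invocation quoted in the patch description, both lines should print
"present".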


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-30 14:22 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-30 14:22 UTC
  To: gentoo-commits

commit:     fcca3bfe3f19c1750be503fe947954344ceecfbd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 30 14:21:41 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 30 14:21:41 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fcca3bfe

Linux patch 5.12.14

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1013_linux-5.12.14.patch | 4590 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4594 insertions(+)

diff --git a/0000_README b/0000_README
index 34c90d1..96f1ac7 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-5.12.13.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.13
 
+Patch:  1013_linux-5.12.14.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.14
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-5.12.14.patch b/1013_linux-5.12.14.patch
new file mode 100644
index 0000000..6e18a21
--- /dev/null
+++ b/1013_linux-5.12.14.patch
@@ -0,0 +1,4590 @@
+diff --git a/Makefile b/Makefile
+index d2fe36db78aed..433f164f9ee0f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 1a5edf562e85e..73ca7797b92f6 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -545,9 +545,11 @@ void notrace cpu_init(void)
+ 	 * In Thumb-2, msr with an immediate value is not allowed.
+ 	 */
+ #ifdef CONFIG_THUMB2_KERNEL
+-#define PLC	"r"
++#define PLC_l	"l"
++#define PLC_r	"r"
+ #else
+-#define PLC	"I"
++#define PLC_l	"I"
++#define PLC_r	"I"
+ #endif
+ 
+ 	/*
+@@ -569,15 +571,15 @@ void notrace cpu_init(void)
+ 	"msr	cpsr_c, %9"
+ 	    :
+ 	    : "r" (stk),
+-	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
++	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ 	      "I" (offsetof(struct stack, irq[0])),
+-	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
++	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ 	      "I" (offsetof(struct stack, abt[0])),
+-	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
++	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ 	      "I" (offsetof(struct stack, und[0])),
+-	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
++	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+ 	      "I" (offsetof(struct stack, fiq[0])),
+-	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
++	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ 	    : "r14");
+ #endif
+ }
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index 5243bf2327c02..a5ee34117321d 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -16,7 +16,7 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
+ 	CC_FLAGS_FTRACE := -fpatchable-function-entry=8
+ endif
+ 
+-ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
++ifeq ($(CONFIG_CMODEL_MEDLOW),y)
+ KBUILD_CFLAGS_MODULE += -mcmodel=medany
+ endif
+ 
+diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+index eeb4f8c3e0e72..d0d206cdb9990 100644
+--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
++++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+@@ -272,7 +272,7 @@
+ 			cache-size = <2097152>;
+ 			cache-unified;
+ 			interrupt-parent = <&plic0>;
+-			interrupts = <19 20 21 22>;
++			interrupts = <19 21 22 20>;
+ 			reg = <0x0 0x2010000 0x0 0x1000>;
+ 		};
+ 		gpio: gpio@10060000 {
+diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
+index 2b543163d90a0..76c6034428be8 100644
+--- a/arch/s390/include/asm/stacktrace.h
++++ b/arch/s390/include/asm/stacktrace.h
+@@ -91,12 +91,16 @@ struct stack_frame {
+ 	CALL_ARGS_4(arg1, arg2, arg3, arg4);				\
+ 	register unsigned long r4 asm("6") = (unsigned long)(arg5)
+ 
+-#define CALL_FMT_0 "=&d" (r2) :
+-#define CALL_FMT_1 "+&d" (r2) :
+-#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
+-#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
+-#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
+-#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
++/*
++ * To keep this simple mark register 2-6 as being changed (volatile)
++ * by the called function, even though register 6 is saved/nonvolatile.
++ */
++#define CALL_FMT_0 "=&d" (r2)
++#define CALL_FMT_1 "+&d" (r2)
++#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
++#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
++#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
++#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)
+ 
+ #define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
+ #define CALL_CLOBBER_4 CALL_CLOBBER_5
+@@ -118,7 +122,7 @@ struct stack_frame {
+ 		"	brasl	14,%[_fn]\n"				\
+ 		"	la	15,0(%[_prev])\n"			\
+ 		: [_prev] "=&a" (prev), CALL_FMT_##nr			\
+-		  [_stack] "R" (stack),					\
++		: [_stack] "R" (stack),					\
+ 		  [_bc] "i" (offsetof(struct stack_frame, back_chain)),	\
+ 		  [_frame] "d" (frame),					\
+ 		  [_fn] "X" (fn) : CALL_CLOBBER_##nr);			\
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 9cc71ca9a88f9..e84f495e7eb29 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -418,6 +418,7 @@ ENTRY(\name)
+ 	xgr	%r6,%r6
+ 	xgr	%r7,%r7
+ 	xgr	%r10,%r10
++	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ 	stmg	%r8,%r9,__PT_PSW(%r11)
+ 	tm	%r8,0x0001		# coming from user space?
+diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
+index 90163e6184f5c..080e7aed181f4 100644
+--- a/arch/s390/kernel/signal.c
++++ b/arch/s390/kernel/signal.c
+@@ -512,7 +512,6 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
+ 
+ 	/* No handlers present - check for system call restart */
+ 	clear_pt_regs_flag(regs, PIF_SYSCALL);
+-	clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
+ 	if (current->thread.system_call) {
+ 		regs->int_code = current->thread.system_call;
+ 		switch (regs->gprs[2]) {
+diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
+index bfcc327acc6b2..26aa2614ee352 100644
+--- a/arch/s390/kernel/topology.c
++++ b/arch/s390/kernel/topology.c
+@@ -66,7 +66,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
+ {
+ 	static cpumask_t mask;
+ 
+-	cpumask_copy(&mask, cpumask_of(cpu));
++	cpumask_clear(&mask);
++	if (!cpu_online(cpu))
++		goto out;
++	cpumask_set_cpu(cpu, &mask);
+ 	switch (topology_mode) {
+ 	case TOPOLOGY_MODE_HW:
+ 		while (info) {
+@@ -83,10 +86,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
+ 	default:
+ 		fallthrough;
+ 	case TOPOLOGY_MODE_SINGLE:
+-		cpumask_copy(&mask, cpumask_of(cpu));
+ 		break;
+ 	}
+ 	cpumask_and(&mask, &mask, cpu_online_mask);
++out:
+ 	cpumask_copy(dst, &mask);
+ }
+ 
+@@ -95,7 +98,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
+ 	static cpumask_t mask;
+ 	int i;
+ 
+-	cpumask_copy(&mask, cpumask_of(cpu));
++	cpumask_clear(&mask);
++	if (!cpu_online(cpu))
++		goto out;
++	cpumask_set_cpu(cpu, &mask);
+ 	if (topology_mode != TOPOLOGY_MODE_HW)
+ 		goto out;
+ 	cpu -= cpu % (smp_cpu_mtid + 1);
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 4efd39aacb9f2..8767dc53b5699 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -127,8 +127,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
+ 		/* User code screwed up. */
+ 		regs->ax = -EFAULT;
+ 
+-		instrumentation_end();
+ 		local_irq_disable();
++		instrumentation_end();
+ 		irqentry_exit_to_user_mode(regs);
+ 		return false;
+ 	}
+@@ -266,15 +266,16 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+ 	irqentry_state_t state = irqentry_enter(regs);
+ 	bool inhcall;
+ 
++	instrumentation_begin();
+ 	run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+ 
+ 	inhcall = get_and_clear_inhcall();
+ 	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
+-		instrumentation_begin();
+ 		irqentry_exit_cond_resched();
+ 		instrumentation_end();
+ 		restore_inhcall(inhcall);
+ 	} else {
++		instrumentation_end();
+ 		irqentry_exit(regs, state);
+ 	}
+ }
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 18df171296955..7050a9ebd73f1 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -45,9 +45,11 @@
+ #include "perf_event.h"
+ 
+ struct x86_pmu x86_pmu __read_mostly;
++static struct pmu pmu;
+ 
+ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ 	.enabled = 1,
++	.pmu = &pmu,
+ };
+ 
+ DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
+@@ -380,10 +382,12 @@ int x86_reserve_hardware(void)
+ 	if (!atomic_inc_not_zero(&pmc_refcount)) {
+ 		mutex_lock(&pmc_reserve_mutex);
+ 		if (atomic_read(&pmc_refcount) == 0) {
+-			if (!reserve_pmc_hardware())
++			if (!reserve_pmc_hardware()) {
+ 				err = -EBUSY;
+-			else
++			} else {
+ 				reserve_ds_buffers();
++				reserve_lbr_buffers();
++			}
+ 		}
+ 		if (!err)
+ 			atomic_inc(&pmc_refcount);
+@@ -724,16 +728,23 @@ void x86_pmu_enable_all(int added)
+ 	}
+ }
+ 
+-static struct pmu pmu;
+-
+ static inline int is_x86_event(struct perf_event *event)
+ {
+ 	return event->pmu == &pmu;
+ }
+ 
+-struct pmu *x86_get_pmu(void)
++struct pmu *x86_get_pmu(unsigned int cpu)
+ {
+-	return &pmu;
++	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
++
++	/*
++	 * All CPUs of the hybrid type have been offline.
++	 * The x86_get_pmu() should not be invoked.
++	 */
++	if (WARN_ON_ONCE(!cpuc->pmu))
++		return &pmu;
++
++	return cpuc->pmu;
+ }
+ /*
+  * Event scheduler state:
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 4c18e7fb58f58..77fe4fece6798 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4879,7 +4879,7 @@ static void update_tfa_sched(void *ignored)
+ 	 * and if so force schedule out for all event types all contexts
+ 	 */
+ 	if (test_bit(3, cpuc->active_mask))
+-		perf_pmu_resched(x86_get_pmu());
++		perf_pmu_resched(x86_get_pmu(smp_processor_id()));
+ }
+ 
+ static ssize_t show_sysctl_tfa(struct device *cdev,
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index d32b302719fe5..72df2f392c863 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -2192,7 +2192,7 @@ void __init intel_ds_init(void)
+ 					PERF_SAMPLE_TIME;
+ 				x86_pmu.flags |= PMU_FL_PEBS_ALL;
+ 				pebs_qual = "-baseline";
+-				x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
++				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
+ 			} else {
+ 				/* Only basic record supported */
+ 				x86_pmu.large_pebs_flags &=
+@@ -2207,7 +2207,7 @@ void __init intel_ds_init(void)
+ 
+ 			if (x86_pmu.intel_cap.pebs_output_pt_available) {
+ 				pr_cont("PEBS-via-PT, ");
+-				x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
++				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+ 			}
+ 
+ 			break;
+diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
+index 21890dacfcfee..c9cd6ce0fa2ad 100644
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -658,7 +658,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
+ 
+ void intel_pmu_lbr_add(struct perf_event *event)
+ {
+-	struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
+ 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ 
+ 	if (!x86_pmu.lbr_nr)
+@@ -696,16 +695,11 @@ void intel_pmu_lbr_add(struct perf_event *event)
+ 	perf_sched_cb_inc(event->ctx->pmu);
+ 	if (!cpuc->lbr_users++ && !event->total_time_running)
+ 		intel_pmu_lbr_reset();
+-
+-	if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
+-	    kmem_cache && !cpuc->lbr_xsave &&
+-	    (cpuc->lbr_users != cpuc->lbr_pebs_users))
+-		cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
+ }
+ 
+ void release_lbr_buffers(void)
+ {
+-	struct kmem_cache *kmem_cache = x86_get_pmu()->task_ctx_cache;
++	struct kmem_cache *kmem_cache;
+ 	struct cpu_hw_events *cpuc;
+ 	int cpu;
+ 
+@@ -714,6 +708,7 @@ void release_lbr_buffers(void)
+ 
+ 	for_each_possible_cpu(cpu) {
+ 		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
++		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
+ 		if (kmem_cache && cpuc->lbr_xsave) {
+ 			kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
+ 			cpuc->lbr_xsave = NULL;
+@@ -721,6 +716,27 @@ void release_lbr_buffers(void)
+ 	}
+ }
+ 
++void reserve_lbr_buffers(void)
++{
++	struct kmem_cache *kmem_cache;
++	struct cpu_hw_events *cpuc;
++	int cpu;
++
++	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
++		return;
++
++	for_each_possible_cpu(cpu) {
++		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
++		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
++		if (!kmem_cache || cpuc->lbr_xsave)
++			continue;
++
++		cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
++							GFP_KERNEL | __GFP_ZERO,
++							cpu_to_node(cpu));
++	}
++}
++
+ void intel_pmu_lbr_del(struct perf_event *event)
+ {
+ 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+@@ -1609,7 +1625,7 @@ void intel_pmu_lbr_init_hsw(void)
+ 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
+ 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
+ 
+-	x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
++	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+ 
+ 	if (lbr_from_signext_quirk_needed())
+ 		static_branch_enable(&lbr_from_quirk_key);
+@@ -1629,7 +1645,7 @@ __init void intel_pmu_lbr_init_skl(void)
+ 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
+ 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
+ 
+-	x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
++	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+ 
+ 	/*
+ 	 * SW branch filter usage:
+@@ -1726,7 +1742,7 @@ static bool is_arch_lbr_xsave_available(void)
+ 
+ void __init intel_pmu_arch_lbr_init(void)
+ {
+-	struct pmu *pmu = x86_get_pmu();
++	struct pmu *pmu = x86_get_pmu(smp_processor_id());
+ 	union cpuid28_eax eax;
+ 	union cpuid28_ebx ebx;
+ 	union cpuid28_ecx ecx;
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 53b2b5fc23bca..35cdece5644fb 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -327,6 +327,8 @@ struct cpu_hw_events {
+ 	int				n_pair; /* Large increment events */
+ 
+ 	void				*kfree_on_online[X86_PERF_KFREE_MAX];
++
++	struct pmu			*pmu;
+ };
+ 
+ #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
+@@ -905,7 +907,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = {		\
+ 	.event_str_ht	= ht,						\
+ }
+ 
+-struct pmu *x86_get_pmu(void);
++struct pmu *x86_get_pmu(unsigned int cpu);
+ extern struct x86_pmu x86_pmu __read_mostly;
+ 
+ static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
+@@ -1135,6 +1137,8 @@ void reserve_ds_buffers(void);
+ 
+ void release_lbr_buffers(void);
+ 
++void reserve_lbr_buffers(void);
++
+ extern struct event_constraint bts_constraint;
+ extern struct event_constraint vlbr_constraint;
+ 
+@@ -1282,6 +1286,10 @@ static inline void release_lbr_buffers(void)
+ {
+ }
+ 
++static inline void reserve_lbr_buffers(void)
++{
++}
++
+ static inline int intel_pmu_init(void)
+ {
+ 	return 0;
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index fdee23ea4e173..16bf4d4a8159e 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -204,6 +204,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+ 		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
+ }
+ 
++static inline void fxsave(struct fxregs_state *fx)
++{
++	if (IS_ENABLED(CONFIG_X86_32))
++		asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
++	else
++		asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
++}
++
+ /* These macros all use (%edi)/(%rdi) as the single memory argument. */
+ #define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
+ #define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
+@@ -268,28 +276,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+ 		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+ 		     : "memory")
+ 
+-/*
+- * This function is called only during boot time when x86 caps are not set
+- * up and alternative can not be used yet.
+- */
+-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
+-{
+-	u64 mask = xfeatures_mask_all;
+-	u32 lmask = mask;
+-	u32 hmask = mask >> 32;
+-	int err;
+-
+-	WARN_ON(system_state != SYSTEM_BOOTING);
+-
+-	if (boot_cpu_has(X86_FEATURE_XSAVES))
+-		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
+-	else
+-		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
+-
+-	/* We should never fault when copying to a kernel buffer: */
+-	WARN_ON_FPU(err);
+-}
+-
+ /*
+  * This function is called only during boot time when x86 caps are not set
+  * up and alternative can not be used yet.
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index ec3ae30547920..b7b92cdf3add4 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -221,28 +221,18 @@ sanitize_restored_user_xstate(union fpregs_state *state,
+ 
+ 	if (use_xsave()) {
+ 		/*
+-		 * Note: we don't need to zero the reserved bits in the
+-		 * xstate_header here because we either didn't copy them at all,
+-		 * or we checked earlier that they aren't set.
++		 * Clear all feature bits which are not set in
++		 * user_xfeatures and clear all extended features
++		 * for fx_only mode.
+ 		 */
++		u64 mask = fx_only ? XFEATURE_MASK_FPSSE : user_xfeatures;
+ 
+ 		/*
+-		 * 'user_xfeatures' might have bits clear which are
+-		 * set in header->xfeatures. This represents features that
+-		 * were in init state prior to a signal delivery, and need
+-		 * to be reset back to the init state.  Clear any user
+-		 * feature bits which are set in the kernel buffer to get
+-		 * them back to the init state.
+-		 *
+-		 * Supervisor state is unchanged by input from userspace.
+-		 * Ensure supervisor state bits stay set and supervisor
+-		 * state is not modified.
++		 * Supervisor state has to be preserved. The sigframe
++		 * restore can only modify user features, i.e. @mask
++		 * cannot contain them.
+ 		 */
+-		if (fx_only)
+-			header->xfeatures = XFEATURE_MASK_FPSSE;
+-		else
+-			header->xfeatures &= user_xfeatures |
+-					     xfeatures_mask_supervisor();
++		header->xfeatures &= mask | xfeatures_mask_supervisor();
+ 	}
+ 
+ 	if (use_fxsr()) {
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 2ad57cc14b83f..451435d7ff413 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -440,6 +440,25 @@ static void __init print_xstate_offset_size(void)
+ 	}
+ }
+ 
++/*
++ * All supported features have either init state all zeros or are
++ * handled in setup_init_fpu() individually. This is an explicit
++ * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
++ * newly added supported features at build time and make people
++ * actually look at the init state for the new feature.
++ */
++#define XFEATURES_INIT_FPSTATE_HANDLED		\
++	(XFEATURE_MASK_FP |			\
++	 XFEATURE_MASK_SSE |			\
++	 XFEATURE_MASK_YMM |			\
++	 XFEATURE_MASK_OPMASK |			\
++	 XFEATURE_MASK_ZMM_Hi256 |		\
++	 XFEATURE_MASK_Hi16_ZMM	 |		\
++	 XFEATURE_MASK_PKRU |			\
++	 XFEATURE_MASK_BNDREGS |		\
++	 XFEATURE_MASK_BNDCSR |			\
++	 XFEATURE_MASK_PASID)
++
+ /*
+  * setup the xstate image representing the init state
+  */
+@@ -447,6 +466,10 @@ static void __init setup_init_fpu_buf(void)
+ {
+ 	static int on_boot_cpu __initdata = 1;
+ 
++	BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
++		      XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
++		     XFEATURES_INIT_FPSTATE_HANDLED);
++
+ 	WARN_ON_FPU(!on_boot_cpu);
+ 	on_boot_cpu = 0;
+ 
+@@ -466,10 +489,22 @@ static void __init setup_init_fpu_buf(void)
+ 	copy_kernel_to_xregs_booting(&init_fpstate.xsave);
+ 
+ 	/*
+-	 * Dump the init state again. This is to identify the init state
+-	 * of any feature which is not represented by all zero's.
++	 * All components are now in init state. Read the state back so
++	 * that init_fpstate contains all non-zero init state. This only
++	 * works with XSAVE, but not with XSAVEOPT and XSAVES because
++	 * those use the init optimization which skips writing data for
++	 * components in init state.
++	 *
++	 * XSAVE could be used, but that would require to reshuffle the
++	 * data when XSAVES is available because XSAVES uses xstate
++	 * compaction. But doing so is a pointless exercise because most
++	 * components have an all zeros init state except for the legacy
++	 * ones (FP and SSE). Those can be saved with FXSAVE into the
++	 * legacy area. Adding new features requires to ensure that init
++	 * state is all zeroes or if not to add the necessary handling
++	 * here.
+ 	 */
+-	copy_xregs_to_kernel_booting(&init_fpstate.xsave);
++	fxsave(&init_fpstate.fxsave);
+ }
+ 
+ static int xfeature_uncompacted_offset(int xfeature_nr)
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index dbc6214d69def..8f3b438f6fd3b 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -143,9 +143,25 @@ static void sev_asid_free(int asid)
+ 	mutex_unlock(&sev_bitmap_lock);
+ }
+ 
+-static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
++static void sev_decommission(unsigned int handle)
+ {
+ 	struct sev_data_decommission *decommission;
++
++	if (!handle)
++		return;
++
++	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
++	if (!decommission)
++		return;
++
++	decommission->handle = handle;
++	sev_guest_decommission(decommission, NULL);
++
++	kfree(decommission);
++}
++
++static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
++{
+ 	struct sev_data_deactivate *data;
+ 
+ 	if (!handle)
+@@ -165,15 +181,7 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+ 
+ 	kfree(data);
+ 
+-	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
+-	if (!decommission)
+-		return;
+-
+-	/* decommission handle */
+-	decommission->handle = handle;
+-	sev_guest_decommission(decommission, NULL);
+-
+-	kfree(decommission);
++	sev_decommission(handle);
+ }
+ 
+ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+@@ -303,8 +311,10 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 
+ 	/* Bind ASID to this guest */
+ 	ret = sev_bind_asid(kvm, start->handle, error);
+-	if (ret)
++	if (ret) {
++		sev_decommission(start->handle);
+ 		goto e_free_session;
++	}
+ 
+ 	/* return handle to userspace */
+ 	params.handle = start->handle;
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 0a0e168be1cbe..9b0e771302cee 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
+ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
+ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+ 
++#define RS690_LOWER_TOP_OF_DRAM2	0x30
++#define RS690_LOWER_TOP_OF_DRAM2_VALID	0x1
++#define RS690_UPPER_TOP_OF_DRAM2	0x31
++#define RS690_HTIU_NB_INDEX		0xA8
++#define RS690_HTIU_NB_INDEX_WR_ENABLE	0x100
++#define RS690_HTIU_NB_DATA		0xAC
++
++/*
++ * Some BIOS implementations support RAM above 4GB, but do not configure the
++ * PCI host to respond to bus master accesses for these addresses. These
++ * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
++ * works as expected for addresses below 4GB.
++ *
++ * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
++ * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
++ */
++static void rs690_fix_64bit_dma(struct pci_dev *pdev)
++{
++	u32 val = 0;
++	phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
++
++	if (top_of_dram <= (1ULL << 32))
++		return;
++
++	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
++				RS690_LOWER_TOP_OF_DRAM2);
++	pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
++
++	if (val)
++		return;
++
++	pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
++
++	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
++		RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
++	pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
++
++	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
++		RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
++	pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
++		top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
++
+ #endif
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 8183ddb3700c4..64db5852432e7 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -592,8 +592,10 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
+ DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
+ {
+ 	/* This should never happen and there is no way to handle it. */
++	instrumentation_begin();
+ 	pr_err("Unknown trap in Xen PV mode.");
+ 	BUG();
++	instrumentation_end();
+ }
+ 
+ #ifdef CONFIG_X86_MCE
+diff --git a/certs/Kconfig b/certs/Kconfig
+index c94e93d8bccf0..ab88d2a7f3c7f 100644
+--- a/certs/Kconfig
++++ b/certs/Kconfig
+@@ -83,4 +83,21 @@ config SYSTEM_BLACKLIST_HASH_LIST
+ 	  wrapper to incorporate the list into the kernel.  Each <hash> should
+ 	  be a string of hex digits.
+ 
++config SYSTEM_REVOCATION_LIST
++	bool "Provide system-wide ring of revocation certificates"
++	depends on SYSTEM_BLACKLIST_KEYRING
++	depends on PKCS7_MESSAGE_PARSER=y
++	help
++	  If set, this allows revocation certificates to be stored in the
++	  blacklist keyring and implements a hook whereby a PKCS#7 message can
++	  be checked to see if it matches such a certificate.
++
++config SYSTEM_REVOCATION_KEYS
++	string "X.509 certificates to be preloaded into the system blacklist keyring"
++	depends on SYSTEM_REVOCATION_LIST
++	help
++	  If set, this option should be the filename of a PEM-formatted file
++	  containing X.509 certificates to be included in the default blacklist
++	  keyring.
++
+ endmenu
+diff --git a/certs/Makefile b/certs/Makefile
+index f4c25b67aad90..b6db52ebf0beb 100644
+--- a/certs/Makefile
++++ b/certs/Makefile
+@@ -3,8 +3,9 @@
+ # Makefile for the linux kernel signature checking certificates.
+ #
+ 
+-obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
+-obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o
++obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o common.o
++obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o common.o
++obj-$(CONFIG_SYSTEM_REVOCATION_LIST) += revocation_certificates.o
+ ifneq ($(CONFIG_SYSTEM_BLACKLIST_HASH_LIST),"")
+ obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_hashes.o
+ else
+@@ -29,7 +30,7 @@ $(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREF
+ 	$(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS))
+ endif # CONFIG_SYSTEM_TRUSTED_KEYRING
+ 
+-clean-files := x509_certificate_list .x509.list
++clean-files := x509_certificate_list .x509.list x509_revocation_list
+ 
+ ifeq ($(CONFIG_MODULE_SIG),y)
+ ###############################################################################
+@@ -104,3 +105,17 @@ targets += signing_key.x509
+ $(obj)/signing_key.x509: scripts/extract-cert $(X509_DEP) FORCE
+ 	$(call if_changed,extract_certs,$(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY))
+ endif # CONFIG_MODULE_SIG
++
++ifeq ($(CONFIG_SYSTEM_REVOCATION_LIST),y)
++
++$(eval $(call config_filename,SYSTEM_REVOCATION_KEYS))
++
++$(obj)/revocation_certificates.o: $(obj)/x509_revocation_list
++
++quiet_cmd_extract_certs  = EXTRACT_CERTS   $(patsubst "%",%,$(2))
++      cmd_extract_certs  = scripts/extract-cert $(2) $@
++
++targets += x509_revocation_list
++$(obj)/x509_revocation_list: scripts/extract-cert $(SYSTEM_REVOCATION_KEYS_SRCPREFIX)$(SYSTEM_REVOCATION_KEYS_FILENAME) FORCE
++	$(call if_changed,extract_certs,$(SYSTEM_REVOCATION_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_REVOCATION_KEYS))
++endif
+diff --git a/certs/blacklist.c b/certs/blacklist.c
+index bffe4c6f4a9e2..c9a435b15af40 100644
+--- a/certs/blacklist.c
++++ b/certs/blacklist.c
+@@ -17,9 +17,15 @@
+ #include <linux/uidgid.h>
+ #include <keys/system_keyring.h>
+ #include "blacklist.h"
++#include "common.h"
+ 
+ static struct key *blacklist_keyring;
+ 
++#ifdef CONFIG_SYSTEM_REVOCATION_LIST
++extern __initconst const u8 revocation_certificate_list[];
++extern __initconst const unsigned long revocation_certificate_list_size;
++#endif
++
+ /*
+  * The description must be a type prefix, a colon and then an even number of
+  * hex digits.  The hash is kept in the description.
+@@ -145,6 +151,49 @@ int is_binary_blacklisted(const u8 *hash, size_t hash_len)
+ }
+ EXPORT_SYMBOL_GPL(is_binary_blacklisted);
+ 
++#ifdef CONFIG_SYSTEM_REVOCATION_LIST
++/**
++ * add_key_to_revocation_list - Add a revocation certificate to the blacklist
++ * @data: The data blob containing the certificate
++ * @size: The size of data blob
++ */
++int add_key_to_revocation_list(const char *data, size_t size)
++{
++	key_ref_t key;
++
++	key = key_create_or_update(make_key_ref(blacklist_keyring, true),
++				   "asymmetric",
++				   NULL,
++				   data,
++				   size,
++				   ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW),
++				   KEY_ALLOC_NOT_IN_QUOTA | KEY_ALLOC_BUILT_IN);
++
++	if (IS_ERR(key)) {
++		pr_err("Problem with revocation key (%ld)\n", PTR_ERR(key));
++		return PTR_ERR(key);
++	}
++
++	return 0;
++}
++
++/**
++ * is_key_on_revocation_list - Determine if the key for a PKCS#7 message is revoked
++ * @pkcs7: The PKCS#7 message to check
++ */
++int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
++{
++	int ret;
++
++	ret = pkcs7_validate_trust(pkcs7, blacklist_keyring);
++
++	if (ret == 0)
++		return -EKEYREJECTED;
++
++	return -ENOKEY;
++}
++#endif
++
+ /*
+  * Initialise the blacklist
+  */
+@@ -177,3 +226,18 @@ static int __init blacklist_init(void)
+  * Must be initialised before we try and load the keys into the keyring.
+  */
+ device_initcall(blacklist_init);
++
++#ifdef CONFIG_SYSTEM_REVOCATION_LIST
++/*
++ * Load the compiled-in list of revocation X.509 certificates.
++ */
++static __init int load_revocation_certificate_list(void)
++{
++	if (revocation_certificate_list_size)
++		pr_notice("Loading compiled-in revocation X.509 certificates\n");
++
++	return load_certificate_list(revocation_certificate_list, revocation_certificate_list_size,
++				     blacklist_keyring);
++}
++late_initcall(load_revocation_certificate_list);
++#endif
+diff --git a/certs/blacklist.h b/certs/blacklist.h
+index 1efd6fa0dc608..51b320cf85749 100644
+--- a/certs/blacklist.h
++++ b/certs/blacklist.h
+@@ -1,3 +1,5 @@
+ #include <linux/kernel.h>
++#include <linux/errno.h>
++#include <crypto/pkcs7.h>
+ 
+ extern const char __initconst *const blacklist_hashes[];
+diff --git a/certs/common.c b/certs/common.c
+new file mode 100644
+index 0000000000000..16a220887a53e
+--- /dev/null
++++ b/certs/common.c
+@@ -0,0 +1,57 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/kernel.h>
++#include <linux/key.h>
++#include "common.h"
++
++int load_certificate_list(const u8 cert_list[],
++			  const unsigned long list_size,
++			  const struct key *keyring)
++{
++	key_ref_t key;
++	const u8 *p, *end;
++	size_t plen;
++
++	p = cert_list;
++	end = p + list_size;
++	while (p < end) {
++		/* Each cert begins with an ASN.1 SEQUENCE tag and must be more
++		 * than 256 bytes in size.
++		 */
++		if (end - p < 4)
++			goto dodgy_cert;
++		if (p[0] != 0x30 &&
++		    p[1] != 0x82)
++			goto dodgy_cert;
++		plen = (p[2] << 8) | p[3];
++		plen += 4;
++		if (plen > end - p)
++			goto dodgy_cert;
++
++		key = key_create_or_update(make_key_ref(keyring, 1),
++					   "asymmetric",
++					   NULL,
++					   p,
++					   plen,
++					   ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
++					   KEY_USR_VIEW | KEY_USR_READ),
++					   KEY_ALLOC_NOT_IN_QUOTA |
++					   KEY_ALLOC_BUILT_IN |
++					   KEY_ALLOC_BYPASS_RESTRICTION);
++		if (IS_ERR(key)) {
++			pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
++			       PTR_ERR(key));
++		} else {
++			pr_notice("Loaded X.509 cert '%s'\n",
++				  key_ref_to_ptr(key)->description);
++			key_ref_put(key);
++		}
++		p += plen;
++	}
++
++	return 0;
++
++dodgy_cert:
++	pr_err("Problem parsing in-kernel X.509 certificate list\n");
++	return 0;
++}
+diff --git a/certs/common.h b/certs/common.h
+new file mode 100644
+index 0000000000000..abdb5795936b7
+--- /dev/null
++++ b/certs/common.h
+@@ -0,0 +1,9 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++
++#ifndef _CERT_COMMON_H
++#define _CERT_COMMON_H
++
++int load_certificate_list(const u8 cert_list[], const unsigned long list_size,
++			  const struct key *keyring);
++
++#endif
+diff --git a/certs/revocation_certificates.S b/certs/revocation_certificates.S
+new file mode 100644
+index 0000000000000..f21aae8a8f0ef
+--- /dev/null
++++ b/certs/revocation_certificates.S
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#include <linux/export.h>
++#include <linux/init.h>
++
++	__INITRODATA
++
++	.align 8
++	.globl revocation_certificate_list
++revocation_certificate_list:
++__revocation_list_start:
++	.incbin "certs/x509_revocation_list"
++__revocation_list_end:
++
++	.align 8
++	.globl revocation_certificate_list_size
++revocation_certificate_list_size:
++#ifdef CONFIG_64BIT
++	.quad __revocation_list_end - __revocation_list_start
++#else
++	.long __revocation_list_end - __revocation_list_start
++#endif
+diff --git a/certs/system_keyring.c b/certs/system_keyring.c
+index 4b693da488f14..0c9a4795e847b 100644
+--- a/certs/system_keyring.c
++++ b/certs/system_keyring.c
+@@ -16,6 +16,7 @@
+ #include <keys/asymmetric-type.h>
+ #include <keys/system_keyring.h>
+ #include <crypto/pkcs7.h>
++#include "common.h"
+ 
+ static struct key *builtin_trusted_keys;
+ #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+@@ -137,54 +138,10 @@ device_initcall(system_trusted_keyring_init);
+  */
+ static __init int load_system_certificate_list(void)
+ {
+-	key_ref_t key;
+-	const u8 *p, *end;
+-	size_t plen;
+-
+ 	pr_notice("Loading compiled-in X.509 certificates\n");
+ 
+-	p = system_certificate_list;
+-	end = p + system_certificate_list_size;
+-	while (p < end) {
+-		/* Each cert begins with an ASN.1 SEQUENCE tag and must be more
+-		 * than 256 bytes in size.
+-		 */
+-		if (end - p < 4)
+-			goto dodgy_cert;
+-		if (p[0] != 0x30 &&
+-		    p[1] != 0x82)
+-			goto dodgy_cert;
+-		plen = (p[2] << 8) | p[3];
+-		plen += 4;
+-		if (plen > end - p)
+-			goto dodgy_cert;
+-
+-		key = key_create_or_update(make_key_ref(builtin_trusted_keys, 1),
+-					   "asymmetric",
+-					   NULL,
+-					   p,
+-					   plen,
+-					   ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+-					   KEY_USR_VIEW | KEY_USR_READ),
+-					   KEY_ALLOC_NOT_IN_QUOTA |
+-					   KEY_ALLOC_BUILT_IN |
+-					   KEY_ALLOC_BYPASS_RESTRICTION);
+-		if (IS_ERR(key)) {
+-			pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
+-			       PTR_ERR(key));
+-		} else {
+-			pr_notice("Loaded X.509 cert '%s'\n",
+-				  key_ref_to_ptr(key)->description);
+-			key_ref_put(key);
+-		}
+-		p += plen;
+-	}
+-
+-	return 0;
+-
+-dodgy_cert:
+-	pr_err("Problem parsing in-kernel X.509 certificate list\n");
+-	return 0;
++	return load_certificate_list(system_certificate_list, system_certificate_list_size,
++				     builtin_trusted_keys);
+ }
+ late_initcall(load_system_certificate_list);
+ 
+@@ -242,6 +199,12 @@ int verify_pkcs7_message_sig(const void *data, size_t len,
+ 			pr_devel("PKCS#7 platform keyring is not available\n");
+ 			goto error;
+ 		}
++
++		ret = is_key_on_revocation_list(pkcs7);
++		if (ret != -ENOKEY) {
++			pr_devel("PKCS#7 platform key is on revocation list\n");
++			goto error;
++		}
+ 	}
+ 	ret = pkcs7_validate_trust(pkcs7, trusted_keys);
+ 	if (ret < 0) {
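Note the inverted return convention in the new revocation check: is_key_on_revocation_list() returns -ENOKEY when the signing key is not on the list, so -ENOKEY is the success path and everything else (0 for a revoked key, or another errno) fails the verification. The !CONFIG_SYSTEM_REVOCATION_LIST stub in system_keyring.h, later in this patch, returns -ENOKEY for the same reason. Condensed, the call site reads:

	ret = is_key_on_revocation_list(pkcs7);
	if (ret != -ENOKEY)	/* 0 == revoked; other errors fail closed */
		goto error;
	/* not revoked: proceed to pkcs7_validate_trust() */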
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 88310ac9ce906..62c536f9d9258 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -1032,7 +1032,15 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
+ 	}
+ 
+ 	set_secondary_fwnode(dev, &swnode->fwnode);
+-	software_node_notify(dev, KOBJ_ADD);
++
++	/*
++	 * If the device has been fully registered by the time this function is
++	 * called, software_node_notify() must be called separately so that the
++	 * symlinks get created and the reference count of the node is kept in
++	 * balance.
++	 */
++	if (device_is_registered(dev))
++		software_node_notify(dev, KOBJ_ADD);
+ 
+ 	return 0;
+ }
+@@ -1052,7 +1060,8 @@ void device_remove_software_node(struct device *dev)
+ 	if (!swnode)
+ 		return;
+ 
+-	software_node_notify(dev, KOBJ_REMOVE);
++	if (device_is_registered(dev))
++		software_node_notify(dev, KOBJ_REMOVE);
+ 	set_secondary_fwnode(dev, NULL);
+ 	kobject_put(&swnode->kobj);
+ }
+@@ -1106,8 +1115,7 @@ int software_node_notify(struct device *dev, unsigned long action)
+ 
+ 	switch (action) {
+ 	case KOBJ_ADD:
+-		ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
+-					       "software_node");
++		ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
+ 		if (ret)
+ 			break;
+ 
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 03b1b03349477..c42b17b76640e 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -690,6 +690,7 @@ config XILINX_ZYNQMP_DMA
+ 
+ config XILINX_ZYNQMP_DPDMA
+ 	tristate "Xilinx DPDMA Engine"
++	depends on HAS_IOMEM && OF
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+ 	help
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 1d8a3876b7452..5ba8e8bc609fc 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -110,6 +110,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
+ 		pasid = iommu_sva_get_pasid(sva);
+ 		if (pasid == IOMMU_PASID_INVALID) {
+ 			iommu_sva_unbind_device(sva);
++			rc = -EINVAL;
+ 			goto failed;
+ 		}
+ 
+diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
+index 27c07350971dd..375e7e647df6b 100644
+--- a/drivers/dma/mediatek/mtk-uart-apdma.c
++++ b/drivers/dma/mediatek/mtk-uart-apdma.c
+@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
+ 
+ static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
+ {
+-	struct dma_chan *chan = vd->tx.chan;
+-	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+-
+-	kfree(c->desc);
++	kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
+ }
+ 
+ static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
+@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
+ 
+ static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
+ {
+-	struct mtk_uart_apdma_desc *d = c->desc;
+-
+ 	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
+ 	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
+ 	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
+-
+-	list_del(&d->vd.node);
+-	vchan_cookie_complete(&d->vd);
+ }
+ 
+ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
+@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
+ 
+ 	c->rx_status = d->avail_len - cnt;
+ 	mtk_uart_apdma_write(c, VFF_RPT, wg);
++}
+ 
+-	list_del(&d->vd.node);
+-	vchan_cookie_complete(&d->vd);
++static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
++{
++	struct mtk_uart_apdma_desc *d = c->desc;
++
++	if (d) {
++		list_del(&d->vd.node);
++		vchan_cookie_complete(&d->vd);
++		c->desc = NULL;
++	}
+ }
+ 
+ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
+@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
+ 		mtk_uart_apdma_rx_handler(c);
+ 	else if (c->dir == DMA_MEM_TO_DEV)
+ 		mtk_uart_apdma_tx_handler(c);
++	mtk_uart_apdma_chan_complete_handler(c);
+ 	spin_unlock_irqrestore(&c->vc.lock, flags);
+ 
+ 	return IRQ_HANDLED;
+@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
+ 		return NULL;
+ 
+ 	/* Now allocate and setup the descriptor */
+-	d = kzalloc(sizeof(*d), GFP_ATOMIC);
++	d = kzalloc(sizeof(*d), GFP_NOWAIT);
+ 	if (!d)
+ 		return NULL;
+ 
+@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&c->vc.lock, flags);
+-	if (vchan_issue_pending(&c->vc)) {
++	if (vchan_issue_pending(&c->vc) && !c->desc) {
+ 		vd = vchan_next_desc(&c->vc);
+ 		c->desc = to_mtk_uart_apdma_desc(&vd->tx);
+ 
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index d530c1bf11d97..6885b3dcd7a97 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -1913,7 +1913,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
+ 
+ 	/* Enable runtime PM and initialize the device. */
+ 	pm_runtime_enable(&pdev->dev);
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
+ 		return ret;
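This pm_runtime_get_sync() to pm_runtime_resume_and_get() substitution repeats in the stm32-mdma and zynqmp_dma hunks below. The point is the failure path: pm_runtime_get_sync() increments the usage counter even when the resume fails, so returning early leaks a reference, whereas pm_runtime_resume_and_get() rebalances the counter itself on error. A minimal sketch around a hypothetical caller:

	#include <linux/pm_runtime.h>

	static int example_get(struct device *dev)
	{
		int ret;

		/* The old pattern left the usage count elevated on failure,
		 * so the error path needed pm_runtime_put_noidle(dev) by
		 * hand -- the step the code here was missing.
		 */
		ret = pm_runtime_resume_and_get(dev);
		if (ret < 0)
			return ret;	/* count already rebalanced */

		/* ... device guaranteed powered here ... */
		pm_runtime_put(dev);
		return 0;
	}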
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index 36ba8b43e78de..18cbd1e43c2e8 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -1452,7 +1452,7 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
+ 		return -ENOMEM;
+ 	}
+ 
+-	ret = pm_runtime_get_sync(dmadev->ddev.dev);
++	ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1718,7 +1718,7 @@ static int stm32_mdma_pm_suspend(struct device *dev)
+ 	u32 ccr, id;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
+index ff7dfb3fdeb47..6c709803203ad 100644
+--- a/drivers/dma/xilinx/xilinx_dpdma.c
++++ b/drivers/dma/xilinx/xilinx_dpdma.c
+@@ -113,6 +113,7 @@
+ #define XILINX_DPDMA_CH_VDO				0x020
+ #define XILINX_DPDMA_CH_PYLD_SZ				0x024
+ #define XILINX_DPDMA_CH_DESC_ID				0x028
++#define XILINX_DPDMA_CH_DESC_ID_MASK			GENMASK(15, 0)
+ 
+ /* DPDMA descriptor fields */
+ #define XILINX_DPDMA_DESC_CONTROL_PREEMBLE		0xa5
+@@ -866,7 +867,8 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+ 	 * will be used, but it should be enough.
+ 	 */
+ 	list_for_each_entry(sw_desc, &desc->descriptors, node)
+-		sw_desc->hw.desc_id = desc->vdesc.tx.cookie;
++		sw_desc->hw.desc_id = desc->vdesc.tx.cookie
++				    & XILINX_DPDMA_CH_DESC_ID_MASK;
+ 
+ 	sw_desc = list_first_entry(&desc->descriptors,
+ 				   struct xilinx_dpdma_sw_desc, node);
+@@ -1086,7 +1088,8 @@ static void xilinx_dpdma_chan_vsync_irq(struct  xilinx_dpdma_chan *chan)
+ 	if (!chan->running || !pending)
+ 		goto out;
+ 
+-	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);
++	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
++		& XILINX_DPDMA_CH_DESC_ID_MASK;
+ 
+ 	/* If the retrigger raced with vsync, retry at the next frame. */
+ 	sw_desc = list_first_entry(&pending->descriptors,
+diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
+index d8419565b92cc..5fecf5aa6e858 100644
+--- a/drivers/dma/xilinx/zynqmp_dma.c
++++ b/drivers/dma/xilinx/zynqmp_dma.c
+@@ -468,7 +468,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
+ 	struct zynqmp_dma_desc_sw *desc;
+ 	int i, ret;
+ 
+-	ret = pm_runtime_get_sync(chan->dev);
++	ret = pm_runtime_resume_and_get(chan->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index 1631727bf0da1..c7b5446d01fd2 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -1880,6 +1880,7 @@ static void gpio_v2_line_info_changed_to_v1(
+ 		struct gpio_v2_line_info_changed *lic_v2,
+ 		struct gpioline_info_changed *lic_v1)
+ {
++	memset(lic_v1, 0, sizeof(*lic_v1));
+ 	gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
+ 	lic_v1->timestamp = lic_v2->timestamp_ns;
+ 	lic_v1->event_type = lic_v2->event_type;
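The added memset() is an infoleak fix: the v1 struct is built on the kernel stack and later copied to userspace wholesale, so any padding bytes or fields the conversion does not assign would carry stale stack data across the boundary. The general pattern, sketched with a hypothetical user buffer:

	struct gpioline_info_changed lic_v1;

	memset(&lic_v1, 0, sizeof(lic_v1));	/* zero padding + unset fields */
	/* ... assign the individual fields ... */
	if (copy_to_user(ubuf, &lic_v1, sizeof(lic_v1)))
		return -EFAULT;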
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+index 47e0b48dc26fd..1c4623d25a62a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+@@ -214,9 +214,21 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
+ {
+ 	struct drm_gem_object *obj = attach->dmabuf->priv;
+ 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
++	int r;
+ 
+ 	/* pin buffer into GTT */
+-	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
++	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
++	if (r)
++		return r;
++
++	if (bo->tbo.moving) {
++		r = dma_fence_wait(bo->tbo.moving, true);
++		if (r) {
++			amdgpu_bo_unpin(bo);
++			return r;
++		}
++	}
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 72d23651501d4..2342c5d216f9b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -6769,12 +6769,8 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
+ 	if (ring->use_doorbell) {
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
+ 			(adev->doorbell_index.kiq * 2) << 2);
+-		/* If GC has entered CGPG, ringing doorbell > first page doesn't
+-		 * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
+-		 * this issue.
+-		 */
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+-			(adev->doorbell.size - 4));
++			(adev->doorbell_index.userqueue_end * 2) << 2);
+ 	}
+ 
+ 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 1fdfb7783404e..d2c020a91c0be 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3623,12 +3623,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
+ 	if (ring->use_doorbell) {
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
+ 					(adev->doorbell_index.kiq * 2) << 2);
+-		/* If GC has entered CGPG, ringing doorbell > first page doesn't
+-		 * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
+-		 * this issue.
+-		 */
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+-					(adev->doorbell.size - 4));
++					(adev->doorbell_index.userqueue_end * 2) << 2);
+ 	}
+ 
+ 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
+index f64e06e1067dd..96ea1a2c11dd6 100644
+--- a/drivers/gpu/drm/kmb/kmb_drv.c
++++ b/drivers/gpu/drm/kmb/kmb_drv.c
+@@ -137,6 +137,7 @@ static int kmb_hw_init(struct drm_device *drm, unsigned long flags)
+ 	/* Allocate LCD interrupt resources */
+ 	irq_lcd = platform_get_irq(pdev, 0);
+ 	if (irq_lcd < 0) {
++		ret = irq_lcd;
+ 		drm_err(&kmb->drm, "irq_lcd not found");
+ 		goto setup_fail;
+ 	}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
+index 347488685f745..60019d0532fcf 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
++++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
+@@ -93,7 +93,22 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
+ 	if (ret)
+ 		return -EINVAL;
+ 
+-	return 0;
++	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
++	if (ret)
++		goto error;
++
++	if (nvbo->bo.moving)
++		ret = dma_fence_wait(nvbo->bo.moving, true);
++
++	ttm_bo_unreserve(&nvbo->bo);
++	if (ret)
++		goto error;
++
++	return ret;
++
++error:
++	nouveau_bo_unpin(nvbo);
++	return ret;
+ }
+ 
+ void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
+diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
+index 42a87948e28c5..4a90807351e72 100644
+--- a/drivers/gpu/drm/radeon/radeon_prime.c
++++ b/drivers/gpu/drm/radeon/radeon_prime.c
+@@ -77,9 +77,19 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
+ 
+ 	/* pin buffer into GTT */
+ 	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+-	if (likely(ret == 0))
+-		bo->prime_shared_count++;
+-
++	if (unlikely(ret))
++		goto error;
++
++	if (bo->tbo.moving) {
++		ret = dma_fence_wait(bo->tbo.moving, false);
++		if (unlikely(ret)) {
++			radeon_bo_unpin(bo);
++			goto error;
++		}
++	}
++
++	bo->prime_shared_count++;
++error:
+ 	radeon_bo_unreserve(bo);
+ 	return ret;
+ }
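The amdgpu, nouveau and radeon hunks apply one fix in three dialects: before a pinned BO is exposed through PRIME/dma-buf, any in-flight move of that BO (the tbo.moving fence) must complete, or the importer could touch the buffer mid-migration. The details differ per driver -- amdgpu and nouveau wait interruptibly (second argument true), radeon does not, and nouveau additionally brackets the wait with ttm_bo_reserve()/ttm_bo_unreserve() -- but the common shape is this sketch, with driver-specific pin helpers stubbed out:

	ret = pin_bo(bo);			/* driver pin helper */
	if (ret)
		return ret;
	if (bo->tbo.moving) {
		ret = dma_fence_wait(bo->tbo.moving, intr);
		if (ret) {
			unpin_bo(bo);		/* unwind the pin on failure */
			return ret;
		}
	}
	return 0;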
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 1fda574579afc..8106b5634fe10 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -159,6 +159,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+ 	struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ 	bool connected = false;
+ 
++	WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
++
+ 	if (vc4_hdmi->hpd_gpio) {
+ 		if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
+ 		    vc4_hdmi->hpd_active_low)
+@@ -180,10 +182,12 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+ 			}
+ 		}
+ 
++		pm_runtime_put(&vc4_hdmi->pdev->dev);
+ 		return connector_status_connected;
+ 	}
+ 
+ 	cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
++	pm_runtime_put(&vc4_hdmi->pdev->dev);
+ 	return connector_status_disconnected;
+ }
+ 
+@@ -473,7 +477,6 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
+ 		   HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
+ 
+ 	clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+-	clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ 	clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ 
+ 	ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
+@@ -784,13 +787,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ 		return;
+ 	}
+ 
+-	ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+-	if (ret) {
+-		DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+-		clk_disable_unprepare(vc4_hdmi->pixel_clock);
+-		return;
+-	}
+-
+ 	vc4_hdmi_cec_update_clk_div(vc4_hdmi);
+ 
+ 	/*
+@@ -801,7 +797,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ 			       (hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000));
+ 	if (ret) {
+ 		DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+-		clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ 		clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ 		return;
+ 	}
+@@ -809,7 +804,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ 	ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
+ 	if (ret) {
+ 		DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+-		clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ 		clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ 		return;
+ 	}
+@@ -1929,6 +1923,29 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_PM
++static int vc4_hdmi_runtime_suspend(struct device *dev)
++{
++	struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
++
++	clk_disable_unprepare(vc4_hdmi->hsm_clock);
++
++	return 0;
++}
++
++static int vc4_hdmi_runtime_resume(struct device *dev)
++{
++	struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
++	int ret;
++
++	ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++#endif
++
+ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+ {
+ 	const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
+@@ -2165,11 +2182,18 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
+ 	{}
+ };
+ 
++static const struct dev_pm_ops vc4_hdmi_pm_ops = {
++	SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
++			   vc4_hdmi_runtime_resume,
++			   NULL)
++};
++
+ struct platform_driver vc4_hdmi_driver = {
+ 	.probe = vc4_hdmi_dev_probe,
+ 	.remove = vc4_hdmi_dev_remove,
+ 	.driver = {
+ 		.name = "vc4_hdmi",
+ 		.of_match_table = vc4_hdmi_dt_match,
++		.pm = &vc4_hdmi_pm_ops,
+ 	},
+ };
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index f9e1c2ceaac05..04a1e38f2a6f0 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -978,6 +978,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
+ 	}
+ 
+ out:
++	/* Unlock the SMBus device for use by BIOS/ACPI */
++	outb_p(SMBHSTSTS_INUSE_STS, SMBHSTSTS(priv));
++
+ 	pm_runtime_mark_last_busy(&priv->pci_dev->dev);
+ 	pm_runtime_put_autosuspend(&priv->pci_dev->dev);
+ 	mutex_unlock(&priv->acpi_lock);
+diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
+index a39f7d0927973..66dfa211e736b 100644
+--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
++++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
+@@ -83,7 +83,7 @@ static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ 			}
+ 		}
+ 
+-		ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
++		ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
+ 		if (ret) {
+ 			dev_err(&adapter->dev, "failure sending STOP\n");
+ 			return -EREMOTEIO;
+@@ -153,7 +153,7 @@ static int osif_probe(struct usb_interface *interface,
+ 	 * Set bus frequency. The frequency is:
+ 	 * 120,000,000 / ( 16 + 2 * div * 4^prescale).
+	 * Using div = 52, prescale = 0 gives 100 kHz */
+-	ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
++	ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
+ 			    NULL, 0);
+ 	if (ret) {
+ 		dev_err(&interface->dev, "failure sending bit rate");
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index 016a6106151a5..3f28eb4d17fe7 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -165,6 +165,7 @@ struct meson_host {
+ 
+ 	unsigned int bounce_buf_size;
+ 	void *bounce_buf;
++	void __iomem *bounce_iomem_buf;
+ 	dma_addr_t bounce_dma_addr;
+ 	struct sd_emmc_desc *descs;
+ 	dma_addr_t descs_dma_addr;
+@@ -745,6 +746,47 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
+ 	writel(start, host->regs + SD_EMMC_START);
+ }
+ 
++/* Local sg copy to/from buffer, using memcpy_toio/fromio for dram_access_quirk */
++static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
++				  size_t buflen, bool to_buffer)
++{
++	unsigned int sg_flags = SG_MITER_ATOMIC;
++	struct scatterlist *sgl = data->sg;
++	unsigned int nents = data->sg_len;
++	struct sg_mapping_iter miter;
++	unsigned int offset = 0;
++
++	if (to_buffer)
++		sg_flags |= SG_MITER_FROM_SG;
++	else
++		sg_flags |= SG_MITER_TO_SG;
++
++	sg_miter_start(&miter, sgl, nents, sg_flags);
++
++	while ((offset < buflen) && sg_miter_next(&miter)) {
++		unsigned int len;
++
++		len = min(miter.length, buflen - offset);
++
++		/* When dram_access_quirk is set, the bounce buffer is an iomem mapping */
++		if (host->dram_access_quirk) {
++			if (to_buffer)
++				memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
++			else
++				memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
++		} else {
++			if (to_buffer)
++				memcpy(host->bounce_buf + offset, miter.addr, len);
++			else
++				memcpy(miter.addr, host->bounce_buf + offset, len);
++		}
++
++		offset += len;
++	}
++
++	sg_miter_stop(&miter);
++}
++
+ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+ {
+ 	struct meson_host *host = mmc_priv(mmc);
+@@ -788,8 +830,7 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+ 		if (data->flags & MMC_DATA_WRITE) {
+ 			cmd_cfg |= CMD_CFG_DATA_WR;
+ 			WARN_ON(xfer_bytes > host->bounce_buf_size);
+-			sg_copy_to_buffer(data->sg, data->sg_len,
+-					  host->bounce_buf, xfer_bytes);
++			meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+ 			dma_wmb();
+ 		}
+ 
+@@ -958,8 +999,7 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
+ 	if (meson_mmc_bounce_buf_read(data)) {
+ 		xfer_bytes = data->blksz * data->blocks;
+ 		WARN_ON(xfer_bytes > host->bounce_buf_size);
+-		sg_copy_from_buffer(data->sg, data->sg_len,
+-				    host->bounce_buf, xfer_bytes);
++		meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+ 	}
+ 
+ 	next_cmd = meson_mmc_get_next_command(cmd);
+@@ -1179,7 +1219,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
+ 		 * instead of the DDR memory
+ 		 */
+ 		host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
+-		host->bounce_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
++		host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
+ 		host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
+ 	} else {
+ 		/* data bounce buffer */
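meson_mmc_copy_buffer() is the stock scatterlist-iterator idiom: sg_miter_start()/sg_miter_next() map one segment at a time (SG_MITER_ATOMIC, so it is usable from the IRQ thread), the direction flag picks which side is the source, and the quirk branch merely swaps memcpy() for memcpy_toio()/memcpy_fromio(), since an __iomem pointer must not be dereferenced through a plain memcpy(). The loop reduced to a reusable form over an ordinary buffer, as a sketch:

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	static void sg_copy_all(struct scatterlist *sgl, unsigned int nents,
				void *buf, size_t buflen, bool to_buf)
	{
		unsigned int flags = SG_MITER_ATOMIC |
				     (to_buf ? SG_MITER_FROM_SG : SG_MITER_TO_SG);
		struct sg_mapping_iter miter;
		size_t off = 0;

		sg_miter_start(&miter, sgl, nents, flags);
		while (off < buflen && sg_miter_next(&miter)) {
			size_t len = min(miter.length, buflen - off);

			if (to_buf)
				memcpy(buf + off, miter.addr, len);
			else
				memcpy(miter.addr, buf + off, len);
			off += len;
		}
		sg_miter_stop(&miter);
	}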
+diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
+index 9f30748da4ab9..8c38f224becbc 100644
+--- a/drivers/net/caif/caif_serial.c
++++ b/drivers/net/caif/caif_serial.c
+@@ -350,6 +350,7 @@ static int ldisc_open(struct tty_struct *tty)
+ 	rtnl_lock();
+ 	result = register_netdevice(dev);
+ 	if (result) {
++		tty_kref_put(tty);
+ 		rtnl_unlock();
+ 		free_netdev(dev);
+ 		return -ENODEV;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+index 17d5b649eb36b..e81dd34a3cac2 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+@@ -1266,9 +1266,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ 		p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
+ 
+ 	p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
++	BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
++		     sizeof(p_hwfn->p_dcbx_info->set.config.params));
+ 	memcpy(&p_hwfn->p_dcbx_info->set.config.params,
+ 	       &dcbx_info->operational.params,
+-	       sizeof(struct qed_dcbx_admin_params));
++	       sizeof(p_hwfn->p_dcbx_info->set.config.params));
+ 	p_hwfn->p_dcbx_info->set.config.valid = true;
+ 
+ 	memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
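The BUILD_BUG_ON converts a latent size mismatch into a compile failure: the old memcpy() was sized by struct qed_dcbx_admin_params while writing into set.config.params, so if the two types ever diverged the copy would silently overrun. Sizing the copy by the destination and asserting that the source agrees is the reusable idiom:

	BUILD_BUG_ON(sizeof(src->params) != sizeof(dst->params));
	memcpy(&dst->params, &src->params, sizeof(dst->params));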
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 1df2c002c9f64..f7a56e05ec8a4 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -1673,7 +1673,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+ {
+ 	switch(stringset) {
+ 	case ETH_SS_STATS:
+-		memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
++		memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
+ 		break;
+ 	}
+ }
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index f029c7c03804f..393cf99856ed3 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -2287,7 +2287,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+ {
+ 	switch (stringset) {
+ 	case ETH_SS_STATS:
+-		memcpy(data, *sh_eth_gstrings_stats,
++		memcpy(data, sh_eth_gstrings_stats,
+ 		       sizeof(sh_eth_gstrings_stats));
+ 		break;
+ 	}
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 01bb36e7cff0a..6bd3a389d389c 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -774,12 +774,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
+ 	stat = be32_to_cpu(cur_p->app0);
+ 
+ 	while (stat & STS_CTRL_APP0_CMPLT) {
++		/* Make sure that the other fields are read after bd is
++		 * released by dma
++		 */
++		rmb();
+ 		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+ 				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
+ 		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
+ 		if (skb)
+ 			dev_consume_skb_irq(skb);
+-		cur_p->app0 = 0;
+ 		cur_p->app1 = 0;
+ 		cur_p->app2 = 0;
+ 		cur_p->app3 = 0;
+@@ -788,6 +791,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
+ 		ndev->stats.tx_packets++;
+ 		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
+ 
++		/* app0 must be visible last, as it is used to flag
++		 * availability of the bd
++		 */
++		smp_mb();
++		cur_p->app0 = 0;
++
+ 		lp->tx_bd_ci++;
+ 		if (lp->tx_bd_ci >= lp->tx_bd_num)
+ 			lp->tx_bd_ci = 0;
+@@ -814,6 +823,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
+ 		if (cur_p->app0)
+ 			return NETDEV_TX_BUSY;
+ 
++		/* Make sure to read next bd app0 after this one */
++		rmb();
++
+ 		tail++;
+ 		if (tail >= lp->tx_bd_num)
+ 			tail = 0;
+@@ -930,6 +942,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 	wmb();
+ 	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+ 
++	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
++		netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
++		netif_stop_queue(ndev);
++	}
++
+ 	return NETDEV_TX_OK;
+ }
+ 
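The three barriers added to ll_temac guard the descriptor-ring handshake in which cur_p->app0 is the ownership flag shared with the DMA engine: the rmb() calls keep the CPU from reading the other descriptor fields (or the next descriptor's flag) before app0 itself has been observed, and the smp_mb() before clearing app0 makes sure all reads of the completed descriptor are done before the slot is republished. Stripped to its essentials, as a sketch with desc->owned standing in for app0:

	/* completion path: consume only descriptors the device released */
	while (READ_ONCE(desc->owned) & DESC_DONE) {
		rmb();				/* payload reads after the flag */
		consume(desc);

		smp_mb();			/* finish reads before release */
		WRITE_ONCE(desc->owned, 0);	/* hand the slot back */
		desc = ring_next(desc);
	}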
+diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
+index 9bd9a5c0b1db3..6bbc81ad295fb 100644
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -826,16 +826,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
+ {
+ 	int err;
+ 
+-	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
++	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
+ 	if (err < 0)
+ 		return err;
+ 
+ 	usleep_range(10, 20);
+ 
+-	/* After reset FORCE_LINK_GOOD bit is set. Although the
+-	 * default value should be unset. Disable FORCE_LINK_GOOD
+-	 * for the phy to work properly.
+-	 */
+ 	return phy_modify(phydev, MII_DP83867_PHYCTRL,
+ 			 DP83867_PHYCR_FORCE_LINK_GOOD, 0);
+ }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 6700f1970b240..bc55ec739af90 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -575,7 +575,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 
+ 	if (info->flags & QMI_WWAN_FLAG_PASS_THROUGH) {
+ 		skb->protocol = htons(ETH_P_MAP);
+-		return (netif_rx(skb) == NET_RX_SUCCESS);
++		return 1;
+ 	}
+ 
+ 	switch (skb->data[0] & 0xf0) {
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 20fb5638ac653..23fae943a1192 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -6078,7 +6078,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+ {
+ 	switch (stringset) {
+ 	case ETH_SS_STATS:
+-		memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
++		memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
+ 		break;
+ 	}
+ }
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index fa7d4c20dc13a..30b39cb4056a3 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -1693,8 +1693,13 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
+ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
+ {
+ 	struct mac80211_hwsim_data *data = hw->priv;
++
+ 	data->started = false;
+ 	hrtimer_cancel(&data->beacon_timer);
++
++	while (!skb_queue_empty(&data->pending))
++		ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
++
+ 	wiphy_dbg(hw->wiphy, "%s\n", __func__);
+ }
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index e4d4e399004b4..16a17215f633d 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1870,11 +1870,21 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
+ 	int err;
+ 	int i, bars = 0;
+ 
+-	if (atomic_inc_return(&dev->enable_cnt) > 1) {
+-		pci_update_current_state(dev, dev->current_state);
+-		return 0;		/* already enabled */
++	/*
++	 * Power state could be unknown at this point, either due to a fresh
++	 * boot or a device removal call.  Read the current power state
++	 * so that things like MSI message writing behave as expected
++	 * (e.g. if the device really is in D0 at enable time).
++	 */
++	if (dev->pm_cap) {
++		u16 pmcsr;
++		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
++		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ 	}
+ 
++	if (atomic_inc_return(&dev->enable_cnt) > 1)
++		return 0;		/* already enabled */
++
+ 	bridge = pci_upstream_bridge(dev);
+ 	if (bridge)
+ 		pci_enable_bridge(bridge);
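The new block resynchronizes dev->current_state with the hardware before the enable count is bumped, instead of trusting a cached value that can be stale after boot or a remove/rescan cycle. PCI_PM_CTRL is the PMCSR register in the power-management capability, and its two low bits (PCI_PM_CTRL_STATE_MASK) encode the D-state:

	u16 pmcsr;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* PCI_PM_CTRL_STATE_MASK = 0x3: 0 = D0, 1 = D1, 2 = D2, 3 = D3hot */
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;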
+diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
+index c12fa57ebd12c..165cb7a597155 100644
+--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
++++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
+@@ -845,8 +845,10 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
+ 	i = 0;
+ 	device_for_each_child_node(dev, fwnode) {
+ 		ret = microchip_sgpio_register_bank(dev, priv, fwnode, i++);
+-		if (ret)
++		if (ret) {
++			fwnode_handle_put(fwnode);
+ 			return ret;
++		}
+ 	}
+ 
+ 	if (priv->in.gpio.ngpio != priv->out.gpio.ngpio) {
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index 7d9bdedcd71bb..3af4430543dca 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -1229,7 +1229,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
+ 	struct device *dev = pctl->dev;
+ 	struct resource res;
+ 	int npins = STM32_GPIO_PINS_PER_BANK;
+-	int bank_nr, err;
++	int bank_nr, err, i = 0;
+ 
+ 	if (!IS_ERR(bank->rstc))
+ 		reset_control_deassert(bank->rstc);
+@@ -1251,9 +1251,14 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
+ 
+ 	of_property_read_string(np, "st,bank-name", &bank->gpio_chip.label);
+ 
+-	if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args)) {
++	if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, i, &args)) {
+ 		bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
+ 		bank->gpio_chip.base = args.args[1];
++
++		npins = args.args[2];
++		while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
++							 ++i, &args))
++			npins += args.args[2];
+ 	} else {
+ 		bank_nr = pctl->nbanks;
+ 		bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index ed0b1bb99f083..a0356f3707b86 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1387,6 +1387,22 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
+ 	}
+ }
+ 
++static bool sd_need_revalidate(struct block_device *bdev,
++		struct scsi_disk *sdkp)
++{
++	if (sdkp->device->removable || sdkp->write_prot) {
++		if (bdev_check_media_change(bdev))
++			return true;
++	}
++
++	/*
++	 * Force a full rescan after ioctl(BLKRRPART).  While the disk state has
++	 * nothing to do with partitions, BLKRRPART is used to force a full
++	 * revalidate after things like a format for historical reasons.
++	 */
++	return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
++}
++
+ /**
+  *	sd_open - open a scsi disk device
+  *	@bdev: Block device of the scsi disk to open
+@@ -1423,10 +1439,8 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
+ 	if (!scsi_block_when_processing_errors(sdev))
+ 		goto error_out;
+ 
+-	if (sdev->removable || sdkp->write_prot) {
+-		if (bdev_check_media_change(bdev))
+-			sd_revalidate_disk(bdev->bd_disk);
+-	}
++	if (sd_need_revalidate(bdev, sdkp))
++		sd_revalidate_disk(bdev->bd_disk);
+ 
+ 	/*
+ 	 * If the drive is empty, just let the open fail.
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index ab9035662717a..bcc0b5a3a459c 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -1033,12 +1033,6 @@ static int nxp_fspi_probe(struct platform_device *pdev)
+ 		goto err_put_ctrl;
+ 	}
+ 
+-	/* Clear potential interrupts */
+-	reg = fspi_readl(f, f->iobase + FSPI_INTR);
+-	if (reg)
+-		fspi_writel(f, reg, f->iobase + FSPI_INTR);
+-
+-
+ 	/* find the resources - controller memory mapped space */
+ 	if (is_acpi_node(f->dev->fwnode))
+ 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+@@ -1076,6 +1070,11 @@ static int nxp_fspi_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
++	/* Clear potential interrupts */
++	reg = fspi_readl(f, f->iobase + FSPI_INTR);
++	if (reg)
++		fspi_writel(f, reg, f->iobase + FSPI_INTR);
++
+ 	/* find the irq */
+ 	ret = platform_get_irq(pdev, 0);
+ 	if (ret < 0)
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 7bbfd58958bcc..d7e361fb05482 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -642,6 +642,9 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
+ 	}
+ 
+ 	info->eoi_time = 0;
++
++	/* is_active hasn't been reset yet, do it now. */
++	smp_store_release(&info->is_active, 0);
+ 	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
+ }
+ 
+@@ -811,6 +814,7 @@ static void xen_evtchn_close(evtchn_port_t port)
+ 		BUG();
+ }
+ 
++/* Not called for lateeoi events. */
+ static void event_handler_exit(struct irq_info *info)
+ {
+ 	smp_store_release(&info->is_active, 0);
+@@ -1883,7 +1887,12 @@ static void lateeoi_ack_dynirq(struct irq_data *data)
+ 
+ 	if (VALID_EVTCHN(evtchn)) {
+ 		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
+-		event_handler_exit(info);
++		/*
++		 * Don't call event_handler_exit().
++		 * Need to keep is_active non-zero in order to ignore re-raised
++		 * events after cpu affinity changes while a lateeoi is pending.
++		 */
++		clear_evtchn(evtchn);
+ 	}
+ }
+ 
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 26e66436f0058..c000fe338f7e0 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -1302,6 +1302,45 @@ ceph_find_incompatible(struct page *page)
+ 	return NULL;
+ }
+ 
++/**
++ * skip_page_read - prep a page for writing without reading first
++ * @page: page being prepared
++ * @pos: starting position for the write
++ * @len: length of write
++ *
++ * In some cases, write_begin doesn't need to read at all:
++ * - full page write
++ * - file is currently zero-length
++ * - write that lies in a page that is completely beyond EOF
++ * - write that covers the page from start to EOF or beyond it
++ *
++ * If any of these criteria are met, then zero out the unwritten parts
++ * of the page and return true. Otherwise, return false.
++ */
++static bool skip_page_read(struct page *page, loff_t pos, size_t len)
++{
++	struct inode *inode = page->mapping->host;
++	loff_t i_size = i_size_read(inode);
++	size_t offset = offset_in_page(pos);
++
++	/* Full page write */
++	if (offset == 0 && len >= PAGE_SIZE)
++		return true;
++
++	/* pos beyond last page in the file */
++	if (pos - offset >= i_size)
++		goto zero_out;
++
++	/* write that covers the whole page from start to EOF or beyond it */
++	if (offset == 0 && (pos + len) >= i_size)
++		goto zero_out;
++
++	return false;
++zero_out:
++	zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
++	return true;
++}
++
+ /*
+  * We are only allowed to write into/dirty the page if the page is
+  * clean, or already dirty within the same snap context.
+@@ -1315,7 +1354,6 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
+ 	struct ceph_snap_context *snapc;
+ 	struct page *page = NULL;
+ 	pgoff_t index = pos >> PAGE_SHIFT;
+-	int pos_in_page = pos & ~PAGE_MASK;
+ 	int r = 0;
+ 
+ 	dout("write_begin file %p inode %p page %p %d~%d\n", file, inode, page, (int)pos, (int)len);
+@@ -1350,19 +1388,9 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
+ 			break;
+ 		}
+ 
+-		/*
+-		 * In some cases we don't need to read at all:
+-		 * - full page write
+-		 * - write that lies completely beyond EOF
+-		 * - write that covers the the page from start to EOF or beyond it
+-		 */
+-		if ((pos_in_page == 0 && len == PAGE_SIZE) ||
+-		    (pos >= i_size_read(inode)) ||
+-		    (pos_in_page == 0 && (pos + len) >= i_size_read(inode))) {
+-			zero_user_segments(page, 0, pos_in_page,
+-					   pos_in_page + len, PAGE_SIZE);
++		/* No need to read in some cases */
++		if (skip_page_read(page, pos, len))
+ 			break;
+-		}
+ 
+ 		/*
+ 		 * We need to read it. If we get back -EINPROGRESS, then the page was
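A worked case for skip_page_read(), assuming 4 KiB pages: with i_size = 10000 the file's data ends inside page 2 (bytes 8192-12287). A write of len = 50 at pos = 12300 has offset = 12, and pos - offset = 12288 >= 10000, so the page lies wholly beyond EOF; zero_user_segments() clears bytes [0, 12) and [62, 4096) and the function returns true, letting write_begin skip the read. A full-page write (offset == 0, len >= PAGE_SIZE) returns true without zeroing anything, since every byte will be overwritten anyway.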
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 209535d5b8d38..3d2e3dd4ee01d 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -578,6 +578,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
+ 	struct ceph_inode_info *ci = ceph_inode(dir);
+ 	struct inode *inode;
+ 	struct timespec64 now;
++	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ 	struct ceph_vino vino = { .ino = req->r_deleg_ino,
+ 				  .snap = CEPH_NOSNAP };
+ 
+@@ -615,8 +616,10 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
+ 
+ 	ceph_file_layout_to_legacy(lo, &in.layout);
+ 
++	down_read(&mdsc->snap_rwsem);
+ 	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
+ 			      req->r_fmode, NULL);
++	up_read(&mdsc->snap_rwsem);
+ 	if (ret) {
+ 		dout("%s failed to fill inode: %d\n", __func__, ret);
+ 		ceph_dir_clear_complete(dir);
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 179d2ef69a24a..7ee6023adb363 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -762,6 +762,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
+ 	bool new_version = false;
+ 	bool fill_inline = false;
+ 
++	lockdep_assert_held(&mdsc->snap_rwsem);
++
+ 	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
+ 	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
+ 	     ci->i_version);
+diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
+index 303d71430bdd1..9c6c0e2e5880a 100644
+--- a/fs/nilfs2/sysfs.c
++++ b/fs/nilfs2/sysfs.c
+@@ -1053,6 +1053,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
+ 	nilfs_sysfs_delete_superblock_group(nilfs);
+ 	nilfs_sysfs_delete_segctor_group(nilfs);
+ 	kobject_del(&nilfs->ns_dev_kobj);
++	kobject_put(&nilfs->ns_dev_kobj);
+ 	kfree(nilfs->ns_dev_subgroups);
+ }
+ 
+diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
+index fb8b07daa9d15..875e002a41804 100644
+--- a/include/keys/system_keyring.h
++++ b/include/keys/system_keyring.h
+@@ -31,6 +31,7 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
+ #define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
+ #endif
+ 
++extern struct pkcs7_message *pkcs7;
+ #ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING
+ extern int mark_hash_blacklisted(const char *hash);
+ extern int is_hash_blacklisted(const u8 *hash, size_t hash_len,
+@@ -49,6 +50,20 @@ static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
+ }
+ #endif
+ 
++#ifdef CONFIG_SYSTEM_REVOCATION_LIST
++extern int add_key_to_revocation_list(const char *data, size_t size);
++extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7);
++#else
++static inline int add_key_to_revocation_list(const char *data, size_t size)
++{
++	return 0;
++}
++static inline int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
++{
++	return -ENOKEY;
++}
++#endif
++
+ #ifdef CONFIG_IMA_BLACKLIST_KEYRING
+ extern struct key *ima_blacklist_keyring;
+ 
+diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
+index 2915f56ad4214..edb5c186b0b7a 100644
+--- a/include/linux/debug_locks.h
++++ b/include/linux/debug_locks.h
+@@ -27,8 +27,10 @@ extern int debug_locks_off(void);
+ 	int __ret = 0;							\
+ 									\
+ 	if (!oops_in_progress && unlikely(c)) {				\
++		instrumentation_begin();				\
+ 		if (debug_locks_off() && !debug_locks_silent)		\
+ 			WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c);		\
++		instrumentation_end();					\
+ 		__ret = 1;						\
+ 	}								\
+ 	__ret;								\
+diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
+index ba973efcd3692..6686a0baa91d3 100644
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -289,6 +289,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
+ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+ 
+ extern struct page *huge_zero_page;
++extern unsigned long huge_zero_pfn;
+ 
+ static inline bool is_huge_zero_page(struct page *page)
+ {
+@@ -297,7 +298,7 @@ static inline bool is_huge_zero_page(struct page *page)
+ 
+ static inline bool is_huge_zero_pmd(pmd_t pmd)
+ {
+-	return is_huge_zero_page(pmd_page(pmd));
++	return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
+ }
+ 
+ static inline bool is_huge_zero_pud(pud_t pud)
+@@ -443,6 +444,11 @@ static inline bool is_huge_zero_page(struct page *page)
+ 	return false;
+ }
+ 
++static inline bool is_huge_zero_pmd(pmd_t pmd)
++{
++	return false;
++}
++
+ static inline bool is_huge_zero_pud(pud_t pud)
+ {
+ 	return false;
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 5dae4187210d9..28fa3f9bbbfdd 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -728,17 +728,6 @@ static inline int hstate_index(struct hstate *h)
+ 	return h - hstates;
+ }
+ 
+-pgoff_t __basepage_index(struct page *page);
+-
+-/* Return page->index in PAGE_SIZE units */
+-static inline pgoff_t basepage_index(struct page *page)
+-{
+-	if (!PageCompound(page))
+-		return page->index;
+-
+-	return __basepage_index(page);
+-}
+-
+ extern int dissolve_free_huge_page(struct page *page);
+ extern int dissolve_free_huge_pages(unsigned long start_pfn,
+ 				    unsigned long end_pfn);
+@@ -969,11 +958,6 @@ static inline int hstate_index(struct hstate *h)
+ 	return 0;
+ }
+ 
+-static inline pgoff_t basepage_index(struct page *page)
+-{
+-	return page->index;
+-}
+-
+ static inline int dissolve_free_huge_page(struct page *page)
+ {
+ 	return 0;
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 6c1b29bb35636..cfb0842a7fb96 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1680,6 +1680,7 @@ struct zap_details {
+ 	struct address_space *check_mapping;	/* Check page->mapping if set */
+ 	pgoff_t	first_index;			/* Lowest page->index to unmap */
+ 	pgoff_t last_index;			/* Highest page->index to unmap */
++	struct page *single_page;		/* Locked page to be unmapped */
+ };
+ 
+ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -1727,6 +1728,7 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
+ extern int fixup_user_fault(struct mm_struct *mm,
+ 			    unsigned long address, unsigned int fault_flags,
+ 			    bool *unlocked);
++void unmap_mapping_page(struct page *page);
+ void unmap_mapping_pages(struct address_space *mapping,
+ 		pgoff_t start, pgoff_t nr, bool even_cows);
+ void unmap_mapping_range(struct address_space *mapping,
+@@ -1747,6 +1749,7 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
+ 	BUG();
+ 	return -EFAULT;
+ }
++static inline void unmap_mapping_page(struct page *page) { }
+ static inline void unmap_mapping_pages(struct address_space *mapping,
+ 		pgoff_t start, pgoff_t nr, bool even_cows) { }
+ static inline void unmap_mapping_range(struct address_space *mapping,
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 8c9947fd62f30..e0023e5f9aa67 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -501,7 +501,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
+ }
+ 
+ /*
+- * Get index of the page with in radix-tree
++ * Get index of the page within the radix-tree (but not for hugetlb pages).
+  * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
+  */
+ static inline pgoff_t page_to_index(struct page *page)
+@@ -520,15 +520,16 @@ static inline pgoff_t page_to_index(struct page *page)
+ 	return pgoff;
+ }
+ 
++extern pgoff_t hugetlb_basepage_index(struct page *page);
++
+ /*
+- * Get the offset in PAGE_SIZE.
+- * (TODO: hugepage should have ->index in PAGE_SIZE)
++ * Get the offset in PAGE_SIZE (even for hugetlb pages).
++ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
+  */
+ static inline pgoff_t page_to_pgoff(struct page *page)
+ {
+-	if (unlikely(PageHeadHuge(page)))
+-		return page->index << compound_order(page);
+-
++	if (unlikely(PageHuge(page)))
++		return hugetlb_basepage_index(page);
+ 	return page_to_index(page);
+ }
+ 
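The PageHeadHuge() to PageHuge() switch is the functional change: hugetlb tail pages previously fell through to page_to_index(), which yields offsets in compound-page units, while callers such as the futex key code (see the kernel/futex.c hunk below) need PAGE_SIZE units for any sub-page. As a worked example with a 2 MiB hugetlb page (order 9) mapped at file offset 4 MiB: head->index is 2 in huge-page units, so hugetlb_basepage_index() returns 2 << 9 = 1024 for the head page and 1024 + n for the n-th tail page, i.e. the offset in 4 KiB units.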
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index def5c62c93b3b..8d04e7deedc66 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -91,6 +91,7 @@ enum ttu_flags {
+ 
+ 	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
+ 	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
++	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
+ 	TTU_IGNORE_HWPOISON	= 0x20,	/* corrupted page is recoverable */
+ 	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
+ 					 * and caller guarantees they will
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 62e3811e95a78..b9bdeca1d784f 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1928,7 +1928,8 @@ static inline u32 net_tx_rndhash(void)
+ 
+ static inline void sk_set_txhash(struct sock *sk)
+ {
+-	sk->sk_txhash = net_tx_rndhash();
++	/* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
++	WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
+ }
+ 
+ static inline bool sk_rethink_txhash(struct sock *sk)
+@@ -2200,9 +2201,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
+ 
+ static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
+ {
+-	if (sk->sk_txhash) {
++	/* This pairs with WRITE_ONCE() in sk_set_txhash() */
++	u32 txhash = READ_ONCE(sk->sk_txhash);
++
++	if (txhash) {
+ 		skb->l4_hash = 1;
+-		skb->hash = sk->sk_txhash;
++		skb->hash = txhash;
+ 	}
+ }
+ 
+@@ -2260,8 +2264,13 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
+ static inline int sock_error(struct sock *sk)
+ {
+ 	int err;
+-	if (likely(!sk->sk_err))
++
++	/* Avoid an atomic operation for the common case.
++	 * This is racy since another cpu/thread can change sk_err under us.
++	 */
++	if (likely(data_race(!sk->sk_err)))
+ 		return 0;
++
+ 	err = xchg(&sk->sk_err, 0);
+ 	return -err;
+ }
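sk_txhash is read and written locklessly from different tasks, so both sides get annotated: WRITE_ONCE() in the writer pairs with a single READ_ONCE() into a local in the reader, which also closes the check-then-use window where sk->sk_txhash could change between the test and the assignment. The sock_error() hunk uses the weaker data_race() instead, because that read is a deliberate, benign race serving only as a fast-path hint before the xchg(). The read-side pattern in isolation:

	u32 txhash = READ_ONCE(sk->sk_txhash);	/* load exactly once */

	if (txhash) {				/* test and use the same value */
		skb->l4_hash = 1;
		skb->hash = txhash;
	}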
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index fe4c01c14ab2c..e96f3808e4316 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -724,11 +724,17 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
+ 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+ 	size_t orig_size = io_tlb_orig_size[index];
+ 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
++	unsigned int tlb_offset;
+ 
+ 	if (orig_addr == INVALID_PHYS_ADDR)
+ 		return;
+ 
+-	validate_sync_size_and_truncate(hwdev, orig_size, &size);
++	tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
++		     swiotlb_align_offset(hwdev, orig_addr);
++
++	orig_addr += tlb_offset;
++
++	validate_sync_size_and_truncate(hwdev, orig_size - tlb_offset, &size);
+ 
+ 	switch (target) {
+ 	case SYNC_FOR_CPU:
+diff --git a/kernel/futex.c b/kernel/futex.c
+index a8629b695d38e..5aa6d0a6c7677 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -35,7 +35,6 @@
+ #include <linux/jhash.h>
+ #include <linux/pagemap.h>
+ #include <linux/syscalls.h>
+-#include <linux/hugetlb.h>
+ #include <linux/freezer.h>
+ #include <linux/memblock.h>
+ #include <linux/fault-inject.h>
+@@ -650,7 +649,7 @@ again:
+ 
+ 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+ 		key->shared.i_seq = get_inode_sequence_number(inode);
+-		key->shared.pgoff = basepage_index(tail);
++		key->shared.pgoff = page_to_pgoff(tail);
+ 		rcu_read_unlock();
+ 	}
+ 
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 6d3c488a0f824..4fdf2bd9b5589 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -1092,8 +1092,38 @@ void kthread_flush_work(struct kthread_work *work)
+ EXPORT_SYMBOL_GPL(kthread_flush_work);
+ 
+ /*
+- * This function removes the work from the worker queue. Also it makes sure
+- * that it won't get queued later via the delayed work's timer.
++ * Make sure that the timer is neither set nor running, so that it can
++ * no longer manipulate the work's list_head.
++ *
++ * The function is called under worker->lock. The lock is temporarily
++ * released, but the timer can't be set again in the meantime.
++ */
++static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
++					      unsigned long *flags)
++{
++	struct kthread_delayed_work *dwork =
++		container_of(work, struct kthread_delayed_work, work);
++	struct kthread_worker *worker = work->worker;
++
++	/*
++	 * del_timer_sync() must be called to make sure that the timer
++	 * callback is not running. The lock must be temporarily released
++	 * to avoid a deadlock with the callback. In the meantime,
++	 * any queuing is blocked by setting the canceling counter.
++	 */
++	work->canceling++;
++	raw_spin_unlock_irqrestore(&worker->lock, *flags);
++	del_timer_sync(&dwork->timer);
++	raw_spin_lock_irqsave(&worker->lock, *flags);
++	work->canceling--;
++}
++
++/*
++ * This function removes the work from the worker queue.
++ *
++ * It is called under worker->lock. The caller must make sure that
++ * the timer used by delayed work is not running, e.g. by calling
++ * kthread_cancel_delayed_work_timer().
+  *
+  * The work might still be in use when this function finishes. See the
+ * current_work being processed by the worker.
+@@ -1101,28 +1131,8 @@ EXPORT_SYMBOL_GPL(kthread_flush_work);
+  * Return: %true if @work was pending and successfully canceled,
+  *	%false if @work was not pending
+  */
+-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
+-				  unsigned long *flags)
++static bool __kthread_cancel_work(struct kthread_work *work)
+ {
+-	/* Try to cancel the timer if exists. */
+-	if (is_dwork) {
+-		struct kthread_delayed_work *dwork =
+-			container_of(work, struct kthread_delayed_work, work);
+-		struct kthread_worker *worker = work->worker;
+-
+-		/*
+-		 * del_timer_sync() must be called to make sure that the timer
+-		 * callback is not running. The lock must be temporary released
+-		 * to avoid a deadlock with the callback. In the meantime,
+-		 * any queuing is blocked by setting the canceling counter.
+-		 */
+-		work->canceling++;
+-		raw_spin_unlock_irqrestore(&worker->lock, *flags);
+-		del_timer_sync(&dwork->timer);
+-		raw_spin_lock_irqsave(&worker->lock, *flags);
+-		work->canceling--;
+-	}
+-
+ 	/*
+ 	 * Try to remove the work from a worker list. It might either
+ 	 * be from worker->work_list or from worker->delayed_work_list.
+@@ -1175,11 +1185,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
+ 	/* Work must not be used with >1 worker, see kthread_queue_work() */
+ 	WARN_ON_ONCE(work->worker != worker);
+ 
+-	/* Do not fight with another command that is canceling this work. */
++	/*
++	 * Temporarily cancel the work, but do not fight with another command
++	 * that is canceling the work as well.
++	 *
++	 * It is a bit tricky because of possible races with other
++	 * mod_delayed_work() and cancel_delayed_work() callers.
++	 *
++	 * The timer must be canceled first because worker->lock is released
++	 * when doing so. But the work can be removed from the queue (list)
++	 * only when it can be queued again so that the return value can
++	 * be used for reference counting.
++	 */
++	kthread_cancel_delayed_work_timer(work, &flags);
+ 	if (work->canceling)
+ 		goto out;
++	ret = __kthread_cancel_work(work);
+ 
+-	ret = __kthread_cancel_work(work, true, &flags);
+ fast_queue:
+ 	__kthread_queue_delayed_work(worker, dwork, delay);
+ out:
+@@ -1201,7 +1223,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
+ 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
+ 	WARN_ON_ONCE(work->worker != worker);
+ 
+-	ret = __kthread_cancel_work(work, is_dwork, &flags);
++	if (is_dwork)
++		kthread_cancel_delayed_work_timer(work, &flags);
++
++	ret = __kthread_cancel_work(work);
+ 
+ 	if (worker->current_work != work)
+ 		goto out_fast;
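The split of __kthread_cancel_work() exists so that kthread_mod_delayed_work() can cancel the timer before looking at work->canceling: del_timer_sync() has to drop worker->lock (the timer callback takes it), and in the old ordering that window opened after the canceling check, letting a concurrent canceler race in. The canceling++/-- bracket is what keeps the work from being requeued while the lock is released:

	/* called with worker->lock held; *flags from the caller's lock */
	work->canceling++;			/* block requeueing */
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);		/* may wait for the callback */
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;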
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index f39c383c71804..5bf6b1659215d 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -842,7 +842,7 @@ static int count_matching_names(struct lock_class *new_class)
+ }
+ 
+ /* used from NMI context -- must be lockless */
+-static __always_inline struct lock_class *
++static noinstr struct lock_class *
+ look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
+ {
+ 	struct lockdep_subclass_key *key;
+@@ -850,12 +850,14 @@ look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
+ 	struct lock_class *class;
+ 
+ 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
++		instrumentation_begin();
+ 		debug_locks_off();
+ 		printk(KERN_ERR
+ 			"BUG: looking up invalid subclass: %u\n", subclass);
+ 		printk(KERN_ERR
+ 			"turning off the locking correctness validator.\n");
+ 		dump_stack();
++		instrumentation_end();
+ 		return NULL;
+ 	}
+ 
+diff --git a/kernel/module.c b/kernel/module.c
+index 30479355ab850..260d6f3f6d68f 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -266,9 +266,18 @@ static void module_assert_mutex_or_preempt(void)
+ #endif
+ }
+ 
++#ifdef CONFIG_MODULE_SIG
+ static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
+ module_param(sig_enforce, bool_enable_only, 0644);
+ 
++void set_module_sig_enforced(void)
++{
++	sig_enforce = true;
++}
++#else
++#define sig_enforce false
++#endif
++
+ /*
+  * Export sig_enforce kernel cmdline parameter to allow other subsystems rely
+  * on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
+@@ -279,11 +288,6 @@ bool is_module_sig_enforced(void)
+ }
+ EXPORT_SYMBOL(is_module_sig_enforced);
+ 
+-void set_module_sig_enforced(void)
+-{
+-	sig_enforce = true;
+-}
+-
+ /* Block module loading/unloading? */
+ int modules_disabled = 0;
+ core_param(nomodule, modules_disabled, bint, 0);
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 651218ded9817..ef37acd28e4ac 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -965,7 +965,7 @@ void psi_cgroup_free(struct cgroup *cgroup)
+  */
+ void cgroup_move_task(struct task_struct *task, struct css_set *to)
+ {
+-	unsigned int task_flags = 0;
++	unsigned int task_flags;
+ 	struct rq_flags rf;
+ 	struct rq *rq;
+ 
+@@ -980,15 +980,31 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)
+ 
+ 	rq = task_rq_lock(task, &rf);
+ 
+-	if (task_on_rq_queued(task)) {
+-		task_flags = TSK_RUNNING;
+-		if (task_current(rq, task))
+-			task_flags |= TSK_ONCPU;
+-	} else if (task->in_iowait)
+-		task_flags = TSK_IOWAIT;
+-
+-	if (task->in_memstall)
+-		task_flags |= TSK_MEMSTALL;
++	/*
++	 * We may race with schedule() dropping the rq lock between
++	 * deactivating prev and switching to next. Because the psi
++	 * updates from the deactivation are deferred to the switch
++	 * callback to save cgroup tree updates, the task's scheduling
++	 * state here is not coherent with its psi state:
++	 *
++	 * schedule()                   cgroup_move_task()
++	 *   rq_lock()
++	 *   deactivate_task()
++	 *     p->on_rq = 0
++	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
++	 *   pick_next_task()
++	 *     rq_unlock()
++	 *                                rq_lock()
++	 *                                psi_task_change() // old cgroup
++	 *                                task->cgroups = to
++	 *                                psi_task_change() // new cgroup
++	 *                                rq_unlock()
++	 *     rq_lock()
++	 *   psi_sched_switch() // does deferred updates in new cgroup
++	 *
++	 * Don't rely on the scheduling state. Use psi_flags instead.
++	 */
++	task_flags = task->psi_flags;
+ 
+ 	if (task_flags)
+ 		psi_task_change(task, task_flags, 0);
+diff --git a/lib/debug_locks.c b/lib/debug_locks.c
+index 06d3135bd184c..a75ee30b77cb8 100644
+--- a/lib/debug_locks.c
++++ b/lib/debug_locks.c
+@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
+ /*
+  * Generic 'turn off all lock debugging' function:
+  */
+-noinstr int debug_locks_off(void)
++int debug_locks_off(void)
+ {
+ 	if (debug_locks && __debug_locks_off()) {
+ 		if (!debug_locks_silent) {
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index ae907a9c20506..44c455dbbd637 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -61,6 +61,7 @@ static struct shrinker deferred_split_shrinker;
+ 
+ static atomic_t huge_zero_refcount;
+ struct page *huge_zero_page __read_mostly;
++unsigned long huge_zero_pfn __read_mostly = ~0UL;
+ 
+ bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+ {
+@@ -97,6 +98,7 @@ retry:
+ 		__free_pages(zero_page, compound_order(zero_page));
+ 		goto retry;
+ 	}
++	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
+ 
+ 	/* We take additional reference here. It will be put back by shrinker */
+ 	atomic_set(&huge_zero_refcount, 2);
+@@ -146,6 +148,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
+ 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
+ 		struct page *zero_page = xchg(&huge_zero_page, NULL);
+ 		BUG_ON(zero_page == NULL);
++		WRITE_ONCE(huge_zero_pfn, ~0UL);
+ 		__free_pages(zero_page, compound_order(zero_page));
+ 		return HPAGE_PMD_NR;
+ 	}
+@@ -2046,7 +2049,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ 	count_vm_event(THP_SPLIT_PMD);
+ 
+ 	if (!vma_is_anonymous(vma)) {
+-		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
++		old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+ 		/*
+ 		 * We are going to unmap this huge page. So
+ 		 * just go ahead and zap it
+@@ -2055,16 +2058,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ 			zap_deposited_table(mm, pmd);
+ 		if (vma_is_special_huge(vma))
+ 			return;
+-		page = pmd_page(_pmd);
+-		if (!PageDirty(page) && pmd_dirty(_pmd))
+-			set_page_dirty(page);
+-		if (!PageReferenced(page) && pmd_young(_pmd))
+-			SetPageReferenced(page);
+-		page_remove_rmap(page, true);
+-		put_page(page);
++		if (unlikely(is_pmd_migration_entry(old_pmd))) {
++			swp_entry_t entry;
++
++			entry = pmd_to_swp_entry(old_pmd);
++			page = migration_entry_to_page(entry);
++		} else {
++			page = pmd_page(old_pmd);
++			if (!PageDirty(page) && pmd_dirty(old_pmd))
++				set_page_dirty(page);
++			if (!PageReferenced(page) && pmd_young(old_pmd))
++				SetPageReferenced(page);
++			page_remove_rmap(page, true);
++			put_page(page);
++		}
+ 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
+ 		return;
+-	} else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
++	}
++
++	if (is_huge_zero_pmd(*pmd)) {
+ 		/*
+ 		 * FIXME: Do we want to invalidate secondary mmu by calling
+ 		 * mmu_notifier_invalidate_range() see comments below inside
+@@ -2346,17 +2358,17 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
+ 
+ static void unmap_page(struct page *page)
+ {
+-	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
++	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
+ 		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
+-	bool unmap_success;
+ 
+ 	VM_BUG_ON_PAGE(!PageHead(page), page);
+ 
+ 	if (PageAnon(page))
+ 		ttu_flags |= TTU_SPLIT_FREEZE;
+ 
+-	unmap_success = try_to_unmap(page, ttu_flags);
+-	VM_BUG_ON_PAGE(!unmap_success, page);
++	try_to_unmap(page, ttu_flags);
++
++	VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
+ }
+ 
+ static void remap_page(struct page *page, unsigned int nr)
+@@ -2667,7 +2679,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 	struct deferred_split *ds_queue = get_deferred_split_queue(head);
+ 	struct anon_vma *anon_vma = NULL;
+ 	struct address_space *mapping = NULL;
+-	int count, mapcount, extra_pins, ret;
++	int extra_pins, ret;
+ 	pgoff_t end;
+ 
+ 	VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
+@@ -2726,7 +2738,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 	}
+ 
+ 	unmap_page(head);
+-	VM_BUG_ON_PAGE(compound_mapcount(head), head);
+ 
+ 	/* block interrupt reentry in xa_lock and spinlock */
+ 	local_irq_disable();
+@@ -2744,9 +2755,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 
+ 	/* Prevent deferred_split_scan() touching ->_refcount */
+ 	spin_lock(&ds_queue->split_queue_lock);
+-	count = page_count(head);
+-	mapcount = total_mapcount(head);
+-	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
++	if (page_ref_freeze(head, 1 + extra_pins)) {
+ 		if (!list_empty(page_deferred_list(head))) {
+ 			ds_queue->split_queue_len--;
+ 			list_del(page_deferred_list(head));
+@@ -2766,16 +2775,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 		__split_huge_page(page, list, end);
+ 		ret = 0;
+ 	} else {
+-		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
+-			pr_alert("total_mapcount: %u, page_count(): %u\n",
+-					mapcount, count);
+-			if (PageTail(page))
+-				dump_page(head, NULL);
+-			dump_page(page, "total_mapcount(head) > 0");
+-			BUG();
+-		}
+ 		spin_unlock(&ds_queue->split_queue_lock);
+-fail:		if (mapping)
++fail:
++		if (mapping)
+ 			xa_unlock(&mapping->i_pages);
+ 		local_irq_enable();
+ 		remap_page(head, thp_nr_pages(head));
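
A userspace sketch of the pfn publication protocol the huge_zero_pfn hunks add, with C11 relaxed atomics standing in for the kernel's WRITE_ONCE()/READ_ONCE() (a stand-in, not the kernel macros): the writer either publishes a valid pfn or resets the slot to an impossible sentinel before freeing the page, so a lockless reader comparing pfns can never match a stale huge zero page.

#include <stdatomic.h>
#include <stdio.h>

#define NO_PFN (~0UL)	/* sentinel no real pfn can equal */

static _Atomic unsigned long huge_zero_pfn = NO_PFN;

static int is_huge_zero_pfn(unsigned long pfn)
{
	return atomic_load_explicit(&huge_zero_pfn,
				    memory_order_relaxed) == pfn;
}

int main(void)
{
	/* publish after allocation */
	atomic_store_explicit(&huge_zero_pfn, 0x1000, memory_order_relaxed);
	printf("%d\n", is_huge_zero_pfn(0x1000));	/* 1 */

	/* reset to the sentinel before the shrinker frees the page */
	atomic_store_explicit(&huge_zero_pfn, NO_PFN, memory_order_relaxed);
	printf("%d\n", is_huge_zero_pfn(0x1000));	/* 0 */
	return 0;
}
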
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 3da4817190f3d..7ba7d9b20494a 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1584,15 +1584,12 @@ struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+ 	return NULL;
+ }
+ 
+-pgoff_t __basepage_index(struct page *page)
++pgoff_t hugetlb_basepage_index(struct page *page)
+ {
+ 	struct page *page_head = compound_head(page);
+ 	pgoff_t index = page_index(page_head);
+ 	unsigned long compound_idx;
+ 
+-	if (!PageHuge(page_head))
+-		return page_index(page);
+-
+ 	if (compound_order(page_head) >= MAX_ORDER)
+ 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
+ 	else
+diff --git a/mm/internal.h b/mm/internal.h
+index 1432feec62df0..08323e622bbd1 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -379,27 +379,52 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
+ extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
+ 
+ /*
+- * At what user virtual address is page expected in @vma?
++ * At what user virtual address is page expected in vma?
++ * Returns -EFAULT if all of the page is outside the range of vma.
++ * If page is a compound head, the entire compound page is considered.
+  */
+ static inline unsigned long
+-__vma_address(struct page *page, struct vm_area_struct *vma)
++vma_address(struct page *page, struct vm_area_struct *vma)
+ {
+-	pgoff_t pgoff = page_to_pgoff(page);
+-	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
++	pgoff_t pgoff;
++	unsigned long address;
++
++	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
++	pgoff = page_to_pgoff(page);
++	if (pgoff >= vma->vm_pgoff) {
++		address = vma->vm_start +
++			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
++		/* Check for address beyond vma (or wrapped through 0?) */
++		if (address < vma->vm_start || address >= vma->vm_end)
++			address = -EFAULT;
++	} else if (PageHead(page) &&
++		   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
++		/* Test above avoids possibility of wrap to 0 on 32-bit */
++		address = vma->vm_start;
++	} else {
++		address = -EFAULT;
++	}
++	return address;
+ }
+ 
++/*
++ * Then at what user virtual address will none of the page be found in vma?
++ * Assumes that vma_address() already returned a good starting address.
++ * If page is a compound head, the entire compound page is considered.
++ */
+ static inline unsigned long
+-vma_address(struct page *page, struct vm_area_struct *vma)
++vma_address_end(struct page *page, struct vm_area_struct *vma)
+ {
+-	unsigned long start, end;
+-
+-	start = __vma_address(page, vma);
+-	end = start + thp_size(page) - PAGE_SIZE;
+-
+-	/* page should be within @vma mapping range */
+-	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
+-
+-	return max(start, vma->vm_start);
++	pgoff_t pgoff;
++	unsigned long address;
++
++	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
++	pgoff = page_to_pgoff(page) + compound_nr(page);
++	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
++	/* Check for address beyond vma (or wrapped through 0?) */
++	if (address < vma->vm_start || address > vma->vm_end)
++		address = vma->vm_end;
++	return address;
+ }
+ 
+ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
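
A standalone sketch of the new vma_address() contract (PAGE_SHIFT hard-coded as 12, the compound-head branch omitted, helper name invented): failure is encoded as -EFAULT cast to unsigned long, and a single range check catches both past-the-end addresses and arithmetic that wrapped through 0.

#include <stdio.h>

#define SKETCH_EFAULT ((unsigned long)-14)

static unsigned long vma_address_sketch(unsigned long pgoff,
					unsigned long vm_pgoff,
					unsigned long vm_start,
					unsigned long vm_end)
{
	unsigned long address;

	if (pgoff < vm_pgoff)
		return SKETCH_EFAULT;
	address = vm_start + ((pgoff - vm_pgoff) << 12);
	/* catches both past-the-end and wrap-through-0 */
	if (address < vm_start || address >= vm_end)
		return SKETCH_EFAULT;
	return address;
}

int main(void)
{
	unsigned long a = vma_address_sketch(5, 2, 0x400000, 0x500000);

	if (a == SKETCH_EFAULT)
		puts("-EFAULT");
	else
		printf("%#lx\n", a);	/* 0x403000 */
	return 0;
}
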
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 704d05057d8c3..4db6f95e55be0 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -658,6 +658,7 @@ static int truncate_error_page(struct page *p, unsigned long pfn,
+  */
+ static int me_kernel(struct page *p, unsigned long pfn)
+ {
++	unlock_page(p);
+ 	return MF_IGNORED;
+ }
+ 
+@@ -667,6 +668,7 @@ static int me_kernel(struct page *p, unsigned long pfn)
+ static int me_unknown(struct page *p, unsigned long pfn)
+ {
+ 	pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
++	unlock_page(p);
+ 	return MF_FAILED;
+ }
+ 
+@@ -675,6 +677,7 @@ static int me_unknown(struct page *p, unsigned long pfn)
+  */
+ static int me_pagecache_clean(struct page *p, unsigned long pfn)
+ {
++	int ret;
+ 	struct address_space *mapping;
+ 
+ 	delete_from_lru_cache(p);
+@@ -683,8 +686,10 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
+ 	 * For anonymous pages we're done: the only reference left
+ 	 * should be the one m_f() holds.
+ 	 */
+-	if (PageAnon(p))
+-		return MF_RECOVERED;
++	if (PageAnon(p)) {
++		ret = MF_RECOVERED;
++		goto out;
++	}
+ 
+ 	/*
+ 	 * Now truncate the page in the page cache. This is really
+@@ -698,7 +703,8 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
+ 		/*
+ 		 * Page has been torn down in the meantime
+ 		 */
+-		return MF_FAILED;
++		ret = MF_FAILED;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -706,7 +712,10 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
+ 	 *
+ 	 * Open: to take i_mutex or not for this? Right now we don't.
+ 	 */
+-	return truncate_error_page(p, pfn, mapping);
++	ret = truncate_error_page(p, pfn, mapping);
++out:
++	unlock_page(p);
++	return ret;
+ }
+ 
+ /*
+@@ -782,24 +791,26 @@ static int me_pagecache_dirty(struct page *p, unsigned long pfn)
+  */
+ static int me_swapcache_dirty(struct page *p, unsigned long pfn)
+ {
++	int ret;
++
+ 	ClearPageDirty(p);
+ 	/* Trigger EIO in shmem: */
+ 	ClearPageUptodate(p);
+ 
+-	if (!delete_from_lru_cache(p))
+-		return MF_DELAYED;
+-	else
+-		return MF_FAILED;
++	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
++	unlock_page(p);
++	return ret;
+ }
+ 
+ static int me_swapcache_clean(struct page *p, unsigned long pfn)
+ {
++	int ret;
++
+ 	delete_from_swap_cache(p);
+ 
+-	if (!delete_from_lru_cache(p))
+-		return MF_RECOVERED;
+-	else
+-		return MF_FAILED;
++	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
++	unlock_page(p);
++	return ret;
+ }
+ 
+ /*
+@@ -820,6 +831,7 @@ static int me_huge_page(struct page *p, unsigned long pfn)
+ 	mapping = page_mapping(hpage);
+ 	if (mapping) {
+ 		res = truncate_error_page(hpage, pfn, mapping);
++		unlock_page(hpage);
+ 	} else {
+ 		res = MF_FAILED;
+ 		unlock_page(hpage);
+@@ -834,7 +846,6 @@ static int me_huge_page(struct page *p, unsigned long pfn)
+ 			page_ref_inc(p);
+ 			res = MF_RECOVERED;
+ 		}
+-		lock_page(hpage);
+ 	}
+ 
+ 	return res;
+@@ -866,6 +877,8 @@ static struct page_state {
+ 	unsigned long mask;
+ 	unsigned long res;
+ 	enum mf_action_page_type type;
++
++	/* Callback ->action() has to unlock the relevant page inside it. */
+ 	int (*action)(struct page *p, unsigned long pfn);
+ } error_states[] = {
+ 	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
+@@ -929,6 +942,7 @@ static int page_action(struct page_state *ps, struct page *p,
+ 	int result;
+ 	int count;
+ 
++	/* page p should be unlocked after returning from ps->action().  */
+ 	result = ps->action(p, pfn);
+ 
+ 	count = page_count(p) - 1;
+@@ -1313,7 +1327,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
+ 		goto out;
+ 	}
+ 
+-	res = identify_page_state(pfn, p, page_flags);
++	return identify_page_state(pfn, p, page_flags);
+ out:
+ 	unlock_page(head);
+ 	return res;
+@@ -1429,9 +1443,10 @@ int memory_failure(unsigned long pfn, int flags)
+ 	struct page *hpage;
+ 	struct page *orig_head;
+ 	struct dev_pagemap *pgmap;
+-	int res;
++	int res = 0;
+ 	unsigned long page_flags;
+ 	bool retry = true;
++	static DEFINE_MUTEX(mf_mutex);
+ 
+ 	if (!sysctl_memory_failure_recovery)
+ 		panic("Memory failure on page %lx", pfn);
+@@ -1449,13 +1464,18 @@ int memory_failure(unsigned long pfn, int flags)
+ 		return -ENXIO;
+ 	}
+ 
++	mutex_lock(&mf_mutex);
++
+ try_again:
+-	if (PageHuge(p))
+-		return memory_failure_hugetlb(pfn, flags);
++	if (PageHuge(p)) {
++		res = memory_failure_hugetlb(pfn, flags);
++		goto unlock_mutex;
++	}
++
+ 	if (TestSetPageHWPoison(p)) {
+ 		pr_err("Memory failure: %#lx: already hardware poisoned\n",
+ 			pfn);
+-		return 0;
++		goto unlock_mutex;
+ 	}
+ 
+ 	orig_head = hpage = compound_head(p);
+@@ -1488,17 +1508,19 @@ try_again:
+ 				res = MF_FAILED;
+ 			}
+ 			action_result(pfn, MF_MSG_BUDDY, res);
+-			return res == MF_RECOVERED ? 0 : -EBUSY;
++			res = res == MF_RECOVERED ? 0 : -EBUSY;
+ 		} else {
+ 			action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
+-			return -EBUSY;
++			res = -EBUSY;
+ 		}
++		goto unlock_mutex;
+ 	}
+ 
+ 	if (PageTransHuge(hpage)) {
+ 		if (try_to_split_thp_page(p, "Memory Failure") < 0) {
+ 			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
+-			return -EBUSY;
++			res = -EBUSY;
++			goto unlock_mutex;
+ 		}
+ 		VM_BUG_ON_PAGE(!page_count(p), p);
+ 	}
+@@ -1522,7 +1544,7 @@ try_again:
+ 	if (PageCompound(p) && compound_head(p) != orig_head) {
+ 		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
+ 		res = -EBUSY;
+-		goto out;
++		goto unlock_page;
+ 	}
+ 
+ 	/*
+@@ -1542,14 +1564,14 @@ try_again:
+ 		num_poisoned_pages_dec();
+ 		unlock_page(p);
+ 		put_page(p);
+-		return 0;
++		goto unlock_mutex;
+ 	}
+ 	if (hwpoison_filter(p)) {
+ 		if (TestClearPageHWPoison(p))
+ 			num_poisoned_pages_dec();
+ 		unlock_page(p);
+ 		put_page(p);
+-		return 0;
++		goto unlock_mutex;
+ 	}
+ 
+ 	/*
+@@ -1573,7 +1595,7 @@ try_again:
+ 	if (!hwpoison_user_mappings(p, pfn, flags, &p)) {
+ 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
+ 		res = -EBUSY;
+-		goto out;
++		goto unlock_page;
+ 	}
+ 
+ 	/*
+@@ -1582,13 +1604,17 @@ try_again:
+ 	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
+ 		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
+ 		res = -EBUSY;
+-		goto out;
++		goto unlock_page;
+ 	}
+ 
+ identify_page_state:
+ 	res = identify_page_state(pfn, p, page_flags);
+-out:
++	mutex_unlock(&mf_mutex);
++	return res;
++unlock_page:
+ 	unlock_page(p);
++unlock_mutex:
++	mutex_unlock(&mf_mutex);
+ 	return res;
+ }
+ EXPORT_SYMBOL_GPL(memory_failure);
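
The structural change across this file, one function-local static mutex serializing all of memory_failure() with every early return rewritten as a goto to a labelled unlock, is easy to lift out. A minimal userspace sketch of the same pattern, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static int handle_failure(int pfn, int is_huge, int poisoned)
{
	/* function-local, like static DEFINE_MUTEX(mf_mutex) above */
	static pthread_mutex_t mf_mutex = PTHREAD_MUTEX_INITIALIZER;
	int res = 0;

	pthread_mutex_lock(&mf_mutex);
	if (is_huge) {
		res = -1;	/* hand off to the huge-page path */
		goto unlock_mutex;
	}
	if (poisoned) {
		printf("pfn %d already poisoned\n", pfn);
		goto unlock_mutex;
	}
	/* ... the real work would happen here ... */
unlock_mutex:
	/* every exit funnels through here, so the lock cannot leak */
	pthread_mutex_unlock(&mf_mutex);
	return res;
}

int main(void)
{
	return handle_failure(42, 0, 1) ? 1 : 0;
}
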
+diff --git a/mm/memory.c b/mm/memory.c
+index 14a6c66b37483..36624986130be 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1361,7 +1361,18 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
+ 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
+ 				goto next;
+ 			/* fall through */
++		} else if (details && details->single_page &&
++			   PageTransCompound(details->single_page) &&
++			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
++			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
++			/*
++			 * Take and drop THP pmd lock so that we cannot return
++			 * prematurely, while zap_huge_pmd() has cleared *pmd,
++			 * but not yet decremented compound_mapcount().
++			 */
++			spin_unlock(ptl);
+ 		}
++
+ 		/*
+ 		 * Here there can be other concurrent MADV_DONTNEED or
+ 		 * trans huge page faults running, and if the pmd is
+@@ -3193,6 +3204,36 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
+ 	}
+ }
+ 
++/**
++ * unmap_mapping_page() - Unmap single page from processes.
++ * @page: The locked page to be unmapped.
++ *
++ * Unmap this page from any userspace process which still has it mmaped.
++ * Typically, for efficiency, the range of nearby pages has already been
++ * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
++ * truncation or invalidation holds the lock on a page, it may find that
++ * the page has been remapped again: it then uses unmap_mapping_page()
++ * to finally unmap it.
++ */
++void unmap_mapping_page(struct page *page)
++{
++	struct address_space *mapping = page->mapping;
++	struct zap_details details = { };
++
++	VM_BUG_ON(!PageLocked(page));
++	VM_BUG_ON(PageTail(page));
++
++	details.check_mapping = mapping;
++	details.first_index = page->index;
++	details.last_index = page->index + thp_nr_pages(page) - 1;
++	details.single_page = page;
++
++	i_mmap_lock_write(mapping);
++	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
++		unmap_mapping_range_tree(&mapping->i_mmap, &details);
++	i_mmap_unlock_write(mapping);
++}
++
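
The pmd_lock()/spin_unlock() pair added to zap_pmd_range() above (and repeated under PVMW_SYNC later in this patch) does no work of its own; taking and immediately dropping the lock simply waits out any holder currently inside the critical section. A tiny userspace sketch of that take-and-drop barrier, with a pthread mutex standing in for the pmd spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;

/* Returns only after any thread currently holding ptl has finished;
 * nothing is read, written, or published here. */
static void wait_for_holder(void)
{
	pthread_mutex_lock(&ptl);
	pthread_mutex_unlock(&ptl);
}

int main(void)
{
	wait_for_holder();	/* trivially returns: no holder here */
	puts("no concurrent critical section in flight");
	return 0;
}
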
+ /**
+  * unmap_mapping_pages() - Unmap pages from processes.
+  * @mapping: The address space containing pages to be unmapped.
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 773622cffe779..40455e753c5b4 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -322,6 +322,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ 		goto out;
+ 
+ 	page = migration_entry_to_page(entry);
++	page = compound_head(page);
+ 
+ 	/*
+ 	 * Once page cache replacement of page migration started, page_count
+diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
+index 86e3a3688d592..3350faeb199a6 100644
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -116,6 +116,13 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
+ 	return pfn_is_match(pvmw->page, pfn);
+ }
+ 
++static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
++{
++	pvmw->address = (pvmw->address + size) & ~(size - 1);
++	if (!pvmw->address)
++		pvmw->address = ULONG_MAX;
++}
++
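
step_forward() rounds the walk address up to the next size-aligned boundary and saturates to ULONG_MAX when the addition wraps to 0, so the caller's "address < end" loop still terminates. A standalone sketch of the same arithmetic:

#include <stdio.h>
#include <limits.h>

static void step_forward(unsigned long *address, unsigned long size)
{
	/* round up to the next 'size'-aligned boundary */
	*address = (*address + size) & ~(size - 1);
	if (!*address)			/* wrapped past the top */
		*address = ULONG_MAX;
}

int main(void)
{
	unsigned long addr = 0x1234;

	step_forward(&addr, 0x1000);
	printf("%#lx\n", addr);		/* 0x2000 */

	addr = ULONG_MAX & ~0xfffUL;	/* last aligned slot */
	step_forward(&addr, 0x1000);
	printf("%#lx\n", addr);		/* saturated to ULONG_MAX */
	return 0;
}
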
+ /**
+  * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
+  * @pvmw->address
+@@ -144,6 +151,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
+ {
+ 	struct mm_struct *mm = pvmw->vma->vm_mm;
+ 	struct page *page = pvmw->page;
++	unsigned long end;
+ 	pgd_t *pgd;
+ 	p4d_t *p4d;
+ 	pud_t *pud;
+@@ -153,10 +161,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
+ 	if (pvmw->pmd && !pvmw->pte)
+ 		return not_found(pvmw);
+ 
+-	if (pvmw->pte)
+-		goto next_pte;
++	if (unlikely(PageHuge(page))) {
++		/* The only possible mapping was handled on the last iteration */
++		if (pvmw->pte)
++			return not_found(pvmw);
+ 
+-	if (unlikely(PageHuge(pvmw->page))) {
+ 		/* when pud is not present, pte will be NULL */
+ 		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
+ 		if (!pvmw->pte)
+@@ -168,78 +177,108 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
+ 			return not_found(pvmw);
+ 		return true;
+ 	}
+-restart:
+-	pgd = pgd_offset(mm, pvmw->address);
+-	if (!pgd_present(*pgd))
+-		return false;
+-	p4d = p4d_offset(pgd, pvmw->address);
+-	if (!p4d_present(*p4d))
+-		return false;
+-	pud = pud_offset(p4d, pvmw->address);
+-	if (!pud_present(*pud))
+-		return false;
+-	pvmw->pmd = pmd_offset(pud, pvmw->address);
++
+ 	/*
+-	 * Make sure the pmd value isn't cached in a register by the
+-	 * compiler and used as a stale value after we've observed a
+-	 * subsequent update.
++	 * Seeking to the next pte only makes sense for THP.
++	 * But more important than that optimization is to filter out
++	 * any PageKsm page, whose page->index misleads vma_address()
++	 * and vma_address_end() to disaster.
+ 	 */
+-	pmde = READ_ONCE(*pvmw->pmd);
+-	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+-		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+-		if (likely(pmd_trans_huge(*pvmw->pmd))) {
+-			if (pvmw->flags & PVMW_MIGRATION)
+-				return not_found(pvmw);
+-			if (pmd_page(*pvmw->pmd) != page)
+-				return not_found(pvmw);
+-			return true;
+-		} else if (!pmd_present(*pvmw->pmd)) {
+-			if (thp_migration_supported()) {
+-				if (!(pvmw->flags & PVMW_MIGRATION))
++	end = PageTransCompound(page) ?
++		vma_address_end(page, pvmw->vma) :
++		pvmw->address + PAGE_SIZE;
++	if (pvmw->pte)
++		goto next_pte;
++restart:
++	do {
++		pgd = pgd_offset(mm, pvmw->address);
++		if (!pgd_present(*pgd)) {
++			step_forward(pvmw, PGDIR_SIZE);
++			continue;
++		}
++		p4d = p4d_offset(pgd, pvmw->address);
++		if (!p4d_present(*p4d)) {
++			step_forward(pvmw, P4D_SIZE);
++			continue;
++		}
++		pud = pud_offset(p4d, pvmw->address);
++		if (!pud_present(*pud)) {
++			step_forward(pvmw, PUD_SIZE);
++			continue;
++		}
++
++		pvmw->pmd = pmd_offset(pud, pvmw->address);
++		/*
++		 * Make sure the pmd value isn't cached in a register by the
++		 * compiler and used as a stale value after we've observed a
++		 * subsequent update.
++		 */
++		pmde = READ_ONCE(*pvmw->pmd);
++
++		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
++			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
++			pmde = *pvmw->pmd;
++			if (likely(pmd_trans_huge(pmde))) {
++				if (pvmw->flags & PVMW_MIGRATION)
+ 					return not_found(pvmw);
+-				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
+-					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
++				if (pmd_page(pmde) != page)
++					return not_found(pvmw);
++				return true;
++			}
++			if (!pmd_present(pmde)) {
++				swp_entry_t entry;
+ 
+-					if (migration_entry_to_page(entry) != page)
+-						return not_found(pvmw);
+-					return true;
+-				}
++				if (!thp_migration_supported() ||
++				    !(pvmw->flags & PVMW_MIGRATION))
++					return not_found(pvmw);
++				entry = pmd_to_swp_entry(pmde);
++				if (!is_migration_entry(entry) ||
++				    migration_entry_to_page(entry) != page)
++					return not_found(pvmw);
++				return true;
+ 			}
+-			return not_found(pvmw);
+-		} else {
+ 			/* THP pmd was split under us: handle on pte level */
+ 			spin_unlock(pvmw->ptl);
+ 			pvmw->ptl = NULL;
++		} else if (!pmd_present(pmde)) {
++			/*
++			 * If PVMW_SYNC, take and drop THP pmd lock so that we
++			 * cannot return prematurely, while zap_huge_pmd() has
++			 * cleared *pmd but not decremented compound_mapcount().
++			 */
++			if ((pvmw->flags & PVMW_SYNC) &&
++			    PageTransCompound(page)) {
++				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
++
++				spin_unlock(ptl);
++			}
++			step_forward(pvmw, PMD_SIZE);
++			continue;
+ 		}
+-	} else if (!pmd_present(pmde)) {
+-		return false;
+-	}
+-	if (!map_pte(pvmw))
+-		goto next_pte;
+-	while (1) {
++		if (!map_pte(pvmw))
++			goto next_pte;
++this_pte:
+ 		if (check_pte(pvmw))
+ 			return true;
+ next_pte:
+-		/* Seek to next pte only makes sense for THP */
+-		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+-			return not_found(pvmw);
+ 		do {
+ 			pvmw->address += PAGE_SIZE;
+-			if (pvmw->address >= pvmw->vma->vm_end ||
+-			    pvmw->address >=
+-					__vma_address(pvmw->page, pvmw->vma) +
+-					thp_size(pvmw->page))
++			if (pvmw->address >= end)
+ 				return not_found(pvmw);
+ 			/* Did we cross page table boundary? */
+-			if (pvmw->address % PMD_SIZE == 0) {
+-				pte_unmap(pvmw->pte);
++			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
+ 				if (pvmw->ptl) {
+ 					spin_unlock(pvmw->ptl);
+ 					pvmw->ptl = NULL;
+ 				}
++				pte_unmap(pvmw->pte);
++				pvmw->pte = NULL;
+ 				goto restart;
+-			} else {
+-				pvmw->pte++;
++			}
++			pvmw->pte++;
++			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
++				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
++				spin_lock(pvmw->ptl);
+ 			}
+ 		} while (pte_none(*pvmw->pte));
+ 
+@@ -247,7 +286,10 @@ next_pte:
+ 			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
+ 			spin_lock(pvmw->ptl);
+ 		}
+-	}
++		goto this_pte;
++	} while (pvmw->address < end);
++
++	return false;
+ }
+ 
+ /**
+@@ -266,14 +308,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+ 		.vma = vma,
+ 		.flags = PVMW_SYNC,
+ 	};
+-	unsigned long start, end;
+-
+-	start = __vma_address(page, vma);
+-	end = start + thp_size(page) - PAGE_SIZE;
+ 
+-	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
++	pvmw.address = vma_address(page, vma);
++	if (pvmw.address == -EFAULT)
+ 		return 0;
+-	pvmw.address = max(start, vma->vm_start);
+ 	if (!page_vma_mapped_walk(&pvmw))
+ 		return 0;
+ 	page_vma_mapped_walk_done(&pvmw);
+diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
+index c2210e1cdb515..4e640baf97948 100644
+--- a/mm/pgtable-generic.c
++++ b/mm/pgtable-generic.c
+@@ -135,9 +135,8 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
+ {
+ 	pmd_t pmd;
+ 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+-	VM_BUG_ON(!pmd_present(*pmdp));
+-	/* Below assumes pmd_present() is true */
+-	VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
++	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
++			   !pmd_devmap(*pmdp));
+ 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+ 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ 	return pmd;
+diff --git a/mm/rmap.c b/mm/rmap.c
+index b0fc27e77d6d7..3665d062cc9ce 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -707,7 +707,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
+  */
+ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
+ {
+-	unsigned long address;
+ 	if (PageAnon(page)) {
+ 		struct anon_vma *page__anon_vma = page_anon_vma(page);
+ 		/*
+@@ -717,15 +716,13 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
+ 		if (!vma->anon_vma || !page__anon_vma ||
+ 		    vma->anon_vma->root != page__anon_vma->root)
+ 			return -EFAULT;
+-	} else if (page->mapping) {
+-		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
+-			return -EFAULT;
+-	} else
++	} else if (!vma->vm_file) {
+ 		return -EFAULT;
+-	address = __vma_address(page, vma);
+-	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
++	} else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
+ 		return -EFAULT;
+-	return address;
++	}
++
++	return vma_address(page, vma);
+ }
+ 
+ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
+@@ -919,7 +916,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+ 	 */
+ 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
+ 				0, vma, vma->vm_mm, address,
+-				min(vma->vm_end, address + page_size(page)));
++				vma_address_end(page, vma));
+ 	mmu_notifier_invalidate_range_start(&range);
+ 
+ 	while (page_vma_mapped_walk(&pvmw)) {
+@@ -1405,6 +1402,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ 	struct mmu_notifier_range range;
+ 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
+ 
++	/*
++	 * When racing against e.g. zap_pte_range() on another cpu,
++	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
++	 * try_to_unmap() may return false when it is about to become true,
++	 * if page table locking is skipped: use TTU_SYNC to wait for that.
++	 */
++	if (flags & TTU_SYNC)
++		pvmw.flags = PVMW_SYNC;
++
+ 	/* munlock has nothing to gain from examining un-locked vmas */
+ 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
+ 		return true;
+@@ -1426,9 +1432,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ 	 * Note that the page can not be free in this function as call of
+ 	 * try_to_unmap() must hold a reference on the page.
+ 	 */
++	range.end = PageKsm(page) ?
++			address + PAGE_SIZE : vma_address_end(page, vma);
+ 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+-				address,
+-				min(vma->vm_end, address + page_size(page)));
++				address, range.end);
+ 	if (PageHuge(page)) {
+ 		/*
+ 		 * If sharing is possible, start and end will be adjusted
+@@ -1777,7 +1784,13 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
+ 	else
+ 		rmap_walk(page, &rwc);
+ 
+-	return !page_mapcount(page) ? true : false;
++	/*
++	 * When racing against e.g. zap_pte_range() on another cpu,
++	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
++	 * try_to_unmap() may return false when it is about to become true,
++	 * if page table locking is skipped: use TTU_SYNC to wait for that.
++	 */
++	return !page_mapcount(page);
+ }
+ 
+ /**
+@@ -1874,6 +1887,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
+ 		struct vm_area_struct *vma = avc->vma;
+ 		unsigned long address = vma_address(page, vma);
+ 
++		VM_BUG_ON_VMA(address == -EFAULT, vma);
+ 		cond_resched();
+ 
+ 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+@@ -1928,6 +1942,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
+ 			pgoff_start, pgoff_end) {
+ 		unsigned long address = vma_address(page, vma);
+ 
++		VM_BUG_ON_VMA(address == -EFAULT, vma);
+ 		cond_resched();
+ 
+ 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 455944264663e..bf092be0a6f01 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -168,13 +168,10 @@ void do_invalidatepage(struct page *page, unsigned int offset,
+  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
+  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
+  */
+-static void
+-truncate_cleanup_page(struct address_space *mapping, struct page *page)
++static void truncate_cleanup_page(struct page *page)
+ {
+-	if (page_mapped(page)) {
+-		unsigned int nr = thp_nr_pages(page);
+-		unmap_mapping_pages(mapping, page->index, nr, false);
+-	}
++	if (page_mapped(page))
++		unmap_mapping_page(page);
+ 
+ 	if (page_has_private(page))
+ 		do_invalidatepage(page, 0, thp_size(page));
+@@ -219,7 +216,7 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
+ 	if (page->mapping != mapping)
+ 		return -EIO;
+ 
+-	truncate_cleanup_page(mapping, page);
++	truncate_cleanup_page(page);
+ 	delete_from_page_cache(page);
+ 	return 0;
+ }
+@@ -326,7 +323,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ 		index = indices[pagevec_count(&pvec) - 1] + 1;
+ 		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
+ 		for (i = 0; i < pagevec_count(&pvec); i++)
+-			truncate_cleanup_page(mapping, pvec.pages[i]);
++			truncate_cleanup_page(pvec.pages[i]);
+ 		delete_from_page_cache_batch(mapping, &pvec);
+ 		for (i = 0; i < pagevec_count(&pvec); i++)
+ 			unlock_page(pvec.pages[i]);
+@@ -652,6 +649,16 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
+ 				continue;
+ 			}
+ 
++			if (!did_range_unmap && page_mapped(page)) {
++				/*
++				 * If page is mapped, before taking its lock,
++				 * zap the rest of the file in one hit.
++				 */
++				unmap_mapping_pages(mapping, index,
++						(1 + end - index), false);
++				did_range_unmap = 1;
++			}
++
+ 			lock_page(page);
+ 			WARN_ON(page_to_index(page) != index);
+ 			if (page->mapping != mapping) {
+@@ -659,23 +666,11 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
+ 				continue;
+ 			}
+ 			wait_on_page_writeback(page);
+-			if (page_mapped(page)) {
+-				if (!did_range_unmap) {
+-					/*
+-					 * Zap the rest of the file in one hit.
+-					 */
+-					unmap_mapping_pages(mapping, index,
+-						(1 + end - index), false);
+-					did_range_unmap = 1;
+-				} else {
+-					/*
+-					 * Just zap this page
+-					 */
+-					unmap_mapping_pages(mapping, index,
+-								1, false);
+-				}
+-			}
++
++			if (page_mapped(page))
++				unmap_mapping_page(page);
+ 			BUG_ON(page_mapped(page));
++
+ 			ret2 = do_launder_page(mapping, page);
+ 			if (ret2 == 0) {
+ 				if (!invalidate_complete_page2(mapping, page))
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 2603966da904d..e910890a868c1 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -1421,7 +1421,7 @@ static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
+ 	if (eeprom.offset + eeprom.len > total_len)
+ 		return -EINVAL;
+ 
+-	data = kmalloc(PAGE_SIZE, GFP_USER);
++	data = kzalloc(PAGE_SIZE, GFP_USER);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+@@ -1486,7 +1486,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
+ 	if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ 		return -EINVAL;
+ 
+-	data = kmalloc(PAGE_SIZE, GFP_USER);
++	data = kzalloc(PAGE_SIZE, GFP_USER);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+@@ -1765,7 +1765,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
+ 		return -EFAULT;
+ 
+ 	test.len = test_len;
+-	data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
++	data = kcalloc(test_len, sizeof(u64), GFP_USER);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+@@ -2281,7 +2281,7 @@ static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
+ 	ret = ethtool_tunable_valid(&tuna);
+ 	if (ret)
+ 		return ret;
+-	data = kmalloc(tuna.len, GFP_USER);
++	data = kzalloc(tuna.len, GFP_USER);
+ 	if (!data)
+ 		return -ENOMEM;
+ 	ret = ops->get_tunable(dev, &tuna, data);
+@@ -2473,7 +2473,7 @@ static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
+ 	ret = ethtool_phy_tunable_valid(&tuna);
+ 	if (ret)
+ 		return ret;
+-	data = kmalloc(tuna.len, GFP_USER);
++	data = kzalloc(tuna.len, GFP_USER);
+ 	if (!data)
+ 		return -ENOMEM;
+ 	if (phy_drv_tunable) {
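
All five hunks in this file make the same change, and the reason is worth spelling out: these buffers are later copied to userspace in full, but a driver may legitimately fill only a prefix of them, so zero-initialising the allocation keeps stale heap contents from leaking. A userspace sketch with calloc() as an analogue of kzalloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t len = 64, filled = 16;
	unsigned char *data = calloc(1, len);	/* kzalloc() analogue */

	if (!data)
		return 1;
	memset(data, 0xab, filled);	/* the driver fills only a prefix */

	/* copying all 'len' bytes out is now safe: the tail is zeroed,
	 * not whatever the allocator last had in this heap chunk */
	for (size_t i = filled; i < len; i++) {
		if (data[i] != 0)
			return 1;
	}
	puts("no stale heap bytes past the filled prefix");
	free(data);
	return 0;
}
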
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 1355e6c0d5677..faa7856c7fb07 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -575,7 +575,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
+ 			return err;
+ 	}
+ 
+-	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
++	if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
+ 		return -EAGAIN;
+ 	return sk->sk_prot->connect(sk, uaddr, addr_len);
+ }
+@@ -803,7 +803,7 @@ int inet_send_prepare(struct sock *sk)
+ 	sock_rps_record_flow(sk);
+ 
+ 	/* We may need to bind the socket. */
+-	if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
++	if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
+ 	    inet_autobind(sk))
+ 		return -EAGAIN;
+ 
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 2e35f68da40a7..1c6429c353a96 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1989,7 +1989,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
+ 		return -EAFNOSUPPORT;
+ 
+ 	if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
+-		BUG();
++		return -EINVAL;
+ 
+ 	if (tb[IFLA_INET_CONF]) {
+ 		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 8b943f85fff9d..ea22768f76b8a 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -952,6 +952,7 @@ bool ping_rcv(struct sk_buff *skb)
+ 	struct sock *sk;
+ 	struct net *net = dev_net(skb->dev);
+ 	struct icmphdr *icmph = icmp_hdr(skb);
++	bool rc = false;
+ 
+ 	/* We assume the packet has already been checked by icmp_rcv */
+ 
+@@ -966,14 +967,15 @@ bool ping_rcv(struct sk_buff *skb)
+ 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+ 
+ 		pr_debug("rcv on socket %p\n", sk);
+-		if (skb2)
+-			ping_queue_rcv_skb(sk, skb2);
++		if (skb2 && !ping_queue_rcv_skb(sk, skb2))
++			rc = true;
+ 		sock_put(sk);
+-		return true;
+ 	}
+-	pr_debug("no socket, dropping\n");
+ 
+-	return false;
++	if (!rc)
++		pr_debug("no socket, dropping\n");
++
++	return rc;
+ }
+ EXPORT_SYMBOL_GPL(ping_rcv);
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index a9e53f5942fae..eab0a46983c0b 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5822,7 +5822,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
+ 		return -EAFNOSUPPORT;
+ 
+ 	if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
+-		BUG();
++		return -EINVAL;
+ 
+ 	if (tb[IFLA_INET6_TOKEN]) {
+ 		err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 02e818d740f60..5ec437e8e7132 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1442,7 +1442,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
+ 	rcu_read_lock();
+ 	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ 
+-	if (WARN_ON_ONCE(!chanctx_conf)) {
++	if (!chanctx_conf) {
+ 		rcu_read_unlock();
+ 		return NULL;
+ 	}
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 0fe91dc9817eb..437d88822d8f8 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4062,10 +4062,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
+ 		if (elems.mbssid_config_ie)
+ 			bss_conf->profile_periodicity =
+ 				elems.mbssid_config_ie->profile_periodicity;
++		else
++			bss_conf->profile_periodicity = 0;
+ 
+ 		if (elems.ext_capab_len >= 11 &&
+ 		    (elems.ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
+ 			bss_conf->ema_ap = true;
++		else
++			bss_conf->ema_ap = false;
+ 
+ 		/* continue assoc process */
+ 		ifmgd->assoc_data->timeout = jiffies;
+@@ -5802,12 +5806,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+ 					      beacon_ies->data, beacon_ies->len);
+ 		if (elem && elem->datalen >= 3)
+ 			sdata->vif.bss_conf.profile_periodicity = elem->data[2];
++		else
++			sdata->vif.bss_conf.profile_periodicity = 0;
+ 
+ 		elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
+ 					  beacon_ies->data, beacon_ies->len);
+ 		if (elem && elem->datalen >= 11 &&
+ 		    (elem->data[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
+ 			sdata->vif.bss_conf.ema_ap = true;
++		else
++			sdata->vif.bss_conf.ema_ap = false;
+ 	} else {
+ 		assoc_data->timeout = jiffies;
+ 		assoc_data->timeout_started = true;
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 59de7a86599dc..cb5cbf02dbac9 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2239,17 +2239,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 	sc = le16_to_cpu(hdr->seq_ctrl);
+ 	frag = sc & IEEE80211_SCTL_FRAG;
+ 
+-	if (is_multicast_ether_addr(hdr->addr1)) {
+-		I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
+-		goto out_no_led;
+-	}
+-
+ 	if (rx->sta)
+ 		cache = &rx->sta->frags;
+ 
+ 	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+ 		goto out;
+ 
++	if (is_multicast_ether_addr(hdr->addr1))
++		return RX_DROP_MONITOR;
++
+ 	I802_DEBUG_INC(rx->local->rx_handlers_fragments);
+ 
+ 	if (skb_linearize(rx->skb))
+@@ -2375,7 +2373,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 
+  out:
+ 	ieee80211_led_rx(rx->local);
+- out_no_led:
+ 	if (rx->sta)
+ 		rx->sta->rx_stats.packets++;
+ 	return RX_CONTINUE;
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 53755a05f73b5..06342693799eb 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -955,7 +955,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
+ 
+ 	switch (elem->data[0]) {
+ 	case WLAN_EID_EXT_HE_MU_EDCA:
+-		if (len == sizeof(*elems->mu_edca_param_set)) {
++		if (len >= sizeof(*elems->mu_edca_param_set)) {
+ 			elems->mu_edca_param_set = data;
+ 			if (crc)
+ 				*crc = crc32_be(*crc, (void *)elem,
+@@ -976,7 +976,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
+ 		}
+ 		break;
+ 	case WLAN_EID_EXT_UORA:
+-		if (len == 1)
++		if (len >= 1)
+ 			elems->uora_element = data;
+ 		break;
+ 	case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
+@@ -984,7 +984,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
+ 			elems->max_channel_switch_time = data;
+ 		break;
+ 	case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
+-		if (len == sizeof(*elems->mbssid_config_ie))
++		if (len >= sizeof(*elems->mbssid_config_ie))
+ 			elems->mbssid_config_ie = data;
+ 		break;
+ 	case WLAN_EID_EXT_HE_SPR:
+@@ -993,7 +993,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
+ 			elems->he_spr = data;
+ 		break;
+ 	case WLAN_EID_EXT_HE_6GHZ_CAPA:
+-		if (len == sizeof(*elems->he_6ghz_capa))
++		if (len >= sizeof(*elems->he_6ghz_capa))
+ 			elems->he_6ghz_capa = data;
+ 		break;
+ 	}
+@@ -1082,14 +1082,14 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+ 
+ 		switch (id) {
+ 		case WLAN_EID_LINK_ID:
+-			if (elen + 2 != sizeof(struct ieee80211_tdls_lnkie)) {
++			if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
+ 				elem_parse_failed = true;
+ 				break;
+ 			}
+ 			elems->lnk_id = (void *)(pos - 2);
+ 			break;
+ 		case WLAN_EID_CHAN_SWITCH_TIMING:
+-			if (elen != sizeof(struct ieee80211_ch_switch_timing)) {
++			if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
+ 				elem_parse_failed = true;
+ 				break;
+ 			}
+@@ -1252,7 +1252,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+ 			elems->sec_chan_offs = (void *)pos;
+ 			break;
+ 		case WLAN_EID_CHAN_SWITCH_PARAM:
+-			if (elen !=
++			if (elen <
+ 			    sizeof(*elems->mesh_chansw_params_ie)) {
+ 				elem_parse_failed = true;
+ 				break;
+@@ -1261,7 +1261,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+ 			break;
+ 		case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
+ 			if (!action ||
+-			    elen != sizeof(*elems->wide_bw_chansw_ie)) {
++			    elen < sizeof(*elems->wide_bw_chansw_ie)) {
+ 				elem_parse_failed = true;
+ 				break;
+ 			}
+@@ -1280,7 +1280,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+ 			ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
+ 					      pos, elen);
+ 			if (ie) {
+-				if (ie[1] == sizeof(*elems->wide_bw_chansw_ie))
++				if (ie[1] >= sizeof(*elems->wide_bw_chansw_ie))
+ 					elems->wide_bw_chansw_ie =
+ 						(void *)(ie + 2);
+ 				else
+@@ -1324,7 +1324,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+ 			elems->cisco_dtpc_elem = pos;
+ 			break;
+ 		case WLAN_EID_ADDBA_EXT:
+-			if (elen != sizeof(struct ieee80211_addba_ext_ie)) {
++			if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
+ 				elem_parse_failed = true;
+ 				break;
+ 			}
+@@ -1350,7 +1350,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+ 							  elem, elems);
+ 			break;
+ 		case WLAN_EID_S1G_CAPABILITIES:
+-			if (elen == sizeof(*elems->s1g_capab))
++			if (elen >= sizeof(*elems->s1g_capab))
+ 				elems->s1g_capab = (void *)pos;
+ 			else
+ 				elem_parse_failed = true;
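
The pattern across these hunks is uniform: element-length checks move from exact equality to at-least, so frames from peers that append new fields to a known element are no longer rejected outright. A standalone sketch of the before/after (struct layout and names invented for illustration):

#include <stdio.h>
#include <string.h>

struct mu_edca_sketch {		/* invented stand-in for the real IE */
	unsigned char params[4];
};

static int parse_elem(const unsigned char *data, size_t len,
		      struct mu_edca_sketch *out)
{
	/* was: len == sizeof(*out); now tolerate trailing new fields */
	if (len >= sizeof(*out)) {
		memcpy(out, data, sizeof(*out));
		return 1;
	}
	return 0;
}

int main(void)
{
	unsigned char elem[6] = { 1, 2, 3, 4, 5, 6 };	/* 2 extra bytes */
	struct mu_edca_sketch e;

	printf("parsed: %s\n",
	       parse_elem(elem, sizeof(elem), &e) ? "yes" : "no");
	return 0;
}
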
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index c52557ec7fb33..68a4dd2512427 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2683,7 +2683,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ 	}
+ 	if (likely(saddr == NULL)) {
+ 		dev	= packet_cached_dev_get(po);
+-		proto	= po->num;
++		proto	= READ_ONCE(po->num);
+ 	} else {
+ 		err = -EINVAL;
+ 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
+@@ -2896,7 +2896,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 
+ 	if (likely(saddr == NULL)) {
+ 		dev	= packet_cached_dev_get(po);
+-		proto	= po->num;
++		proto	= READ_ONCE(po->num);
+ 	} else {
+ 		err = -EINVAL;
+ 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
+@@ -3034,10 +3034,13 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	struct sock *sk = sock->sk;
+ 	struct packet_sock *po = pkt_sk(sk);
+ 
+-	if (po->tx_ring.pg_vec)
++	/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
++	 * tpacket_snd() will redo the check safely.
++	 */
++	if (data_race(po->tx_ring.pg_vec))
+ 		return tpacket_snd(po, msg);
+-	else
+-		return packet_snd(sock, msg, len);
++
++	return packet_snd(sock, msg, len);
+ }
+ 
+ /*
+@@ -3168,7 +3171,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
+ 			/* prevents packet_notifier() from calling
+ 			 * register_prot_hook()
+ 			 */
+-			po->num = 0;
++			WRITE_ONCE(po->num, 0);
+ 			__unregister_prot_hook(sk, true);
+ 			rcu_read_lock();
+ 			dev_curr = po->prot_hook.dev;
+@@ -3178,17 +3181,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
+ 		}
+ 
+ 		BUG_ON(po->running);
+-		po->num = proto;
++		WRITE_ONCE(po->num, proto);
+ 		po->prot_hook.type = proto;
+ 
+ 		if (unlikely(unlisted)) {
+ 			dev_put(dev);
+ 			po->prot_hook.dev = NULL;
+-			po->ifindex = -1;
++			WRITE_ONCE(po->ifindex, -1);
+ 			packet_cached_dev_reset(po);
+ 		} else {
+ 			po->prot_hook.dev = dev;
+-			po->ifindex = dev ? dev->ifindex : 0;
++			WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
+ 			packet_cached_dev_assign(po, dev);
+ 		}
+ 	}
+@@ -3502,7 +3505,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
+ 	uaddr->sa_family = AF_PACKET;
+ 	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
+ 	rcu_read_lock();
+-	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
++	dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
+ 	if (dev)
+ 		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
+ 	rcu_read_unlock();
+@@ -3517,16 +3520,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
+ 	struct sock *sk = sock->sk;
+ 	struct packet_sock *po = pkt_sk(sk);
+ 	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
++	int ifindex;
+ 
+ 	if (peer)
+ 		return -EOPNOTSUPP;
+ 
++	ifindex = READ_ONCE(po->ifindex);
+ 	sll->sll_family = AF_PACKET;
+-	sll->sll_ifindex = po->ifindex;
+-	sll->sll_protocol = po->num;
++	sll->sll_ifindex = ifindex;
++	sll->sll_protocol = READ_ONCE(po->num);
+ 	sll->sll_pkttype = 0;
+ 	rcu_read_lock();
+-	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
++	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
+ 	if (dev) {
+ 		sll->sll_hatype = dev->type;
+ 		sll->sll_halen = dev->addr_len;
+@@ -4105,7 +4110,7 @@ static int packet_notifier(struct notifier_block *this,
+ 				}
+ 				if (msg == NETDEV_UNREGISTER) {
+ 					packet_cached_dev_reset(po);
+-					po->ifindex = -1;
++					WRITE_ONCE(po->ifindex, -1);
+ 					if (po->prot_hook.dev)
+ 						dev_put(po->prot_hook.dev);
+ 					po->prot_hook.dev = NULL;
+@@ -4411,7 +4416,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	was_running = po->running;
+ 	num = po->num;
+ 	if (was_running) {
+-		po->num = 0;
++		WRITE_ONCE(po->num, 0);
+ 		__unregister_prot_hook(sk, false);
+ 	}
+ 	spin_unlock(&po->bind_lock);
+@@ -4446,7 +4451,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 
+ 	spin_lock(&po->bind_lock);
+ 	if (was_running) {
+-		po->num = num;
++		WRITE_ONCE(po->num, num);
+ 		register_prot_hook(sk);
+ 	}
+ 	spin_unlock(&po->bind_lock);
+@@ -4616,8 +4621,8 @@ static int packet_seq_show(struct seq_file *seq, void *v)
+ 			   s,
+ 			   refcount_read(&s->sk_refcnt),
+ 			   s->sk_type,
+-			   ntohs(po->num),
+-			   po->ifindex,
++			   ntohs(READ_ONCE(po->num)),
++			   READ_ONCE(po->ifindex),
+ 			   po->running,
+ 			   atomic_read(&s->sk_rmem_alloc),
+ 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
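
The theme of this whole file is annotating fields (po->num, po->ifindex) that lockless readers race against writers who still serialize among themselves under bind_lock. A userspace sketch of the same discipline, using C11 relaxed atomics as an approximation of READ_ONCE()/WRITE_ONCE() (not the kernel macros' exact semantics): the annotation rules out torn and compiler-refetched accesses without adding ordering the reader does not need.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t bind_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic unsigned short num;	/* raced by lockless readers */

static void writer_bind(unsigned short proto)
{
	pthread_mutex_lock(&bind_lock);	/* writers still serialize */
	atomic_store_explicit(&num, proto, memory_order_relaxed);
	pthread_mutex_unlock(&bind_lock);
}

static unsigned short reader_peek(void)
{
	/* no lock: relaxed load gives an untorn, single read */
	return atomic_load_explicit(&num, memory_order_relaxed);
}

int main(void)
{
	writer_bind(0x0800);
	printf("proto %#x\n", reader_peek());
	return 0;
}
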
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index f342b61476754..726e7d2342bd5 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -1059,6 +1059,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ 		case NL80211_IFTYPE_MESH_POINT:
+ 			/* mesh should be handled? */
+ 			break;
++		case NL80211_IFTYPE_OCB:
++			cfg80211_leave_ocb(rdev, dev);
++			break;
+ 		default:
+ 			break;
+ 		}
+diff --git a/scripts/Makefile b/scripts/Makefile
+index c36106bce80ee..9adb6d247818f 100644
+--- a/scripts/Makefile
++++ b/scripts/Makefile
+@@ -14,6 +14,7 @@ hostprogs-always-$(CONFIG_ASN1)				+= asn1_compiler
+ hostprogs-always-$(CONFIG_MODULE_SIG_FORMAT)		+= sign-file
+ hostprogs-always-$(CONFIG_SYSTEM_TRUSTED_KEYRING)	+= extract-cert
+ hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE)	+= insert-sys-cert
++hostprogs-always-$(CONFIG_SYSTEM_REVOCATION_LIST)	+= extract-cert
+ 
+ HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
+ HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index f9b19524da112..1e9baa5c4fc6e 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -192,15 +192,20 @@ static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
+ 				 Elf32_Word const *symtab_shndx)
+ {
+ 	unsigned long offset;
++	unsigned short shndx = w2(sym->st_shndx);
+ 	int index;
+ 
+-	if (sym->st_shndx != SHN_XINDEX)
+-		return w2(sym->st_shndx);
++	if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
++		return shndx;
+ 
+-	offset = (unsigned long)sym - (unsigned long)symtab;
+-	index = offset / sizeof(*sym);
++	if (shndx == SHN_XINDEX) {
++		offset = (unsigned long)sym - (unsigned long)symtab;
++		index = offset / sizeof(*sym);
+ 
+-	return w(symtab_shndx[index]);
++		return w(symtab_shndx[index]);
++	}
++
++	return 0;
+ }
+ 
+ static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
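
Before this change, any reserved index other than SHN_XINDEX (SHN_ABS, SHN_COMMON, and friends) was returned as if it were a real section number. A standalone sketch of the corrected lookup, with the ELF constants inlined and the byte-order helpers dropped:

#include <stdio.h>

#define SHN_UNDEF	0
#define SHN_LORESERVE	0xff00
#define SHN_XINDEX	0xffff

/* Ordinary indices pass through; SHN_XINDEX redirects to the extended
 * table; every other reserved value maps to 0 instead of being misread
 * as a real section number. */
static unsigned int get_symindex(unsigned int shndx,
				 const unsigned int *symtab_shndx,
				 unsigned long index)
{
	if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
		return shndx;
	if (shndx == SHN_XINDEX)
		return symtab_shndx[index];
	return 0;
}

int main(void)
{
	unsigned int xtab[] = { 70000 };

	printf("%u %u %u\n",
	       get_symindex(7, xtab, 0),		/* 7 */
	       get_symindex(SHN_XINDEX, xtab, 0),	/* 70000 */
	       get_symindex(0xfff1, xtab, 0));		/* SHN_ABS -> 0 */
	return 0;
}
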
+diff --git a/security/integrity/platform_certs/keyring_handler.c b/security/integrity/platform_certs/keyring_handler.c
+index c5ba695c10e3a..5604bd57c9907 100644
+--- a/security/integrity/platform_certs/keyring_handler.c
++++ b/security/integrity/platform_certs/keyring_handler.c
+@@ -55,6 +55,15 @@ static __init void uefi_blacklist_binary(const char *source,
+ 	uefi_blacklist_hash(source, data, len, "bin:", 4);
+ }
+ 
++/*
++ * Add an X509 cert to the revocation list.
++ */
++static __init void uefi_revocation_list_x509(const char *source,
++					     const void *data, size_t len)
++{
++	add_key_to_revocation_list(data, len);
++}
++
+ /*
+  * Return the appropriate handler for particular signature list types found in
+  * the UEFI db and MokListRT tables.
+@@ -76,5 +85,7 @@ __init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type)
+ 		return uefi_blacklist_x509_tbs;
+ 	if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
+ 		return uefi_blacklist_binary;
++	if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
++		return uefi_revocation_list_x509;
+ 	return 0;
+ }
+diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
+index ee4b4c666854f..f290f78c3f301 100644
+--- a/security/integrity/platform_certs/load_uefi.c
++++ b/security/integrity/platform_certs/load_uefi.c
+@@ -132,8 +132,9 @@ static int __init load_moklist_certs(void)
+ static int __init load_uefi_certs(void)
+ {
+ 	efi_guid_t secure_var = EFI_IMAGE_SECURITY_DATABASE_GUID;
+-	void *db = NULL, *dbx = NULL;
+-	unsigned long dbsize = 0, dbxsize = 0;
++	efi_guid_t mok_var = EFI_SHIM_LOCK_GUID;
++	void *db = NULL, *dbx = NULL, *mokx = NULL;
++	unsigned long dbsize = 0, dbxsize = 0, mokxsize = 0;
+ 	efi_status_t status;
+ 	int rc = 0;
+ 
+@@ -175,6 +176,21 @@ static int __init load_uefi_certs(void)
+ 		kfree(dbx);
+ 	}
+ 
++	mokx = get_cert_list(L"MokListXRT", &mok_var, &mokxsize, &status);
++	if (!mokx) {
++		if (status == EFI_NOT_FOUND)
++			pr_debug("mokx variable wasn't found\n");
++		else
++			pr_info("Couldn't get mokx list\n");
++	} else {
++		rc = parse_efi_signature_list("UEFI:MokListXRT",
++					      mokx, mokxsize,
++					      get_handler_for_dbx);
++		if (rc)
++			pr_err("Couldn't parse mokx signatures %d\n", rc);
++		kfree(mokx);
++	}
++
+ 	/* Load the MokListRT certs */
+ 	rc = load_moklist_certs();
+ 
+diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
+index 58b5a349d3baf..ea3158b0d551d 100644
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -1147,7 +1147,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
+ 		}
+ 	}
+ 
+-	if (test->insn_processed) {
++	if (!unpriv && test->insn_processed) {
+ 		uint32_t insn_processed;
+ 		char *proc;
+ 
+diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c
+index ca8fdb1b3f015..7d7ebee5cc7a8 100644
+--- a/tools/testing/selftests/bpf/verifier/and.c
++++ b/tools/testing/selftests/bpf/verifier/and.c
+@@ -61,6 +61,8 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R1 !read_ok",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 0
+ },
+diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
+index 8a1caf46ffbc3..e061e8799ce23 100644
+--- a/tools/testing/selftests/bpf/verifier/bounds.c
++++ b/tools/testing/selftests/bpf/verifier/bounds.c
+@@ -508,6 +508,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT
+ },
+ {
+@@ -528,6 +530,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT
+ },
+ {
+@@ -569,6 +573,8 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 min value is outside of the allowed memory range",
++	.result_unpriv = REJECT,
+ 	.fixup_map_hash_8b = { 3 },
+ 	.result = ACCEPT,
+ },
+@@ -589,6 +595,8 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 min value is outside of the allowed memory range",
++	.result_unpriv = REJECT,
+ 	.fixup_map_hash_8b = { 3 },
+ 	.result = ACCEPT,
+ },
+@@ -609,6 +617,8 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 min value is outside of the allowed memory range",
++	.result_unpriv = REJECT,
+ 	.fixup_map_hash_8b = { 3 },
+ 	.result = ACCEPT,
+ },
+@@ -674,6 +684,8 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 min value is outside of the allowed memory range",
++	.result_unpriv = REJECT,
+ 	.fixup_map_hash_8b = { 3 },
+ 	.result = ACCEPT,
+ },
+@@ -695,6 +707,8 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 min value is outside of the allowed memory range",
++	.result_unpriv = REJECT,
+ 	.fixup_map_hash_8b = { 3 },
+ 	.result = ACCEPT,
+ },
+diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
+index 5cf361d8eb1cc..721ec9391be5a 100644
+--- a/tools/testing/selftests/bpf/verifier/dead_code.c
++++ b/tools/testing/selftests/bpf/verifier/dead_code.c
+@@ -8,6 +8,8 @@
+ 	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R9 !read_ok",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 7,
+ },
+diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
+index bd5cae4a7f733..1c857b2fbdf0a 100644
+--- a/tools/testing/selftests/bpf/verifier/jmp32.c
++++ b/tools/testing/selftests/bpf/verifier/jmp32.c
+@@ -87,6 +87,8 @@
+ 	BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R9 !read_ok",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ },
+ {
+@@ -150,6 +152,8 @@
+ 	BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R9 !read_ok",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ },
+ {
+@@ -213,6 +217,8 @@
+ 	BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R9 !read_ok",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ },
+ {
+@@ -280,6 +286,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 2,
+ },
+@@ -348,6 +356,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 2,
+ },
+@@ -416,6 +426,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 2,
+ },
+@@ -484,6 +496,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 2,
+ },
+@@ -552,6 +566,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 2,
+ },
+@@ -620,6 +636,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 2,
+ },
+@@ -688,6 +706,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 2,
+ },
+@@ -756,6 +776,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 2,
+ },
+diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c
+index 8dcd4e0383d57..11fc68da735ea 100644
+--- a/tools/testing/selftests/bpf/verifier/jset.c
++++ b/tools/testing/selftests/bpf/verifier/jset.c
+@@ -82,8 +82,8 @@
+ 	BPF_EXIT_INSN(),
+ 	},
+ 	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+-	.retval_unpriv = 1,
+-	.result_unpriv = ACCEPT,
++	.errstr_unpriv = "R9 !read_ok",
++	.result_unpriv = REJECT,
+ 	.retval = 1,
+ 	.result = ACCEPT,
+ },
+@@ -141,7 +141,8 @@
+ 	BPF_EXIT_INSN(),
+ 	},
+ 	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+-	.result_unpriv = ACCEPT,
++	.errstr_unpriv = "R9 !read_ok",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ },
+ {
+@@ -162,6 +163,7 @@
+ 	BPF_EXIT_INSN(),
+ 	},
+ 	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+-	.result_unpriv = ACCEPT,
++	.errstr_unpriv = "R9 !read_ok",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ },
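
All of these selftest updates, above and below, follow one pattern: a program the unprivileged verifier used to accept is now expected to be rejected with a specific message, while the privileged result stays ACCEPT. For illustration, a hypothetical table entry showing how the paired fields read together; the entry itself is not from the patch, and the error string is copied from the neighbouring tests:

    {
        "illustrative entry: rejected unprivileged, accepted privileged",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        /* the unprivileged load must fail with this verifier message */
        .errstr_unpriv = "R9 !read_ok",
        .result_unpriv = REJECT,
        /* the privileged load succeeds and the program returns 0 */
        .result = ACCEPT,
        .retval = 0,
    },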
+diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
+index bd436df5cc326..111801aea5e35 100644
+--- a/tools/testing/selftests/bpf/verifier/unpriv.c
++++ b/tools/testing/selftests/bpf/verifier/unpriv.c
+@@ -420,6 +420,8 @@
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R7 invalid mem access 'inv'",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 0,
+ },
+diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+index 7ae2859d495c5..a3e593ddfafc9 100644
+--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
++++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+@@ -120,7 +120,7 @@
+ 	.fixup_map_array_48b = { 1 },
+ 	.result = ACCEPT,
+ 	.result_unpriv = REJECT,
+-	.errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
++	.errstr_unpriv = "R2 pointer comparison prohibited",
+ 	.retval = 0,
+ },
+ {
+@@ -159,7 +159,8 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	// fake-dead code; targeted from branch A to
+-	// prevent dead code sanitization
++	// prevent dead code sanitization, rejected
++	// via branch B however
+ 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+@@ -167,7 +168,7 @@
+ 	.fixup_map_array_48b = { 1 },
+ 	.result = ACCEPT,
+ 	.result_unpriv = REJECT,
+-	.errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
++	.errstr_unpriv = "R0 invalid mem access 'inv'",
+ 	.retval = 0,
+ },
+ {
+diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
+index 2f0e4365f61bd..8b90256bca96d 100644
+--- a/tools/testing/selftests/kvm/lib/kvm_util.c
++++ b/tools/testing/selftests/kvm/lib/kvm_util.c
+@@ -58,7 +58,7 @@ int kvm_check_cap(long cap)
+ 		exit(KSFT_SKIP);
+ 
+ 	ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
+-	TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
++	TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
+ 		"  rc: %i errno: %i", ret, errno);
+ 
+ 	close(kvm_fd);
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 5cabc6c748db1..4cce5735271ef 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1919,6 +1919,13 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
+ 	return true;
+ }
+ 
++static int kvm_try_get_pfn(kvm_pfn_t pfn)
++{
++	if (kvm_is_reserved_pfn(pfn))
++		return 1;
++	return get_page_unless_zero(pfn_to_page(pfn));
++}
++
+ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
+ 			       unsigned long addr, bool *async,
+ 			       bool write_fault, bool *writable,
+@@ -1968,13 +1975,21 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
+ 	 * Whoever called remap_pfn_range is also going to call e.g.
+ 	 * unmap_mapping_range before the underlying pages are freed,
+ 	 * causing a call to our MMU notifier.
++	 *
++	 * Certain IO or PFNMAP mappings can be backed with valid
++	 * struct pages, but be allocated without refcounting, e.g.
++	 * tail pages of non-compound higher order allocations, which
++	 * would then underflow the refcount when the caller does the
++	 * required put_page. Don't allow those pages here.
+ 	 */ 
+-	kvm_get_pfn(pfn);
++	if (!kvm_try_get_pfn(pfn))
++		r = -EFAULT;
+ 
+ out:
+ 	pte_unmap_unlock(ptep, ptl);
+ 	*p_pfn = pfn;
+-	return 0;
++
++	return r;
+ }
+ 
+ /*
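
The new kvm_try_get_pfn() helper encodes a simple rule: reserved PFNs are always safe to hand out, while anything else may be used only if get_page_unless_zero() can still take a reference, so the caller's eventual put_page() can no longer underflow a zero refcount. A sketch of the caller-side pairing this change assumes (not part of the patch; kvm_release_pfn_clean() is the usual release helper):

    kvm_pfn_t pfn;
    int r;

    r = hva_to_pfn_remapped(vma, addr, &async, write_fault, &writable, &pfn);
    if (r == 0) {
        /* ... map or use the pfn ... */
        kvm_release_pfn_clean(pfn);   /* balances the reference taken above */
    }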


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-23 15:15 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-23 15:15 UTC (permalink / raw
  To: gentoo-commits

commit:     1a7f084fb13953ad56900d4a19ac2e2aecf413af
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 23 15:14:53 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 23 15:14:53 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1a7f084f

Linux patch 5.12.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1012_linux-5.12.13.patch | 6477 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6481 insertions(+)

diff --git a/0000_README b/0000_README
index 07044b3..34c90d1 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-5.12.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.12
 
+Patch:  1012_linux-5.12.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-5.12.13.patch b/1012_linux-5.12.13.patch
new file mode 100644
index 0000000..ef75d57
--- /dev/null
+++ b/1012_linux-5.12.13.patch
@@ -0,0 +1,6477 @@
+diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst
+index 03f294a638bd8..d3028554b1e9c 100644
+--- a/Documentation/vm/slub.rst
++++ b/Documentation/vm/slub.rst
+@@ -181,7 +181,7 @@ SLUB Debug output
+ Here is a sample of slub debug output::
+ 
+  ====================================================================
+- BUG kmalloc-8: Redzone overwritten
++ BUG kmalloc-8: Right Redzone overwritten
+  --------------------------------------------------------------------
+ 
+  INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
+@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
+  INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
+  INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
+ 
+- Bytes b4 0xc90f6d10:  00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+-   Object 0xc90f6d20:  31 30 31 39 2e 30 30 35                         1019.005
+-  Redzone 0xc90f6d28:  00 cc cc cc                                     .
+-  Padding 0xc90f6d50:  5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
++ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
++ Object   (0xc90f6d20): 31 30 31 39 2e 30 30 35                         1019.005
++ Redzone  (0xc90f6d28): 00 cc cc cc                                     .
++ Padding  (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
+ 
+    [<c010523d>] dump_trace+0x63/0x1eb
+    [<c01053df>] show_trace_log_lvl+0x1a/0x2f
+diff --git a/Makefile b/Makefile
+index e0a252b644630..d2fe36db78aed 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+@@ -913,11 +913,14 @@ CC_FLAGS_LTO	+= -fvisibility=hidden
+ # Limit inlining across translation units to reduce binary size
+ KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
+ 
+-# Check for frame size exceeding threshold during prolog/epilog insertion.
++# Check for frame size exceeding threshold during prolog/epilog insertion
++# when using lld < 13.0.0.
+ ifneq ($(CONFIG_FRAME_WARN),0)
++ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+ KBUILD_LDFLAGS	+= -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
+ endif
+ endif
++endif
+ 
+ ifdef CONFIG_LTO
+ KBUILD_CFLAGS	+= -fno-lto $(CC_FLAGS_LTO)
+diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
+index 95f8a4380e110..7a5449dfcb290 100644
+--- a/arch/arc/include/uapi/asm/sigcontext.h
++++ b/arch/arc/include/uapi/asm/sigcontext.h
+@@ -18,6 +18,7 @@
+  */
+ struct sigcontext {
+ 	struct user_regs_struct regs;
++	struct user_regs_arcv2 v2abi;
+ };
+ 
+ #endif /* _ASM_ARC_SIGCONTEXT_H */
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index fdbe06c98895e..4868bdebf586d 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -61,6 +61,41 @@ struct rt_sigframe {
+ 	unsigned int sigret_magic;
+ };
+ 
++static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++{
++	int err = 0;
++#ifndef CONFIG_ISA_ARCOMPACT
++	struct user_regs_arcv2 v2abi;
++
++	v2abi.r30 = regs->r30;
++#ifdef CONFIG_ARC_HAS_ACCL_REGS
++	v2abi.r58 = regs->r58;
++	v2abi.r59 = regs->r59;
++#else
++	v2abi.r58 = v2abi.r59 = 0;
++#endif
++	err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
++#endif
++	return err;
++}
++
++static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++{
++	int err = 0;
++#ifndef CONFIG_ISA_ARCOMPACT
++	struct user_regs_arcv2 v2abi;
++
++	err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
++
++	regs->r30 = v2abi.r30;
++#ifdef CONFIG_ARC_HAS_ACCL_REGS
++	regs->r58 = v2abi.r58;
++	regs->r59 = v2abi.r59;
++#endif
++#endif
++	return err;
++}
++
+ static int
+ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ 	       sigset_t *set)
+@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ 
+ 	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
+ 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
++
++	if (is_isa_arcv2())
++		err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
++
+ 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+ 
+ 	return err ? -EFAULT : 0;
+@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ 	err |= __copy_from_user(&uregs.scratch,
+ 				&(sf->uc.uc_mcontext.regs.scratch),
+ 				sizeof(sf->uc.uc_mcontext.regs.scratch));
++
++	if (is_isa_arcv2())
++		err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
++
+ 	if (err)
+ 		return -EFAULT;
+ 
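
Note the layout choice: v2abi is appended to the end of struct sigcontext, so existing userspace that only touches regs keeps working, and the extra state is saved and restored only when is_isa_arcv2() holds. A hypothetical userspace view, assuming the C library mirrors the kernel's sigcontext layout (handler name and field access are illustrative only):

    #include <signal.h>
    #include <ucontext.h>

    static void handler(int sig, siginfo_t *info, void *uc_void)
    {
        ucontext_t *uc = uc_void;
        /* regs has always been present; v2abi is the newly saved block */
        unsigned long r30 = uc->uc_mcontext.v2abi.r30;
        (void)sig; (void)info; (void)r30;
    }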
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 766f064f00fbf..167fde7f2fce1 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -2242,7 +2242,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ 	bool use_siar = regs_use_siar(regs);
+ 	unsigned long siar = mfspr(SPRN_SIAR);
+ 
+-	if (ppmu->flags & PPMU_P10_DD1) {
++	if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
+ 		if (siar)
+ 			return siar;
+ 		else
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index d9522fc35ca5a..4f116be9152f5 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -54,11 +54,11 @@ config RISCV
+ 	select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+ 	select HANDLE_DOMAIN_IRQ
+ 	select HAVE_ARCH_AUDITSYSCALL
+-	select HAVE_ARCH_JUMP_LABEL
+-	select HAVE_ARCH_JUMP_LABEL_RELATIVE
++	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
++	select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
+ 	select HAVE_ARCH_KASAN if MMU && 64BIT
+ 	select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
+-	select HAVE_ARCH_KGDB
++	select HAVE_ARCH_KGDB if !XIP_KERNEL
+ 	select HAVE_ARCH_KGDB_QXFER_PKT
+ 	select HAVE_ARCH_MMAP_RND_BITS if MMU
+ 	select HAVE_ARCH_SECCOMP_FILTER
+@@ -73,9 +73,9 @@ config RISCV
+ 	select HAVE_GCC_PLUGINS
+ 	select HAVE_GENERIC_VDSO if MMU && 64BIT
+ 	select HAVE_IRQ_TIME_ACCOUNTING
+-	select HAVE_KPROBES
+-	select HAVE_KPROBES_ON_FTRACE
+-	select HAVE_KRETPROBES
++	select HAVE_KPROBES if !XIP_KERNEL
++	select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
++	select HAVE_KRETPROBES if !XIP_KERNEL
+ 	select HAVE_PCI
+ 	select HAVE_PERF_EVENTS
+ 	select HAVE_PERF_REGS
+@@ -227,11 +227,11 @@ config ARCH_RV64I
+ 	bool "RV64I"
+ 	select 64BIT
+ 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+-	select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
++	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
+ 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+-	select HAVE_FTRACE_MCOUNT_RECORD
++	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ 	select HAVE_FUNCTION_GRAPH_TRACER
+-	select HAVE_FUNCTION_TRACER
++	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
+ 	select SWIOTLB if MMU
+ 
+ endchoice
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 12de7a9c85b35..9cc71ca9a88f9 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -651,9 +651,9 @@ ENDPROC(stack_overflow)
+ .Lcleanup_sie_mcck:
+ 	larl	%r13,.Lsie_entry
+ 	slgr	%r9,%r13
+-	larl	%r13,.Lsie_skip
++	lghi	%r13,.Lsie_skip - .Lsie_entry
+ 	clgr	%r9,%r13
+-	jh	.Lcleanup_sie_int
++	jhe	.Lcleanup_sie_int
+ 	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+ .Lcleanup_sie_int:
+ 	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index ceeba9f631722..fdee23ea4e173 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -578,10 +578,17 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
+ 	 * PKRU state is switched eagerly because it needs to be valid before we
+ 	 * return to userland e.g. for a copy_to_user() operation.
+ 	 */
+-	if (current->mm) {
++	if (!(current->flags & PF_KTHREAD)) {
++		/*
++		 * If the PKRU bit in xsave.header.xfeatures is not set,
++		 * then the PKRU component was in init state, which means
++		 * XRSTOR will set PKRU to 0. If the bit is not set then
++		 * get_xsave_addr() will return NULL because the PKRU value
++		 * in memory is not valid. This means pkru_val has to be
++		 * set to 0 and not to init_pkru_value.
++		 */
+ 		pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
+-		if (pk)
+-			pkru_val = pk->pkru;
++		pkru_val = pk ? pk->pkru : 0;
+ 	}
+ 	__write_pkru(pkru_val);
+ }
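
The decision this hunk implements can be restated compactly: kernel threads never return to userspace through this path, so they keep the default PKRU; for user tasks the value comes from the xsave buffer, and a PKRU component left in init state must map to 0 because that is what XRSTOR would write. A minimal restatement as a helper (a sketch, not the kernel's exact code; init_pkru_value is the default used when the new branch is not taken):

    static inline u32 pick_pkru(struct fpu *new_fpu, bool is_kthread)
    {
        struct pkru_state *pk;

        if (is_kthread)
            return init_pkru_value;    /* no return to userspace */

        pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
        /* component in init state => XRSTOR writes 0, so mirror that */
        return pk ? pk->pkru : 0;
    }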
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index a4ec65317a7fa..ec3ae30547920 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -307,13 +307,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 		return 0;
+ 	}
+ 
+-	if (!access_ok(buf, size))
+-		return -EACCES;
++	if (!access_ok(buf, size)) {
++		ret = -EACCES;
++		goto out;
++	}
+ 
+-	if (!static_cpu_has(X86_FEATURE_FPU))
+-		return fpregs_soft_set(current, NULL,
+-				       0, sizeof(struct user_i387_ia32_struct),
+-				       NULL, buf) != 0;
++	if (!static_cpu_has(X86_FEATURE_FPU)) {
++		ret = fpregs_soft_set(current, NULL, 0,
++				      sizeof(struct user_i387_ia32_struct),
++				      NULL, buf);
++		goto out;
++	}
+ 
+ 	if (use_xsave()) {
+ 		struct _fpx_sw_bytes fx_sw_user;
+@@ -369,6 +373,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 			fpregs_unlock();
+ 			return 0;
+ 		}
++
++		/*
++		 * The above did an FPU restore operation, restricted to
++		 * the user portion of the registers, and failed, but the
++		 * microcode might have modified the FPU registers
++		 * nevertheless.
++		 *
++		 * If the FPU registers do not belong to current, then
++		 * invalidate the FPU register state otherwise the task might
++		 * preempt current and return to user space with corrupted
++		 * FPU registers.
++		 *
++		 * In case current owns the FPU registers then no further
++		 * action is required. The fixup below will handle it
++		 * correctly.
++		 */
++		if (test_thread_flag(TIF_NEED_FPU_LOAD))
++			__cpu_invalidate_fpregs_state();
++
+ 		fpregs_unlock();
+ 	} else {
+ 		/*
+@@ -377,7 +400,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 		 */
+ 		ret = __copy_from_user(&env, buf, sizeof(env));
+ 		if (ret)
+-			goto err_out;
++			goto out;
+ 		envp = &env;
+ 	}
+ 
+@@ -405,16 +428,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 	if (use_xsave() && !fx_only) {
+ 		u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
+ 
+-		if (using_compacted_format()) {
+-			ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
+-		} else {
+-			ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
+-
+-			if (!ret && state_size > offsetof(struct xregs_state, header))
+-				ret = validate_user_xstate_header(&fpu->state.xsave.header);
+-		}
++		ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
+ 		if (ret)
+-			goto err_out;
++			goto out;
+ 
+ 		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
+ 					      fx_only);
+@@ -434,7 +450,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 		ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
+ 		if (ret) {
+ 			ret = -EFAULT;
+-			goto err_out;
++			goto out;
+ 		}
+ 
+ 		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
+@@ -452,7 +468,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 	} else {
+ 		ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
+ 		if (ret)
+-			goto err_out;
++			goto out;
+ 
+ 		fpregs_lock();
+ 		ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
+@@ -463,7 +479,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 		fpregs_deactivate(fpu);
+ 	fpregs_unlock();
+ 
+-err_out:
++out:
+ 	if (ret)
+ 		fpu__clear_user_states(fpu);
+ 	return ret;
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index fa023f3feb25d..43013ac0fd4d9 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1410,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
+ 	if (!apic_x2apic_mode(apic))
+ 		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
+ 
++	if (alignment + len > 4)
++		return 1;
++
+ 	if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
+ 		return 1;
+ 
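
The added bounds check stops a read from running past the four architecturally defined bytes of an APIC register slot. A worked example, assuming (as in the surrounding function) alignment = offset & 0xf:

    /*
     * offset = 0x30, len = 4: alignment = 0, 0 + 4 = 4 <= 4 -> allowed
     * offset = 0x32, len = 4: alignment = 2, 2 + 4 = 6 >  4 -> rejected
     * offset = 0x32, len = 2: alignment = 2, 2 + 2 = 4 <= 4 -> allowed
     */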
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index cd0faa1876743..676ec0d1e6be4 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4726,9 +4726,33 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
+ 	context->inject_page_fault = kvm_inject_page_fault;
+ }
+ 
++static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
++{
++	union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
++
++	/*
++	 * Nested MMUs are used only for walking L2's gva->gpa, they never have
++	 * shadow pages of their own and so "direct" has no meaning.   Set it
++	 * to "true" to try to detect bogus usage of the nested MMU.
++	 */
++	role.base.direct = true;
++
++	if (!is_paging(vcpu))
++		role.base.level = 0;
++	else if (is_long_mode(vcpu))
++		role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
++						       PT64_ROOT_4LEVEL;
++	else if (is_pae(vcpu))
++		role.base.level = PT32E_ROOT_LEVEL;
++	else
++		role.base.level = PT32_ROOT_LEVEL;
++
++	return role;
++}
++
+ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+ {
+-	union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
++	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
+ 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
+ 
+ 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index cf37205784297..a6ca7e657af27 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6991,7 +6991,10 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
+ 
+ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+ {
+-	emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
++	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++
++	vcpu->arch.hflags = emul_flags;
++	kvm_mmu_reset_context(vcpu);
+ }
+ 
+ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+@@ -8147,6 +8150,7 @@ void kvm_arch_exit(void)
+ 	kvm_x86_ops.hardware_enable = NULL;
+ 	kvm_mmu_module_exit();
+ 	free_percpu(user_return_msrs);
++	kmem_cache_destroy(x86_emulator_cache);
+ 	kmem_cache_destroy(x86_fpu_cache);
+ #ifdef CONFIG_KVM_XEN
+ 	static_key_deferred_flush(&kvm_xen_enabled);
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 9e5ccc56f8e07..356b746dfbe7a 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
+ 	if (!IS_ENABLED(CONFIG_EFI))
+ 		return;
+ 
+-	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
++	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
++	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
++	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
+ 		desc->flags |= IORES_MAP_ENCRYPTED;
+ }
+ 
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 5eb4dc2b97dac..e94da744386f3 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
+ 
+ 		/* make sure all non-reserved blocks are inside the limits */
+ 		bi->start = max(bi->start, low);
+-		bi->end = min(bi->end, high);
++
++		/* preserve info for non-RAM areas above 'max_pfn': */
++		if (bi->end > high) {
++			numa_add_memblk_to(bi->nid, high, bi->end,
++					   &numa_reserved_meminfo);
++			bi->end = high;
++		}
+ 
+ 		/* and there's no empty block */
+ 		if (bi->start >= bi->end)
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 0c2827fd8c195..03b1b03349477 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -59,6 +59,7 @@ config DMA_OF
+ #devices
+ config ALTERA_MSGDMA
+ 	tristate "Altera / Intel mSGDMA Engine"
++	depends on HAS_IOMEM
+ 	select DMA_ENGINE
+ 	help
+ 	  Enable support for Altera / Intel mSGDMA controller.
+diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+index 4ec909e0b8106..4ae057922ef1f 100644
+--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
++++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+ 	}
+ 
+ 	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
++		err = -EINVAL;
+ 		dev_err(dev, "DPDMAI major version mismatch\n"
+ 			     "Found %u.%u, supported version is %u.%u\n",
+ 				priv->dpdmai_attr.version.major,
+@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+ 	}
+ 
+ 	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
++		err = -EINVAL;
+ 		dev_err(dev, "DPDMAI minor version mismatch\n"
+ 			     "Found %u.%u, supported version is %u.%u\n",
+ 				priv->dpdmai_attr.version.major,
+@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
+ 		ppriv->store =
+ 			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
+ 		if (!ppriv->store) {
++			err = -ENOMEM;
+ 			dev_err(dev, "dpaa2_io_store_create() failed\n");
+ 			goto err_store;
+ 		}
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 59f2104ffc771..eb41bb9df0fd9 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -218,6 +218,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
+ 		engine->idxd = idxd;
+ 		device_initialize(&engine->conf_dev);
+ 		engine->conf_dev.parent = &idxd->conf_dev;
++		engine->conf_dev.bus = &dsa_bus_type;
+ 		engine->conf_dev.type = &idxd_engine_device_type;
+ 		rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
+ 		if (rc < 0) {
+@@ -718,6 +719,7 @@ module_init(idxd_init_module);
+ 
+ static void __exit idxd_exit_module(void)
+ {
++	idxd_unregister_driver();
+ 	pci_unregister_driver(&idxd_pci_driver);
+ 	idxd_cdev_remove();
+ 	idxd_unregister_bus_type();
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index fd8d2bc3be9f5..110de8a600588 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+ 	for (i = 0; i < len / period_len; i++) {
+ 		desc = pl330_get_desc(pch);
+ 		if (!desc) {
++			unsigned long iflags;
++
+ 			dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
+ 				__func__, __LINE__);
+ 
+ 			if (!first)
+ 				return NULL;
+ 
+-			spin_lock_irqsave(&pl330->pool_lock, flags);
++			spin_lock_irqsave(&pl330->pool_lock, iflags);
+ 
+ 			while (!list_empty(&first->node)) {
+ 				desc = list_entry(first->node.next,
+@@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+ 
+ 			list_move_tail(&first->node, &pl330->desc_pool);
+ 
+-			spin_unlock_irqrestore(&pl330->pool_lock, flags);
++			spin_unlock_irqrestore(&pl330->pool_lock, iflags);
+ 
+ 			return NULL;
+ 		}
+diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
+index 365f94eb3b081..3f926a653bd88 100644
+--- a/drivers/dma/qcom/Kconfig
++++ b/drivers/dma/qcom/Kconfig
+@@ -33,6 +33,7 @@ config QCOM_GPI_DMA
+ 
+ config QCOM_HIDMA_MGMT
+ 	tristate "Qualcomm Technologies HIDMA Management support"
++	depends on HAS_IOMEM
+ 	select DMA_ENGINE
+ 	help
+ 	  Enable support for the Qualcomm Technologies HIDMA Management.
+diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
+index f8ffa02e279ff..ba46a0a15a936 100644
+--- a/drivers/dma/sf-pdma/Kconfig
++++ b/drivers/dma/sf-pdma/Kconfig
+@@ -1,5 +1,6 @@
+ config SF_PDMA
+ 	tristate "Sifive PDMA controller driver"
++	depends on HAS_IOMEM
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+ 	help
+diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
+index 265d7c07b348e..e1827393143f1 100644
+--- a/drivers/dma/ste_dma40.c
++++ b/drivers/dma/ste_dma40.c
+@@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
+ 
+ 	kfree(base->lcla_pool.base_unaligned);
+ 
++	if (base->lcpa_base)
++		iounmap(base->lcpa_base);
++
+ 	if (base->phy_lcpa)
+ 		release_mem_region(base->phy_lcpa,
+ 				   base->lcpa_size);
+diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
+index 70b29bd079c9f..ff7dfb3fdeb47 100644
+--- a/drivers/dma/xilinx/xilinx_dpdma.c
++++ b/drivers/dma/xilinx/xilinx_dpdma.c
+@@ -1459,7 +1459,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
+  */
+ static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
+ {
+-	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
++	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
+ 	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
+ }
+ 
+@@ -1596,6 +1596,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ 	return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
+ }
+ 
++static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
++{
++	unsigned int i;
++	void __iomem *reg;
++
++	/* Disable all interrupts */
++	xilinx_dpdma_disable_irq(xdev);
++
++	/* Stop all channels */
++	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
++		reg = xdev->reg + XILINX_DPDMA_CH_BASE
++				+ XILINX_DPDMA_CH_OFFSET * i;
++		dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
++	}
++
++	/* Clear the interrupt status registers */
++	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
++	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
++}
++
+ static int xilinx_dpdma_probe(struct platform_device *pdev)
+ {
+ 	struct xilinx_dpdma_device *xdev;
+@@ -1622,6 +1642,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
+ 	if (IS_ERR(xdev->reg))
+ 		return PTR_ERR(xdev->reg);
+ 
++	dpdma_hw_init(xdev);
++
+ 	xdev->irq = platform_get_irq(pdev, 0);
+ 	if (xdev->irq < 0) {
+ 		dev_err(xdev->dev, "failed to get platform irq\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 2342c5d216f9b..72d23651501d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -6769,8 +6769,12 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
+ 	if (ring->use_doorbell) {
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
+ 			(adev->doorbell_index.kiq * 2) << 2);
++		/* If GC has entered CGPG, ringing a doorbell beyond the first
++		 * page doesn't wake up GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER
++		 * to work around this issue.
++		 */
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+-			(adev->doorbell_index.userqueue_end * 2) << 2);
++			(adev->doorbell.size - 4));
+ 	}
+ 
+ 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index d2c020a91c0be..1fdfb7783404e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3623,8 +3623,12 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
+ 	if (ring->use_doorbell) {
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
+ 					(adev->doorbell_index.kiq * 2) << 2);
++		/* If GC has entered CGPG, ringing a doorbell beyond the first
++		 * page doesn't wake up GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER
++		 * to work around this issue.
++		 */
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+-					(adev->doorbell_index.userqueue_end * 2) << 2);
++					(adev->doorbell.size - 4));
+ 	}
+ 
+ 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index dfa9fdbe98da2..06bb24d7a9fee 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
+ 	if (rdev->uvd.vcpu_bo == NULL)
+ 		return -EINVAL;
+ 
+-	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
++	memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+ 
+ 	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ 	size -= rdev->uvd_fw->size;
+@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
+ 	ptr = rdev->uvd.cpu_addr;
+ 	ptr += rdev->uvd_fw->size;
+ 
+-	memset(ptr, 0, size);
++	memset_io((void __iomem *)ptr, 0, size);
+ 
+ 	return 0;
+ }
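
The swapped-in accessors matter because rdev->uvd.cpu_addr points into an __iomem mapping of VRAM, and the plain string routines may use CPU instructions that are not valid on I/O mappings. The general idiom, as a sketch of what the two fixed call sites do:

    void __iomem *dst = (void __iomem *)rdev->uvd.cpu_addr;  /* I/O mapping */
    const void *src = rdev->uvd_fw->data;                    /* normal memory */
    size_t tail = radeon_bo_size(rdev->uvd.vcpu_bo) - rdev->uvd_fw->size;

    memcpy_toio(dst, src, rdev->uvd_fw->size);     /* not memcpy() */
    memset_io(dst + rdev->uvd_fw->size, 0, tail);  /* not memset() */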
+diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+index bbdfd5e26ec88..f75fb157f2ff7 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
+ 		goto err_disable_clk_tmds;
+ 	}
+ 
+-	ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
++	ret = sun8i_hdmi_phy_get(hdmi, phy_node);
+ 	of_node_put(phy_node);
+ 	if (ret) {
+ 		dev_err(dev, "Couldn't get the HDMI PHY\n");
+@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
+ 
+ cleanup_encoder:
+ 	drm_encoder_cleanup(encoder);
+-	sun8i_hdmi_phy_remove(hdmi);
+ err_disable_clk_tmds:
+ 	clk_disable_unprepare(hdmi->clk_tmds);
+ err_assert_ctrl_reset:
+@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
+ 	struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
+ 
+ 	dw_hdmi_unbind(hdmi->hdmi);
+-	sun8i_hdmi_phy_remove(hdmi);
+ 	clk_disable_unprepare(hdmi->clk_tmds);
+ 	reset_control_assert(hdmi->rst_ctrl);
+ 	gpiod_set_value(hdmi->ddc_en, 0);
+@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
+ 		.of_match_table = sun8i_dw_hdmi_dt_ids,
+ 	},
+ };
+-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
++
++static int __init sun8i_dw_hdmi_init(void)
++{
++	int ret;
++
++	ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
++	if (ret)
++		return ret;
++
++	ret = platform_driver_register(&sun8i_hdmi_phy_driver);
++	if (ret) {
++		platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
++		return ret;
++	}
++
++	return ret;
++}
++
++static void __exit sun8i_dw_hdmi_exit(void)
++{
++	platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
++	platform_driver_unregister(&sun8i_hdmi_phy_driver);
++}
++
++module_init(sun8i_dw_hdmi_init);
++module_exit(sun8i_dw_hdmi_exit);
+ 
+ MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+ MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
+diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+index d4b55af0592f8..74f6ed0e25709 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
+ 	struct gpio_desc		*ddc_en;
+ };
+ 
++extern struct platform_driver sun8i_hdmi_phy_driver;
++
+ static inline struct sun8i_dw_hdmi *
+ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
+ {
+ 	return container_of(encoder, struct sun8i_dw_hdmi, encoder);
+ }
+ 
+-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
+-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
++int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
+ 
+ void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+ void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
+diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+index 9994edf675096..c9239708d398c 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
++++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+@@ -5,6 +5,7 @@
+ 
+ #include <linux/delay.h>
+ #include <linux/of_address.h>
++#include <linux/of_platform.h>
+ 
+ #include "sun8i_dw_hdmi.h"
+ 
+@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
+ 	{ /* sentinel */ }
+ };
+ 
+-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
++int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
++{
++	struct platform_device *pdev = of_find_device_by_node(node);
++	struct sun8i_hdmi_phy *phy;
++
++	if (!pdev)
++		return -EPROBE_DEFER;
++
++	phy = platform_get_drvdata(pdev);
++	if (!phy)
++		return -EPROBE_DEFER;
++
++	hdmi->phy = phy;
++
++	put_device(&pdev->dev);
++
++	return 0;
++}
++
++static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
+ {
+ 	const struct of_device_id *match;
+-	struct device *dev = hdmi->dev;
++	struct device *dev = &pdev->dev;
++	struct device_node *node = dev->of_node;
+ 	struct sun8i_hdmi_phy *phy;
+ 	struct resource res;
+ 	void __iomem *regs;
+@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+ 		clk_prepare_enable(phy->clk_phy);
+ 	}
+ 
+-	hdmi->phy = phy;
++	platform_set_drvdata(pdev, phy);
+ 
+ 	return 0;
+ 
+@@ -728,9 +749,9 @@ err_put_clk_bus:
+ 	return ret;
+ }
+ 
+-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
++static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
+ {
+-	struct sun8i_hdmi_phy *phy = hdmi->phy;
++	struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
+ 
+ 	clk_disable_unprepare(phy->clk_mod);
+ 	clk_disable_unprepare(phy->clk_bus);
+@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+ 	clk_put(phy->clk_pll1);
+ 	clk_put(phy->clk_mod);
+ 	clk_put(phy->clk_bus);
++	return 0;
+ }
++
++struct platform_driver sun8i_hdmi_phy_driver = {
++	.probe  = sun8i_hdmi_phy_probe,
++	.remove = sun8i_hdmi_phy_remove,
++	.driver = {
++		.name = "sun8i-hdmi-phy",
++		.of_match_table = sun8i_hdmi_phy_of_table,
++	},
++};
+diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
+index 25aac40f2764a..919877970ae3b 100644
+--- a/drivers/hwmon/scpi-hwmon.c
++++ b/drivers/hwmon/scpi-hwmon.c
+@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
+ 
+ 	scpi_scale_reading(&value, sensor);
+ 
++	/*
++	 * Temperature sensor values are treated as signed values based on
++	 * observation even though that is not explicitly specified, and
++	 * because an unsigned u64 temperature does not really make practical
++	 * sense, especially when the temperature is below zero degrees Celsius.
++	 */
++	if (sensor->info.class == TEMPERATURE)
++		return sprintf(buf, "%lld\n", (s64)value);
++
+ 	return sprintf(buf, "%llu\n", value);
+ }
+ 
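
The cast works because the firmware delivers the reading in two's complement, so reinterpreting the same 64-bit pattern as signed recovers sub-zero temperatures. A standalone demonstration of the two format specifiers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t value = (uint64_t)-10;  /* a reading of -10 after scaling */

        printf("%llu\n", (unsigned long long)value);  /* 18446744073709551606 */
        printf("%lld\n", (long long)(int64_t)value);  /* -10 */
        return 0;
    }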
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 00404024d7cd5..fea237838bb0a 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -642,11 +642,45 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+ 		nmi_exit();
+ }
+ 
++static u32 do_read_iar(struct pt_regs *regs)
++{
++	u32 iar;
++
++	if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
++		u64 pmr;
++
++		/*
++		 * We were in a context with IRQs disabled. However, the
++		 * entry code has set PMR to a value that allows any
++		 * interrupt to be acknowledged, and not just NMIs. This can
++		 * lead to surprising effects if the NMI has been retired in
++		 * the meantime while an IRQ is pending. The IRQ
++		 * would then be taken in NMI context, something that nobody
++		 * wants to debug twice.
++		 *
++		 * Until we sort this, drop PMR again to a level that will
++		 * actually only allow NMIs before reading IAR, and then
++		 * restore it to what it was.
++		 */
++		pmr = gic_read_pmr();
++		gic_pmr_mask_irqs();
++		isb();
++
++		iar = gic_read_iar();
++
++		gic_write_pmr(pmr);
++	} else {
++		iar = gic_read_iar();
++	}
++
++	return iar;
++}
++
+ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+ {
+ 	u32 irqnr;
+ 
+-	irqnr = gic_read_iar();
++	irqnr = do_read_iar(regs);
+ 
+ 	/* Check for special IDs first */
+ 	if ((irqnr >= 1020 && irqnr <= 1023))
+diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
+index 1f649d1780107..4484f9c079b75 100644
+--- a/drivers/net/can/usb/mcba_usb.c
++++ b/drivers/net/can/usb/mcba_usb.c
+@@ -82,6 +82,8 @@ struct mcba_priv {
+ 	bool can_ka_first_pass;
+ 	bool can_speed_check;
+ 	atomic_t free_ctx_cnt;
++	void *rxbuf[MCBA_MAX_RX_URBS];
++	dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
+ };
+ 
+ /* CAN frame */
+@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
+ 	for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
+ 		struct urb *urb = NULL;
+ 		u8 *buf;
++		dma_addr_t buf_dma;
+ 
+ 		/* create a URB, and a buffer for it */
+ 		urb = usb_alloc_urb(0, GFP_KERNEL);
+@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
+ 		}
+ 
+ 		buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+-					 GFP_KERNEL, &urb->transfer_dma);
++					 GFP_KERNEL, &buf_dma);
+ 		if (!buf) {
+ 			netdev_err(netdev, "No memory left for USB buffer\n");
+ 			usb_free_urb(urb);
+@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
+ 		if (err) {
+ 			usb_unanchor_urb(urb);
+ 			usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+-					  buf, urb->transfer_dma);
++					  buf, buf_dma);
+ 			usb_free_urb(urb);
+ 			break;
+ 		}
+ 
++		priv->rxbuf[i] = buf;
++		priv->rxbuf_dma[i] = buf_dma;
++
+ 		/* Drop reference, USB core will take care of freeing it */
+ 		usb_free_urb(urb);
+ 	}
+@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)
+ 
+ static void mcba_urb_unlink(struct mcba_priv *priv)
+ {
++	int i;
++
+ 	usb_kill_anchored_urbs(&priv->rx_submitted);
++
++	for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
++		usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
++				  priv->rxbuf[i], priv->rxbuf_dma[i]);
++
+ 	usb_kill_anchored_urbs(&priv->tx_submitted);
+ }
+ 
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 102f2c91fdb85..20f8012bbe04a 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -236,36 +236,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
+ 				struct ena_tx_buffer *tx_info,
+ 				struct xdp_frame *xdpf,
+-				void **push_hdr,
+-				u32 *push_len)
++				struct ena_com_tx_ctx *ena_tx_ctx)
+ {
+ 	struct ena_adapter *adapter = xdp_ring->adapter;
+ 	struct ena_com_buf *ena_buf;
+-	dma_addr_t dma = 0;
++	int push_len = 0;
++	dma_addr_t dma;
++	void *data;
+ 	u32 size;
+ 
+ 	tx_info->xdpf = xdpf;
++	data = tx_info->xdpf->data;
+ 	size = tx_info->xdpf->len;
+-	ena_buf = tx_info->bufs;
+ 
+-	/* llq push buffer */
+-	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+-	*push_hdr = tx_info->xdpf->data;
++	if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
++		/* Designate part of the packet for LLQ */
++		push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
++
++		ena_tx_ctx->push_header = data;
++
++		size -= push_len;
++		data += push_len;
++	}
++
++	ena_tx_ctx->header_len = push_len;
+ 
+-	if (size - *push_len > 0) {
++	if (size > 0) {
+ 		dma = dma_map_single(xdp_ring->dev,
+-				     *push_hdr + *push_len,
+-				     size - *push_len,
++				     data,
++				     size,
+ 				     DMA_TO_DEVICE);
+ 		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
+ 			goto error_report_dma_error;
+ 
+-		tx_info->map_linear_data = 1;
+-		tx_info->num_of_bufs = 1;
+-	}
++		tx_info->map_linear_data = 0;
+ 
+-	ena_buf->paddr = dma;
+-	ena_buf->len = size;
++		ena_buf = tx_info->bufs;
++		ena_buf->paddr = dma;
++		ena_buf->len = size;
++
++		ena_tx_ctx->ena_bufs = ena_buf;
++		ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
++	}
+ 
+ 	return 0;
+ 
+@@ -274,10 +286,6 @@ error_report_dma_error:
+ 			  &xdp_ring->syncp);
+ 	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
+ 
+-	xdp_return_frame_rx_napi(tx_info->xdpf);
+-	tx_info->xdpf = NULL;
+-	tx_info->num_of_bufs = 0;
+-
+ 	return -EINVAL;
+ }
+ 
+@@ -289,8 +297,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+ 	struct ena_com_tx_ctx ena_tx_ctx = {};
+ 	struct ena_tx_buffer *tx_info;
+ 	u16 next_to_use, req_id;
+-	void *push_hdr;
+-	u32 push_len;
+ 	int rc;
+ 
+ 	next_to_use = xdp_ring->next_to_use;
+@@ -298,15 +304,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+ 	tx_info = &xdp_ring->tx_buffer_info[req_id];
+ 	tx_info->num_of_bufs = 0;
+ 
+-	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
++	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
+ 	if (unlikely(rc))
+ 		goto error_drop_packet;
+ 
+-	ena_tx_ctx.ena_bufs = tx_info->bufs;
+-	ena_tx_ctx.push_header = push_hdr;
+-	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+ 	ena_tx_ctx.req_id = req_id;
+-	ena_tx_ctx.header_len = push_len;
+ 
+ 	rc = ena_xmit_common(dev,
+ 			     xdp_ring,
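
The reworked helper now fills the TX context itself and designates a push header only when the ring really is in LLQ placement mode. A worked example with hypothetical numbers (tx_max_header_size = 96, frame length 300):

    /*
     * LLQ mode:  push_len = min(300, 96) = 96 bytes pushed inline,
     *            remaining 204 bytes DMA-mapped as one ena_buf,
     *            so header_len = 96 and num_bufs = 1.
     * Host mode: push_len = 0, all 300 bytes DMA-mapped,
     *            so header_len = 0 and num_bufs = 1.
     */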
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index 9e02f88645931..5e90df42b2013 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -1849,6 +1849,7 @@ out_free_netdev:
+ 	free_netdev(netdev);
+ out_pci_release:
+ 	pci_release_mem_regions(pdev);
++	pci_disable_pcie_error_reporting(pdev);
+ out_pci_disable:
+ 	pci_disable_device(pdev);
+ 	return err;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 027997c711aba..c118de27bc5c3 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7295,7 +7295,7 @@ skip_rdma:
+ 	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
+ 		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
+ 	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
+-	entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
++	entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
+ 	entries = roundup(entries, ctx->tqm_entries_multiple);
+ 	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
+ 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
+@@ -11573,6 +11573,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
+ 	bnxt_hwrm_coal_params_qcaps(bp);
+ }
+ 
++static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
++
+ static int bnxt_fw_init_one(struct bnxt *bp)
+ {
+ 	int rc;
+@@ -11587,6 +11589,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
+ 		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
+ 		return rc;
+ 	}
++	rc = bnxt_probe_phy(bp, false);
++	if (rc)
++		return rc;
+ 	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
+ 	if (rc)
+ 		return rc;
+@@ -12976,6 +12981,7 @@ init_err_pci_clean:
+ 	bnxt_hwrm_func_drv_unrgtr(bp);
+ 	bnxt_free_hwrm_short_cmd_req(bp);
+ 	bnxt_free_hwrm_resources(bp);
++	bnxt_ethtool_free(bp);
+ 	kfree(bp->fw_health);
+ 	bp->fw_health = NULL;
+ 	bnxt_cleanup_pci(bp);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+index 61ea3ec5c3fcc..83ed10ac86606 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
+ 		return ret;
+ 	}
+ 
+-	spin_lock_bh(&adap->win0_lock);
++	/* We have to RESET the chip/firmware because we need the
++	 * chip in uninitialized state for loading new PHY image.
++	 * Otherwise, the running firmware will only store the PHY
++	 * image in local RAM which will be lost after next reset.
++	 */
++	ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
++	if (ret < 0) {
++		dev_err(adap->pdev_dev,
++			"Set FW to RESET for flashing PHY FW failed. ret: %d\n",
++			ret);
++		return ret;
++	}
++
+ 	ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
+-	spin_unlock_bh(&adap->win0_lock);
+-	if (ret)
+-		dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
++	if (ret < 0) {
++		dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
++			ret);
++		return ret;
++	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
+@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
+ 						   u32 ftid)
+ {
+ 	struct tid_info *t = &adap->tids;
+-	struct filter_entry *f;
+ 
+-	if (ftid < t->nhpftids)
+-		f = &adap->tids.hpftid_tab[ftid];
+-	else if (ftid < t->nftids)
+-		f = &adap->tids.ftid_tab[ftid - t->nhpftids];
+-	else
+-		f = lookup_tid(&adap->tids, ftid);
++	if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
++		return &t->hpftid_tab[ftid - t->hpftid_base];
+ 
+-	return f;
++	if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
++		return &t->ftid_tab[ftid - t->ftid_base];
++
++	return lookup_tid(t, ftid);
+ }
+ 
+ static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
+@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
+ 	filter_id = filter_info->loc_array[cmd->fs.location];
+ 	f = cxgb4_get_filter_entry(adapter, filter_id);
+ 
++	if (f->fs.prio)
++		filter_id -= adapter->tids.hpftid_base;
++	else if (!f->fs.hash)
++		filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
++
+ 	ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
+ 	if (ret)
+ 		goto err;
+@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
+ 
+ 	filter_info = &adapter->ethtool_filters->port[pi->port_id];
+ 
++	if (fs.prio)
++		tid += adapter->tids.hpftid_base;
++	else if (!fs.hash)
++		tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
++
+ 	filter_info->loc_array[cmd->fs.location] = tid;
+ 	set_bit(cmd->fs.location, filter_info->bmap);
+ 	filter_info->in_use++;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index e664e05b9f026..5fbc087268dbe 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ 				      WORD_MASK, f->fs.nat_lip[3] |
+ 				      f->fs.nat_lip[2] << 8 |
+ 				      f->fs.nat_lip[1] << 16 |
+-				      (u64)f->fs.nat_lip[0] << 25, 1);
++				      (u64)f->fs.nat_lip[0] << 24, 1);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 1f601de02e706..762113a04dde6 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)
+ 
+ 	/* Load PHY Firmware onto adapter.
+ 	 */
+-	spin_lock_bh(&adap->win0_lock);
+ 	ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
+ 			     (u8 *)phyf->data, phyf->size);
+-	spin_unlock_bh(&adap->win0_lock);
+ 	if (ret < 0)
+ 		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
+ 			-ret);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 80882cfc370f5..601853bb34c91 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
+  *	@addr: the start address to write
+  *	@n: length of data to write in bytes
+  *	@data: the data to write
++ *	@byte_oriented: whether to store data as bytes or as words
+  *
+  *	Writes up to a page of data (256 bytes) to the serial flash starting
+  *	at the given address.  All the data must be written to the same page.
++ *	If @byte_oriented is set the write data is stored as byte stream
++ *	(i.e. matches what on disk), otherwise in big-endian.
+  */
+ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+-			  unsigned int n, const u8 *data)
++			  unsigned int n, const u8 *data, bool byte_oriented)
+ {
+-	int ret;
+-	u32 buf[64];
+ 	unsigned int i, c, left, val, offset = addr & 0xff;
++	u32 buf[64];
++	int ret;
+ 
+ 	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
+ 		return -EINVAL;
+@@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+ 	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+ 		goto unlock;
+ 
+-	for (left = n; left; left -= c) {
++	for (left = n; left; left -= c, data += c) {
+ 		c = min(left, 4U);
+-		for (val = 0, i = 0; i < c; ++i)
+-			val = (val << 8) + *data++;
++		for (val = 0, i = 0; i < c; ++i) {
++			if (byte_oriented)
++				val = (val << 8) + data[i];
++			else
++				val = (val << 8) + data[c - i - 1];
++		}
+ 
+ 		ret = sf1_write(adapter, c, c != left, 1, val);
+ 		if (ret)
+@@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+ 	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
+ 
+ 	/* Read the page to verify the write succeeded */
+-	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
++	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
++			    byte_oriented);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+ 	 */
+ 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
+ 	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
+-	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
++	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+ 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ 		addr += SF_PAGE_SIZE;
+ 		fw_data += SF_PAGE_SIZE;
+-		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
++		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
+ 		if (ret)
+ 			goto out;
+ 	}
+ 
+-	ret = t4_write_flash(adap,
+-			     fw_start + offsetof(struct fw_hdr, fw_ver),
+-			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
++	ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
++			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
++			     true);
+ out:
+ 	if (ret)
+ 		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
+@@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
+ 	/* Copy the supplied PHY Firmware image to the adapter memory location
+ 	 * allocated by the adapter firmware.
+ 	 */
++	spin_lock_bh(&adap->win0_lock);
+ 	ret = t4_memory_rw(adap, win, mtype, maddr,
+ 			   phy_fw_size, (__be32 *)phy_fw_data,
+ 			   T4_MEMORY_WRITE);
++	spin_unlock_bh(&adap->win0_lock);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -10208,7 +10218,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+ 			n = size - i;
+ 		else
+ 			n = SF_PAGE_SIZE;
+-		ret = t4_write_flash(adap, addr, n, cfg_data);
++		ret = t4_write_flash(adap, addr, n, cfg_data, true);
+ 		if (ret)
+ 			goto out;
+ 
+@@ -10677,13 +10687,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
+ 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ 		addr += SF_PAGE_SIZE;
+ 		boot_data += SF_PAGE_SIZE;
+-		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
++		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
++				     false);
+ 		if (ret)
+ 			goto out;
+ 	}
+ 
+ 	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
+-			     (const u8 *)header);
++			     (const u8 *)header, false);
+ 
+ out:
+ 	if (ret)
+@@ -10758,7 +10769,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+ 	for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ 		n = min_t(u32, size - i, SF_PAGE_SIZE);
+ 
+-		ret = t4_write_flash(adap, addr, n, cfg_data);
++		ret = t4_write_flash(adap, addr, n, cfg_data, false);
+ 		if (ret)
+ 			goto out;
+ 
+@@ -10770,7 +10781,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+ 	for (i = 0; i < npad; i++) {
+ 		u8 data = 0;
+ 
+-		ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
++		ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
++				     false);
+ 		if (ret)
+ 			goto out;
+ 	}
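
The two packing orders in t4_write_flash() can be checked in isolation: with the input "ABCD", the byte-oriented expression keeps the bytes in on-disk order while the word-oriented one reverses them. A standalone demonstration of the two loop bodies:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint8_t data[4] = { 'A', 'B', 'C', 'D' };
        uint32_t val_bytes = 0, val_words = 0;
        int i, c = 4;

        for (i = 0; i < c; i++) {
            val_bytes = (val_bytes << 8) + data[i];          /* byte_oriented */
            val_words = (val_words << 8) + data[c - i - 1];  /* word order */
        }
        printf("byte_oriented: 0x%08x\n", (unsigned)val_bytes);  /* 0x41424344 */
        printf("word oriented: 0x%08x\n", (unsigned)val_words);  /* 0x44434241 */
        return 0;
    }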
+diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
+index 46b0dbab8aadc..7c992172933bc 100644
+--- a/drivers/net/ethernet/ec_bhf.c
++++ b/drivers/net/ethernet/ec_bhf.c
+@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
+ 	struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ 
+ 	unregister_netdev(net_dev);
+-	free_netdev(net_dev);
+ 
+ 	pci_iounmap(dev, priv->dma_io);
+ 	pci_iounmap(dev, priv->io);
++
++	free_netdev(net_dev);
++
+ 	pci_release_regions(dev);
+ 	pci_clear_master(dev);
+ 	pci_disable_device(dev);
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index b6eba29d8e99e..7968568bbe214 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -5897,6 +5897,7 @@ drv_cleanup:
+ unmap_bars:
+ 	be_unmap_pci_bars(adapter);
+ free_netdev:
++	pci_disable_pcie_error_reporting(pdev);
+ 	free_netdev(netdev);
+ rel_reg:
+ 	pci_release_regions(pdev);
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index 1753807cbf97e..d71eac7e19249 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
+ {
+ 	struct fec_enet_private *fep =
+ 		container_of(cc, struct fec_enet_private, cc);
+-	const struct platform_device_id *id_entry =
+-		platform_get_device_id(fep->pdev);
+ 	u32 tempval;
+ 
+ 	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ 	tempval |= FEC_T_CTRL_CAPTURE;
+ 	writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+ 
+-	if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
++	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ 		udelay(1);
+ 
+ 	return readl(fep->hwp + FEC_ATIME);
+@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
+ 	fep->ptp_caps.enable = fec_ptp_enable;
+ 
+ 	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
++	if (!fep->cycle_speed) {
++		fep->cycle_speed = NSEC_PER_SEC;
++		dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
++	}
+ 	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
+ 
+ 	spin_lock_init(&fep->tmreg_lock);
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 27e439853c3b0..55432ea360ad4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -1715,12 +1715,13 @@ setup_rings:
+  * ice_vsi_cfg_txqs - Configure the VSI for Tx
+  * @vsi: the VSI being configured
+  * @rings: Tx ring array to be configured
++ * @count: number of Tx ring array elements
+  *
+  * Return 0 on success and a negative value on error
+  * Configure the Tx VSI for operation.
+  */
+ static int
+-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
++ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
+ {
+ 	struct ice_aqc_add_tx_qgrp *qg_buf;
+ 	u16 q_idx = 0;
+@@ -1732,7 +1733,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
+ 
+ 	qg_buf->num_txqs = 1;
+ 
+-	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
++	for (q_idx = 0; q_idx < count; q_idx++) {
+ 		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ 		if (err)
+ 			goto err_cfg_txqs;
+@@ -1752,7 +1753,7 @@ err_cfg_txqs:
+  */
+ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+ {
+-	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
++	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+ }
+ 
+ /**
+@@ -1767,7 +1768,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+ 	int ret;
+ 	int i;
+ 
+-	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
++	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1965,17 +1966,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
+  * @rst_src: reset source
+  * @rel_vmvf_num: Relative ID of VF/VM
+  * @rings: Tx ring array to be stopped
++ * @count: number of Tx ring array elements
+  */
+ static int
+ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+-		      u16 rel_vmvf_num, struct ice_ring **rings)
++		      u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
+ {
+ 	u16 q_idx;
+ 
+ 	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+ 		return -EINVAL;
+ 
+-	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
++	for (q_idx = 0; q_idx < count; q_idx++) {
+ 		struct ice_txq_meta txq_meta = { };
+ 		int status;
+ 
+@@ -2003,7 +2005,7 @@ int
+ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ 			  u16 rel_vmvf_num)
+ {
+-	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
++	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
+ }
+ 
+ /**
+@@ -2012,7 +2014,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+  */
+ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
+ {
+-	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
++	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index d821c687f239c..b61cd84be97fd 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2554,6 +2554,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
+ }
+ 
++/**
++ * ice_xdp_safe_mode - XDP handler for safe mode
++ * @dev: netdevice
++ * @xdp: XDP command
++ */
++static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
++			     struct netdev_bpf *xdp)
++{
++	NL_SET_ERR_MSG_MOD(xdp->extack,
++			   "Please provide working DDP firmware package in order to use XDP\n"
++			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
++	return -EOPNOTSUPP;
++}
++
+ /**
+  * ice_xdp - implements XDP handler
+  * @dev: netdevice
+@@ -6805,6 +6819,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
+ 	.ndo_change_mtu = ice_change_mtu,
+ 	.ndo_get_stats64 = ice_get_stats64,
+ 	.ndo_tx_timeout = ice_tx_timeout,
++	.ndo_bpf = ice_xdp_safe_mode,
+ };
+ 
+ static const struct net_device_ops ice_netdev_ops = {
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 135ba5b6ae980..072075bc60ee9 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
+ 
+ static int xrx200_alloc_skb(struct xrx200_chan *ch)
+ {
++	struct sk_buff *skb = ch->skb[ch->dma.desc];
+ 	dma_addr_t mapping;
+ 	int ret = 0;
+ 
+@@ -168,6 +169,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
+ 				 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ 	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+ 		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
++		ch->skb[ch->dma.desc] = skb;
+ 		ret = -ENOMEM;
+ 		goto skip;
+ 	}
+@@ -198,7 +200,6 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
+ 	ch->dma.desc %= LTQ_DESC_NUM;
+ 
+ 	if (ret) {
+-		ch->skb[ch->dma.desc] = skb;
+ 		net_dev->stats.rx_dropped++;
+ 		netdev_err(net_dev, "failed to allocate new rx buffer\n");
+ 		return ret;
+@@ -352,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
+ 	struct xrx200_chan *ch = ptr;
+ 
+ 	if (napi_schedule_prep(&ch->napi)) {
+-		__napi_schedule(&ch->napi);
+ 		ltq_dma_disable_irq(&ch->dma);
++		__napi_schedule(&ch->napi);
+ 	}
+ 
+ 	ltq_dma_ack_irq(&ch->dma);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+index 9153c9bda96fa..897853a68cd03 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+@@ -306,6 +306,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
+ 	int ret = 0, i;
+ 
+ 	mutex_lock(&mlx5_intf_mutex);
++	priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
+ 	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
+ 		if (!priv->adev[i]) {
+ 			bool is_supported = false;
+@@ -323,6 +324,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
+ 			}
+ 		} else {
+ 			adev = &priv->adev[i]->adev;
++
++			/* Pay attention that this is not PCI driver that
++			 * mlx5_core_dev is connected, but auxiliary driver.
++			 *
++			 * Here we can race of module unload with devlink
++			 * reload, but we don't need to take extra lock because
++			 * we are holding global mlx5_intf_mutex.
++			 */
++			if (!adev->dev.driver)
++				continue;
+ 			adrv = to_auxiliary_drv(adev->dev.driver);
+ 
+ 			if (adrv->resume)
+@@ -353,6 +364,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
+ 			continue;
+ 
+ 		adev = &priv->adev[i]->adev;
++		/* Auxiliary driver was unbind manually through sysfs */
++		if (!adev->dev.driver)
++			goto skip_suspend;
++
+ 		adrv = to_auxiliary_drv(adev->dev.driver);
+ 
+ 		if (adrv->suspend) {
+@@ -360,9 +375,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
+ 			continue;
+ 		}
+ 
++skip_suspend:
+ 		del_adev(&priv->adev[i]->adev);
+ 		priv->adev[i] = NULL;
+ 	}
++	priv->flags |= MLX5_PRIV_FLAGS_DETACH;
+ 	mutex_unlock(&mlx5_intf_mutex);
+ }
+ 
+@@ -451,6 +468,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
+ 	struct mlx5_priv *priv = &dev->priv;
+ 
+ 	lockdep_assert_held(&mlx5_intf_mutex);
++	if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
++		return 0;
+ 
+ 	delete_drivers(dev);
+ 	if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+index be0ee03de7217..2e9bee4e5209b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+@@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
+ 							     work);
+ 	struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
+ 	struct neighbour *n = update_work->n;
++	struct mlx5e_encap_entry *e = NULL;
+ 	bool neigh_connected, same_dev;
+-	struct mlx5e_encap_entry *e;
+ 	unsigned char ha[ETH_ALEN];
+-	struct mlx5e_priv *priv;
+ 	u8 nud_state, dead;
+ 
+ 	rtnl_lock();
+@@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
+ 	if (!same_dev)
+ 		goto out;
+ 
+-	list_for_each_entry(e, &nhe->encap_list, encap_list) {
+-		if (!mlx5e_encap_take(e))
+-			continue;
++	/* mlx5e_get_next_init_encap() releases previous encap before returning
++	 * the next one.
++	 */
++	while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
++		mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);
+ 
+-		priv = netdev_priv(e->out_dev);
+-		mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+-		mlx5e_encap_put(priv, e);
+-	}
+ out:
+ 	rtnl_unlock();
+ 	mlx5e_release_neigh_update_work(update_work);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index 96ba027dbef3d..9992f94f794b6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -93,13 +93,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
+ 
+ 	ASSERT_RTNL();
+ 
+-	/* wait for encap to be fully initialized */
+-	wait_for_completion(&e->res_ready);
+-
+ 	mutex_lock(&esw->offloads.encap_tbl_lock);
+ 	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
+-	if (e->compl_result < 0 || (encap_connected == neigh_connected &&
+-				    ether_addr_equal(e->h_dest, ha)))
++	if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
+ 		goto unlock;
+ 
+ 	mlx5e_take_all_encap_flows(e, &flow_list);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+index 1560fcbf4ac7c..a17d79effa273 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+@@ -250,9 +250,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
+ 		mlx5e_take_tmp_flow(flow, flow_list, 0);
+ }
+ 
++typedef bool (match_cb)(struct mlx5e_encap_entry *);
++
+ static struct mlx5e_encap_entry *
+-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+-			   struct mlx5e_encap_entry *e)
++mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
++			      struct mlx5e_encap_entry *e,
++			      match_cb match)
+ {
+ 	struct mlx5e_encap_entry *next = NULL;
+ 
+@@ -287,7 +290,7 @@ retry:
+ 	/* wait for encap to be fully initialized */
+ 	wait_for_completion(&next->res_ready);
+ 	/* continue searching if encap entry is not in valid state after completion */
+-	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
++	if (!match(next)) {
+ 		e = next;
+ 		goto retry;
+ 	}
+@@ -295,6 +298,30 @@ retry:
+ 	return next;
+ }
+ 
++static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
++{
++	return e->flags & MLX5_ENCAP_ENTRY_VALID;
++}
++
++static struct mlx5e_encap_entry *
++mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
++			   struct mlx5e_encap_entry *e)
++{
++	return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
++}
++
++static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
++{
++	return e->compl_result >= 0;
++}
++
++struct mlx5e_encap_entry *
++mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
++			  struct mlx5e_encap_entry *e)
++{
++	return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
++}
++
+ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+ {
+ 	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+index 3d45341e2216f..26f7fab109d97 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	struct net_device *netdev = priv->netdev;
+ 
+-	if (!priv->ipsec)
+-		return;
+-
+ 	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
+ 	    !MLX5_CAP_ETH(mdev, swp)) {
+ 		mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 99dc9f2beed5b..16b8f52450329 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5168,22 +5168,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+ 	}
+ 
+ 	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
+-		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
+-					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
+-		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
+-					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
+-		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+-		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
+-					 NETIF_F_GSO_UDP_TUNNEL_CSUM;
++		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
++		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
++		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
+ 	}
+ 
+ 	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
+-		netdev->hw_features     |= NETIF_F_GSO_GRE |
+-					   NETIF_F_GSO_GRE_CSUM;
+-		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
+-					   NETIF_F_GSO_GRE_CSUM;
+-		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
+-						NETIF_F_GSO_GRE_CSUM;
++		netdev->hw_features     |= NETIF_F_GSO_GRE;
++		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
++		netdev->gso_partial_features |= NETIF_F_GSO_GRE;
+ 	}
+ 
+ 	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index b633f669ea57f..b3b8e44540a5d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -4622,7 +4622,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+ 	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
+ 		wait_for_completion(&hpe->res_ready);
+ 		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
+-			hpe->hp->pair->peer_gone = true;
++			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
+ 
+ 		mlx5e_hairpin_put(priv, hpe);
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+index 25c091795bcd8..17027536efbaa 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+@@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
+ void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
+ 
+ struct mlx5e_neigh_hash_entry;
++struct mlx5e_encap_entry *
++mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
++			  struct mlx5e_encap_entry *e);
+ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
+ 
+ void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index 1fa9c18563da9..31c6a3b91f4a9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
+ 
+ 	eqe = next_eqe_sw(eq);
+ 	if (!eqe)
+-		return 0;
++		goto out;
+ 
+ 	do {
+ 		struct mlx5_core_cq *cq;
+@@ -161,6 +161,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
+ 		++eq->cons_index;
+ 
+ 	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
++
++out:
+ 	eq_update_ci(eq, 1);
+ 
+ 	if (cqn != -1)
+@@ -248,9 +250,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
+ 		++eq->cons_index;
+ 
+ 	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+-	eq_update_ci(eq, 1);
+ 
+ out:
++	eq_update_ci(eq, 1);
+ 	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
+ 
+ 	return unlikely(recovery) ? num_eqes : 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 2c6d95900e3c9..a3edeea4ddd78 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1308,6 +1308,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+ 			goto err_vhca_mapping;
+ 	}
+ 
++	/* External controller host PF has factory programmed MAC.
++	 * Read it from the device.
++	 */
++	if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
++		mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
++
+ 	esw_vport_change_handle_locked(vport);
+ 
+ 	esw->enabled_vports++;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index efb93d63e54cb..58b8f75d7a01e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1157,7 +1157,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
+ 	err = mlx5_core_set_hca_defaults(dev);
+ 	if (err) {
+ 		mlx5_core_err(dev, "Failed to set hca defaults\n");
+-		goto err_sriov;
++		goto err_set_hca;
+ 	}
+ 
+ 	mlx5_vhca_event_start(dev);
+@@ -1190,6 +1190,7 @@ err_ec:
+ 	mlx5_sf_hw_table_destroy(dev);
+ err_vhca:
+ 	mlx5_vhca_event_stop(dev);
++err_set_hca:
+ 	mlx5_cleanup_fs(dev);
+ err_fs:
+ 	mlx5_accel_tls_cleanup(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+index 50af84e76fb6a..174f71ed52800 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+ 	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
+ 	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+ 	mkey->size = MLX5_GET64(mkc, mkc, len);
+-	mkey->key |= mlx5_idx_to_mkey(mkey_index);
++	mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
+ 	mkey->pd = MLX5_GET(mkc, mkc, pd);
+ 	init_waitqueue_head(&mkey->wait);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+index 8e0dddc6383f0..2389239acadc9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+ {
+ 	int err;
+ 
++	if (!MLX5_CAP_GEN(dev, roce))
++		return;
++
+ 	err = mlx5_nic_vport_enable_roce(dev);
+ 	if (err) {
+ 		mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+index 90b524c59f3c3..c4139f4648bf1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+@@ -153,6 +153,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
+ 	sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id);
+ 	sf_dev = xa_load(&table->devices, sf_index);
+ 	switch (event->new_vhca_state) {
++	case MLX5_VHCA_STATE_INVALID:
+ 	case MLX5_VHCA_STATE_ALLOCATED:
+ 		if (sf_dev)
+ 			mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+index f146c618a78e7..46ef45fa91675 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+@@ -712,7 +712,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
+ 	if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ 		return -EINVAL;
+ 
+-	memcpy(padded_data, data, data_sz);
++	inline_data_sz =
++		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
++
++	/* Add an alignment padding  */
++	memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
+ 
+ 	/* Remove L2L3 outer headers */
+ 	MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
+@@ -724,32 +728,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
+ 	hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ 	used_actions++; /* Remove and NOP are a single double action */
+ 
+-	inline_data_sz =
+-		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
++	/* Point to the last dword of the header */
++	data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
+ 
+-	/* Add the new header inline + 2 extra bytes */
++	/* Add the new header using inline action 4Byte at a time, the header
++	 * is added in reversed order to the beginning of the packet to avoid
++	 * incorrect parsing by the HW. Since header is 14B or 18B an extra
++	 * two bytes are padded and later removed.
++	 */
+ 	for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ 		void *addr_inline;
+ 
+ 		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
+ 			 DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ 		/* The hardware expects here offset to words (2 bytes) */
+-		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
+-			 i * 2);
++		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
+ 
+ 		/* Copy bytes one by one to avoid endianness problem */
+ 		addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
+ 					   hw_action, inline_data);
+-		memcpy(addr_inline, data_ptr, inline_data_sz);
++		memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
+ 		hw_action += DR_STE_ACTION_DOUBLE_SZ;
+-		data_ptr += inline_data_sz;
+ 		used_actions++;
+ 	}
+ 
+-	/* Remove 2 extra bytes */
++	/* Remove first 2 extra bytes */
+ 	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
+ 		 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+-	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
++	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
+ 	/* The hardware expects here size in words (2 bytes) */
+ 	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
+ 	used_actions++;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+index 612b0ac31db23..9737565cd8d43 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+@@ -124,10 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
+ static inline bool
+ mlx5dr_is_supported(struct mlx5_core_dev *dev)
+ {
+-	return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+-	       (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+-		(MLX5_CAP_GEN(dev, steering_format_version) <=
+-		 MLX5_STEERING_FORMAT_CONNECTX_6DX));
++	return MLX5_CAP_GEN(dev, roce) &&
++	       (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
++		(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
++		 (MLX5_CAP_GEN(dev, steering_format_version) <=
++		  MLX5_STEERING_FORMAT_CONNECTX_6DX)));
+ }
+ 
+ /* buddy functions & structure */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+index 01cc00ad8acf2..b6931bbe52d29 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+@@ -424,6 +424,15 @@ err_modify_sq:
+ 	return err;
+ }
+ 
++static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
++{
++	int i;
++
++	for (i = 0; i < hp->num_channels; i++)
++		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
++				       MLX5_SQC_STATE_RST, 0, 0);
++}
++
+ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+ {
+ 	int i;
+@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+ 	for (i = 0; i < hp->num_channels; i++)
+ 		mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
+ 				       MLX5_RQC_STATE_RST, 0, 0);
+-
+ 	/* unset peer SQs */
+-	if (hp->peer_gone)
+-		return;
+-	for (i = 0; i < hp->num_channels; i++)
+-		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+-				       MLX5_SQC_STATE_RST, 0, 0);
++	if (!hp->peer_gone)
++		mlx5_hairpin_unpair_peer_sq(hp);
+ }
+ 
+ struct mlx5_hairpin *
+@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
+ 	mlx5_hairpin_destroy_queues(hp);
+ 	kfree(hp);
+ }
++
++void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
++{
++	int i;
++
++	mlx5_hairpin_unpair_peer_sq(hp);
++
++	/* destroy peer SQ */
++	for (i = 0; i < hp->num_channels; i++)
++		mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
++
++	hp->peer_gone = true;
++}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index e05c5c0f3ae1d..7d21fbb9192f6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -465,8 +465,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ 	void *in;
+ 	int err;
+ 
+-	if (!vport)
+-		return -EINVAL;
+ 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ 		return -EACCES;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+index bf85ce9835d7f..42e4437ac3c16 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+@@ -708,7 +708,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
+ 							MLXSW_THERMAL_TRIP_MASK,
+ 							module_tz,
+ 							&mlxsw_thermal_module_ops,
+-							NULL, 0, 0);
++							NULL, 0,
++							module_tz->parent->polling_delay);
+ 	if (IS_ERR(module_tz->tzdev)) {
+ 		err = PTR_ERR(module_tz->tzdev);
+ 		return err;
+@@ -830,7 +831,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
+ 						MLXSW_THERMAL_TRIP_MASK,
+ 						gearbox_tz,
+ 						&mlxsw_thermal_gearbox_ops,
+-						NULL, 0, 0);
++						NULL, 0,
++						gearbox_tz->parent->polling_delay);
+ 	if (IS_ERR(gearbox_tz->tzdev))
+ 		return PTR_ERR(gearbox_tz->tzdev);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+index c4adc7f740d3e..769386971ac3b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+@@ -3863,7 +3863,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
+ #define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS	25
+ #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1	5
+ #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2	11
+-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3	5
++#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3	11
+ 
+ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
+ 				       enum mlxsw_reg_qeec_hr hr, u8 index,
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 46e5c9136bacd..0c4c976548c85 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -378,6 +378,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
+ 
+ int ocelot_port_flush(struct ocelot *ocelot, int port)
+ {
++	unsigned int pause_ena;
+ 	int err, val;
+ 
+ 	/* Disable dequeuing from the egress queues */
+@@ -386,6 +387,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
+ 		       QSYS_PORT_MODE, port);
+ 
+ 	/* Disable flow control */
++	ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
+ 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+ 
+ 	/* Disable priority flow control */
+@@ -421,6 +423,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
+ 	/* Clear flushing again. */
+ 	ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+ 
++	/* Re-enable flow control */
++	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
++
+ 	return err;
+ }
+ EXPORT_SYMBOL(ocelot_port_flush);
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index 7e6bac85495d3..344ea11434549 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -1602,6 +1602,8 @@ err_out_free_netdev:
+ 	free_netdev(netdev);
+ 
+ err_out_free_res:
++	if (NX_IS_REVISION_P3(pdev->revision))
++		pci_disable_pcie_error_reporting(pdev);
+ 	pci_release_regions(pdev);
+ 
+ err_out_disable_pdev:
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+index 96b947fde646b..3beafc60747e6 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+@@ -2690,6 +2690,7 @@ err_out_free_hw_res:
+ 	kfree(ahw);
+ 
+ err_out_free_res:
++	pci_disable_pcie_error_reporting(pdev);
+ 	pci_release_regions(pdev);
+ 
+ err_out_disable_pdev:
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+index 41fbd2ceeede4..ab1e0fcccabb6 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+@@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
+ 			      struct rtnl_link_stats64 *s)
+ {
+ 	struct rmnet_priv *priv = netdev_priv(dev);
+-	struct rmnet_vnd_stats total_stats;
++	struct rmnet_vnd_stats total_stats = { };
+ 	struct rmnet_pcpu_stats *pcpu_ptr;
++	struct rmnet_vnd_stats snapshot;
+ 	unsigned int cpu, start;
+ 
+-	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
+-
+ 	for_each_possible_cpu(cpu) {
+ 		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
+ 
+ 		do {
+ 			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
+-			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
+-			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
+-			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
+-			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
++			snapshot = pcpu_ptr->stats;	/* struct assignment */
+ 		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
+ 
+-		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
++		total_stats.rx_pkts += snapshot.rx_pkts;
++		total_stats.rx_bytes += snapshot.rx_bytes;
++		total_stats.tx_pkts += snapshot.tx_pkts;
++		total_stats.tx_bytes += snapshot.tx_bytes;
++		total_stats.tx_drops += snapshot.tx_drops;
+ 	}
+ 
+ 	s->rx_packets = total_stats.rx_pkts;
+@@ -354,4 +354,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
+ 	}
+ 
+ 	return 0;
+-}
+\ No newline at end of file
++}
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+index b70d44ac09906..3c73453725f94 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+@@ -76,10 +76,10 @@ enum power_event {
+ #define LPI_CTRL_STATUS_TLPIEN	0x00000001	/* Transmit LPI Entry */
+ 
+ /* GMAC HW ADDR regs */
+-#define GMAC_ADDR_HIGH(reg)	(((reg > 15) ? 0x00000800 : 0x00000040) + \
+-				(reg * 8))
+-#define GMAC_ADDR_LOW(reg)	(((reg > 15) ? 0x00000804 : 0x00000044) + \
+-				(reg * 8))
++#define GMAC_ADDR_HIGH(reg)	((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
++				 0x00000040 + (reg * 8))
++#define GMAC_ADDR_LOW(reg)	((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
++				 0x00000044 + (reg * 8))
+ #define GMAC_MAX_PERFECT_ADDRESSES	1
+ 
+ #define GMAC_PCS_BASE		0x000000c0	/* PCS register base */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 6dc9f10414e47..7e6bead6429c5 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -622,6 +622,8 @@ error_pclk_get:
+ void stmmac_remove_config_dt(struct platform_device *pdev,
+ 			     struct plat_stmmacenet_data *plat)
+ {
++	clk_disable_unprepare(plat->stmmac_clk);
++	clk_disable_unprepare(plat->pclk);
+ 	of_node_put(plat->phy_node);
+ 	of_node_put(plat->mdio_node);
+ }
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 030185301014c..01bb36e7cff0a 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -849,7 +849,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 		smp_mb();
+ 
+ 		/* Space might have just been freed - check again */
+-		if (temac_check_tx_bd_space(lp, num_frag))
++		if (temac_check_tx_bd_space(lp, num_frag + 1))
+ 			return NETDEV_TX_BUSY;
+ 
+ 		netif_wake_queue(ndev);
+@@ -876,7 +876,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 	cur_p->phys = cpu_to_be32(skb_dma_addr);
+-	ptr_to_txbd((void *)skb, cur_p);
+ 
+ 	for (ii = 0; ii < num_frag; ii++) {
+ 		if (++lp->tx_bd_tail >= lp->tx_bd_num)
+@@ -915,6 +914,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 	}
+ 	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
+ 
++	/* Mark last fragment with skb address, so it can be consumed
++	 * in temac_start_xmit_done()
++	 */
++	ptr_to_txbd((void *)skb, cur_p);
++
+ 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ 	lp->tx_bd_tail++;
+ 	if (lp->tx_bd_tail >= lp->tx_bd_num)
+diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
+index 17be2bb2985cd..920e9f888cc35 100644
+--- a/drivers/net/hamradio/mkiss.c
++++ b/drivers/net/hamradio/mkiss.c
+@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
+ 	ax->tty = NULL;
+ 
+ 	unregister_netdev(ax->dev);
++	free_netdev(ax->dev);
+ }
+ 
+ /* Perform I/O control on an active ax25 channel. */
+diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
+index f59960876083f..8e7f8728998f1 100644
+--- a/drivers/net/mhi/net.c
++++ b/drivers/net/mhi/net.c
+@@ -49,7 +49,7 @@ static int mhi_ndo_stop(struct net_device *ndev)
+ 	return 0;
+ }
+ 
+-static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+ 	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ 	const struct mhi_net_proto *proto = mhi_netdev->proto;
+diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
+index 0eeec80bec311..e4a5703666461 100644
+--- a/drivers/net/usb/cdc_eem.c
++++ b/drivers/net/usb/cdc_eem.c
+@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ 	}
+ 
+ 	skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
++	dev_kfree_skb_any(skb);
+ 	if (!skb2)
+ 		return NULL;
+ 
+-	dev_kfree_skb_any(skb);
+ 	skb = skb2;
+ 
+ done:
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 8acf301154282..dc3d84b43e4e8 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1902,7 +1902,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
+ static const struct driver_info cdc_ncm_info = {
+ 	.description = "CDC NCM",
+ 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+-			| FLAG_LINK_INTR,
++			| FLAG_LINK_INTR | FLAG_ETHER,
+ 	.bind = cdc_ncm_bind,
+ 	.unbind = cdc_ncm_unbind,
+ 	.manage_power = usbnet_manage_power,
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 76ed79bb1e3f1..5281291711aff 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	ret = smsc75xx_wait_ready(dev, 0);
+ 	if (ret < 0) {
+ 		netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
+-		goto err;
++		goto free_pdata;
+ 	}
+ 
+ 	smsc75xx_init_mac_address(dev);
+@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	ret = smsc75xx_reset(dev);
+ 	if (ret < 0) {
+ 		netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
+-		goto err;
++		goto cancel_work;
+ 	}
+ 
+ 	dev->net->netdev_ops = &smsc75xx_netdev_ops;
+@@ -1503,8 +1503,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
+ 	return 0;
+ 
+-err:
++cancel_work:
++	cancel_work_sync(&pdata->set_multicast);
++free_pdata:
+ 	kfree(pdata);
++	dev->data[0] = 0;
+ 	return ret;
+ }
+ 
+@@ -1515,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
+ 		cancel_work_sync(&pdata->set_multicast);
+ 		netif_dbg(dev, ifdown, dev->net, "free pdata\n");
+ 		kfree(pdata);
+-		pdata = NULL;
+ 		dev->data[0] = 0;
+ 	}
+ }
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 503e2fd7ce518..28a6c4cfe9b8c 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1183,9 +1183,6 @@ static int vrf_dev_init(struct net_device *dev)
+ 
+ 	dev->flags = IFF_MASTER | IFF_NOARP;
+ 
+-	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
+-	dev->mtu = 64 * 1024;
+-
+ 	/* similarly, oper state is irrelevant; set to up to avoid confusion */
+ 	dev->operstate = IF_OPER_UP;
+ 	netdev_lockdep_set_classes(dev);
+@@ -1685,7 +1682,8 @@ static void vrf_setup(struct net_device *dev)
+ 	 * which breaks networking.
+ 	 */
+ 	dev->min_mtu = IPV6_MIN_MTU;
+-	dev->max_mtu = ETH_MAX_MTU;
++	dev->max_mtu = IP6_MAX_MTU;
++	dev->mtu = dev->max_mtu;
+ }
+ 
+ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index 051b48bd7985d..e3f5e7ab76063 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -514,7 +514,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
+ 		udelay(PIO_RETRY_DELAY);
+ 	}
+ 
+-	dev_err(dev, "config read/write timed out\n");
++	dev_err(dev, "PIO read/write transfer time out\n");
+ 	return -ETIMEDOUT;
+ }
+ 
+@@ -657,6 +657,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+ 	return true;
+ }
+ 
++static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
++{
++	struct device *dev = &pcie->pdev->dev;
++
++	/*
++	 * Trying to start a new PIO transfer when previous has not completed
++	 * cause External Abort on CPU which results in kernel panic:
++	 *
++	 *     SError Interrupt on CPU0, code 0xbf000002 -- SError
++	 *     Kernel panic - not syncing: Asynchronous SError Interrupt
++	 *
++	 * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
++	 * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
++	 * concurrent calls at the same time. But because PIO transfer may take
++	 * about 1.5s when link is down or card is disconnected, it means that
++	 * advk_pcie_wait_pio() does not always have to wait for completion.
++	 *
++	 * Some versions of ARM Trusted Firmware handles this External Abort at
++	 * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit:
++	 * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
++	 */
++	if (advk_readl(pcie, PIO_START)) {
++		dev_err(dev, "Previous PIO read/write transfer is still running\n");
++		return true;
++	}
++
++	return false;
++}
++
+ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ 			     int where, int size, u32 *val)
+ {
+@@ -673,9 +702,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ 		return pci_bridge_emul_conf_read(&pcie->bridge, where,
+ 						 size, val);
+ 
+-	/* Start PIO */
+-	advk_writel(pcie, 0, PIO_START);
+-	advk_writel(pcie, 1, PIO_ISR);
++	if (advk_pcie_pio_is_running(pcie)) {
++		*val = 0xffffffff;
++		return PCIBIOS_SET_FAILED;
++	}
+ 
+ 	/* Program the control register */
+ 	reg = advk_readl(pcie, PIO_CTRL);
+@@ -694,7 +724,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ 	/* Program the data strobe */
+ 	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
+ 
+-	/* Start the transfer */
++	/* Clear PIO DONE ISR and start the transfer */
++	advk_writel(pcie, 1, PIO_ISR);
+ 	advk_writel(pcie, 1, PIO_START);
+ 
+ 	ret = advk_pcie_wait_pio(pcie);
+@@ -734,9 +765,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ 	if (where % size)
+ 		return PCIBIOS_SET_FAILED;
+ 
+-	/* Start PIO */
+-	advk_writel(pcie, 0, PIO_START);
+-	advk_writel(pcie, 1, PIO_ISR);
++	if (advk_pcie_pio_is_running(pcie))
++		return PCIBIOS_SET_FAILED;
+ 
+ 	/* Program the control register */
+ 	reg = advk_readl(pcie, PIO_CTRL);
+@@ -763,7 +793,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ 	/* Program the data strobe */
+ 	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
+ 
+-	/* Start the transfer */
++	/* Clear PIO DONE ISR and start the transfer */
++	advk_writel(pcie, 1, PIO_ISR);
+ 	advk_writel(pcie, 1, PIO_START);
+ 
+ 	ret = advk_pcie_wait_pio(pcie);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 653660e3ba9ef..7bf76bca888da 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3558,6 +3558,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
+ 	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
+ }
+ 
++/*
++ * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be
++ * prevented for those affected devices.
++ */
++static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
++{
++	if ((dev->device & 0xffc0) == 0x2340)
++		quirk_no_bus_reset(dev);
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
++			 quirk_nvidia_no_bus_reset);
++
+ /*
+  * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
+  * The device will throw a Link Down error on AER-capable systems and
+@@ -3578,6 +3590,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
+  */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
+ 
++/*
++ * Some TI KeyStone C667X devices do not support bus/hot reset.  The PCIESS
++ * automatically disables LTSSM when Secondary Bus Reset is received and
++ * the device stops working.  Prevent bus reset for these devices.  With
++ * this change, the device can be assigned to VMs with VFIO, but it will
++ * leak state between VMs.  Reference
++ * https://e2e.ti.com/support/processors/f/791/t/954382
++ */
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
++
+ static void quirk_no_pm_reset(struct pci_dev *dev)
+ {
+ 	/*
+@@ -3913,6 +3935,69 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
+ 	return 0;
+ }
+ 
++#define PCI_DEVICE_ID_HINIC_VF      0x375E
++#define HINIC_VF_FLR_TYPE           0x1000
++#define HINIC_VF_FLR_CAP_BIT        (1UL << 30)
++#define HINIC_VF_OP                 0xE80
++#define HINIC_VF_FLR_PROC_BIT       (1UL << 18)
++#define HINIC_OPERATION_TIMEOUT     15000	/* 15 seconds */
++
++/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
++static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
++{
++	unsigned long timeout;
++	void __iomem *bar;
++	u32 val;
++
++	if (probe)
++		return 0;
++
++	bar = pci_iomap(pdev, 0, 0);
++	if (!bar)
++		return -ENOTTY;
++
++	/* Get and check firmware capabilities */
++	val = ioread32be(bar + HINIC_VF_FLR_TYPE);
++	if (!(val & HINIC_VF_FLR_CAP_BIT)) {
++		pci_iounmap(pdev, bar);
++		return -ENOTTY;
++	}
++
++	/* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
++	val = ioread32be(bar + HINIC_VF_OP);
++	val = val | HINIC_VF_FLR_PROC_BIT;
++	iowrite32be(val, bar + HINIC_VF_OP);
++
++	pcie_flr(pdev);
++
++	/*
++	 * The device must recapture its Bus and Device Numbers after FLR
++	 * in order generate Completions.  Issue a config write to let the
++	 * device capture this information.
++	 */
++	pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
++
++	/* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
++	timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
++	do {
++		val = ioread32be(bar + HINIC_VF_OP);
++		if (!(val & HINIC_VF_FLR_PROC_BIT))
++			goto reset_complete;
++		msleep(20);
++	} while (time_before(jiffies, timeout));
++
++	val = ioread32be(bar + HINIC_VF_OP);
++	if (!(val & HINIC_VF_FLR_PROC_BIT))
++		goto reset_complete;
++
++	pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
++
++reset_complete:
++	pci_iounmap(pdev, bar);
++
++	return 0;
++}
++
+ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
+ 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
+ 		 reset_intel_82599_sfp_virtfn },
+@@ -3924,6 +4009,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
+ 	{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
+ 	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ 		reset_chelsio_generic_dev },
++	{ PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
++		reset_hinic_vf_dev },
+ 	{ 0 }
+ };
+ 
+@@ -4764,6 +4851,8 @@ static const struct pci_dev_acs_enabled {
+ 	{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
+ 	{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
+ 	{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
++	/* Broadcom multi-function device */
++	{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
+ 	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
+ 	/* Amazon Annapurna Labs */
+ 	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
+@@ -5165,7 +5254,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
+ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
+ {
+ 	if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
+-	    (pdev->device == 0x7340 && pdev->revision != 0xc5))
++	    (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
++	    (pdev->device == 0x7341 && pdev->revision != 0x00))
+ 		return;
+ 
+ 	if (pdev->device == 0x15d8) {
+@@ -5192,6 +5282,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
+ /* AMD Navi14 dGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
+ /* AMD Raven platform iGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
+index cdbcc49f71152..731c483a04dea 100644
+--- a/drivers/phy/mediatek/phy-mtk-tphy.c
++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
+@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
+ 		break;
+ 	default:
+ 		dev_err(tphy->dev, "incompatible PHY type\n");
++		clk_disable_unprepare(instance->ref_clk);
++		clk_disable_unprepare(instance->da_ref_clk);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
+index 1f4bca854add5..a9b511c7e8500 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
+@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
+ 	if (p->groups[group].enabled) {
+ 		dev_err(p->dev, "%s is already enabled\n",
+ 			p->groups[group].name);
+-		return -EBUSY;
++		return 0;
+ 	}
+ 
+ 	p->groups[group].enabled = 1;
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 61f1c91c62de2..3390168ac0793 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -8808,6 +8808,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (2nd gen) */
+ 	TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3nd gen) */
+ 	TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),	/* P15 (1st gen) / P15v (1st gen) */
++	TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL),	/* X1 Carbon (9th gen) */
+ };
+ 
+ static int __init fan_init(struct ibm_init_struct *iibm)
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 03a246e60fd98..21c4c34c52d8d 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -63,7 +63,7 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
+ 	spin_unlock_irqrestore(&queue->lock, flags);
+ }
+ 
+-s32 scaled_ppm_to_ppb(long ppm)
++long scaled_ppm_to_ppb(long ppm)
+ {
+ 	/*
+ 	 * The 'freq' field in the 'struct timex' is in parts per
+@@ -80,7 +80,7 @@ s32 scaled_ppm_to_ppb(long ppm)
+ 	s64 ppb = 1 + ppm;
+ 	ppb *= 125;
+ 	ppb >>= 13;
+-	return (s32) ppb;
++	return (long) ppb;
+ }
+ EXPORT_SYMBOL(scaled_ppm_to_ppb);
+ 
+@@ -138,7 +138,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
+ 		delta = ktime_to_ns(kt);
+ 		err = ops->adjtime(ops, delta);
+ 	} else if (tx->modes & ADJ_FREQUENCY) {
+-		s32 ppb = scaled_ppm_to_ppb(tx->freq);
++		long ppb = scaled_ppm_to_ppb(tx->freq);
+ 		if (ppb > ops->max_adj || ppb < -ops->max_adj)
+ 			return -ERANGE;
+ 		if (ops->adjfine)
+diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
+index eb3fc1db4edc8..c4754f3cf2337 100644
+--- a/drivers/regulator/cros-ec-regulator.c
++++ b/drivers/regulator/cros-ec-regulator.c
+@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
+ 
+ 	drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
+ 	if (IS_ERR(drvdata->dev)) {
++		ret = PTR_ERR(drvdata->dev);
+ 		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
+-		return PTR_ERR(drvdata->dev);
++		return ret;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, drvdata);
+diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c
+index 9edc34981ee0a..6b8be52c3772a 100644
+--- a/drivers/regulator/mt6315-regulator.c
++++ b/drivers/regulator/mt6315-regulator.c
+@@ -59,7 +59,7 @@ static const struct linear_range mt_volt_range1[] = {
+ 	REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
+ };
+ 
+-static unsigned int mt6315_map_mode(u32 mode)
++static unsigned int mt6315_map_mode(unsigned int mode)
+ {
+ 	switch (mode) {
+ 	case MT6315_BUCK_MODE_AUTO:
+diff --git a/drivers/regulator/rt4801-regulator.c b/drivers/regulator/rt4801-regulator.c
+index 2055a9cb13ba5..7a87788d3f092 100644
+--- a/drivers/regulator/rt4801-regulator.c
++++ b/drivers/regulator/rt4801-regulator.c
+@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
+ 	struct gpio_descs *gpios = priv->enable_gpios;
+ 	int id = rdev_get_id(rdev), ret;
+ 
+-	if (gpios->ndescs <= id) {
++	if (!gpios || gpios->ndescs <= id) {
+ 		dev_warn(&rdev->dev, "no dedicated gpio can control\n");
+ 		goto bypass_gpio;
+ 	}
+@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
+ 	struct gpio_descs *gpios = priv->enable_gpios;
+ 	int id = rdev_get_id(rdev);
+ 
+-	if (gpios->ndescs <= id) {
++	if (!gpios || gpios->ndescs <= id) {
+ 		dev_warn(&rdev->dev, "no dedicated gpio can control\n");
+ 		goto bypass_gpio;
+ 	}
+diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
+index 5adc552dffd58..4bca64de0f672 100644
+--- a/drivers/regulator/rtmv20-regulator.c
++++ b/drivers/regulator/rtmv20-regulator.c
+@@ -27,6 +27,7 @@
+ #define RTMV20_REG_LDIRQ	0x30
+ #define RTMV20_REG_LDSTAT	0x40
+ #define RTMV20_REG_LDMASK	0x50
++#define RTMV20_MAX_REGS		(RTMV20_REG_LDMASK + 1)
+ 
+ #define RTMV20_VID_MASK		GENMASK(7, 4)
+ #define RICHTEK_VID		0x80
+@@ -313,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
+ 	.val_bits = 8,
+ 	.cache_type = REGCACHE_RBTREE,
+ 	.max_register = RTMV20_REG_LDMASK,
++	.num_reg_defaults_raw = RTMV20_MAX_REGS,
+ 
+ 	.writeable_reg = rtmv20_is_accessible_reg,
+ 	.readable_reg = rtmv20_is_accessible_reg,
+diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
+index ecefc25eff0c0..337353c9655ed 100644
+--- a/drivers/s390/crypto/ap_queue.c
++++ b/drivers/s390/crypto/ap_queue.c
+@@ -135,12 +135,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
+ {
+ 	struct ap_queue_status status;
+ 	struct ap_message *ap_msg;
++	bool found = false;
+ 
+ 	status = ap_dqap(aq->qid, &aq->reply->psmid,
+ 			 aq->reply->msg, aq->reply->len);
+ 	switch (status.response_code) {
+ 	case AP_RESPONSE_NORMAL:
+-		aq->queue_count--;
++		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
+ 		if (aq->queue_count > 0)
+ 			mod_timer(&aq->timeout,
+ 				  jiffies + aq->request_timeout);
+@@ -150,8 +151,14 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
+ 			list_del_init(&ap_msg->list);
+ 			aq->pendingq_count--;
+ 			ap_msg->receive(aq, ap_msg, aq->reply);
++			found = true;
+ 			break;
+ 		}
++		if (!found) {
++			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
++				    __func__, aq->reply->psmid,
++				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
++		}
+ 		fallthrough;
+ 	case AP_RESPONSE_NO_PENDING_REPLY:
+ 		if (!status.queue_empty || aq->queue_count <= 0)
+@@ -232,7 +239,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
+ 			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
+ 	switch (status.response_code) {
+ 	case AP_RESPONSE_NORMAL:
+-		aq->queue_count++;
++		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
+ 		if (aq->queue_count == 1)
+ 			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
+ 		list_move_tail(&ap_msg->list, &aq->pendingq);
+diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
+index 2786470a52011..4f24f63922126 100644
+--- a/drivers/spi/spi-stm32-qspi.c
++++ b/drivers/spi/spi-stm32-qspi.c
+@@ -293,7 +293,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
+ 	int err = 0;
+ 
+ 	if (!op->data.nbytes)
+-		return stm32_qspi_wait_nobusy(qspi);
++		goto wait_nobusy;
+ 
+ 	if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
+ 		goto out;
+@@ -314,6 +314,9 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
+ out:
+ 	/* clear flags */
+ 	writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
++wait_nobusy:
++	if (!err)
++		err = stm32_qspi_wait_nobusy(qspi);
+ 
+ 	return err;
+ }
+diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
+index 2765289028fae..68193db8b2e3c 100644
+--- a/drivers/spi/spi-zynq-qspi.c
++++ b/drivers/spi/spi-zynq-qspi.c
+@@ -678,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
+ 	xqspi->irq = platform_get_irq(pdev, 0);
+ 	if (xqspi->irq <= 0) {
+ 		ret = -ENXIO;
+-		goto remove_master;
++		goto clk_dis_all;
+ 	}
+ 	ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
+ 			       0, pdev->name, xqspi);
+ 	if (ret != 0) {
+ 		ret = -ENXIO;
+ 		dev_err(&pdev->dev, "request_irq failed\n");
+-		goto remove_master;
++		goto clk_dis_all;
+ 	}
+ 
+ 	ret = of_property_read_u32(np, "num-cs",
+@@ -693,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
+ 	if (ret < 0) {
+ 		ctlr->num_chipselect = 1;
+ 	} else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
++		ret = -EINVAL;
+ 		dev_err(&pdev->dev, "only 2 chip selects are available\n");
+-		goto remove_master;
++		goto clk_dis_all;
+ 	} else {
+ 		ctlr->num_chipselect = num_cs;
+ 	}
+diff --git a/drivers/staging/hikey9xx/hi6421v600-regulator.c b/drivers/staging/hikey9xx/hi6421v600-regulator.c
+index f6a14e9c3cbfe..e10fe3058176d 100644
+--- a/drivers/staging/hikey9xx/hi6421v600-regulator.c
++++ b/drivers/staging/hikey9xx/hi6421v600-regulator.c
+@@ -83,7 +83,7 @@ static const unsigned int ldo34_voltages[] = {
+ 			.owner		= THIS_MODULE,			       \
+ 			.volt_table	= vtable,			       \
+ 			.n_voltages	= ARRAY_SIZE(vtable),		       \
+-			.vsel_mask	= (1 << (ARRAY_SIZE(vtable) - 1)) - 1, \
++			.vsel_mask	= ARRAY_SIZE(vtable) - 1,	       \
+ 			.vsel_reg	= vreg,				       \
+ 			.enable_reg	= ereg,				       \
+ 			.enable_mask	= emask,			       \
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+index cbec65e5a4645..62ea47f9fee5e 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+@@ -2579,7 +2579,7 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
+ 	mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
+ 	mon_ndev->ieee80211_ptr = mon_wdev;
+ 
+-	ret = register_netdevice(mon_ndev);
++	ret = cfg80211_register_netdevice(mon_ndev);
+ 	if (ret) {
+ 		goto out;
+ 	}
+@@ -2661,7 +2661,7 @@ static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
+ 	adapter = rtw_netdev_priv(ndev);
+ 	pwdev_priv = adapter_wdev_data(adapter);
+ 
+-	unregister_netdevice(ndev);
++	cfg80211_unregister_netdevice(ndev);
+ 
+ 	if (ndev == pwdev_priv->pmon_ndev) {
+ 		pwdev_priv->pmon_ndev = NULL;
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index 4545b23bda3f1..bac0f5458cab9 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -686,6 +686,16 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
+ 	int val;
+ 	unsigned long flags;
+ 
++	/* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK required by BC 1.2 spec */
++	spin_lock_irqsave(&usbmisc->lock, flags);
++	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
++	val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
++	writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
++	spin_unlock_irqrestore(&usbmisc->lock, flags);
++
++	/* TVDMSRC_DIS */
++	msleep(20);
++
+ 	/* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
+ 	spin_lock_irqsave(&usbmisc->lock, flags);
+ 	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+@@ -695,7 +705,8 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
+ 				usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ 	spin_unlock_irqrestore(&usbmisc->lock, flags);
+ 
+-	usleep_range(1000, 2000);
++	/* TVDMSRC_ON */
++	msleep(40);
+ 
+ 	/*
+ 	 * Per BC 1.2, check voltage of D+:
+@@ -798,7 +809,8 @@ static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
+ 				usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ 	spin_unlock_irqrestore(&usbmisc->lock, flags);
+ 
+-	usleep_range(1000, 2000);
++	/* TVDPSRC_ON */
++	msleep(40);
+ 
+ 	/* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
+ 	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 13fe37fbbd2c8..6ebb8bd92e9df 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -40,6 +40,8 @@
+ #define USB_VENDOR_GENESYS_LOGIC		0x05e3
+ #define USB_VENDOR_SMSC				0x0424
+ #define USB_PRODUCT_USB5534B			0x5534
++#define USB_VENDOR_CYPRESS			0x04b4
++#define USB_PRODUCT_CY7C65632			0x6570
+ #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND	0x01
+ #define HUB_QUIRK_DISABLE_AUTOSUSPEND		0x02
+ 
+@@ -5644,6 +5646,11 @@ static const struct usb_device_id hub_id_table[] = {
+       .idProduct = USB_PRODUCT_USB5534B,
+       .bInterfaceClass = USB_CLASS_HUB,
+       .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
++    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++                   | USB_DEVICE_ID_MATCH_PRODUCT,
++      .idVendor = USB_VENDOR_CYPRESS,
++      .idProduct = USB_PRODUCT_CY7C65632,
++      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+     { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ 			| USB_DEVICE_ID_MATCH_INT_CLASS,
+       .idVendor = USB_VENDOR_GENESYS_LOGIC,
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 126f0e10b3ef4..0022039bc2355 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1657,8 +1657,8 @@ static int dwc3_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_get_sync(&pdev->dev);
+ 
+-	dwc3_debugfs_exit(dwc);
+ 	dwc3_core_exit_mode(dwc);
++	dwc3_debugfs_exit(dwc);
+ 
+ 	dwc3_core_exit(dwc);
+ 	dwc3_ulpi_exit(dwc);
+diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
+index 8ab3949423604..74d9c2c38193d 100644
+--- a/drivers/usb/dwc3/debug.h
++++ b/drivers/usb/dwc3/debug.h
+@@ -413,9 +413,12 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
+ 
+ 
+ #ifdef CONFIG_DEBUG_FS
++extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
+ extern void dwc3_debugfs_init(struct dwc3 *d);
+ extern void dwc3_debugfs_exit(struct dwc3 *d);
+ #else
++static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
++{  }
+ static inline void dwc3_debugfs_init(struct dwc3 *d)
+ {  }
+ static inline void dwc3_debugfs_exit(struct dwc3 *d)
+diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
+index 5da4f6082d930..3ebe3e6c284d2 100644
+--- a/drivers/usb/dwc3/debugfs.c
++++ b/drivers/usb/dwc3/debugfs.c
+@@ -890,30 +890,14 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
+ 	}
+ }
+ 
+-static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
+-		struct dentry *parent)
++void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+ {
+ 	struct dentry		*dir;
+ 
+-	dir = debugfs_create_dir(dep->name, parent);
++	dir = debugfs_create_dir(dep->name, dep->dwc->root);
+ 	dwc3_debugfs_create_endpoint_files(dep, dir);
+ }
+ 
+-static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
+-		struct dentry *parent)
+-{
+-	int			i;
+-
+-	for (i = 0; i < dwc->num_eps; i++) {
+-		struct dwc3_ep	*dep = dwc->eps[i];
+-
+-		if (!dep)
+-			continue;
+-
+-		dwc3_debugfs_create_endpoint_dir(dep, parent);
+-	}
+-}
+-
+ void dwc3_debugfs_init(struct dwc3 *dwc)
+ {
+ 	struct dentry		*root;
+@@ -944,7 +928,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
+ 				&dwc3_testmode_fops);
+ 		debugfs_create_file("link_state", 0644, root, dwc,
+ 				    &dwc3_link_state_fops);
+-		dwc3_debugfs_create_endpoint_dirs(dwc, root);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 1f9454e0d447b..755ab6fc0791f 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2719,6 +2719,8 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
+ 	INIT_LIST_HEAD(&dep->started_list);
+ 	INIT_LIST_HEAD(&dep->cancelled_list);
+ 
++	dwc3_debugfs_create_endpoint_dir(dep);
++
+ 	return 0;
+ }
+ 
+@@ -2762,6 +2764,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
+ 			list_del(&dep->endpoint.ep_list);
+ 		}
+ 
++		debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
+ 		kfree(dep);
+ 	}
+ }
+diff --git a/fs/afs/main.c b/fs/afs/main.c
+index b2975256dadbd..179004b15566d 100644
+--- a/fs/afs/main.c
++++ b/fs/afs/main.c
+@@ -203,8 +203,8 @@ static int __init afs_init(void)
+ 		goto error_fs;
+ 
+ 	afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs");
+-	if (IS_ERR(afs_proc_symlink)) {
+-		ret = PTR_ERR(afs_proc_symlink);
++	if (!afs_proc_symlink) {
++		ret = -ENOMEM;
+ 		goto error_proc;
+ 	}
+ 
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index a7d9e147dee62..595fd083c4ad1 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2347,16 +2347,16 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
+ 	spin_lock(&sinfo->lock);
+ 	spin_lock(&cache->lock);
+ 	if (!--cache->ro) {
+-		num_bytes = cache->length - cache->reserved -
+-			    cache->pinned - cache->bytes_super -
+-			    cache->zone_unusable - cache->used;
+-		sinfo->bytes_readonly -= num_bytes;
+ 		if (btrfs_is_zoned(cache->fs_info)) {
+ 			/* Migrate zone_unusable bytes back */
+ 			cache->zone_unusable = cache->alloc_offset - cache->used;
+ 			sinfo->bytes_zone_unusable += cache->zone_unusable;
+ 			sinfo->bytes_readonly -= cache->zone_unusable;
+ 		}
++		num_bytes = cache->length - cache->reserved -
++			    cache->pinned - cache->bytes_super -
++			    cache->zone_unusable - cache->used;
++		sinfo->bytes_readonly -= num_bytes;
+ 		list_del_init(&cache->ro_list);
+ 	}
+ 	spin_unlock(&cache->lock);
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index c63d0a7f7ba4f..527c972b562dd 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -738,6 +738,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
+ 		__SetPageUptodate(page);
+ 		error = huge_add_to_page_cache(page, mapping, index);
+ 		if (unlikely(error)) {
++			restore_reserve_on_error(h, &pseudo_vma, addr, page);
+ 			put_page(page);
+ 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ 			goto out;
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 9e0c1afac8bdf..c175523b0a2c1 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -378,7 +378,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ 					info_type, fanotify_info_name(info),
+ 					info->name_len, buf, count);
+ 		if (ret < 0)
+-			return ret;
++			goto out_close_fd;
+ 
+ 		buf += ret;
+ 		count -= ret;
+@@ -426,7 +426,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ 					fanotify_event_object_fh(event),
+ 					info_type, dot, dot_len, buf, count);
+ 		if (ret < 0)
+-			return ret;
++			goto out_close_fd;
+ 
+ 		buf += ret;
+ 		count -= ret;
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index cccd1aab69dd1..5dae4187210d9 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -145,6 +145,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
+ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
+ 						long freed);
+ bool isolate_huge_page(struct page *page, struct list_head *list);
++int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
+ void putback_active_hugepage(struct page *page);
+ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
+ void free_huge_page(struct page *page);
+@@ -330,6 +331,11 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+ 	return false;
+ }
+ 
++static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
++{
++	return 0;
++}
++
+ static inline void putback_active_hugepage(struct page *page)
+ {
+ }
+@@ -591,6 +597,8 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+ 				unsigned long address);
+ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+ 			pgoff_t idx);
++void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
++				unsigned long address, struct page *page);
+ 
+ /* arch callback */
+ int __init __alloc_bootmem_huge_page(struct hstate *h);
+diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
+index a57af878fd0cd..4a5966475a35a 100644
+--- a/include/linux/mfd/rohm-bd70528.h
++++ b/include/linux/mfd/rohm-bd70528.h
+@@ -26,9 +26,7 @@ struct bd70528_data {
+ 	struct mutex rtc_timer_lock;
+ };
+ 
+-#define BD70528_BUCK_VOLTS 17
+-#define BD70528_BUCK_VOLTS 17
+-#define BD70528_BUCK_VOLTS 17
++#define BD70528_BUCK_VOLTS 0x10
+ #define BD70528_LDO_VOLTS 0x20
+ 
+ #define BD70528_REG_BUCK1_EN	0x0F
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 133967c40214b..6a31bbba1b6f1 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -541,6 +541,10 @@ struct mlx5_core_roce {
+ enum {
+ 	MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
+ 	MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
++	/* Set during device detach to block any further devices
++	 * creation/deletion on drivers rescan. Unset during device attach.
++	 */
++	MLX5_PRIV_FLAGS_DETACH = 1 << 2,
+ };
+ 
+ struct mlx5_adev {
+diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
+index 028f442530cf5..60ffeb6b67ae7 100644
+--- a/include/linux/mlx5/transobj.h
++++ b/include/linux/mlx5/transobj.h
+@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ 			 struct mlx5_hairpin_params *params);
+ 
+ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
++void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
+ #endif /* __TRANSOBJ_H__ */
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 5aacc1c10a45a..8f0fb62e8975c 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -445,13 +445,6 @@ struct mm_struct {
+ 		 */
+ 		atomic_t has_pinned;
+ 
+-		/**
+-		 * @write_protect_seq: Locked when any thread is write
+-		 * protecting pages mapped by this mm to enforce a later COW,
+-		 * for instance during page table copying for fork().
+-		 */
+-		seqcount_t write_protect_seq;
+-
+ #ifdef CONFIG_MMU
+ 		atomic_long_t pgtables_bytes;	/* PTE page table pages */
+ #endif
+@@ -460,6 +453,18 @@ struct mm_struct {
+ 		spinlock_t page_table_lock; /* Protects page tables and some
+ 					     * counters
+ 					     */
++		/*
++		 * With some kernel config, the current mmap_lock's offset
++		 * inside 'mm_struct' is at 0x120, which is very optimal, as
++		 * its two hot fields 'count' and 'owner' sit in 2 different
++		 * cachelines,  and when mmap_lock is highly contended, both
++		 * of the 2 fields will be accessed frequently, current layout
++		 * will help to reduce cache bouncing.
++		 *
++		 * So please be careful with adding new fields before
++		 * mmap_lock, which can easily push the 2 fields into one
++		 * cacheline.
++		 */
+ 		struct rw_semaphore mmap_lock;
+ 
+ 		struct list_head mmlist; /* List of maybe swapped mm's.	These
+@@ -480,7 +485,15 @@ struct mm_struct {
+ 		unsigned long stack_vm;	   /* VM_STACK */
+ 		unsigned long def_flags;
+ 
++		/**
++		 * @write_protect_seq: Locked when any thread is write
++		 * protecting pages mapped by this mm to enforce a later COW,
++		 * for instance during page table copying for fork().
++		 */
++		seqcount_t write_protect_seq;
++
+ 		spinlock_t arg_lock; /* protect the below fields */
++
+ 		unsigned long start_code, end_code, start_data, end_data;
+ 		unsigned long start_brk, brk, start_stack;
+ 		unsigned long arg_start, arg_end, env_start, env_end;
+diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
+index 0d47fd33b2285..51d7f1b8b32aa 100644
+--- a/include/linux/ptp_clock_kernel.h
++++ b/include/linux/ptp_clock_kernel.h
+@@ -235,7 +235,7 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
+  * @ppm:    Parts per million, but with a 16 bit binary fractional field
+  */
+ 
+-extern s32 scaled_ppm_to_ppb(long ppm);
++extern long scaled_ppm_to_ppb(long ppm);
+ 
+ /**
+  * ptp_find_pin() - obtain the pin index of a given auxiliary function
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 385894b4a8bba..42222a84167f3 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
+ 			    int __user *usockvec);
+ extern int __sys_shutdown_sock(struct socket *sock, int how);
+ extern int __sys_shutdown(int fd, int how);
+-
+-extern struct ns_common *get_net_ns(struct ns_common *ns);
+ #endif /* _LINUX_SOCKET_H */
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index d9b7c9132c2f6..6430a94c69818 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -23,6 +23,16 @@
+ #define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
+ #define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
+ 
++/* Clear all flags but only keep swp_entry_t related information */
++static inline pte_t pte_swp_clear_flags(pte_t pte)
++{
++	if (pte_swp_soft_dirty(pte))
++		pte = pte_swp_clear_soft_dirty(pte);
++	if (pte_swp_uffd_wp(pte))
++		pte = pte_swp_clear_uffd_wp(pte);
++	return pte;
++}
++
+ /*
+  * Store a type+offset into a swp_entry_t in an arch-independent format
+  */
+@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
+ {
+ 	swp_entry_t arch_entry;
+ 
+-	if (pte_swp_soft_dirty(pte))
+-		pte = pte_swp_clear_soft_dirty(pte);
+-	if (pte_swp_uffd_wp(pte))
+-		pte = pte_swp_clear_uffd_wp(pte);
++	pte = pte_swp_clear_flags(pte);
+ 	arch_entry = __pte_to_swp_entry(pte);
+ 	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+ }
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 2d1d629e5d14b..a5ca18cfdb6fb 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -6388,7 +6388,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
+ 
+ /**
+  * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
+- *				 of injected frames
++ *				 of injected frames.
++ *
++ * To accurately parse and take into account rate and retransmission fields,
++ * you must initialize the chandef field in the ieee80211_tx_info structure
++ * of the skb before calling this function.
++ *
+  * @skb: packet injected by userspace
+  * @dev: the &struct device of this 802.11 device
+  */
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index dcaee24a4d877..14b6f7f445322 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -197,6 +197,8 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
+ void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
+ 
+ void net_ns_barrier(void);
++
++struct ns_common *get_net_ns(struct ns_common *ns);
+ #else /* CONFIG_NET_NS */
+ #include <linux/sched.h>
+ #include <linux/nsproxy.h>
+@@ -216,6 +218,11 @@ static inline void net_ns_get_ownership(const struct net *net,
+ }
+ 
+ static inline void net_ns_barrier(void) {}
++
++static inline struct ns_common *get_net_ns(struct ns_common *ns)
++{
++	return ERR_PTR(-EINVAL);
++}
+ #endif /* CONFIG_NET_NS */
+ 
+ 
+diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
+index 7d6687618d808..d1b327036ae43 100644
+--- a/include/uapi/linux/in.h
++++ b/include/uapi/linux/in.h
+@@ -289,6 +289,9 @@ struct sockaddr_in {
+ /* Address indicating an error return. */
+ #define	INADDR_NONE		((unsigned long int) 0xffffffff)
+ 
++/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
++#define	INADDR_DUMMY		((unsigned long int) 0xc0000008)
++
+ /* Network number for local host loopback. */
+ #define	IN_LOOPBACKNET		127
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 9e600767803b5..2423b4e918b90 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5937,6 +5937,27 @@ struct bpf_sanitize_info {
+ 	bool mask_to_left;
+ };
+ 
++static struct bpf_verifier_state *
++sanitize_speculative_path(struct bpf_verifier_env *env,
++			  const struct bpf_insn *insn,
++			  u32 next_idx, u32 curr_idx)
++{
++	struct bpf_verifier_state *branch;
++	struct bpf_reg_state *regs;
++
++	branch = push_stack(env, next_idx, curr_idx, true);
++	if (branch && insn) {
++		regs = branch->frame[branch->curframe]->regs;
++		if (BPF_SRC(insn->code) == BPF_K) {
++			mark_reg_unknown(env, regs, insn->dst_reg);
++		} else if (BPF_SRC(insn->code) == BPF_X) {
++			mark_reg_unknown(env, regs, insn->dst_reg);
++			mark_reg_unknown(env, regs, insn->src_reg);
++		}
++	}
++	return branch;
++}
++
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 			    struct bpf_insn *insn,
+ 			    const struct bpf_reg_state *ptr_reg,
+@@ -6020,12 +6041,26 @@ do_sim:
+ 		tmp = *dst_reg;
+ 		*dst_reg = *ptr_reg;
+ 	}
+-	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
++	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
++					env->insn_idx);
+ 	if (!ptr_is_dst_reg && ret)
+ 		*dst_reg = tmp;
+ 	return !ret ? REASON_STACK : 0;
+ }
+ 
++static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
++{
++	struct bpf_verifier_state *vstate = env->cur_state;
++
++	/* If we simulate paths under speculation, we don't update the
++	 * insn as 'seen' such that when we verify unreachable paths in
++	 * the non-speculative domain, sanitize_dead_code() can still
++	 * rewrite/sanitize them.
++	 */
++	if (!vstate->speculative)
++		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
++}
++
+ static int sanitize_err(struct bpf_verifier_env *env,
+ 			const struct bpf_insn *insn, int reason,
+ 			const struct bpf_reg_state *off_reg,
+@@ -8204,14 +8239,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		if (err)
+ 			return err;
+ 	}
++
+ 	if (pred == 1) {
+-		/* only follow the goto, ignore fall-through */
++		/* Only follow the goto, ignore fall-through. If needed, push
++		 * the fall-through branch for simulation under speculative
++		 * execution.
++		 */
++		if (!env->bypass_spec_v1 &&
++		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
++					       *insn_idx))
++			return -EFAULT;
+ 		*insn_idx += insn->off;
+ 		return 0;
+ 	} else if (pred == 0) {
+-		/* only follow fall-through branch, since
+-		 * that's where the program will go
++		/* Only follow the fall-through branch, since that's where the
++		 * program will go. If needed, push the goto branch for
++		 * simulation under speculative execution.
+ 		 */
++		if (!env->bypass_spec_v1 &&
++		    !sanitize_speculative_path(env, insn,
++					       *insn_idx + insn->off + 1,
++					       *insn_idx))
++			return -EFAULT;
+ 		return 0;
+ 	}
+ 
+@@ -10060,7 +10109,7 @@ static int do_check(struct bpf_verifier_env *env)
+ 		}
+ 
+ 		regs = cur_regs(env);
+-		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
++		sanitize_mark_insn_seen(env);
+ 		prev_insn_idx = env->insn_idx;
+ 
+ 		if (class == BPF_ALU || class == BPF_ALU64) {
+@@ -10285,7 +10334,7 @@ process_bpf_exit:
+ 					return err;
+ 
+ 				env->insn_idx++;
+-				env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
++				sanitize_mark_insn_seen(env);
+ 			} else {
+ 				verbose(env, "invalid BPF_LD mode\n");
+ 				return -EINVAL;
+@@ -10784,6 +10833,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
+ {
+ 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+ 	struct bpf_insn *insn = new_prog->insnsi;
++	u32 old_seen = old_data[off].seen;
+ 	u32 prog_len;
+ 	int i;
+ 
+@@ -10804,7 +10854,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
+ 	memcpy(new_data + off + cnt - 1, old_data + off,
+ 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+ 	for (i = off; i < off + cnt - 1; i++) {
+-		new_data[i].seen = env->pass_cnt;
++		/* Expand insni[off]'s seen count to the patched range. */
++		new_data[i].seen = old_seen;
+ 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
+ 	}
+ 	env->insn_aux_data = new_data;
+@@ -12060,6 +12111,9 @@ static void free_states(struct bpf_verifier_env *env)
+  * insn_aux_data was touched. These variables are compared to clear temporary
+  * data from failed pass. For testing and experiments do_check_common() can be
+  * run multiple times even when prior attempt to verify is unsuccessful.
++ *
++ * Note that special handling is needed on !env->bypass_spec_v1 if this is
++ * ever called outside of error path with subsequent program rejection.
+  */
+ static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
+ {
+diff --git a/kernel/crash_core.c b/kernel/crash_core.c
+index 825284baaf466..684a6061a13a4 100644
+--- a/kernel/crash_core.c
++++ b/kernel/crash_core.c
+@@ -464,6 +464,7 @@ static int __init crash_save_vmcoreinfo_init(void)
+ 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
+ 	VMCOREINFO_STRUCT_SIZE(mem_section);
+ 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
++	VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
+ 	VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
+ #endif
+ 	VMCOREINFO_STRUCT_SIZE(page);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 487312a5ceabb..47fcc3fe9dc5a 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3760,11 +3760,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+  */
+ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
++	/*
++	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
++	 * See ___update_load_avg() for details.
++	 */
++	u32 divider = get_pelt_divider(&cfs_rq->avg);
++
+ 	dequeue_load_avg(cfs_rq, se);
+ 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
+-	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
++	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
+ 	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
+-	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
++	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+ 
+ 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f2d4ee80feb34..7c8151d74faf0 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2198,9 +2198,6 @@ struct saved_cmdlines_buffer {
+ };
+ static struct saved_cmdlines_buffer *savedcmd;
+ 
+-/* temporary disable recording */
+-static atomic_t trace_record_taskinfo_disabled __read_mostly;
+-
+ static inline char *get_saved_cmdlines(int idx)
+ {
+ 	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
+@@ -2486,8 +2483,6 @@ static bool tracing_record_taskinfo_skip(int flags)
+ {
+ 	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
+ 		return true;
+-	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
+-		return true;
+ 	if (!__this_cpu_read(trace_taskinfo_save))
+ 		return true;
+ 	return false;
+@@ -3742,9 +3737,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
+ 		return ERR_PTR(-EBUSY);
+ #endif
+ 
+-	if (!iter->snapshot)
+-		atomic_inc(&trace_record_taskinfo_disabled);
+-
+ 	if (*pos != iter->pos) {
+ 		iter->ent = NULL;
+ 		iter->cpu = 0;
+@@ -3787,9 +3779,6 @@ static void s_stop(struct seq_file *m, void *p)
+ 		return;
+ #endif
+ 
+-	if (!iter->snapshot)
+-		atomic_dec(&trace_record_taskinfo_disabled);
+-
+ 	trace_access_unlock(iter->cpu_file);
+ 	trace_event_read_unlock();
+ }
+diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
+index c1637f90c8a38..4702efb00ff21 100644
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -115,9 +115,9 @@ u64 notrace trace_clock_global(void)
+ 	prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ 	now = sched_clock_cpu(this_cpu);
+ 
+-	/* Make sure that now is always greater than prev_time */
++	/* Make sure that now is always greater than or equal to prev_time */
+ 	if ((s64)(now - prev_time) < 0)
+-		now = prev_time + 1;
++		now = prev_time;
+ 
+ 	/*
+ 	 * If in an NMI context then dont risk lockups and simply return
+@@ -131,7 +131,7 @@ u64 notrace trace_clock_global(void)
+ 		/* Reread prev_time in case it was already updated */
+ 		prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ 		if ((s64)(now - prev_time) < 0)
+-			now = prev_time + 1;
++			now = prev_time;
+ 
+ 		trace_clock_struct.prev_time = now;
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index ce63ec0187c55..3da4817190f3d 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2127,12 +2127,18 @@ out:
+  * be restored when a newly allocated huge page must be freed.  It is
+  * to be called after calling vma_needs_reservation to determine if a
+  * reservation exists.
++ *
++ * vma_del_reservation is used in error paths where an entry in the reserve
++ * map was created during huge page allocation and must be removed.  It is to
++ * be called after calling vma_needs_reservation to determine if a reservation
++ * exists.
+  */
+ enum vma_resv_mode {
+ 	VMA_NEEDS_RESV,
+ 	VMA_COMMIT_RESV,
+ 	VMA_END_RESV,
+ 	VMA_ADD_RESV,
++	VMA_DEL_RESV,
+ };
+ static long __vma_reservation_common(struct hstate *h,
+ 				struct vm_area_struct *vma, unsigned long addr,
+@@ -2176,11 +2182,21 @@ static long __vma_reservation_common(struct hstate *h,
+ 			ret = region_del(resv, idx, idx + 1);
+ 		}
+ 		break;
++	case VMA_DEL_RESV:
++		if (vma->vm_flags & VM_MAYSHARE) {
++			region_abort(resv, idx, idx + 1, 1);
++			ret = region_del(resv, idx, idx + 1);
++		} else {
++			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
++			/* region_add calls of range 1 should never fail. */
++			VM_BUG_ON(ret < 0);
++		}
++		break;
+ 	default:
+ 		BUG();
+ 	}
+ 
+-	if (vma->vm_flags & VM_MAYSHARE)
++	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
+ 		return ret;
+ 	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
+ 		/*
+@@ -2229,25 +2245,39 @@ static long vma_add_reservation(struct hstate *h,
+ 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
+ }
+ 
++static long vma_del_reservation(struct hstate *h,
++			struct vm_area_struct *vma, unsigned long addr)
++{
++	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
++}
++
+ /*
+- * This routine is called to restore a reservation on error paths.  In the
+- * specific error paths, a huge page was allocated (via alloc_huge_page)
+- * and is about to be freed.  If a reservation for the page existed,
+- * alloc_huge_page would have consumed the reservation and set
+- * HPageRestoreReserve in the newly allocated page.  When the page is freed
+- * via free_huge_page, the global reservation count will be incremented if
+- * HPageRestoreReserve is set.  However, free_huge_page can not adjust the
+- * reserve map.  Adjust the reserve map here to be consistent with global
+- * reserve count adjustments to be made by free_huge_page.
++ * This routine is called to restore reservation information on error paths.
++ * It should ONLY be called for pages allocated via alloc_huge_page(), and
++ * the hugetlb mutex should remain held when calling this routine.
++ *
++ * It handles two specific cases:
++ * 1) A reservation was in place and the page consumed the reservation.
++ *    HPageRestoreReserve is set in the page.
++ * 2) No reservation was in place for the page, so HPageRestoreReserve is
++ *    not set.  However, alloc_huge_page always updates the reserve map.
++ *
++ * In case 1, free_huge_page later in the error path will increment the
++ * global reserve count.  But, free_huge_page does not have enough context
++ * to adjust the reservation map.  This case deals primarily with private
++ * mappings.  Adjust the reserve map here to be consistent with global
++ * reserve count adjustments to be made by free_huge_page.  Make sure the
++ * reserve map indicates there is a reservation present.
++ *
++ * In case 2, simply undo reserve map modifications done by alloc_huge_page.
+  */
+-static void restore_reserve_on_error(struct hstate *h,
+-			struct vm_area_struct *vma, unsigned long address,
+-			struct page *page)
++void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
++			unsigned long address, struct page *page)
+ {
+-	if (unlikely(HPageRestoreReserve(page))) {
+-		long rc = vma_needs_reservation(h, vma, address);
++	long rc = vma_needs_reservation(h, vma, address);
+ 
+-		if (unlikely(rc < 0)) {
++	if (HPageRestoreReserve(page)) {
++		if (unlikely(rc < 0))
+ 			/*
+ 			 * Rare out of memory condition in reserve map
+ 			 * manipulation.  Clear HPageRestoreReserve so that
+@@ -2260,16 +2290,57 @@ static void restore_reserve_on_error(struct hstate *h,
+ 			 * accounting of reserve counts.
+ 			 */
+ 			ClearHPageRestoreReserve(page);
+-		} else if (rc) {
+-			rc = vma_add_reservation(h, vma, address);
+-			if (unlikely(rc < 0))
++		else if (rc)
++			(void)vma_add_reservation(h, vma, address);
++		else
++			vma_end_reservation(h, vma, address);
++	} else {
++		if (!rc) {
++			/*
++			 * This indicates there is an entry in the reserve map
++			 * added by alloc_huge_page.  We know it was added
++			 * before the alloc_huge_page call, otherwise
++			 * HPageRestoreReserve would be set on the page.
++			 * Remove the entry so that a subsequent allocation
++			 * does not consume a reservation.
++			 */
++			rc = vma_del_reservation(h, vma, address);
++			if (rc < 0)
+ 				/*
+-				 * See above comment about rare out of
+-				 * memory condition.
++				 * VERY rare out of memory condition.  Since
++				 * we can not delete the entry, set
++				 * HPageRestoreReserve so that the reserve
++				 * count will be incremented when the page
++				 * is freed.  This reserve will be consumed
++				 * on a subsequent allocation.
+ 				 */
+-				ClearHPageRestoreReserve(page);
++				SetHPageRestoreReserve(page);
++		} else if (rc < 0) {
++			/*
++			 * Rare out of memory condition from
++			 * vma_needs_reservation call.  Memory allocation is
++			 * only attempted if a new entry is needed.  Therefore,
++			 * this implies there is not an entry in the
++			 * reserve map.
++			 *
++			 * For shared mappings, no entry in the map indicates
++			 * no reservation.  We are done.
++			 */
++			if (!(vma->vm_flags & VM_MAYSHARE))
++				/*
++				 * For private mappings, no entry indicates
++				 * a reservation is present.  Since we can
++				 * not add an entry, set SetHPageRestoreReserve
++				 * on the page so reserve count will be
++				 * incremented when freed.  This reserve will
++				 * be consumed on a subsequent allocation.
++				 */
++				SetHPageRestoreReserve(page);
+ 		} else
+-			vma_end_reservation(h, vma, address);
++			/*
++			 * No reservation present, do nothing
++			 */
++			 vma_end_reservation(h, vma, address);
+ 	}
+ }
+ 
+@@ -3886,6 +3957,8 @@ again:
+ 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ 				entry = huge_ptep_get(src_pte);
+ 				if (!pte_same(src_pte_old, entry)) {
++					restore_reserve_on_error(h, vma, addr,
++								new);
+ 					put_page(new);
+ 					/* dst_entry won't change as in child */
+ 					goto again;
+@@ -4820,6 +4893,7 @@ out_release_unlock:
+ 	if (vm_shared)
+ 		unlock_page(page);
+ out_release_nounlock:
++	restore_reserve_on_error(h, dst_vma, dst_addr, page);
+ 	put_page(page);
+ 	goto out;
+ }
+@@ -5664,6 +5738,21 @@ unlock:
+ 	return ret;
+ }
+ 
++int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
++{
++	int ret = 0;
++
++	*hugetlb = false;
++	spin_lock_irq(&hugetlb_lock);
++	if (PageHeadHuge(page)) {
++		*hugetlb = true;
++		if (HPageFreed(page) || HPageMigratable(page))
++			ret = get_page_unless_zero(page);
++	}
++	spin_unlock_irq(&hugetlb_lock);
++	return ret;
++}
++
+ void putback_active_hugepage(struct page *page)
+ {
+ 	spin_lock(&hugetlb_lock);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index bd3945446d47e..704d05057d8c3 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -949,6 +949,17 @@ static int page_action(struct page_state *ps, struct page *p,
+ 	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
+ }
+ 
++/*
++ * Return true if a page type of a given page is supported by hwpoison
++ * mechanism (while handling could fail), otherwise false.  This function
++ * does not return true for hugetlb or device memory pages, so it's assumed
++ * to be called only in the context where we never have such pages.
++ */
++static inline bool HWPoisonHandlable(struct page *page)
++{
++	return PageLRU(page) || __PageMovable(page);
++}
++
+ /**
+  * __get_hwpoison_page() - Get refcount for memory error handling:
+  * @page:	raw error page (hit by memory error)
+@@ -959,8 +970,22 @@ static int page_action(struct page_state *ps, struct page *p,
+ static int __get_hwpoison_page(struct page *page)
+ {
+ 	struct page *head = compound_head(page);
++	int ret = 0;
++	bool hugetlb = false;
++
++	ret = get_hwpoison_huge_page(head, &hugetlb);
++	if (hugetlb)
++		return ret;
+ 
+-	if (!PageHuge(head) && PageTransHuge(head)) {
++	/*
++	 * This check prevents from calling get_hwpoison_unless_zero()
++	 * for any unsupported type of page in order to reduce the risk of
++	 * unexpected races caused by taking a page refcount.
++	 */
++	if (!HWPoisonHandlable(head))
++		return 0;
++
++	if (PageTransHuge(head)) {
+ 		/*
+ 		 * Non anonymous thp exists only in allocation/free time. We
+ 		 * can't handle such a case correctly, so let's give it up.
+@@ -1017,7 +1042,7 @@ try_again:
+ 			ret = -EIO;
+ 		}
+ 	} else {
+-		if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
++		if (PageHuge(p) || HWPoisonHandlable(p)) {
+ 			ret = 1;
+ 		} else {
+ 			/*
+@@ -1527,7 +1552,12 @@ try_again:
+ 		return 0;
+ 	}
+ 
+-	if (!PageTransTail(p) && !PageLRU(p))
++	/*
++	 * __munlock_pagevec may clear a writeback page's LRU flag without
++	 * page_lock. We need wait writeback completion for this page or it
++	 * may trigger vfs BUG while evict inode.
++	 */
++	if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
+ 		goto identify_page_state;
+ 
+ 	/*
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 88e833986332e..ba2f4b01920fd 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -89,8 +89,7 @@ EXPORT_SYMBOL(kmem_cache_size);
+ #ifdef CONFIG_DEBUG_VM
+ static int kmem_cache_sanity_check(const char *name, unsigned int size)
+ {
+-	if (!name || in_interrupt() || size < sizeof(void *) ||
+-		size > KMALLOC_MAX_SIZE) {
++	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
+ 		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+ 		return -EINVAL;
+ 	}
+diff --git a/mm/slub.c b/mm/slub.c
+index 3021ce9bf1b3d..602f9712ab53d 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -15,6 +15,7 @@
+ #include <linux/module.h>
+ #include <linux/bit_spinlock.h>
+ #include <linux/interrupt.h>
++#include <linux/swab.h>
+ #include <linux/bitops.h>
+ #include <linux/slab.h>
+ #include "slab.h"
+@@ -710,15 +711,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
+ 	       p, p - addr, get_freepointer(s, p));
+ 
+ 	if (s->flags & SLAB_RED_ZONE)
+-		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
++		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
+ 			      s->red_left_pad);
+ 	else if (p > addr + 16)
+ 		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
+ 
+-	print_section(KERN_ERR, "Object ", p,
++	print_section(KERN_ERR,         "Object   ", p,
+ 		      min_t(unsigned int, s->object_size, PAGE_SIZE));
+ 	if (s->flags & SLAB_RED_ZONE)
+-		print_section(KERN_ERR, "Redzone ", p + s->object_size,
++		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
+ 			s->inuse - s->object_size);
+ 
+ 	off = get_info_end(s);
+@@ -730,7 +731,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
+ 
+ 	if (off != size_from_object(s))
+ 		/* Beginning of the filler is the free pointer */
+-		print_section(KERN_ERR, "Padding ", p + off,
++		print_section(KERN_ERR, "Padding  ", p + off,
+ 			      size_from_object(s) - off);
+ 
+ 	dump_stack();
+@@ -907,11 +908,11 @@ static int check_object(struct kmem_cache *s, struct page *page,
+ 	u8 *endobject = object + s->object_size;
+ 
+ 	if (s->flags & SLAB_RED_ZONE) {
+-		if (!check_bytes_and_report(s, page, object, "Redzone",
++		if (!check_bytes_and_report(s, page, object, "Left Redzone",
+ 			object - s->red_left_pad, val, s->red_left_pad))
+ 			return 0;
+ 
+-		if (!check_bytes_and_report(s, page, object, "Redzone",
++		if (!check_bytes_and_report(s, page, object, "Right Redzone",
+ 			endobject, val, s->inuse - s->object_size))
+ 			return 0;
+ 	} else {
+@@ -926,7 +927,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
+ 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
+ 			(!check_bytes_and_report(s, page, p, "Poison", p,
+ 					POISON_FREE, s->object_size - 1) ||
+-			 !check_bytes_and_report(s, page, p, "Poison",
++			 !check_bytes_and_report(s, page, p, "End Poison",
+ 				p + s->object_size - 1, POISON_END, 1)))
+ 			return 0;
+ 		/*
+@@ -3687,7 +3688,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ {
+ 	slab_flags_t flags = s->flags;
+ 	unsigned int size = s->object_size;
+-	unsigned int freepointer_area;
+ 	unsigned int order;
+ 
+ 	/*
+@@ -3696,13 +3696,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ 	 * the possible location of the free pointer.
+ 	 */
+ 	size = ALIGN(size, sizeof(void *));
+-	/*
+-	 * This is the area of the object where a freepointer can be
+-	 * safely written. If redzoning adds more to the inuse size, we
+-	 * can't use that portion for writing the freepointer, so
+-	 * s->offset must be limited within this for the general case.
+-	 */
+-	freepointer_area = size;
+ 
+ #ifdef CONFIG_SLUB_DEBUG
+ 	/*
+@@ -3728,19 +3721,21 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ 
+ 	/*
+ 	 * With that we have determined the number of bytes in actual use
+-	 * by the object. This is the potential offset to the free pointer.
++	 * by the object and redzoning.
+ 	 */
+ 	s->inuse = size;
+ 
+-	if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+-		s->ctor)) {
++	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
++	    ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
++	    s->ctor) {
+ 		/*
+ 		 * Relocate free pointer after the object if it is not
+ 		 * permitted to overwrite the first word of the object on
+ 		 * kmem_cache_free.
+ 		 *
+ 		 * This is the case if we do RCU, have a constructor or
+-		 * destructor or are poisoning the objects.
++		 * destructor, are poisoning the objects, or are
++		 * redzoning an object smaller than sizeof(void *).
+ 		 *
+ 		 * The assumption that s->offset >= s->inuse means free
+ 		 * pointer is outside of the object is used in the
+@@ -3749,13 +3744,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ 		 */
+ 		s->offset = size;
+ 		size += sizeof(void *);
+-	} else if (freepointer_area > sizeof(void *)) {
++	} else {
+ 		/*
+ 		 * Store freelist pointer near middle of object to keep
+ 		 * it away from the edges of the object to avoid small
+ 		 * sized over/underflows from neighboring allocations.
+ 		 */
+-		s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
++		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
+ 	}
+ 
+ #ifdef CONFIG_SLUB_DEBUG
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 084a5b9a18e5c..2097648df212d 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1900,7 +1900,7 @@ unsigned int count_swap_pages(int type, int free)
+ 
+ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
+ {
+-	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
++	return pte_same(pte_swp_clear_flags(pte), swp_pte);
+ }
+ 
+ /*
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index a5e313cd6f447..b9dd150f6f01d 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -409,8 +409,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
+ 	if (WARN_ON(!forw_packet->if_outgoing))
+ 		return;
+ 
+-	if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
++	if (forw_packet->if_outgoing->soft_iface != soft_iface) {
++		pr_warn("%s: soft interface switch for queued OGM\n", __func__);
+ 		return;
++	}
+ 
+ 	if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
+ 		return;
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index af3430c2d6ea8..660dec6785ad9 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -90,8 +90,8 @@ struct bridge_mcast_stats {
+ #endif
+ 
+ struct br_tunnel_info {
+-	__be64			tunnel_id;
+-	struct metadata_dst	*tunnel_dst;
++	__be64				tunnel_id;
++	struct metadata_dst __rcu	*tunnel_dst;
+ };
+ 
+ /* private vlan flags */
+diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
+index 169e005fbda29..debe167202782 100644
+--- a/net/bridge/br_vlan_tunnel.c
++++ b/net/bridge/br_vlan_tunnel.c
+@@ -41,26 +41,33 @@ static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
+ 				      br_vlan_tunnel_rht_params);
+ }
+ 
++static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
++{
++	struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
++
++	WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
++	RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
++	dst_release(&tdst->dst);
++}
++
+ void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
+ 			  struct net_bridge_vlan *vlan)
+ {
+-	if (!vlan->tinfo.tunnel_dst)
++	if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
+ 		return;
+ 	rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
+ 			       br_vlan_tunnel_rht_params);
+-	vlan->tinfo.tunnel_id = 0;
+-	dst_release(&vlan->tinfo.tunnel_dst->dst);
+-	vlan->tinfo.tunnel_dst = NULL;
++	vlan_tunnel_info_release(vlan);
+ }
+ 
+ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
+ 				  struct net_bridge_vlan *vlan, u32 tun_id)
+ {
+-	struct metadata_dst *metadata = NULL;
++	struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
+ 	__be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
+ 	int err;
+ 
+-	if (vlan->tinfo.tunnel_dst)
++	if (metadata)
+ 		return -EEXIST;
+ 
+ 	metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
+@@ -69,8 +76,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
+ 		return -EINVAL;
+ 
+ 	metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
+-	vlan->tinfo.tunnel_dst = metadata;
+-	vlan->tinfo.tunnel_id = key;
++	rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
++	WRITE_ONCE(vlan->tinfo.tunnel_id, key);
+ 
+ 	err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
+ 					    br_vlan_tunnel_rht_params);
+@@ -79,9 +86,7 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
+ 
+ 	return 0;
+ out:
+-	dst_release(&vlan->tinfo.tunnel_dst->dst);
+-	vlan->tinfo.tunnel_dst = NULL;
+-	vlan->tinfo.tunnel_id = 0;
++	vlan_tunnel_info_release(vlan);
+ 
+ 	return err;
+ }
+@@ -182,12 +187,15 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
+ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
+ 				 struct net_bridge_vlan *vlan)
+ {
++	struct metadata_dst *tunnel_dst;
++	__be64 tunnel_id;
+ 	int err;
+ 
+-	if (!vlan || !vlan->tinfo.tunnel_id)
++	if (!vlan)
+ 		return 0;
+ 
+-	if (unlikely(!skb_vlan_tag_present(skb)))
++	tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
++	if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
+ 		return 0;
+ 
+ 	skb_dst_drop(skb);
+@@ -195,7 +203,9 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
+ 	if (err)
+ 		return err;
+ 
+-	skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
++	tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
++	if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
++		skb_dst_set(skb, &tunnel_dst->dst);
+ 
+ 	return 0;
+ }
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 909b9e684e043..f3e4d9528fa38 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -125,7 +125,7 @@ struct bcm_sock {
+ 	struct sock sk;
+ 	int bound;
+ 	int ifindex;
+-	struct notifier_block notifier;
++	struct list_head notifier;
+ 	struct list_head rx_ops;
+ 	struct list_head tx_ops;
+ 	unsigned long dropped_usr_msgs;
+@@ -133,6 +133,10 @@ struct bcm_sock {
+ 	char procname [32]; /* inode number in decimal with \0 */
+ };
+ 
++static LIST_HEAD(bcm_notifier_list);
++static DEFINE_SPINLOCK(bcm_notifier_lock);
++static struct bcm_sock *bcm_busy_notifier;
++
+ static inline struct bcm_sock *bcm_sk(const struct sock *sk)
+ {
+ 	return (struct bcm_sock *)sk;
+@@ -402,6 +406,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+ 		if (!op->count && (op->flags & TX_COUNTEVT)) {
+ 
+ 			/* create notification to user */
++			memset(&msg_head, 0, sizeof(msg_head));
+ 			msg_head.opcode  = TX_EXPIRED;
+ 			msg_head.flags   = op->flags;
+ 			msg_head.count   = op->count;
+@@ -439,6 +444,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
+ 	/* this element is not throttled anymore */
+ 	data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
+ 
++	memset(&head, 0, sizeof(head));
+ 	head.opcode  = RX_CHANGED;
+ 	head.flags   = op->flags;
+ 	head.count   = op->count;
+@@ -560,6 +566,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+ 	}
+ 
+ 	/* create notification to user */
++	memset(&msg_head, 0, sizeof(msg_head));
+ 	msg_head.opcode  = RX_TIMEOUT;
+ 	msg_head.flags   = op->flags;
+ 	msg_head.count   = op->count;
+@@ -1378,20 +1385,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ /*
+  * notification handler for netdevice status changes
+  */
+-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
+-			void *ptr)
++static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
++		       struct net_device *dev)
+ {
+-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+-	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
+ 	struct sock *sk = &bo->sk;
+ 	struct bcm_op *op;
+ 	int notify_enodev = 0;
+ 
+ 	if (!net_eq(dev_net(dev), sock_net(sk)))
+-		return NOTIFY_DONE;
+-
+-	if (dev->type != ARPHRD_CAN)
+-		return NOTIFY_DONE;
++		return;
+ 
+ 	switch (msg) {
+ 
+@@ -1426,7 +1428,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
+ 				sk->sk_error_report(sk);
+ 		}
+ 	}
++}
+ 
++static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
++			void *ptr)
++{
++	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++
++	if (dev->type != ARPHRD_CAN)
++		return NOTIFY_DONE;
++	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
++		return NOTIFY_DONE;
++	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
++		return NOTIFY_DONE;
++
++	spin_lock(&bcm_notifier_lock);
++	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
++		spin_unlock(&bcm_notifier_lock);
++		bcm_notify(bcm_busy_notifier, msg, dev);
++		spin_lock(&bcm_notifier_lock);
++	}
++	bcm_busy_notifier = NULL;
++	spin_unlock(&bcm_notifier_lock);
+ 	return NOTIFY_DONE;
+ }
+ 
+@@ -1446,9 +1469,9 @@ static int bcm_init(struct sock *sk)
+ 	INIT_LIST_HEAD(&bo->rx_ops);
+ 
+ 	/* set notifier */
+-	bo->notifier.notifier_call = bcm_notifier;
+-
+-	register_netdevice_notifier(&bo->notifier);
++	spin_lock(&bcm_notifier_lock);
++	list_add_tail(&bo->notifier, &bcm_notifier_list);
++	spin_unlock(&bcm_notifier_lock);
+ 
+ 	return 0;
+ }
+@@ -1471,7 +1494,14 @@ static int bcm_release(struct socket *sock)
+ 
+ 	/* remove bcm_ops, timer, rx_unregister(), etc. */
+ 
+-	unregister_netdevice_notifier(&bo->notifier);
++	spin_lock(&bcm_notifier_lock);
++	while (bcm_busy_notifier == bo) {
++		spin_unlock(&bcm_notifier_lock);
++		schedule_timeout_uninterruptible(1);
++		spin_lock(&bcm_notifier_lock);
++	}
++	list_del(&bo->notifier);
++	spin_unlock(&bcm_notifier_lock);
+ 
+ 	lock_sock(sk);
+ 
+@@ -1692,6 +1722,10 @@ static struct pernet_operations canbcm_pernet_ops __read_mostly = {
+ 	.exit = canbcm_pernet_exit,
+ };
+ 
++static struct notifier_block canbcm_notifier = {
++	.notifier_call = bcm_notifier
++};
++
+ static int __init bcm_module_init(void)
+ {
+ 	int err;
+@@ -1705,12 +1739,14 @@ static int __init bcm_module_init(void)
+ 	}
+ 
+ 	register_pernet_subsys(&canbcm_pernet_ops);
++	register_netdevice_notifier(&canbcm_notifier);
+ 	return 0;
+ }
+ 
+ static void __exit bcm_module_exit(void)
+ {
+ 	can_proto_unregister(&bcm_can_proto);
++	unregister_netdevice_notifier(&canbcm_notifier);
+ 	unregister_pernet_subsys(&canbcm_pernet_ops);
+ }
+ 
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 253b24417c8e5..be6183f8ca110 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -143,10 +143,14 @@ struct isotp_sock {
+ 	u32 force_tx_stmin;
+ 	u32 force_rx_stmin;
+ 	struct tpcon rx, tx;
+-	struct notifier_block notifier;
++	struct list_head notifier;
+ 	wait_queue_head_t wait;
+ };
+ 
++static LIST_HEAD(isotp_notifier_list);
++static DEFINE_SPINLOCK(isotp_notifier_lock);
++static struct isotp_sock *isotp_busy_notifier;
++
+ static inline struct isotp_sock *isotp_sk(const struct sock *sk)
+ {
+ 	return (struct isotp_sock *)sk;
+@@ -1013,7 +1017,14 @@ static int isotp_release(struct socket *sock)
+ 	/* wait for complete transmission of current pdu */
+ 	wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ 
+-	unregister_netdevice_notifier(&so->notifier);
++	spin_lock(&isotp_notifier_lock);
++	while (isotp_busy_notifier == so) {
++		spin_unlock(&isotp_notifier_lock);
++		schedule_timeout_uninterruptible(1);
++		spin_lock(&isotp_notifier_lock);
++	}
++	list_del(&so->notifier);
++	spin_unlock(&isotp_notifier_lock);
+ 
+ 	lock_sock(sk);
+ 
+@@ -1317,21 +1328,16 @@ static int isotp_getsockopt(struct socket *sock, int level, int optname,
+ 	return 0;
+ }
+ 
+-static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+-			  void *ptr)
++static void isotp_notify(struct isotp_sock *so, unsigned long msg,
++			 struct net_device *dev)
+ {
+-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+-	struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
+ 	struct sock *sk = &so->sk;
+ 
+ 	if (!net_eq(dev_net(dev), sock_net(sk)))
+-		return NOTIFY_DONE;
+-
+-	if (dev->type != ARPHRD_CAN)
+-		return NOTIFY_DONE;
++		return;
+ 
+ 	if (so->ifindex != dev->ifindex)
+-		return NOTIFY_DONE;
++		return;
+ 
+ 	switch (msg) {
+ 	case NETDEV_UNREGISTER:
+@@ -1357,7 +1363,28 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+ 			sk->sk_error_report(sk);
+ 		break;
+ 	}
++}
+ 
++static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
++			  void *ptr)
++{
++	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++
++	if (dev->type != ARPHRD_CAN)
++		return NOTIFY_DONE;
++	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
++		return NOTIFY_DONE;
++	if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
++		return NOTIFY_DONE;
++
++	spin_lock(&isotp_notifier_lock);
++	list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
++		spin_unlock(&isotp_notifier_lock);
++		isotp_notify(isotp_busy_notifier, msg, dev);
++		spin_lock(&isotp_notifier_lock);
++	}
++	isotp_busy_notifier = NULL;
++	spin_unlock(&isotp_notifier_lock);
+ 	return NOTIFY_DONE;
+ }
+ 
+@@ -1394,8 +1421,9 @@ static int isotp_init(struct sock *sk)
+ 
+ 	init_waitqueue_head(&so->wait);
+ 
+-	so->notifier.notifier_call = isotp_notifier;
+-	register_netdevice_notifier(&so->notifier);
++	spin_lock(&isotp_notifier_lock);
++	list_add_tail(&so->notifier, &isotp_notifier_list);
++	spin_unlock(&isotp_notifier_lock);
+ 
+ 	return 0;
+ }
+@@ -1442,6 +1470,10 @@ static const struct can_proto isotp_can_proto = {
+ 	.prot = &isotp_proto,
+ };
+ 
++static struct notifier_block canisotp_notifier = {
++	.notifier_call = isotp_notifier
++};
++
+ static __init int isotp_module_init(void)
+ {
+ 	int err;
+@@ -1451,6 +1483,8 @@ static __init int isotp_module_init(void)
+ 	err = can_proto_register(&isotp_can_proto);
+ 	if (err < 0)
+ 		pr_err("can: registration of isotp protocol failed\n");
++	else
++		register_netdevice_notifier(&canisotp_notifier);
+ 
+ 	return err;
+ }
+@@ -1458,6 +1492,7 @@ static __init int isotp_module_init(void)
+ static __exit void isotp_module_exit(void)
+ {
+ 	can_proto_unregister(&isotp_can_proto);
++	unregister_netdevice_notifier(&canisotp_notifier);
+ }
+ 
+ module_init(isotp_module_init);
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index e09d087ba2409..c3946c3558826 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -330,6 +330,9 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
+ 
+ 	if ((do_skcb->offset + do_skb->len) < offset_start) {
+ 		__skb_unlink(do_skb, &session->skb_queue);
++		/* drop ref taken in j1939_session_skb_queue() */
++		skb_unref(do_skb);
++
+ 		kfree_skb(do_skb);
+ 	}
+ 	spin_unlock_irqrestore(&session->skb_queue.lock, flags);
+@@ -349,12 +352,13 @@ void j1939_session_skb_queue(struct j1939_session *session,
+ 
+ 	skcb->flags |= J1939_ECU_LOCAL_SRC;
+ 
++	skb_get(skb);
+ 	skb_queue_tail(&session->skb_queue, skb);
+ }
+ 
+ static struct
+-sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
+-					  unsigned int offset_start)
++sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
++					 unsigned int offset_start)
+ {
+ 	struct j1939_priv *priv = session->priv;
+ 	struct j1939_sk_buff_cb *do_skcb;
+@@ -371,6 +375,10 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
+ 			skb = do_skb;
+ 		}
+ 	}
++
++	if (skb)
++		skb_get(skb);
++
+ 	spin_unlock_irqrestore(&session->skb_queue.lock, flags);
+ 
+ 	if (!skb)
+@@ -381,12 +389,12 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
+ 	return skb;
+ }
+ 
+-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
++static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
+ {
+ 	unsigned int offset_start;
+ 
+ 	offset_start = session->pkt.dpo * 7;
+-	return j1939_session_skb_find_by_offset(session, offset_start);
++	return j1939_session_skb_get_by_offset(session, offset_start);
+ }
+ 
+ /* see if we are receiver
+@@ -776,7 +784,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
+ 	int ret = 0;
+ 	u8 dat[8];
+ 
+-	se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
++	se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
+ 	if (!se_skb)
+ 		return -ENOBUFS;
+ 
+@@ -801,7 +809,8 @@ static int j1939_session_tx_dat(struct j1939_session *session)
+ 			netdev_err_once(priv->ndev,
+ 					"%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
+ 					__func__, session, skcb->offset, se_skb->len , session->pkt.tx);
+-			return -EOVERFLOW;
++			ret = -EOVERFLOW;
++			goto out_free;
+ 		}
+ 
+ 		if (!len) {
+@@ -835,6 +844,12 @@ static int j1939_session_tx_dat(struct j1939_session *session)
+ 	if (pkt_done)
+ 		j1939_tp_set_rxtimeout(session, 250);
+ 
++ out_free:
++	if (ret)
++		kfree_skb(se_skb);
++	else
++		consume_skb(se_skb);
++
+ 	return ret;
+ }
+ 
+@@ -1007,7 +1022,7 @@ static int j1939_xtp_txnext_receiver(struct j1939_session *session)
+ static int j1939_simple_txnext(struct j1939_session *session)
+ {
+ 	struct j1939_priv *priv = session->priv;
+-	struct sk_buff *se_skb = j1939_session_skb_find(session);
++	struct sk_buff *se_skb = j1939_session_skb_get(session);
+ 	struct sk_buff *skb;
+ 	int ret;
+ 
+@@ -1015,8 +1030,10 @@ static int j1939_simple_txnext(struct j1939_session *session)
+ 		return 0;
+ 
+ 	skb = skb_clone(se_skb, GFP_ATOMIC);
+-	if (!skb)
+-		return -ENOMEM;
++	if (!skb) {
++		ret = -ENOMEM;
++		goto out_free;
++	}
+ 
+ 	can_skb_set_owner(skb, se_skb->sk);
+ 
+@@ -1024,12 +1041,18 @@ static int j1939_simple_txnext(struct j1939_session *session)
+ 
+ 	ret = j1939_send_one(priv, skb);
+ 	if (ret)
+-		return ret;
++		goto out_free;
+ 
+ 	j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
+ 	j1939_sk_queue_activate_next(session);
+ 
+-	return 0;
++ out_free:
++	if (ret)
++		kfree_skb(se_skb);
++	else
++		consume_skb(se_skb);
++
++	return ret;
+ }
+ 
+ static bool j1939_session_deactivate_locked(struct j1939_session *session)
+@@ -1170,9 +1193,10 @@ static void j1939_session_completed(struct j1939_session *session)
+ 	struct sk_buff *skb;
+ 
+ 	if (!session->transmission) {
+-		skb = j1939_session_skb_find(session);
++		skb = j1939_session_skb_get(session);
+ 		/* distribute among j1939 receivers */
+ 		j1939_sk_recv(session->priv, skb);
++		consume_skb(skb);
+ 	}
+ 
+ 	j1939_session_deactivate_activate_next(session);
+@@ -1744,7 +1768,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
+ {
+ 	struct j1939_priv *priv = session->priv;
+ 	struct j1939_sk_buff_cb *skcb;
+-	struct sk_buff *se_skb;
++	struct sk_buff *se_skb = NULL;
+ 	const u8 *dat;
+ 	u8 *tpdat;
+ 	int offset;
+@@ -1786,7 +1810,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
+ 		goto out_session_cancel;
+ 	}
+ 
+-	se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
++	se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
+ 	if (!se_skb) {
+ 		netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
+ 			    session);
+@@ -1848,11 +1872,13 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
+ 		j1939_tp_set_rxtimeout(session, 250);
+ 	}
+ 	session->last_cmd = 0xff;
++	consume_skb(se_skb);
+ 	j1939_session_put(session);
+ 
+ 	return;
+ 
+  out_session_cancel:
++	kfree_skb(se_skb);
+ 	j1939_session_timers_cancel(session);
+ 	j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
+ 	j1939_session_put(session);
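
The j1939 hunks above change skb ownership: j1939_session_skb_queue() now takes
its own reference with skb_get(), the find helpers are renamed to *_get() and
return a referenced skb, and every caller releases that reference with
consume_skb() on success or kfree_skb() on error. A minimal userspace sketch of
the same rule (names are illustrative stand-ins, not kernel API):

#include <stdlib.h>

struct buf { int refs; };                 /* stand-in for struct sk_buff */

static struct buf *buf_get(struct buf *b) { b->refs++; return b; }

static void buf_put(struct buf *b)        /* consume_skb()/kfree_skb() analogue */
{
	if (--b->refs == 0)
		free(b);
}

int main(void)
{
	struct buf *queued = buf_get(calloc(1, sizeof(struct buf))); /* queue's ref */
	struct buf *found  = buf_get(queued);  /* *_get() lookup: extra ref for caller */

	/* ... caller uses 'found' without holding the queue lock ... */
	buf_put(found);    /* caller done: consume_skb() analogue */
	buf_put(queued);   /* queue teardown drops the queue's reference */
	return 0;
}

Holding a caller-owned reference is what lets j1939_session_skb_drop_old() free
queued skbs without pulling them out from under a concurrent transmit path.
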
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 139d9471ddcf4..ac96fc2100253 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -83,7 +83,7 @@ struct raw_sock {
+ 	struct sock sk;
+ 	int bound;
+ 	int ifindex;
+-	struct notifier_block notifier;
++	struct list_head notifier;
+ 	int loopback;
+ 	int recv_own_msgs;
+ 	int fd_frames;
+@@ -95,6 +95,10 @@ struct raw_sock {
+ 	struct uniqframe __percpu *uniq;
+ };
+ 
++static LIST_HEAD(raw_notifier_list);
++static DEFINE_SPINLOCK(raw_notifier_lock);
++static struct raw_sock *raw_busy_notifier;
++
+ /* Return pointer to store the extra msg flags for raw_recvmsg().
+  * We use the space of one unsigned int beyond the 'struct sockaddr_can'
+  * in skb->cb.
+@@ -263,21 +267,16 @@ static int raw_enable_allfilters(struct net *net, struct net_device *dev,
+ 	return err;
+ }
+ 
+-static int raw_notifier(struct notifier_block *nb,
+-			unsigned long msg, void *ptr)
++static void raw_notify(struct raw_sock *ro, unsigned long msg,
++		       struct net_device *dev)
+ {
+-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+-	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
+ 	struct sock *sk = &ro->sk;
+ 
+ 	if (!net_eq(dev_net(dev), sock_net(sk)))
+-		return NOTIFY_DONE;
+-
+-	if (dev->type != ARPHRD_CAN)
+-		return NOTIFY_DONE;
++		return;
+ 
+ 	if (ro->ifindex != dev->ifindex)
+-		return NOTIFY_DONE;
++		return;
+ 
+ 	switch (msg) {
+ 	case NETDEV_UNREGISTER:
+@@ -305,7 +304,28 @@ static int raw_notifier(struct notifier_block *nb,
+ 			sk->sk_error_report(sk);
+ 		break;
+ 	}
++}
++
++static int raw_notifier(struct notifier_block *nb, unsigned long msg,
++			void *ptr)
++{
++	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++
++	if (dev->type != ARPHRD_CAN)
++		return NOTIFY_DONE;
++	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
++		return NOTIFY_DONE;
++	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
++		return NOTIFY_DONE;
+ 
++	spin_lock(&raw_notifier_lock);
++	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
++		spin_unlock(&raw_notifier_lock);
++		raw_notify(raw_busy_notifier, msg, dev);
++		spin_lock(&raw_notifier_lock);
++	}
++	raw_busy_notifier = NULL;
++	spin_unlock(&raw_notifier_lock);
+ 	return NOTIFY_DONE;
+ }
+ 
+@@ -334,9 +354,9 @@ static int raw_init(struct sock *sk)
+ 		return -ENOMEM;
+ 
+ 	/* set notifier */
+-	ro->notifier.notifier_call = raw_notifier;
+-
+-	register_netdevice_notifier(&ro->notifier);
++	spin_lock(&raw_notifier_lock);
++	list_add_tail(&ro->notifier, &raw_notifier_list);
++	spin_unlock(&raw_notifier_lock);
+ 
+ 	return 0;
+ }
+@@ -351,7 +371,14 @@ static int raw_release(struct socket *sock)
+ 
+ 	ro = raw_sk(sk);
+ 
+-	unregister_netdevice_notifier(&ro->notifier);
++	spin_lock(&raw_notifier_lock);
++	while (raw_busy_notifier == ro) {
++		spin_unlock(&raw_notifier_lock);
++		schedule_timeout_uninterruptible(1);
++		spin_lock(&raw_notifier_lock);
++	}
++	list_del(&ro->notifier);
++	spin_unlock(&raw_notifier_lock);
+ 
+ 	lock_sock(sk);
+ 
+@@ -889,6 +916,10 @@ static const struct can_proto raw_can_proto = {
+ 	.prot       = &raw_proto,
+ };
+ 
++static struct notifier_block canraw_notifier = {
++	.notifier_call = raw_notifier
++};
++
+ static __init int raw_module_init(void)
+ {
+ 	int err;
+@@ -898,6 +929,8 @@ static __init int raw_module_init(void)
+ 	err = can_proto_register(&raw_can_proto);
+ 	if (err < 0)
+ 		pr_err("can: registration of raw protocol failed\n");
++	else
++		register_netdevice_notifier(&canraw_notifier);
+ 
+ 	return err;
+ }
+@@ -905,6 +938,7 @@ static __init int raw_module_init(void)
+ static __exit void raw_module_exit(void)
+ {
+ 	can_proto_unregister(&raw_can_proto);
++	unregister_netdevice_notifier(&canraw_notifier);
+ }
+ 
+ module_init(raw_module_init);
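
As with isotp above, raw.c replaces one registered netdevice notifier per
socket with a single module-level notifier (canraw_notifier) that fans out over
a spinlock-protected list of sockets. The subtlety is that the lock is dropped
around each callback, so the entry being serviced is parked in
raw_busy_notifier and raw_release() spins until its socket is no longer the
busy one. A compressed userspace sketch of that walk (stand-in types, not the
kernel API):

#include <pthread.h>
#include <stdio.h>

struct sk_entry { struct sk_entry *next; int ifindex; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sk_entry *list_head;
static struct sk_entry *busy;            /* mirrors raw_busy_notifier */

static void notify_one(struct sk_entry *e, int ifindex)
{
	if (e->ifindex == ifindex)
		printf("notify socket bound to ifindex %d\n", ifindex);
}

static void notifier(int ifindex)        /* one notifier fans out to all sockets */
{
	pthread_mutex_lock(&list_lock);
	for (busy = list_head; busy; busy = busy->next) {
		pthread_mutex_unlock(&list_lock);
		notify_one(busy, ifindex);   /* callback runs unlocked */
		pthread_mutex_lock(&list_lock);
	}
	busy = NULL;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct sk_entry a = { NULL, 2 }, b = { &a, 1 };

	list_head = &b;
	notifier(2);
	return 0;
}

Because the callback runs unlocked, a socket must not be freed while it is the
busy entry; that is exactly what the schedule_timeout_uninterruptible() wait in
raw_release() guarantees.
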
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 43b6ac4c44395..cc8dafb25d612 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -641,6 +641,18 @@ void __put_net(struct net *net)
+ }
+ EXPORT_SYMBOL_GPL(__put_net);
+ 
++/**
++ * get_net_ns - increment the refcount of the network namespace
++ * @ns: common namespace (net)
++ *
++ * Returns the net's common namespace.
++ */
++struct ns_common *get_net_ns(struct ns_common *ns)
++{
++	return &get_net(container_of(ns, struct net, ns))->ns;
++}
++EXPORT_SYMBOL_GPL(get_net_ns);
++
+ struct net *get_net_ns_by_fd(int fd)
+ {
+ 	struct file *file;
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 9ad046917b340..2123427883baa 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -4833,10 +4833,12 @@ static int rtnl_bridge_notify(struct net_device *dev)
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	if (!skb->len) {
+-		err = -EINVAL;
++	/* Notification info is only filled for bridge ports, not the bridge
++	 * device itself. Therefore, a zero notification length is valid and
++	 * should not result in an error.
++	 */
++	if (!skb->len)
+ 		goto errout;
+-	}
+ 
+ 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
+ 	return 0;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c421c8f809256..7997d99afbd8e 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1252,6 +1252,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
+ 	struct sock *sk = skb->sk;
+ 	struct sk_buff_head *q;
+ 	unsigned long flags;
++	bool is_zerocopy;
+ 	u32 lo, hi;
+ 	u16 len;
+ 
+@@ -1266,6 +1267,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
+ 	len = uarg->len;
+ 	lo = uarg->id;
+ 	hi = uarg->id + len - 1;
++	is_zerocopy = uarg->zerocopy;
+ 
+ 	serr = SKB_EXT_ERR(skb);
+ 	memset(serr, 0, sizeof(*serr));
+@@ -1273,7 +1275,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
+ 	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
+ 	serr->ee.ee_data = hi;
+ 	serr->ee.ee_info = lo;
+-	if (!uarg->zerocopy)
++	if (!is_zerocopy)
+ 		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
+ 
+ 	q = &sk->sk_error_queue;
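
The skbuff.c fix snapshots uarg->zerocopy into a local before the error report
is built. The apparent reason: the sock_extended_err is written into the
notification skb's cb[] area, the same storage that holds the ubuf_info, so the
memset() and field writes can clobber the flag before the old code read it. A
tiny aliasing demo of why the snapshot is needed (the layout here is
illustrative only):

#include <stdio.h>
#include <string.h>

/* both structures overlay the same cb[] storage, as in the skb case */
union cb {
	struct { int zerocopy; } uarg;
	struct { int ee_code; }  serr;
};

int main(void)
{
	union cb cb = { .uarg = { .zerocopy = 1 } };

	int is_zerocopy = cb.uarg.zerocopy;     /* the fix: snapshot first */

	memset(&cb.serr, 0, sizeof(cb.serr));   /* building serr clobbers uarg */
	if (!is_zerocopy)                       /* safe: uses the snapshot */
		cb.serr.ee_code = 1;            /* SO_EE_CODE_ZEROCOPY_COPIED */

	printf("ee_code=%d\n", cb.serr.ee_code); /* 0: the flag was preserved */
	return 0;
}
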
+diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
+index c3a5489964cde..9908b922cce8d 100644
+--- a/net/ethtool/strset.c
++++ b/net/ethtool/strset.c
+@@ -328,6 +328,8 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
+ 	int len = 0;
+ 	int ret;
+ 
++	len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */
++
+ 	for (i = 0; i < ETH_SS_COUNT; i++) {
+ 		const struct strset_info *set_info = &data->sets[i];
+ 
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index bfaf327e9d121..e0480c6cebaad 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -472,6 +472,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
+ 		kfree(doi_def->map.std->lvl.local);
+ 		kfree(doi_def->map.std->cat.cipso);
+ 		kfree(doi_def->map.std->cat.local);
++		kfree(doi_def->map.std);
+ 		break;
+ 	}
+ 	kfree(doi_def);
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 616e2dc1c8fa4..cd65d3146c300 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -759,6 +759,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ 		icmp_param.data_len = room;
+ 	icmp_param.head_len = sizeof(struct icmphdr);
+ 
++	/* if we don't have a source address at this point, fall back to the
++	 * dummy address instead of sending out a packet with a source address
++	 * of 0.0.0.0
++	 */
++	if (!fl4.saddr)
++		fl4.saddr = htonl(INADDR_DUMMY);
++
+ 	icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
+ ende:
+ 	ip_rt_put(rt);
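
The INADDR_DUMMY fallback used here is defined near the end of this patch (the
tools/include/uapi/linux/in.h hunk) as 0xc0000008, i.e. the RFC 7600 dummy
address 192.0.0.8; the icmp code converts it to network order with htonl(). A
quick byte-wise check:

#include <stdio.h>

int main(void)
{
	unsigned long a = 0xc0000008;   /* INADDR_DUMMY, host byte order */

	printf("%lu.%lu.%lu.%lu\n",     /* prints 192.0.0.8 */
	       (a >> 24) & 0xff, (a >> 16) & 0xff,
	       (a >> 8) & 0xff, a & 0xff);
	return 0;
}
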
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 7b272bbed2b43..6b3c558a4f232 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1801,6 +1801,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
+ 	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
+ 		in_dev->mc_list = i->next_rcu;
+ 		in_dev->mc_count--;
++		ip_mc_clear_src(i);
+ 		ip_ma_put(i);
+ 	}
+ }
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index d635b4f32d348..09506203156d1 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2081,6 +2081,19 @@ martian_source:
+ 	return err;
+ }
+ 
++/* get device for dst_alloc with local routes */
++static struct net_device *ip_rt_get_dev(struct net *net,
++					const struct fib_result *res)
++{
++	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
++	struct net_device *dev = NULL;
++
++	if (nhc)
++		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
++
++	return dev ? : net->loopback_dev;
++}
++
+ /*
+  *	NOTE. We drop all the packets that have local source
+  *	addresses, because every properly looped back packet
+@@ -2237,7 +2250,7 @@ local_input:
+ 		}
+ 	}
+ 
+-	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
++	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
+ 			   flags | RTCF_LOCAL, res->type,
+ 			   IN_DEV_ORCONF(in_dev, NOPOLICY), false);
+ 	if (!rth)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index c586a6bb8c6d0..3dd340679d096 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2576,6 +2576,9 @@ void udp_destroy_sock(struct sock *sk)
+ {
+ 	struct udp_sock *up = udp_sk(sk);
+ 	bool slow = lock_sock_fast(sk);
++
++	/* protects from races with udp_abort() */
++	sock_set_flag(sk, SOCK_DEAD);
+ 	udp_flush_pending_frames(sk);
+ 	unlock_sock_fast(sk, slow);
+ 	if (static_branch_unlikely(&udp_encap_needed_key)) {
+@@ -2826,10 +2829,17 @@ int udp_abort(struct sock *sk, int err)
+ {
+ 	lock_sock(sk);
+ 
++	/* udp{v6}_destroy_sock() sets SOCK_DEAD under the sk lock; avoid racing
++	 * with close()
++	 */
++	if (sock_flag(sk, SOCK_DEAD))
++		goto out;
++
+ 	sk->sk_err = err;
+ 	sk->sk_error_report(sk);
+ 	__udp_disconnect(sk, 0);
+ 
++out:
+ 	release_sock(sk);
+ 
+ 	return 0;
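
Both halves of this UDP fix lean on the socket lock: udp_destroy_sock() sets
SOCK_DEAD while holding it, and udp_abort() re-checks the flag after taking it,
so an abort that loses the race with close() becomes a no-op instead of
operating on a dying socket. The handshake, sketched with userspace stand-ins:

#include <pthread.h>
#include <stdbool.h>

struct sk { pthread_mutex_t lock; bool dead; int err; };

static void destroy(struct sk *s)           /* udp_destroy_sock() analogue */
{
	pthread_mutex_lock(&s->lock);
	s->dead = true;                     /* sock_set_flag(sk, SOCK_DEAD) */
	/* flush pending frames ... */
	pthread_mutex_unlock(&s->lock);
}

static int abort_sk(struct sk *s, int err)  /* udp_abort() analogue */
{
	pthread_mutex_lock(&s->lock);
	if (s->dead)                        /* lost the race with close() */
		goto out;
	s->err = err;                       /* report and disconnect ... */
out:
	pthread_mutex_unlock(&s->lock);
	return 0;
}

int main(void)
{
	struct sk s = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

	destroy(&s);
	return abort_sk(&s, 111);           /* no-op: s.dead is already set */
}
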
+diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
+index e204163c7036c..92f3235fa2874 100644
+--- a/net/ipv6/netfilter/nft_fib_ipv6.c
++++ b/net/ipv6/netfilter/nft_fib_ipv6.c
+@@ -135,6 +135,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
+ }
+ EXPORT_SYMBOL_GPL(nft_fib6_eval_type);
+ 
++static bool nft_fib_v6_skip_icmpv6(const struct sk_buff *skb, u8 next, const struct ipv6hdr *iph)
++{
++	if (likely(next != IPPROTO_ICMPV6))
++		return false;
++
++	if (ipv6_addr_type(&iph->saddr) != IPV6_ADDR_ANY)
++		return false;
++
++	return ipv6_addr_type(&iph->daddr) & IPV6_ADDR_LINKLOCAL;
++}
++
+ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 		   const struct nft_pktinfo *pkt)
+ {
+@@ -163,10 +174,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 
+ 	lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
+ 
+-	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+-	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+-		nft_fib_store_result(dest, priv, nft_in(pkt));
+-		return;
++	if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
++	    nft_hook(pkt) == NF_INET_INGRESS) {
++		if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
++		    nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
++			nft_fib_store_result(dest, priv, nft_in(pkt));
++			return;
++		}
+ 	}
+ 
+ 	*dest = 0;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index d25e5a9252fdb..29288f134d7ac 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1597,6 +1597,9 @@ void udpv6_destroy_sock(struct sock *sk)
+ {
+ 	struct udp_sock *up = udp_sk(sk);
+ 	lock_sock(sk);
++
++	/* protects from races with udp_abort() */
++	sock_set_flag(sk, SOCK_DEAD);
+ 	udp_v6_flush_pending_frames(sk);
+ 	release_sock(sk);
+ 
+diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
+index 5296898875ffb..223fbcafd6fce 100644
+--- a/net/mac80211/debugfs.c
++++ b/net/mac80211/debugfs.c
+@@ -4,7 +4,7 @@
+  *
+  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+- * Copyright (C) 2018 - 2019 Intel Corporation
++ * Copyright (C) 2018 - 2019, 2021 Intel Corporation
+  */
+ 
+ #include <linux/debugfs.h>
+@@ -387,10 +387,17 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf,
+ 			   size_t count, loff_t *ppos)
+ {
+ 	struct ieee80211_local *local = file->private_data;
++	int ret;
+ 
+ 	rtnl_lock();
++	wiphy_lock(local->hw.wiphy);
+ 	__ieee80211_suspend(&local->hw, NULL);
+-	__ieee80211_resume(&local->hw);
++	ret = __ieee80211_resume(&local->hw);
++	wiphy_unlock(local->hw.wiphy);
++
++	if (ret)
++		cfg80211_shutdown_all_interfaces(local->hw.wiphy);
++
+ 	rtnl_unlock();
+ 
+ 	return count;
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 6f8885766cbaa..6ebfd484e61d2 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -475,14 +475,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ 				   GFP_KERNEL);
+ 	}
+ 
+-	/* APs need special treatment */
+ 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
+-		struct ieee80211_sub_if_data *vlan, *tmpsdata;
+-
+-		/* down all dependent devices, that is VLANs */
+-		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
+-					 u.vlan.list)
+-			dev_close(vlan->dev);
+ 		WARN_ON(!list_empty(&sdata->u.ap.vlans));
+ 	} else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ 		/* remove all packets in parent bc_buf pointing to this dev */
+@@ -640,6 +633,15 @@ static int ieee80211_stop(struct net_device *dev)
+ {
+ 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ 
++	/* close all dependent VLAN interfaces before locking wiphy */
++	if (sdata->vif.type == NL80211_IFTYPE_AP) {
++		struct ieee80211_sub_if_data *vlan, *tmpsdata;
++
++		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
++					 u.vlan.list)
++			dev_close(vlan->dev);
++	}
++
+ 	wiphy_lock(sdata->local->hw.wiphy);
+ 	ieee80211_do_stop(sdata, true);
+ 	wiphy_unlock(sdata->local->hw.wiphy);
+@@ -1589,6 +1591,9 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
+ 
+ 	switch (sdata->vif.type) {
+ 	case NL80211_IFTYPE_AP:
++		if (!list_empty(&sdata->u.ap.vlans))
++			return -EBUSY;
++		break;
+ 	case NL80211_IFTYPE_STATION:
+ 	case NL80211_IFTYPE_ADHOC:
+ 	case NL80211_IFTYPE_OCB:
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 0331f3a3c40e0..9dd741b68f268 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -252,6 +252,7 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, restart_work);
+ 	struct ieee80211_sub_if_data *sdata;
++	int ret;
+ 
+ 	/* wait for scan work complete */
+ 	flush_workqueue(local->workqueue);
+@@ -294,8 +295,12 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	/* wait for all packet processing to be done */
+ 	synchronize_net();
+ 
+-	ieee80211_reconfig(local);
++	ret = ieee80211_reconfig(local);
+ 	wiphy_unlock(local->hw.wiphy);
++
++	if (ret)
++		cfg80211_shutdown_all_interfaces(local->hw.wiphy);
++
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
+index ecad9b10984ff..e627a11844a9b 100644
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -1516,7 +1516,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
+ 	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
+ 		return;
+ 
+-	if (time_is_before_jiffies(mi->sample_time))
++	if (time_is_after_jiffies(mi->sample_time))
+ 		return;
+ 
+ 	mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index d4cc9ac2d7033..6b50cb5e0e3cc 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -251,13 +251,24 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ 	struct ieee80211_bss *bss;
+ 	struct ieee80211_channel *channel;
++	size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
++				      u.probe_resp.variable);
++
++	if (!ieee80211_is_probe_resp(mgmt->frame_control) &&
++	    !ieee80211_is_beacon(mgmt->frame_control) &&
++	    !ieee80211_is_s1g_beacon(mgmt->frame_control))
++		return;
+ 
+ 	if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+-		if (skb->len < 15)
+-			return;
+-	} else if (skb->len < 24 ||
+-		 (!ieee80211_is_probe_resp(mgmt->frame_control) &&
+-		  !ieee80211_is_beacon(mgmt->frame_control)))
++		if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
++			min_hdr_len = offsetof(struct ieee80211_ext,
++					       u.s1g_short_beacon.variable);
++		else
++			min_hdr_len = offsetof(struct ieee80211_ext,
++					       u.s1g_beacon);
++	}
++
++	if (skb->len < min_hdr_len)
+ 		return;
+ 
+ 	sdata1 = rcu_dereference(local->scan_sdata);
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 28422d6870967..d33dc4e023715 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2002,6 +2002,26 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
+ 	ieee80211_tx(sdata, sta, skb, false);
+ }
+ 
++static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
++{
++	struct ieee80211_radiotap_header *rthdr =
++		(struct ieee80211_radiotap_header *)skb->data;
++
++	/* check for not even having the fixed radiotap header part */
++	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
++		return false; /* too short to be possibly valid */
++
++	/* is it a header version we can trust to find length from? */
++	if (unlikely(rthdr->it_version))
++		return false; /* only version 0 is supported */
++
++	/* does the skb contain enough to deliver on the alleged length? */
++	if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
++		return false; /* skb too short for claimed rt header extent */
++
++	return true;
++}
++
+ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ 				 struct net_device *dev)
+ {
+@@ -2010,8 +2030,6 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ 	struct ieee80211_radiotap_header *rthdr =
+ 		(struct ieee80211_radiotap_header *) skb->data;
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+-	struct ieee80211_supported_band *sband =
+-		local->hw.wiphy->bands[info->band];
+ 	int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
+ 						   NULL);
+ 	u16 txflags;
+@@ -2024,17 +2042,8 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ 	u8 vht_mcs = 0, vht_nss = 0;
+ 	int i;
+ 
+-	/* check for not even having the fixed radiotap header part */
+-	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+-		return false; /* too short to be possibly valid */
+-
+-	/* is it a header version we can trust to find length from? */
+-	if (unlikely(rthdr->it_version))
+-		return false; /* only version 0 is supported */
+-
+-	/* does the skb contain enough to deliver on the alleged length? */
+-	if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
+-		return false; /* skb too short for claimed rt header extent */
++	if (!ieee80211_validate_radiotap_len(skb))
++		return false;
+ 
+ 	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
+ 		       IEEE80211_TX_CTL_DONTFRAG;
+@@ -2174,6 +2183,9 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ 		return false;
+ 
+ 	if (rate_found) {
++		struct ieee80211_supported_band *sband =
++			local->hw.wiphy->bands[info->band];
++
+ 		info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
+ 
+ 		for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+@@ -2187,7 +2199,7 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ 		} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+ 			ieee80211_rate_set_vht(info->control.rates, vht_mcs,
+ 					       vht_nss);
+-		} else {
++		} else if (sband) {
+ 			for (i = 0; i < sband->n_bitrates; i++) {
+ 				if (rate * 5 != sband->bitrates[i].bitrate)
+ 					continue;
+@@ -2224,8 +2236,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
+ 	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+ 		      IEEE80211_TX_CTL_INJECTED;
+ 
+-	/* Sanity-check and process the injection radiotap header */
+-	if (!ieee80211_parse_tx_radiotap(skb, dev))
++	/* Sanity-check the length of the radiotap header */
++	if (!ieee80211_validate_radiotap_len(skb))
+ 		goto fail;
+ 
+ 	/* we now know there is a radiotap header with a length we can use */
+@@ -2339,6 +2351,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
+ 	ieee80211_select_queue_80211(sdata, skb, hdr);
+ 	skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
+ 
++	/*
++	 * Process the radiotap header. This will now take into account the
++	 * selected chandef above to accurately set injection rates and
++	 * retransmissions.
++	 */
++	if (!ieee80211_parse_tx_radiotap(skb, dev))
++		goto fail_rcu;
++
+ 	/* remove the injection radiotap header */
+ 	skb_pull(skb, len_rthdr);
+ 
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index c0fa526a45b4d..53755a05f73b5 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -2186,8 +2186,6 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
+ 	list_for_each_entry(ctx, &local->chanctx_list, list)
+ 		ctx->driver_present = false;
+ 	mutex_unlock(&local->chanctx_mtx);
+-
+-	cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+ }
+ 
+ static void ieee80211_assign_chanctx(struct ieee80211_local *local,
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 8848a9e2a95b1..47d90cf31f125 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -337,6 +337,8 @@ void mptcp_get_options(const struct sk_buff *skb,
+ 			length--;
+ 			continue;
+ 		default:
++			if (length < 2)
++				return;
+ 			opsize = *ptr++;
+ 			if (opsize < 2) /* "silly options" */
+ 				return;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 225b988215171..d8187ac065397 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -287,11 +287,13 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
+ 
+ 	/* try to fetch required memory from subflow */
+ 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
+-		if (ssk->sk_forward_alloc < skb->truesize)
+-			goto drop;
+-		__sk_mem_reclaim(ssk, skb->truesize);
+-		if (!sk_rmem_schedule(sk, skb, skb->truesize))
++		int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;
++
++		if (ssk->sk_forward_alloc < amount)
+ 			goto drop;
++
++		ssk->sk_forward_alloc -= amount;
++		sk->sk_forward_alloc += amount;
+ 	}
+ 
+ 	/* the skb map_seq accounts for the skb offset:
+@@ -687,18 +689,22 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
+/* In most cases we will be able to lock the mptcp socket.  If it's already
+  * owned, we need to defer to the work queue to avoid ABBA deadlock.
+  */
+-static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
++static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+ {
+ 	struct sock *sk = (struct sock *)msk;
+ 	unsigned int moved = 0;
+ 
+ 	if (inet_sk_state_load(sk) == TCP_CLOSE)
+-		return;
+-
+-	mptcp_data_lock(sk);
++		return false;
+ 
+ 	__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
+ 	__mptcp_ofo_queue(msk);
++	if (unlikely(ssk->sk_err)) {
++		if (!sock_owned_by_user(sk))
++			__mptcp_error_report(sk);
++		else
++			set_bit(MPTCP_ERROR_REPORT,  &msk->flags);
++	}
+ 
+ 	/* If the moves have caught up with the DATA_FIN sequence number
+ 	 * it's time to ack the DATA_FIN and change socket state, but
+@@ -707,7 +713,7 @@ static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+ 	 */
+ 	if (mptcp_pending_data_fin(sk, NULL))
+ 		mptcp_schedule_work(sk);
+-	mptcp_data_unlock(sk);
++	return moved > 0;
+ }
+ 
+ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+@@ -715,7 +721,6 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	int sk_rbuf, ssk_rbuf;
+-	bool wake;
+ 
+ 	/* The peer can send data while we are shutting down this
+ 	 * subflow at msk destruction time, but we must avoid enqueuing
+@@ -724,28 +729,22 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+ 	if (unlikely(subflow->disposable))
+ 		return;
+ 
+-	/* move_skbs_to_msk below can legitly clear the data_avail flag,
+-	 * but we will need later to properly woke the reader, cache its
+-	 * value
+-	 */
+-	wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
+-	if (wake)
+-		set_bit(MPTCP_DATA_READY, &msk->flags);
+-
+ 	ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
+ 	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
+ 	if (unlikely(ssk_rbuf > sk_rbuf))
+ 		sk_rbuf = ssk_rbuf;
+ 
+-	/* over limit? can't append more skbs to msk */
++	/* over limit? can't append more skbs to msk; also, no need to wake up */
+ 	if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
+-		goto wake;
+-
+-	move_skbs_to_msk(msk, ssk);
++		return;
+ 
+-wake:
+-	if (wake)
++	/* Wake up the reader only for in-sequence data */
++	mptcp_data_lock(sk);
++	if (move_skbs_to_msk(msk, ssk)) {
++		set_bit(MPTCP_DATA_READY, &msk->flags);
+ 		sk->sk_data_ready(sk);
++	}
++	mptcp_data_unlock(sk);
+ }
+ 
+ void __mptcp_flush_join_list(struct mptcp_sock *msk)
+@@ -848,7 +847,7 @@ static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
+ 	sock_owned_by_me(sk);
+ 
+ 	mptcp_for_each_subflow(msk, subflow) {
+-		if (subflow->data_avail)
++		if (READ_ONCE(subflow->data_avail))
+ 			return mptcp_subflow_tcp_sock(subflow);
+ 	}
+ 
+@@ -1939,6 +1938,9 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
+ 		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
+ 		mptcp_data_unlock(sk);
+ 		tcp_cleanup_rbuf(ssk, moved);
++
++		if (unlikely(ssk->sk_err))
++			__mptcp_error_report(sk);
+ 		unlock_sock_fast(ssk, slowpath);
+ 	} while (!done);
+ 
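
In the __mptcp_move_skb() hunk above, the forward-allocated memory moved from
the subflow to the msk is rounded up to whole accounting quanta:
amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT. Assuming the
usual 4 KiB quantum (SK_MEM_QUANTUM is PAGE_SIZE-derived), a 3000-byte truesize
transfers 4096 bytes of allowance:

#include <stdio.h>

#define SK_MEM_QUANTUM        4096   /* assumed: one 4 KiB page */
#define SK_MEM_QUANTUM_SHIFT  12

static int sk_mem_pages(int amt)     /* round up to whole quanta */
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

int main(void)
{
	int truesize = 3000;
	int amount = sk_mem_pages(truesize) << SK_MEM_QUANTUM_SHIFT;

	printf("truesize=%d -> transfer %d bytes\n", truesize, amount); /* 4096 */
	return 0;
}

Rounding up keeps both sockets consistent with the page-granular bookkeeping
that sk_rmem_schedule() expects.
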
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index e21a5bc36cf08..14e89e4bd4a80 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -372,7 +372,6 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
+ enum mptcp_data_avail {
+ 	MPTCP_SUBFLOW_NODATA,
+ 	MPTCP_SUBFLOW_DATA_AVAIL,
+-	MPTCP_SUBFLOW_OOO_DATA
+ };
+ 
+ struct mptcp_delegated_action {
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 8425cd393bf3e..d6d8ad4f918e7 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -754,10 +754,10 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
+ 	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
+ }
+ 
+-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
++static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+ {
+-	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+-		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
++	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
++		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ }
+ 
+ static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
+@@ -782,13 +782,13 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
+ 		/* Mapping covers data later in the subflow stream,
+ 		 * currently unsupported.
+ 		 */
+-		warn_bad_map(subflow, ssn);
++		dbg_bad_map(subflow, ssn);
+ 		return false;
+ 	}
+ 	if (unlikely(!before(ssn, subflow->map_subflow_seq +
+ 				  subflow->map_data_len))) {
+ 		/* Mapping covers past subflow data, invalid */
+-		warn_bad_map(subflow, ssn + skb->len);
++		dbg_bad_map(subflow, ssn);
+ 		return false;
+ 	}
+ 	return true;
+@@ -974,7 +974,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
+ 		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
+ 	if (!skb_peek(&ssk->sk_receive_queue))
+-		subflow->data_avail = 0;
++		WRITE_ONCE(subflow->data_avail, 0);
+ 	if (subflow->data_avail)
+ 		return true;
+ 
+@@ -1012,18 +1012,13 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
+ 		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
+ 			 ack_seq);
+-		if (ack_seq == old_ack) {
+-			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+-			break;
+-		} else if (after64(ack_seq, old_ack)) {
+-			subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
+-			break;
++		if (unlikely(before64(ack_seq, old_ack))) {
++			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
++			continue;
+ 		}
+ 
+-		/* only accept in-sequence mapping. Old values are spurious
+-		 * retransmission
+-		 */
+-		mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
++		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
++		break;
+ 	}
+ 	return true;
+ 
+@@ -1038,10 +1033,9 @@ fallback:
+ 		 * subflow_error_report() will introduce the appropriate barriers
+ 		 */
+ 		ssk->sk_err = EBADMSG;
+-		ssk->sk_error_report(ssk);
+ 		tcp_set_state(ssk, TCP_CLOSE);
+ 		tcp_send_active_reset(ssk, GFP_ATOMIC);
+-		subflow->data_avail = 0;
++		WRITE_ONCE(subflow->data_avail, 0);
+ 		return false;
+ 	}
+ 
+@@ -1051,7 +1045,7 @@ fallback:
+ 	subflow->map_seq = READ_ONCE(msk->ack_seq);
+ 	subflow->map_data_len = skb->len;
+ 	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+-	subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
++	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+ 	return true;
+ }
+ 
+@@ -1063,7 +1057,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
+ 	if (subflow->map_valid &&
+ 	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
+ 		subflow->map_valid = 0;
+-		subflow->data_avail = 0;
++		WRITE_ONCE(subflow->data_avail, 0);
+ 
+ 		pr_debug("Done with mapping: seq=%u data_len=%u",
+ 			 subflow->map_subflow_seq,
+@@ -1091,41 +1085,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
+ 	*full_space = tcp_full_space(sk);
+ }
+ 
+-static void subflow_data_ready(struct sock *sk)
+-{
+-	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+-	u16 state = 1 << inet_sk_state_load(sk);
+-	struct sock *parent = subflow->conn;
+-	struct mptcp_sock *msk;
+-
+-	msk = mptcp_sk(parent);
+-	if (state & TCPF_LISTEN) {
+-		/* MPJ subflow are removed from accept queue before reaching here,
+-		 * avoid stray wakeups
+-		 */
+-		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
+-			return;
+-
+-		set_bit(MPTCP_DATA_READY, &msk->flags);
+-		parent->sk_data_ready(parent);
+-		return;
+-	}
+-
+-	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
+-		     !subflow->mp_join && !(state & TCPF_CLOSE));
+-
+-	if (mptcp_subflow_data_available(sk))
+-		mptcp_data_ready(parent, sk);
+-}
+-
+-static void subflow_write_space(struct sock *ssk)
+-{
+-	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+-
+-	mptcp_propagate_sndbuf(sk, ssk);
+-	mptcp_write_space(sk);
+-}
+-
+ void __mptcp_error_report(struct sock *sk)
+ {
+ 	struct mptcp_subflow_context *subflow;
+@@ -1166,6 +1125,43 @@ static void subflow_error_report(struct sock *ssk)
+ 	mptcp_data_unlock(sk);
+ }
+ 
++static void subflow_data_ready(struct sock *sk)
++{
++	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
++	u16 state = 1 << inet_sk_state_load(sk);
++	struct sock *parent = subflow->conn;
++	struct mptcp_sock *msk;
++
++	msk = mptcp_sk(parent);
++	if (state & TCPF_LISTEN) {
++		/* MPJ subflows are removed from the accept queue before reaching here,
++		 * avoid stray wakeups
++		 */
++		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
++			return;
++
++		set_bit(MPTCP_DATA_READY, &msk->flags);
++		parent->sk_data_ready(parent);
++		return;
++	}
++
++	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
++		     !subflow->mp_join && !(state & TCPF_CLOSE));
++
++	if (mptcp_subflow_data_available(sk))
++		mptcp_data_ready(parent, sk);
++	else if (unlikely(sk->sk_err))
++		subflow_error_report(sk);
++}
++
++static void subflow_write_space(struct sock *ssk)
++{
++	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
++
++	mptcp_propagate_sndbuf(sk, ssk);
++	mptcp_write_space(sk);
++}
++
+ static struct inet_connection_sock_af_ops *
+ subflow_default_af_ops(struct sock *sk)
+ {
+@@ -1474,6 +1470,8 @@ static void subflow_state_change(struct sock *sk)
+ 	 */
+ 	if (mptcp_subflow_data_available(sk))
+ 		mptcp_data_ready(parent, sk);
++	else if (unlikely(sk->sk_err))
++		subflow_error_report(sk);
+ 
+ 	subflow_sched_work_if_closed(mptcp_sk(parent), sk);
+ 
+diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
+index b100c04a0e435..3d6d49420db8b 100644
+--- a/net/netfilter/nf_synproxy_core.c
++++ b/net/netfilter/nf_synproxy_core.c
+@@ -31,6 +31,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ 	int length = (th->doff * 4) - sizeof(*th);
+ 	u8 buf[40], *ptr;
+ 
++	if (unlikely(length < 0))
++		return false;
++
+ 	ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
+ 	if (ptr == NULL)
+ 		return false;
+@@ -47,6 +50,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ 			length--;
+ 			continue;
+ 		default:
++			if (length < 2)
++				return true;
+ 			opsize = *ptr++;
+ 			if (opsize < 2)
+ 				return true;
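
The "length < 2" guard added here — and, in the same shape, to
mptcp_get_options() earlier and to cake_get_tcpopt()/cake_tcph_may_drop()
below — stops a one-byte overread: a crafted option area may end with a lone
kind byte, so the parser must confirm a length byte exists before
dereferencing it. A standalone walker showing where the guard sits (a sketch,
not the kernel parser):

#include <stdint.h>
#include <stdio.h>

/* Walk TCP options: 1-byte EOL/NOP kinds, otherwise kind,len,data */
static void walk(const uint8_t *ptr, int length)
{
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		if (opcode == 0)                /* TCPOPT_EOL */
			return;
		if (opcode == 1) {              /* TCPOPT_NOP */
			length--;
			continue;
		}
		if (length < 2)                 /* the added guard: no len byte left */
			return;
		opsize = *ptr++;
		if (opsize < 2 || opsize > length)
			return;                 /* malformed or truncated option */
		printf("option %d, %d bytes\n", opcode, opsize);
		ptr += opsize - 2;
		length -= opsize;
	}
}

int main(void)
{
	const uint8_t evil[] = { 1, 1, 2 };     /* NOP, NOP, lone kind byte */
	walk(evil, sizeof(evil));               /* stops at the guard, no overread */
	return 0;
}
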
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 31016c144c48b..9d5ea23529657 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4317,13 +4317,44 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
+ 	err = nf_tables_set_alloc_name(&ctx, set, name);
+ 	kfree(name);
+ 	if (err < 0)
+-		goto err_set_alloc_name;
++		goto err_set_name;
++
++	udata = NULL;
++	if (udlen) {
++		udata = set->data + size;
++		nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
++	}
++
++	INIT_LIST_HEAD(&set->bindings);
++	set->table = table;
++	write_pnet(&set->net, net);
++	set->ops = ops;
++	set->ktype = ktype;
++	set->klen = desc.klen;
++	set->dtype = dtype;
++	set->objtype = objtype;
++	set->dlen = desc.dlen;
++	set->flags = flags;
++	set->size = desc.size;
++	set->policy = policy;
++	set->udlen = udlen;
++	set->udata = udata;
++	set->timeout = timeout;
++	set->gc_int = gc_int;
++
++	set->field_count = desc.field_count;
++	for (i = 0; i < desc.field_count; i++)
++		set->field_len[i] = desc.field_len[i];
++
++	err = ops->init(set, &desc, nla);
++	if (err < 0)
++		goto err_set_init;
+ 
+ 	if (nla[NFTA_SET_EXPR]) {
+ 		expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
+ 		if (IS_ERR(expr)) {
+ 			err = PTR_ERR(expr);
+-			goto err_set_alloc_name;
++			goto err_set_expr_alloc;
+ 		}
+ 		set->exprs[0] = expr;
+ 		set->num_exprs++;
+@@ -4334,74 +4365,44 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
+ 
+ 		if (!(flags & NFT_SET_EXPR)) {
+ 			err = -EINVAL;
+-			goto err_set_alloc_name;
++			goto err_set_expr_alloc;
+ 		}
+ 		i = 0;
+ 		nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
+ 			if (i == NFT_SET_EXPR_MAX) {
+ 				err = -E2BIG;
+-				goto err_set_init;
++				goto err_set_expr_alloc;
+ 			}
+ 			if (nla_type(tmp) != NFTA_LIST_ELEM) {
+ 				err = -EINVAL;
+-				goto err_set_init;
++				goto err_set_expr_alloc;
+ 			}
+ 			expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
+ 			if (IS_ERR(expr)) {
+ 				err = PTR_ERR(expr);
+-				goto err_set_init;
++				goto err_set_expr_alloc;
+ 			}
+ 			set->exprs[i++] = expr;
+ 			set->num_exprs++;
+ 		}
+ 	}
+ 
+-	udata = NULL;
+-	if (udlen) {
+-		udata = set->data + size;
+-		nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
+-	}
+-
+-	INIT_LIST_HEAD(&set->bindings);
+-	set->table = table;
+-	write_pnet(&set->net, net);
+-	set->ops   = ops;
+-	set->ktype = ktype;
+-	set->klen  = desc.klen;
+-	set->dtype = dtype;
+-	set->objtype = objtype;
+-	set->dlen  = desc.dlen;
+-	set->flags = flags;
+-	set->size  = desc.size;
+-	set->policy = policy;
+-	set->udlen  = udlen;
+-	set->udata  = udata;
+-	set->timeout = timeout;
+-	set->gc_int = gc_int;
+ 	set->handle = nf_tables_alloc_handle(table);
+ 
+-	set->field_count = desc.field_count;
+-	for (i = 0; i < desc.field_count; i++)
+-		set->field_len[i] = desc.field_len[i];
+-
+-	err = ops->init(set, &desc, nla);
+-	if (err < 0)
+-		goto err_set_init;
+-
+ 	err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
+ 	if (err < 0)
+-		goto err_set_trans;
++		goto err_set_expr_alloc;
+ 
+ 	list_add_tail_rcu(&set->list, &table->sets);
+ 	table->use++;
+ 	return 0;
+ 
+-err_set_trans:
+-	ops->destroy(set);
+-err_set_init:
++err_set_expr_alloc:
+ 	for (i = 0; i < set->num_exprs; i++)
+ 		nft_expr_destroy(&ctx, set->exprs[i]);
+-err_set_alloc_name:
++
++	ops->destroy(set);
++err_set_init:
+ 	kfree(set->name);
+ err_set_name:
+ 	kvfree(set);
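
The nf_tables_newset() reshuffle moves ops->init() and the set field setup
ahead of expression allocation, so the error labels are renamed and reordered
to unwind strictly in reverse: free any expressions, destroy the set backend,
free the name, free the set. The goto-ladder discipline it restores, in
schematic form (stub names, not the nftables API):

#include <stdio.h>

static int  alloc_name(void)      { return 0; }
static int  backend_init(void)    { return 0; }   /* ops->init() analogue */
static int  alloc_exprs(void)     { return -1; }  /* pretend this step fails */
static void free_exprs(void)      { }
static void backend_destroy(void) { }             /* ops->destroy() analogue */
static void free_name(void)       { }

static int build(void)
{
	int err;

	err = alloc_name();
	if (err)
		goto err_name;
	err = backend_init();
	if (err)
		goto err_init;
	err = alloc_exprs();
	if (err)
		goto err_exprs;
	return 0;

err_exprs:                 /* undo in reverse order of construction */
	free_exprs();
	backend_destroy();
err_init:
	free_name();
err_name:
	return err;
}

int main(void)
{
	printf("build() = %d\n", build());   /* -1, fully unwound */
	return 0;
}
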
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 1e4fb568fa841..24f10bf7d8a3f 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -435,7 +435,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+ 	struct qrtr_sock *ipc;
+ 	struct sk_buff *skb;
+ 	struct qrtr_cb *cb;
+-	unsigned int size;
++	size_t size;
+ 	unsigned int ver;
+ 	size_t hdrlen;
+ 
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index aba4afe4dfedc..967d115f97efd 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -714,7 +714,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 
+ 		if (rds_cmsg_recv(inc, msg, rs)) {
+ 			ret = -EFAULT;
+-			goto out;
++			break;
+ 		}
+ 		rds_recvmsg_zcookie(rs, msg);
+ 
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index ba7f57cb41c30..143786d8cde03 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -904,14 +904,19 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
+ 	}
+ 
+ 	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+-	if (err == NF_ACCEPT &&
+-	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
+-		if (maniptype == NF_NAT_MANIP_SRC)
+-			maniptype = NF_NAT_MANIP_DST;
+-		else
+-			maniptype = NF_NAT_MANIP_SRC;
+-
+-		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
++	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
++		if (ct->status & IPS_SRC_NAT) {
++			if (maniptype == NF_NAT_MANIP_SRC)
++				maniptype = NF_NAT_MANIP_DST;
++			else
++				maniptype = NF_NAT_MANIP_SRC;
++
++			err = ct_nat_execute(skb, ct, ctinfo, range,
++					     maniptype);
++		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
++			err = ct_nat_execute(skb, ct, ctinfo, NULL,
++					     NF_NAT_MANIP_SRC);
++		}
+ 	}
+ 	return err;
+ #else
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 7d37638ee1c7a..5c15968b5155b 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -943,7 +943,7 @@ static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ 	}
+ 
+ 	tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+-	if (!tcph)
++	if (!tcph || tcph->doff < 5)
+ 		return NULL;
+ 
+ 	return skb_header_pointer(skb, offset,
+@@ -967,6 +967,8 @@ static const void *cake_get_tcpopt(const struct tcphdr *tcph,
+ 			length--;
+ 			continue;
+ 		}
++		if (length < 2)
++			break;
+ 		opsize = *ptr++;
+ 		if (opsize < 2 || opsize > length)
+ 			break;
+@@ -1104,6 +1106,8 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
+ 			length--;
+ 			continue;
+ 		}
++		if (length < 2)
++			break;
+ 		opsize = *ptr++;
+ 		if (opsize < 2 || opsize > length)
+ 			break;
+diff --git a/net/socket.c b/net/socket.c
+index 84a8049c2b099..03259cb919f7e 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1072,19 +1072,6 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
+  *	what to do with it - that's up to the protocol still.
+  */
+ 
+-/**
+- *	get_net_ns - increment the refcount of the network namespace
+- *	@ns: common namespace (net)
+- *
+- *	Returns the net's common namespace.
+- */
+-
+-struct ns_common *get_net_ns(struct ns_common *ns)
+-{
+-	return &get_net(container_of(ns, struct net, ns))->ns;
+-}
+-EXPORT_SYMBOL_GPL(get_net_ns);
+-
+ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ {
+ 	struct socket *sock;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 5a31307ceb76d..5d1192ceb1397 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -535,12 +535,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 	u->path.mnt = NULL;
+ 	state = sk->sk_state;
+ 	sk->sk_state = TCP_CLOSE;
++
++	skpair = unix_peer(sk);
++	unix_peer(sk) = NULL;
++
+ 	unix_state_unlock(sk);
+ 
+ 	wake_up_interruptible_all(&u->peer_wait);
+ 
+-	skpair = unix_peer(sk);
+-
+ 	if (skpair != NULL) {
+ 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
+ 			unix_state_lock(skpair);
+@@ -555,7 +557,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 
+ 		unix_dgram_peer_wake_disconnect(sk, skpair);
+ 		sock_put(skpair); /* It may now die */
+-		unix_peer(sk) = NULL;
+ 	}
+ 
+ 	/* Try to flush out this socket. Throw out buffers at least */
+diff --git a/net/wireless/Makefile b/net/wireless/Makefile
+index 2eee93985ab0d..af590ae606b69 100644
+--- a/net/wireless/Makefile
++++ b/net/wireless/Makefile
+@@ -28,7 +28,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
+ 	@$(kecho) "  GEN     $@"
+ 	@(echo '#include "reg.h"'; \
+ 	  echo 'const u8 shipped_regdb_certs[] = {'; \
+-	  cat $^ ; \
++	  echo | cat - $^ ; \
+ 	  echo '};'; \
+ 	  echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
+ 	 ) > $@
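
A note on this one-character-looking change: with "cat $^" and no .hex files
matching the wildcard, cat has no file operands and would sit reading the
recipe's stdin. "echo | cat - $^" pipes a single newline through stdin, so cat
consumes it and terminates, and shipped-certs.c is still generated; the extra
blank line inside the array initializer is harmless.
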
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 589ee5a69a2e5..0e364f32794d3 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1339,6 +1339,11 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
+ 	rdev->devlist_generation++;
+ 	wdev->registered = true;
+ 
++	if (wdev->netdev &&
++	    sysfs_create_link(&wdev->netdev->dev.kobj, &rdev->wiphy.dev.kobj,
++			      "phy80211"))
++		pr_err("failed to add phy80211 symlink to netdev!\n");
++
+ 	nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
+ }
+ 
+@@ -1364,14 +1369,6 @@ int cfg80211_register_netdevice(struct net_device *dev)
+ 	if (ret)
+ 		goto out;
+ 
+-	if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
+-			      "phy80211")) {
+-		pr_err("failed to add phy80211 symlink to netdev!\n");
+-		unregister_netdevice(dev);
+-		ret = -EINVAL;
+-		goto out;
+-	}
+-
+ 	cfg80211_register_wdev(rdev, wdev);
+ 	ret = 0;
+ out:
+diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
+index a95c79d183492..a817d8e3e4b36 100644
+--- a/net/wireless/pmsr.c
++++ b/net/wireless/pmsr.c
+@@ -324,6 +324,7 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
+ 			    gfp_t gfp)
+ {
+ 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
++	struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
+ 	struct sk_buff *msg;
+ 	void *hdr;
+ 
+@@ -354,9 +355,20 @@ free_msg:
+ 	nlmsg_free(msg);
+ free_request:
+ 	spin_lock_bh(&wdev->pmsr_lock);
+-	list_del(&req->list);
++	/*
++	 * cfg80211_pmsr_process_abort() may have already moved this request
++	 * to the free list, and will free it later. In this case, don't free
++	 * it here.
++	 */
++	list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
++		if (tmp == req) {
++			list_del(&req->list);
++			to_free = req;
++			break;
++		}
++	}
+ 	spin_unlock_bh(&wdev->pmsr_lock);
+-	kfree(req);
++	kfree(to_free);
+ }
+ EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
+ 
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index 9b959e3b09c6d..0c3f05c9be27a 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -133,6 +133,10 @@ static int wiphy_resume(struct device *dev)
+ 	if (rdev->wiphy.registered && rdev->ops->resume)
+ 		ret = rdev_resume(rdev);
+ 	wiphy_unlock(&rdev->wiphy);
++
++	if (ret)
++		cfg80211_shutdown_all_interfaces(&rdev->wiphy);
++
+ 	rtnl_unlock();
+ 
+ 	return ret;
+diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
+index 91a4ef7f620ca..a9b079d56fd69 100644
+--- a/sound/soc/codecs/rt5659.c
++++ b/sound/soc/codecs/rt5659.c
+@@ -2433,13 +2433,18 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
+ 	return 0;
+ }
+ 
+-static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
++static const struct snd_soc_dapm_widget rt5659_particular_dapm_widgets[] = {
+ 	SND_SOC_DAPM_SUPPLY("LDO2", RT5659_PWR_ANLG_3, RT5659_PWR_LDO2_BIT, 0,
+ 		NULL, 0),
+-	SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
+-		NULL, 0),
++	SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
++		0, NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5659_PWR_VOL,
+ 		RT5659_PWR_MIC_DET_BIT, 0, NULL, 0),
++};
++
++static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
++	SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
++		NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("Mono Vref", RT5659_PWR_ANLG_1,
+ 		RT5659_PWR_VREF3_BIT, 0, NULL, 0),
+ 
+@@ -2464,8 +2469,6 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+ 		RT5659_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
+ 
+ 	/* Input Side */
+-	SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
+-		0, NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5659_PWR_ANLG_2, RT5659_PWR_MB2_BIT,
+ 		0, NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5659_PWR_ANLG_2, RT5659_PWR_MB3_BIT,
+@@ -3660,10 +3663,23 @@ static int rt5659_set_bias_level(struct snd_soc_component *component,
+ 
+ static int rt5659_probe(struct snd_soc_component *component)
+ {
++	struct snd_soc_dapm_context *dapm =
++		snd_soc_component_get_dapm(component);
+ 	struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
+ 
+ 	rt5659->component = component;
+ 
++	switch (rt5659->pdata.jd_src) {
++	case RT5659_JD_HDA_HEADER:
++		break;
++
++	default:
++		snd_soc_dapm_new_controls(dapm,
++			rt5659_particular_dapm_widgets,
++			ARRAY_SIZE(rt5659_particular_dapm_widgets));
++		break;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
+index b49f1e16125d4..d1dd7f720ba48 100644
+--- a/sound/soc/codecs/rt5682-sdw.c
++++ b/sound/soc/codecs/rt5682-sdw.c
+@@ -462,7 +462,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
+ 
+ 	regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
+ 		RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
+-	regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
++	regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd142);
++	regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_5, 0x0700, 0x0600);
+ 	regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
+ 		RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
+ 	regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
+diff --git a/sound/soc/codecs/tas2562.h b/sound/soc/codecs/tas2562.h
+index 81866aeb3fbfa..55b2a1f52ca37 100644
+--- a/sound/soc/codecs/tas2562.h
++++ b/sound/soc/codecs/tas2562.h
+@@ -57,13 +57,13 @@
+ #define TAS2562_TDM_CFG0_RAMPRATE_MASK		BIT(5)
+ #define TAS2562_TDM_CFG0_RAMPRATE_44_1		BIT(5)
+ #define TAS2562_TDM_CFG0_SAMPRATE_MASK		GENMASK(3, 1)
+-#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ	0x0
+-#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ	0x1
+-#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ	0x2
+-#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ	0x3
+-#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ	0x4
+-#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ	0x5
+-#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ	0x6
++#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ	(0x0 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ	(0x1 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ	(0x2 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ	(0x3 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ	(0x4 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ	(0x5 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ	(0x6 << 1)
+ 
+ #define TAS2562_TDM_CFG2_RIGHT_JUSTIFY	BIT(6)
+ 
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index f62f81ceab0d2..9dcbe5d5a428c 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -732,6 +732,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ 	/* Initialize sound card */
+ 	priv->pdev = pdev;
+ 	priv->card.dev = &pdev->dev;
++	priv->card.owner = THIS_MODULE;
+ 	ret = snd_soc_of_parse_card_name(&priv->card, "model");
+ 	if (ret) {
+ 		snprintf(priv->name, sizeof(priv->name), "%s-audio",
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index 936384a94f25e..74d3d8c586080 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -93,8 +93,30 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
+ 		struct snd_soc_dai *dai)
+ {
+ 	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
++	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
++	unsigned int id = dai->driver->id;
+ 
+ 	clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
++	/*
++	 * Ensure LRCLK is disabled even in device node validation.
++	 * This has no effect if it was already disabled in the
++	 * lpass_cpu_daiops_trigger() suspend path.
++	 */
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++		regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
++	else
++		regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
++
++	/*
++	 * BCLK may not be enabled if lpass_cpu_daiops_prepare was not called
++	 * before lpass_cpu_daiops_shutdown. It's paired with the clk_enable in
++	 * lpass_cpu_daiops_prepare.
++	 */
++	if (drvdata->mi2s_was_prepared[dai->driver->id]) {
++		drvdata->mi2s_was_prepared[dai->driver->id] = false;
++		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
++	}
++
+ 	clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
+ }
+ 
+@@ -275,6 +297,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++		/*
++		 * Ensure lpass BCLK/LRCLK is enabled during
++		 * device resume as lpass_cpu_daiops_prepare() is not called
++		 * after the device resumes. We don't check mi2s_was_prepared before
++		 * enable/disable BCLK in trigger events because:
++		 *  1. These trigger events are paired, so the BCLK
++		 *     enable_count is balanced.
++		 *  2. The BCLK can be shared (e.g. headset and headset mic);
++		 *     we need to increase the enable_count so that we don't
++		 *     turn off the shared BCLK while other devices are using
++		 *     it.
++		 */
+ 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 			ret = regmap_fields_write(i2sctl->spken, id,
+ 						 LPAIF_I2SCTL_SPKEN_ENABLE);
+@@ -296,6 +330,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++		/*
++		 * Ensure lpass BCLK/LRCLK is disabled during
++		 * device suspend.
++		 */
+ 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 			ret = regmap_fields_write(i2sctl->spken, id,
+ 						 LPAIF_I2SCTL_SPKEN_DISABLE);
+@@ -315,12 +353,53 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
+ 	return ret;
+ }
+ 
++static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
++		struct snd_soc_dai *dai)
++{
++	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
++	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
++	unsigned int id = dai->driver->id;
++	int ret;
++
++	/*
++	 * Ensure lpass BCLK/LRCLK is enabled a bit before playback/capture
++	 * data flow starts. This gives the other codec some lead time, e.g.
++	 * to drop start-up pop noise before capture begins.
++	 */
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++		ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
++	else
++		ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
++
++	if (ret) {
++		dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
++		return ret;
++	}
++
++	/*
++	 * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
++	 * be called multiple times. It's paired with the clk_disable in
++	 * lpass_cpu_daiops_shutdown.
++	 */
++	if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
++		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
++		if (ret) {
++			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
++			return ret;
++		}
++		drvdata->mi2s_was_prepared[dai->driver->id] = true;
++	}
++	return 0;
++}
++
+ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
+ 	.set_sysclk	= lpass_cpu_daiops_set_sysclk,
+ 	.startup	= lpass_cpu_daiops_startup,
+ 	.shutdown	= lpass_cpu_daiops_shutdown,
+ 	.hw_params	= lpass_cpu_daiops_hw_params,
+ 	.trigger	= lpass_cpu_daiops_trigger,
++	.prepare	= lpass_cpu_daiops_prepare,
+ };
+ EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
+ 
+diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
+index 83b2e08ade060..7f72214404baf 100644
+--- a/sound/soc/qcom/lpass.h
++++ b/sound/soc/qcom/lpass.h
+@@ -67,6 +67,10 @@ struct lpass_data {
+ 	/* MI2S SD lines to use for playback/capture */
+ 	unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
+ 	unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
++
++	/* Tracks whether the MI2S prepare dai_op has been called */
++	bool mi2s_was_prepared[LPASS_MAX_MI2S_PORTS];
++
+ 	int hdmi_port_enable;
+ 
+ 	/* low-power audio interface (LPAIF) registers */
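
The lpass changes split clock handling across the dai_ops because the common
clk API is two-level: clk_prepare() (may sleep) pairs with clk_unprepare(),
while clk_enable() (atomic) pairs with clk_disable(). mi2s_was_prepared guards
the enable/disable pair since .prepare can run several times per stream while
.shutdown runs once. The invariant, in a simplified stand-in:

#include <stdbool.h>
#include <stdio.h>

static bool was_prepared;   /* mirrors drvdata->mi2s_was_prepared[id] */
static int  enable_count;

static void dai_prepare(void)   /* may be called repeatedly */
{
	if (!was_prepared) {
		enable_count++;           /* clk_enable(bit_clk) */
		was_prepared = true;
	}
}

static void dai_shutdown(void)  /* called once per stream */
{
	if (was_prepared) {
		was_prepared = false;
		enable_count--;           /* clk_disable(bit_clk) */
	}
}

int main(void)
{
	dai_prepare();
	dai_prepare();              /* second call must not double-enable */
	dai_shutdown();
	printf("enable_count=%d\n", enable_count);   /* 0: balanced */
	return 0;
}
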
+diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
+index 7d6687618d808..d1b327036ae43 100644
+--- a/tools/include/uapi/linux/in.h
++++ b/tools/include/uapi/linux/in.h
+@@ -289,6 +289,9 @@ struct sockaddr_in {
+ /* Address indicating an error return. */
+ #define	INADDR_NONE		((unsigned long int) 0xffffffff)
+ 
++/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
++#define	INADDR_DUMMY		((unsigned long int) 0xc0000008)
++
+ /* Network number for local host loopback. */
+ #define	IN_LOOPBACKNET		127
+ 
+diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
+index 007fe5d594386..fe2bec500bf68 100644
+--- a/tools/lib/bpf/xsk.c
++++ b/tools/lib/bpf/xsk.c
+@@ -928,7 +928,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ 			goto out_put_ctx;
+ 		}
+ 		if (xsk->fd == umem->fd)
+-			umem->rx_ring_setup_done = true;
++			umem->tx_ring_setup_done = true;
+ 	}
+ 
+ 	err = xsk_get_mmap_offsets(xsk->fd, &off);
+diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
+index 385894b4a8bba..42222a84167f3 100644
+--- a/tools/perf/trace/beauty/include/linux/socket.h
++++ b/tools/perf/trace/beauty/include/linux/socket.h
+@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
+ 			    int __user *usockvec);
+ extern int __sys_shutdown_sock(struct socket *sock, int how);
+ extern int __sys_shutdown(int fd, int how);
+-
+-extern struct ns_common *get_net_ns(struct ns_common *ns);
+ #endif /* _LINUX_SOCKET_H */
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index 26c990e323781..939aed36e0c2a 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -162,10 +162,10 @@ static bool contains_event(struct evsel **metric_events, int num_events,
+ 	return false;
+ }
+ 
+-static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
++static bool evsel_same_pmu_or_none(struct evsel *ev1, struct evsel *ev2)
+ {
+ 	if (!ev1->pmu_name || !ev2->pmu_name)
+-		return false;
++		return true;
+ 
+ 	return !strcmp(ev1->pmu_name, ev2->pmu_name);
+ }
+@@ -288,7 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
+ 			 */
+ 			if (!has_constraint &&
+ 			    ev->leader != metric_events[i]->leader &&
+-			    evsel_same_pmu(ev->leader, metric_events[i]->leader))
++			    evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader))
+ 				break;
+ 			if (!strcmp(metric_events[i]->name, ev->name)) {
+ 				set_bit(ev->idx, evlist_used);
+@@ -1072,16 +1072,18 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
+ 
+ 	ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	ret = resolve_metric(d->metric_no_group,
+ 				     d->metric_list, NULL, d->ids);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	*(d->has_match) = true;
+ 
+-	return *d->ret;
++out:
++	*(d->ret) = ret;
++	return ret;
+ }
+ 
+ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
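The metricgroup hunk above fixes a callback that previously returned the stale *d->ret on success instead of recording its own status. The pattern it adopts, reduced to a sketch with hypothetical do_step_*() helpers: the callback stores its result where the outer loop reads it and also returns it, since a non-zero return stops the iteration.

	struct cb_data {
		int *ret;	/* where the outer loop reads the status */
	};

	static int iter_cb(void *obj, void *data)
	{
		struct cb_data *d = data;
		int ret;

		ret = do_step_one(obj);		/* hypothetical step, may fail */
		if (!ret)
			ret = do_step_two(obj);	/* hypothetical step, may fail */

		*(d->ret) = ret;	/* record for the caller of the iteration */
		return ret;		/* non-zero also stops further iteration */
	}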
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index 2b5707738609e..6fad54c7ecb4a 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -1384,12 +1384,37 @@ ipv4_rt_replace()
+ 	ipv4_rt_replace_mpath
+ }
+ 
++# checks that cached input route on VRF port is deleted
++# when VRF is deleted
++ipv4_local_rt_cache()
++{
++	run_cmd "ip addr add 10.0.0.1/32 dev lo"
++	run_cmd "ip netns add test-ns"
++	run_cmd "ip link add veth-outside type veth peer name veth-inside"
++	run_cmd "ip link add vrf-100 type vrf table 1100"
++	run_cmd "ip link set veth-outside master vrf-100"
++	run_cmd "ip link set veth-inside netns test-ns"
++	run_cmd "ip link set veth-outside up"
++	run_cmd "ip link set vrf-100 up"
++	run_cmd "ip route add 10.1.1.1/32 dev veth-outside table 1100"
++	run_cmd "ip netns exec test-ns ip link set veth-inside up"
++	run_cmd "ip netns exec test-ns ip addr add 10.1.1.1/32 dev veth-inside"
++	run_cmd "ip netns exec test-ns ip route add 10.0.0.1/32 dev veth-inside"
++	run_cmd "ip netns exec test-ns ip route add default via 10.0.0.1"
++	run_cmd "ip netns exec test-ns ping 10.0.0.1 -c 1 -i 1"
++	run_cmd "ip link delete vrf-100"
++
++	# if we did not hang, the test is a success
++	log_test $? 0 "Cached route removed from VRF port device"
++}
++
+ ipv4_route_test()
+ {
+ 	route_setup
+ 
+ 	ipv4_rt_add
+ 	ipv4_rt_replace
++	ipv4_local_rt_cache
+ 
+ 	route_cleanup
+ }
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index 65b3b983efc26..8763706b0d047 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -197,9 +197,6 @@ ip -net "$ns4" link set ns4eth3 up
+ ip -net "$ns4" route add default via 10.0.3.2
+ ip -net "$ns4" route add default via dead:beef:3::2
+ 
+-# use TCP syn cookies, even if no flooding was detected.
+-ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
+-
+ set_ethtool_flags() {
+ 	local ns="$1"
+ 	local dev="$2"
+@@ -711,6 +708,14 @@ for sender in $ns1 $ns2 $ns3 $ns4;do
+ 		exit $ret
+ 	fi
+ 
++	# ns1<->ns2 is not subject to reordering/tc delays. Use it to test
++	# mptcp syncookie support.
++	if [ $sender = $ns1 ]; then
++		ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
++	else
++		ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=1
++	fi
++
+ 	run_tests "$ns2" $sender 10.0.1.2
+ 	run_tests "$ns2" $sender dead:beef:1::2
+ 	run_tests "$ns2" $sender 10.0.2.1


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-18 12:00 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-18 12:00 UTC (permalink / raw
  To: gentoo-commits

commit:     bf946abb459042ec2dcd84125d7740fb3d24df4f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 18 12:00:00 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 18 12:00:00 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bf946abb

Fix BMQ Patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
index 7eb05e6..7e92738 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
@@ -9802,17 +9802,6 @@ index 73ef12092250..24bf8ef1249a 100644
 +#endif
  	};
  	struct wakeup_test_data *x = data;
- 
-From b2dc217bab541a5e737b52137f1bcce0b1cc2ed5 Mon Sep 17 00:00:00 2001
-From: Piotr Gorski <lucjan.lucjanov@gmail.com>
-Date: Mon, 14 Jun 2021 15:46:03 +0200
-Subject: [PATCH] prjc: fix compilation error
-
-Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
----
- kernel/sched/pelt.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
 diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
 index bc722a476..26a33f76b 100644
 --- a/kernel/sched/pelt.h
@@ -9833,6 +9822,3 @@ index bc722a476..26a33f76b 100644
  /*
   * The clock_pelt scales the time to reflect the effective amount of
   * computation done during the running delta time but then sync back to
--- 
-2.32.0
-


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-18 11:35 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-18 11:35 UTC (permalink / raw
  To: gentoo-commits

commit:     aa50463fa5ba1eee5ee6ffa0711dd8cd7eb53412
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 18 11:35:17 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 18 11:35:17 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=aa50463f

Linux patch 5.12.12 update to BMQ

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                  |    4 +
 1011_linux-5.12.12.patch                     | 1384 ++++++++++++++++++++++++++
 5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch |   33 +
 3 files changed, 1421 insertions(+)

diff --git a/0000_README b/0000_README
index d850f88..07044b3 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-5.12.11.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.11
 
+Patch:  1011_linux-5.12.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-5.12.12.patch b/1011_linux-5.12.12.patch
new file mode 100644
index 0000000..9b730db
--- /dev/null
+++ b/1011_linux-5.12.12.patch
@@ -0,0 +1,1384 @@
+diff --git a/Makefile b/Makefile
+index 82ca490ce5f48..e0a252b644630 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
+index 2c1e2b32b9b36..a745d64d46995 100644
+--- a/arch/arm/mach-omap1/pm.c
++++ b/arch/arm/mach-omap1/pm.c
+@@ -655,9 +655,13 @@ static int __init omap_pm_init(void)
+ 		irq = INT_7XX_WAKE_UP_REQ;
+ 	else if (cpu_is_omap16xx())
+ 		irq = INT_1610_WAKE_UP_REQ;
+-	if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup",
+-			NULL))
+-		pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
++	else
++		irq = -1;
++
++	if (irq >= 0) {
++		if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup", NULL))
++			pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
++	}
+ 
+ 	/* Program new power ramp-up time
+ 	 * (0 for most boards since we don't lower voltage when in deep sleep)
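The omap1 fix above closes an uninitialized-variable path: when neither cpu check matched, request_irq() ran on an indeterminate irq. Reduced to a sketch with placeholder names, the guard looks like:

	int irq = -1;	/* sentinel: no wakeup interrupt on this SoC */

	if (cpu_is_variant_a())
		irq = IRQ_VARIANT_A;
	else if (cpu_is_variant_b())
		irq = IRQ_VARIANT_B;

	if (irq >= 0 &&
	    request_irq(irq, wakeup_handler, 0, "peripheral wakeup", NULL))
		pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);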
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index 418a61ecb8275..5e86145db0e2a 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -322,6 +322,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot)
+ 
+ static void n8x0_mmc_callback(void *data, u8 card_mask)
+ {
++#ifdef CONFIG_MMC_OMAP
+ 	int bit, *openp, index;
+ 
+ 	if (board_is_n800()) {
+@@ -339,7 +340,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
+ 	else
+ 		*openp = 0;
+ 
+-#ifdef CONFIG_MMC_OMAP
+ 	omap_mmc_notify_cover_event(mmc_device, index, *openp);
+ #else
+ 	pr_warn("MMC: notify cover event not available\n");
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index 1368d943f1f39..5243bf2327c02 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -38,6 +38,15 @@ else
+ 	KBUILD_LDFLAGS += -melf32lriscv
+ endif
+ 
++ifeq ($(CONFIG_LD_IS_LLD),y)
++	KBUILD_CFLAGS += -mno-relax
++	KBUILD_AFLAGS += -mno-relax
++ifneq ($(LLVM_IAS),1)
++	KBUILD_CFLAGS += -Wa,-mno-relax
++	KBUILD_AFLAGS += -Wa,-mno-relax
++endif
++endif
++
+ # ISA string setting
+ riscv-march-$(CONFIG_ARCH_RV32I)	:= rv32ima
+ riscv-march-$(CONFIG_ARCH_RV64I)	:= rv64ima
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 4a901508e48e7..ddc7b86725cd7 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -388,6 +388,8 @@ static const struct usb_device_id blacklist_table[] = {
+ 	/* Realtek 8822CE Bluetooth devices */
+ 	{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* Realtek 8852AE Bluetooth devices */
+ 	{ USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+index 8f4a8f8d81463..39b6c6bfab453 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
+ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+ {
+ 	unsigned char buff[34];
+-	int addrptr = 0, size = 0;
++	int addrptr, size;
++	int len;
+ 
+ 	if (!is_fru_eeprom_supported(adev))
+ 		return 0;
+@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+ 	/* If algo exists, it means that the i2c_adapter's initialized */
+ 	if (!adev->pm.smu_i2c.algo) {
+ 		DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+-		return 0;
++		return -ENODEV;
+ 	}
+ 
+ 	/* There's a lot of repetition here. This is due to the FRU having
+@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+ 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ 	if (size < 1) {
+ 		DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
+-		return size;
++		return -EINVAL;
+ 	}
+ 
+ 	/* Increment the addrptr by the size of the field, and 1 due to the
+@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+ 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ 	if (size < 1) {
+ 		DRM_ERROR("Failed to read FRU product name, ret:%d", size);
+-		return size;
++		return -EINVAL;
+ 	}
+ 
++	len = size;
+ 	/* Product name should only be 32 characters. Any more,
+ 	 * and something could be wrong. Cap it at 32 to be safe
+ 	 */
+-	if (size > 32) {
++	if (len >= sizeof(adev->product_name)) {
+ 		DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
+-		size = 32;
++		len = sizeof(adev->product_name) - 1;
+ 	}
+ 	/* Start at 2 due to buff using fields 0 and 1 for the address */
+-	memcpy(adev->product_name, &buff[2], size);
+-	adev->product_name[size] = '\0';
++	memcpy(adev->product_name, &buff[2], len);
++	adev->product_name[len] = '\0';
+ 
+ 	addrptr += size + 1;
+ 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ 	if (size < 1) {
+ 		DRM_ERROR("Failed to read FRU product number, ret:%d", size);
+-		return size;
++		return -EINVAL;
+ 	}
+ 
++	len = size;
+ 	/* Product number should only be 16 characters. Any more,
+ 	 * and something could be wrong. Cap it at 16 to be safe
+ 	 */
+-	if (size > 16) {
++	if (len >= sizeof(adev->product_number)) {
+ 		DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
+-		size = 16;
++		len = sizeof(adev->product_number) - 1;
+ 	}
+-	memcpy(adev->product_number, &buff[2], size);
+-	adev->product_number[size] = '\0';
++	memcpy(adev->product_number, &buff[2], len);
++	adev->product_number[len] = '\0';
+ 
+ 	addrptr += size + 1;
+ 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ 
+ 	if (size < 1) {
+ 		DRM_ERROR("Failed to read FRU product version, ret:%d", size);
+-		return size;
++		return -EINVAL;
+ 	}
+ 
+ 	addrptr += size + 1;
+@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+ 
+ 	if (size < 1) {
+ 		DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
+-		return size;
++		return -EINVAL;
+ 	}
+ 
++	len = size;
+ 	/* Serial number should only be 16 characters. Any more,
+ 	 * and something could be wrong. Cap it at 16 to be safe
+ 	 */
+-	if (size > 16) {
++	if (len >= sizeof(adev->serial)) {
+ 		DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
+-		size = 16;
++		len = sizeof(adev->serial) - 1;
+ 	}
+-	memcpy(adev->serial, &buff[2], size);
+-	adev->serial[size] = '\0';
++	memcpy(adev->serial, &buff[2], len);
++	adev->serial[len] = '\0';
+ 
+ 	return 0;
+ }
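The same cap-and-terminate copy is applied three times in the FRU hunk (product name, product number, serial). A hedged helper that captures the shape; the patch itself keeps the copies inline, so this refactoring is only illustrative:

	#include <string.h>

	/* Copy at most dst_size - 1 bytes of an EEPROM-reported field and
	 * always NUL-terminate, whatever length the hardware claimed. */
	static void fru_copy_field(char *dst, size_t dst_size,
				   const unsigned char *src, size_t reported_len)
	{
		size_t len = reported_len;

		if (len >= dst_size)
			len = dst_size - 1;
		memcpy(dst, src, len);
		dst[len] = '\0';
	}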
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+index cb50ba445f8c8..0fd62a8e68c24 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+@@ -76,6 +76,7 @@ struct psp_ring
+ 	uint64_t			ring_mem_mc_addr;
+ 	void				*ring_mem_handle;
+ 	uint32_t			ring_size;
++	uint32_t			ring_wptr;
+ };
+ 
+ /* More registers may will be supported */
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+index c325d6f53a71e..d39735a89a252 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+@@ -720,7 +720,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
+ 	struct amdgpu_device *adev = psp->adev;
+ 
+ 	if (amdgpu_sriov_vf(adev))
+-		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
++		data = psp->km_ring.ring_wptr;
+ 	else
+ 		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ 
+@@ -734,6 +734,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+ 	if (amdgpu_sriov_vf(adev)) {
+ 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
+ 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
++		psp->km_ring.ring_wptr = value;
+ 	} else
+ 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+index f2e725f72d2f1..908664a5774bb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+@@ -379,7 +379,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
+ 	struct amdgpu_device *adev = psp->adev;
+ 
+ 	if (amdgpu_sriov_vf(adev))
+-		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
++		data = psp->km_ring.ring_wptr;
+ 	else
+ 		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ 	return data;
+@@ -394,6 +394,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
+ 		/* send interrupt to PSP for SRIOV ring write pointer update */
+ 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ 			GFX_CTRL_CMD_ID_CONSUME_CMD);
++		psp->km_ring.ring_wptr = value;
+ 	} else
+ 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index b63f55ea8758a..eed4946305834 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -871,7 +871,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
+ 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+ 	}
+ 
+-	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
++	if (!adev->dm.dc->ctx->dmub_srv)
++		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+ 	if (!adev->dm.dc->ctx->dmub_srv) {
+ 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ 		return -ENOMEM;
+@@ -1863,7 +1864,6 @@ static int dm_suspend(void *handle)
+ 
+ 	amdgpu_dm_irq_suspend(adev);
+ 
+-
+ 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index bfbc23b76cd5a..3e3c898848bd8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -3231,7 +3231,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
+ 	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
+ 	dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+ 
+-	if (voltage_supported && dummy_pstate_supported) {
++	if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
+ 		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ 		goto restore_dml_state;
+ 	}
+diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
+index 7b88261f57bb6..32c83f2e386ca 100644
+--- a/drivers/gpu/drm/tegra/sor.c
++++ b/drivers/gpu/drm/tegra/sor.c
+@@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client)
+ 		if (err < 0) {
+ 			dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
+ 				err);
+-			return err;
++			goto rpm_put;
+ 		}
+ 
+ 		err = reset_control_assert(sor->rst);
+ 		if (err < 0) {
+ 			dev_err(sor->dev, "failed to assert SOR reset: %d\n",
+ 				err);
+-			return err;
++			goto rpm_put;
+ 		}
+ 	}
+ 
+ 	err = clk_prepare_enable(sor->clk);
+ 	if (err < 0) {
+ 		dev_err(sor->dev, "failed to enable clock: %d\n", err);
+-		return err;
++		goto rpm_put;
+ 	}
+ 
+ 	usleep_range(1000, 3000);
+@@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client)
+ 			dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
+ 				err);
+ 			clk_disable_unprepare(sor->clk);
+-			return err;
++			goto rpm_put;
+ 		}
+ 
+ 		reset_control_release(sor->rst);
+@@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client)
+ 	}
+ 
+ 	return 0;
++
++rpm_put:
++	if (sor->rst)
++		pm_runtime_put(sor->dev);
++
++	return err;
+ }
+ 
+ static int tegra_sor_exit(struct host1x_client *client)
+@@ -3916,17 +3922,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, sor);
+ 	pm_runtime_enable(&pdev->dev);
+ 
+-	INIT_LIST_HEAD(&sor->client.list);
++	host1x_client_init(&sor->client);
+ 	sor->client.ops = &sor_client_ops;
+ 	sor->client.dev = &pdev->dev;
+ 
+-	err = host1x_client_register(&sor->client);
+-	if (err < 0) {
+-		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+-			err);
+-		goto rpm_disable;
+-	}
+-
+ 	/*
+ 	 * On Tegra210 and earlier, provide our own implementation for the
+ 	 * pad output clock.
+@@ -3938,13 +3937,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
+ 				      sor->index);
+ 		if (!name) {
+ 			err = -ENOMEM;
+-			goto unregister;
++			goto uninit;
+ 		}
+ 
+ 		err = host1x_client_resume(&sor->client);
+ 		if (err < 0) {
+ 			dev_err(sor->dev, "failed to resume: %d\n", err);
+-			goto unregister;
++			goto uninit;
+ 		}
+ 
+ 		sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
+@@ -3955,14 +3954,20 @@ static int tegra_sor_probe(struct platform_device *pdev)
+ 		err = PTR_ERR(sor->clk_pad);
+ 		dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
+ 			err);
+-		goto unregister;
++		goto uninit;
++	}
++
++	err = __host1x_client_register(&sor->client);
++	if (err < 0) {
++		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
++			err);
++		goto uninit;
+ 	}
+ 
+ 	return 0;
+ 
+-unregister:
+-	host1x_client_unregister(&sor->client);
+-rpm_disable:
++uninit:
++	host1x_client_exit(&sor->client);
+ 	pm_runtime_disable(&pdev->dev);
+ remove:
+ 	tegra_output_remove(&sor->output);
+diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
+index 68a766ff0e9d2..b98d746141a89 100644
+--- a/drivers/gpu/host1x/bus.c
++++ b/drivers/gpu/host1x/bus.c
+@@ -704,6 +704,29 @@ void host1x_driver_unregister(struct host1x_driver *driver)
+ }
+ EXPORT_SYMBOL(host1x_driver_unregister);
+ 
++/**
++ * __host1x_client_init() - initialize a host1x client
++ * @client: host1x client
++ * @key: lock class key for the client-specific mutex
++ */
++void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
++{
++	INIT_LIST_HEAD(&client->list);
++	__mutex_init(&client->lock, "host1x client lock", key);
++	client->usecount = 0;
++}
++EXPORT_SYMBOL(__host1x_client_init);
++
++/**
++ * host1x_client_exit() - uninitialize a host1x client
++ * @client: host1x client
++ */
++void host1x_client_exit(struct host1x_client *client)
++{
++	mutex_destroy(&client->lock);
++}
++EXPORT_SYMBOL(host1x_client_exit);
++
+ /**
+  * __host1x_client_register() - register a host1x client
+  * @client: host1x client
+@@ -716,16 +739,11 @@ EXPORT_SYMBOL(host1x_driver_unregister);
+  * device and call host1x_device_init(), which will in turn call each client's
+  * &host1x_client_ops.init implementation.
+  */
+-int __host1x_client_register(struct host1x_client *client,
+-			     struct lock_class_key *key)
++int __host1x_client_register(struct host1x_client *client)
+ {
+ 	struct host1x *host1x;
+ 	int err;
+ 
+-	INIT_LIST_HEAD(&client->list);
+-	__mutex_init(&client->lock, "host1x client lock", key);
+-	client->usecount = 0;
+-
+ 	mutex_lock(&devices_lock);
+ 
+ 	list_for_each_entry(host1x, &devices, list) {
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 786b71ef7738a..c6a643f4fc5f5 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -93,11 +93,11 @@ menu "Special HID drivers"
+ 	depends on HID
+ 
+ config HID_A4TECH
+-	tristate "A4 tech mice"
++	tristate "A4TECH mice"
+ 	depends on HID
+ 	default !EXPERT
+ 	help
+-	Support for A4 tech X5 and WOP-35 / Trust 450L mice.
++	Support for some A4TECH mice with two scroll wheels.
+ 
+ config HID_ACCUTOUCH
+ 	tristate "Accutouch touch device"
+diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
+index 3a8c4a5971f70..2cbc32dda7f74 100644
+--- a/drivers/hid/hid-a4tech.c
++++ b/drivers/hid/hid-a4tech.c
+@@ -147,6 +147,8 @@ static const struct hid_device_id a4_devices[] = {
+ 		.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
+ 		.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95),
++		.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, a4_devices);
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index 2ab22b9259418..60606c11bdaf0 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -335,7 +335,7 @@ static int asus_raw_event(struct hid_device *hdev,
+ 	if (drvdata->quirks & QUIRK_MEDION_E1239T)
+ 		return asus_e1239t_event(drvdata, data, size);
+ 
+-	if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
++	if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
+ 		/*
+ 		 * Skip these report ID, the device emits a continuous stream associated
+ 		 * with the AURA mode it is in which looks like an 'echo'.
+@@ -355,6 +355,16 @@ static int asus_raw_event(struct hid_device *hdev,
+ 				return -1;
+ 			}
+ 		}
++		if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
++			/*
++			 * G713 and G733 send these codes on some keypresses; depending on
++			 * the key pressed they can trigger a shutdown event if not caught.
++			 */
++			if (data[0] == 0x02 && data[1] == 0x30) {
++				return -1;
++			}
++		}
++
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 097cb1ee31268..0f69f35f2957e 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2005,6 +2005,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
+ 	case BUS_I2C:
+ 		bus = "I2C";
+ 		break;
++	case BUS_VIRTUAL:
++		bus = "VIRTUAL";
++		break;
+ 	default:
+ 		bus = "<UNKNOWN>";
+ 	}
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index d7eaf91003706..982737827b871 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -929,6 +929,7 @@ static const char *keys[KEY_MAX + 1] = {
+ 	[KEY_APPSELECT] = "AppSelect",
+ 	[KEY_SCREENSAVER] = "ScreenSaver",
+ 	[KEY_VOICECOMMAND] = "VoiceCommand",
++	[KEY_EMOJI_PICKER] = "EmojiPicker",
+ 	[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
+ 	[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
+ 	[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
+diff --git a/drivers/hid/hid-gt683r.c b/drivers/hid/hid-gt683r.c
+index 898871c8c768e..29ccb0accfba8 100644
+--- a/drivers/hid/hid-gt683r.c
++++ b/drivers/hid/hid-gt683r.c
+@@ -54,6 +54,7 @@ static const struct hid_device_id gt683r_led_id[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(hid, gt683r_led_id);
+ 
+ static void gt683r_brightness_set(struct led_classdev *led_cdev,
+ 				enum led_brightness brightness)
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index ba338973e968d..03978111d9448 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -26,6 +26,7 @@
+ #define USB_DEVICE_ID_A4TECH_WCP32PU	0x0006
+ #define USB_DEVICE_ID_A4TECH_X5_005D	0x000a
+ #define USB_DEVICE_ID_A4TECH_RP_649	0x001a
++#define USB_DEVICE_ID_A4TECH_NB_95	0x022b
+ 
+ #define USB_VENDOR_ID_AASHIMA		0x06d6
+ #define USB_DEVICE_ID_AASHIMA_GAMEPAD	0x0025
+@@ -749,6 +750,7 @@
+ #define USB_DEVICE_ID_LENOVO_X1_COVER	0x6085
+ #define USB_DEVICE_ID_LENOVO_X1_TAB	0x60a3
+ #define USB_DEVICE_ID_LENOVO_X1_TAB3	0x60b5
++#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E	0x600e
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D	0x608d
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019	0x6019
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E	0x602e
+@@ -1042,6 +1044,7 @@
+ #define USB_DEVICE_ID_SAITEK_X52	0x075c
+ #define USB_DEVICE_ID_SAITEK_X52_2	0x0255
+ #define USB_DEVICE_ID_SAITEK_X52_PRO	0x0762
++#define USB_DEVICE_ID_SAITEK_X65	0x0b6a
+ 
+ #define USB_VENDOR_ID_SAMSUNG		0x0419
+ #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE	0x0001
+@@ -1152,6 +1155,7 @@
+ #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A	0x2819
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012	0x2968
+ #define USB_DEVICE_ID_SYNAPTICS_TP_V103	0x5710
++#define USB_DEVICE_ID_SYNAPTICS_DELL_K15A	0x6e21
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002	0x73f4
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003	0x73f5
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5	0x81a7
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 236bccd37760d..e982d8173c9c7 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -963,6 +963,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 
+ 		case 0x0cd: map_key_clear(KEY_PLAYPAUSE);	break;
+ 		case 0x0cf: map_key_clear(KEY_VOICECOMMAND);	break;
++
++		case 0x0d9: map_key_clear(KEY_EMOJI_PICKER);	break;
++
+ 		case 0x0e0: map_abs_clear(ABS_VOLUME);		break;
+ 		case 0x0e2: map_key_clear(KEY_MUTE);		break;
+ 		case 0x0e5: map_key_clear(KEY_BASSBOOST);	break;
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 55dcb8536286b..2e4fb76c45f3d 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -70,6 +70,7 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_WIN8_PTP_BUTTONS	BIT(18)
+ #define MT_QUIRK_SEPARATE_APP_REPORT	BIT(19)
+ #define MT_QUIRK_FORCE_MULTI_INPUT	BIT(20)
++#define MT_QUIRK_DISABLE_WAKEUP		BIT(21)
+ 
+ #define MT_INPUTMODE_TOUCHSCREEN	0x02
+ #define MT_INPUTMODE_TOUCHPAD		0x03
+@@ -191,6 +192,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
+ #define MT_CLS_EXPORT_ALL_INPUTS		0x0013
+ /* reserved					0x0014 */
+ #define MT_CLS_WIN_8_FORCE_MULTI_INPUT		0x0015
++#define MT_CLS_WIN_8_DISABLE_WAKEUP		0x0016
+ 
+ /* vendor specific classes */
+ #define MT_CLS_3M				0x0101
+@@ -283,6 +285,15 @@ static const struct mt_class mt_classes[] = {
+ 			MT_QUIRK_WIN8_PTP_BUTTONS |
+ 			MT_QUIRK_FORCE_MULTI_INPUT,
+ 		.export_all_inputs = true },
++	{ .name = MT_CLS_WIN_8_DISABLE_WAKEUP,
++		.quirks = MT_QUIRK_ALWAYS_VALID |
++			MT_QUIRK_IGNORE_DUPLICATES |
++			MT_QUIRK_HOVERING |
++			MT_QUIRK_CONTACT_CNT_ACCURATE |
++			MT_QUIRK_STICKY_FINGERS |
++			MT_QUIRK_WIN8_PTP_BUTTONS |
++			MT_QUIRK_DISABLE_WAKEUP,
++		.export_all_inputs = true },
+ 
+ 	/*
+ 	 * vendor specific classes
+@@ -763,7 +774,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ 			return 1;
+ 		case HID_DG_CONFIDENCE:
+ 			if ((cls->name == MT_CLS_WIN_8 ||
+-			     cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
++			     cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT ||
++			     cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) &&
+ 				(field->application == HID_DG_TOUCHPAD ||
+ 				 field->application == HID_DG_TOUCHSCREEN))
+ 				app->quirks |= MT_QUIRK_CONFIDENCE;
+@@ -1580,13 +1592,13 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ 		/* we do not set suffix = "Touchscreen" */
+ 		hi->input->name = hdev->name;
+ 		break;
+-	case HID_DG_STYLUS:
+-		/* force BTN_STYLUS to allow tablet matching in udev */
+-		__set_bit(BTN_STYLUS, hi->input->keybit);
+-		break;
+ 	case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
+ 		suffix = "Custom Media Keys";
+ 		break;
++	case HID_DG_STYLUS:
++		/* force BTN_STYLUS to allow tablet matching in udev */
++		__set_bit(BTN_STYLUS, hi->input->keybit);
++		fallthrough;
+ 	case HID_DG_PEN:
+ 		suffix = "Stylus";
+ 		break;
+@@ -1753,8 +1765,14 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ #ifdef CONFIG_PM
+ static int mt_suspend(struct hid_device *hdev, pm_message_t state)
+ {
++	struct mt_device *td = hid_get_drvdata(hdev);
++
+ 	/* High latency is desirable for power savings during S3/S0ix */
+-	mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
++	if (td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP)
++		mt_set_modes(hdev, HID_LATENCY_HIGH, false, false);
++	else
++		mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
++
+ 	return 0;
+ }
+ 
+@@ -1813,6 +1831,12 @@ static const struct hid_device_id mt_devices[] = {
+ 		MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
+ 			USB_DEVICE_ID_ANTON_TOUCH_PAD) },
+ 
++	/* Asus T101HA */
++	{ .driver_data = MT_CLS_WIN_8_DISABLE_WAKEUP,
++		HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
++			   USB_VENDOR_ID_ASUSTEK,
++			   USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
++
+ 	/* Asus T304UA */
+ 	{ .driver_data = MT_CLS_ASUS,
+ 		HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 1a9daf03dbfae..bb2b60bc618f6 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -110,6 +110,7 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL },
+@@ -158,6 +159,7 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X65), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
+@@ -176,6 +178,7 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K15A), HID_QUIRK_NO_INIT_REPORTS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
+@@ -211,6 +214,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95) },
+ #endif
+ #if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index 3dd7d32467378..f9983145d4e70 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -210,16 +210,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
+ 	buffer_size = buffer_size / sizeof(__s32);
+ 	if (buffer_size) {
+ 		for (i = 0; i < buffer_size; ++i) {
+-			hid_set_field(report->field[field_index], i,
+-				      (__force __s32)cpu_to_le32(*buf32));
++			ret = hid_set_field(report->field[field_index], i,
++					    (__force __s32)cpu_to_le32(*buf32));
++			if (ret)
++				goto done_proc;
++
+ 			++buf32;
+ 		}
+ 	}
+ 	if (remaining_bytes) {
+ 		value = 0;
+ 		memcpy(&value, (u8 *)buf32, remaining_bytes);
+-		hid_set_field(report->field[field_index], i,
+-			      (__force __s32)cpu_to_le32(value));
++		ret = hid_set_field(report->field[field_index], i,
++				    (__force __s32)cpu_to_le32(value));
++		if (ret)
++			goto done_proc;
+ 	}
+ 	hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT);
+ 	hid_hw_wait(hsdev->hdev);
+diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+index 21b87e4003afc..07e3cbc86bef1 100644
+--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
++++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+@@ -28,6 +28,8 @@
+ #define EHL_Ax_DEVICE_ID	0x4BB3
+ #define TGL_LP_DEVICE_ID	0xA0FC
+ #define TGL_H_DEVICE_ID		0x43FC
++#define ADL_S_DEVICE_ID		0x7AF8
++#define ADL_P_DEVICE_ID		0x51FC
+ 
+ #define	REVISION_ID_CHT_A0	0x6
+ #define	REVISION_ID_CHT_Ax_SI	0x0
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 06081cf9b85a2..a6d5173ac0030 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -39,6 +39,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_S_DEVICE_ID)},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
+ 	{0, }
+ };
+ MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index 86257ce6d6198..4e9077363c962 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -374,7 +374,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
+ 	raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
+ 	dir = usbhid->ctrl[usbhid->ctrltail].dir;
+ 
+-	len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
++	len = hid_report_len(report);
+ 	if (dir == USB_DIR_OUT) {
+ 		usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
+ 		usbhid->urbctrl->transfer_buffer_length = len;
+diff --git a/drivers/hwmon/pmbus/q54sj108a2.c b/drivers/hwmon/pmbus/q54sj108a2.c
+index aec512766c319..0976268b2670b 100644
+--- a/drivers/hwmon/pmbus/q54sj108a2.c
++++ b/drivers/hwmon/pmbus/q54sj108a2.c
+@@ -299,7 +299,7 @@ static int q54sj108a2_probe(struct i2c_client *client)
+ 		dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ 		return ret;
+ 	}
+-	if (ret != 5 || strncmp(buf, "DELTA", 5)) {
++	if (ret != 6 || strncmp(buf, "DELTA", 5)) {
+ 		buf[ret] = '\0';
+ 		dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
+ 		return -ENODEV;
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index c84c8bf2bc20e..fc99ad8e4a388 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -3815,6 +3815,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		dev_err(&pdev->dev,
+ 			"invalid sram_size %dB or board span %ldB\n",
+ 			mgp->sram_size, mgp->board_span);
++		status = -EINVAL;
+ 		goto abort_with_ioremap;
+ 	}
+ 	memcpy_fromio(mgp->eeprom_strings,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index 89a13b4a74a45..c0001e38fcce9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -74,8 +74,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
+ 	struct wiphy *wiphy = hw->wiphy;
+ 
+ 	hw->queues = 4;
+-	hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+-	hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
++	hw->max_rx_aggregation_subframes = 64;
++	hw->max_tx_aggregation_subframes = 128;
+ 
+ 	phy->slottime = 9;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index ada943c7a9500..2c781b6f89e53 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -74,8 +74,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
+ 				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ 		else if (band == NL80211_BAND_5GHZ)
+ 			he_cap_elem->phy_cap_info[0] =
+-				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+-				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
++				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ 
+ 		he_cap_elem->phy_cap_info[1] =
+ 			IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 14913a4588ecc..0f22f333ff247 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -261,7 +261,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
+ 
+ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
+ {
+-	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
++	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
++		return;
+ 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+ 	blk_cleanup_queue(ctrl->ctrl.admin_q);
+ 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+@@ -297,6 +298,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+ 		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
+ 		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ 	}
++	ctrl->ctrl.queue_count = 1;
+ }
+ 
+ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+@@ -403,6 +405,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
+ 	return 0;
+ 
+ out_cleanup_queue:
++	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+ 	blk_cleanup_queue(ctrl->ctrl.admin_q);
+ out_cleanup_fabrics_q:
+ 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+@@ -460,8 +463,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
+ 	nvme_loop_shutdown_ctrl(ctrl);
+ 
+ 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+-		/* state change failure should never happen */
+-		WARN_ON_ONCE(1);
++		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
++		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
++			/* state change failure for non-deleted ctrl? */
++			WARN_ON_ONCE(1);
+ 		return;
+ 	}
+ 
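Switching clear_bit() to test_and_clear_bit() above makes the admin-queue teardown idempotent: only the caller that actually flips the bit proceeds to free resources, so destroy can be reached from several error paths safely. A minimal sketch, with a hypothetical QUEUE_LIVE bit:

	#define QUEUE_LIVE	0	/* hypothetical flag bit */

	static void destroy_queue_once(unsigned long *flags)
	{
		if (!test_and_clear_bit(QUEUE_LIVE, flags))
			return;		/* someone already tore this down */

		/* ...free the queue resources exactly once... */
	}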
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index e5076f09d5ed4..16a0470aedfc4 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -1827,22 +1827,20 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
+ 		fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+ 		QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
+ 			   "WWPN (0x%s) already exists.\n", buf);
+-		goto err1;
++		return rc;
+ 	}
+ 
+ 	if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
+ 		QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
+ 			   "because link is not up.\n");
+-		rc = -EIO;
+-		goto err1;
++		return -EIO;
+ 	}
+ 
+ 	vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
+ 	if (!vn_port) {
+ 		QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
+ 			   "for vport.\n");
+-		rc = -ENOMEM;
+-		goto err1;
++		return -ENOMEM;
+ 	}
+ 
+ 	fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+@@ -1866,7 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
+ 	if (rc) {
+ 		QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
+ 		    "for lport stats.\n");
+-		goto err2;
++		goto err;
+ 	}
+ 
+ 	fc_set_wwnn(vn_port, vport->node_name);
+@@ -1884,7 +1882,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
+ 	if (rc) {
+ 		QEDF_WARN(&base_qedf->dbg_ctx,
+ 			  "Error adding Scsi_Host rc=0x%x.\n", rc);
+-		goto err2;
++		goto err;
+ 	}
+ 
+ 	/* Set default dev_loss_tmo based on module parameter */
+@@ -1925,9 +1923,10 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
+ 	vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
+ 	vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
+ 
+-err2:
++	return 0;
++
++err:
+ 	scsi_host_put(vn_port->host);
+-err1:
+ 	return rc;
+ }
+ 
+@@ -1968,8 +1967,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
+ 	fc_lport_free_stats(vn_port);
+ 
+ 	/* Release Scsi_Host */
+-	if (vn_port->host)
+-		scsi_host_put(vn_port->host);
++	scsi_host_put(vn_port->host);
+ 
+ out:
+ 	return 0;
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index d92cec12454cb..d33355ab6e145 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -184,6 +184,7 @@ static struct {
+ 	{"HP", "C3323-300", "4269", BLIST_NOTQ},
+ 	{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
+ 	{"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
++	{"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
+ 	{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
+ 	{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ 	{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 9e8cd07179d71..28479addb2d1d 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2995,9 +2995,7 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
+ 	__releases(&cmd->t_state_lock)
+ 	__acquires(&cmd->t_state_lock)
+ {
+-
+-	assert_spin_locked(&cmd->t_state_lock);
+-	WARN_ON_ONCE(!irqs_disabled());
++	lockdep_assert_held(&cmd->t_state_lock);
+ 
+ 	if (fabric_stop)
+ 		cmd->transport_state |= CMD_T_FABRIC_STOP;
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index 2d500f90cdacf..a86e6810237a1 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -935,8 +935,11 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 		current->backing_dev_info = inode_to_bdi(inode);
+ 		buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ 		current->backing_dev_info = NULL;
+-		if (unlikely(buffered <= 0))
++		if (unlikely(buffered <= 0)) {
++			if (!ret)
++				ret = buffered;
+ 			goto out_unlock;
++		}
+ 
+ 		/*
+ 		 * We need to ensure that the page cache pages are written to
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 7c2ba81213da0..4d70faa606dc4 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -583,6 +583,16 @@ out_locked:
+ 	spin_unlock(&gl->gl_lockref.lock);
+ }
+ 
++static bool is_system_glock(struct gfs2_glock *gl)
++{
++	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
++	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
++
++	if (gl == m_ip->i_gl)
++		return true;
++	return false;
++}
++
+ /**
+  * do_xmote - Calls the DLM to change the state of a lock
+  * @gl: The lock state
+@@ -672,17 +682,25 @@ skip_inval:
+ 	 * to see sd_log_error and withdraw, and in the meantime, requeue the
+ 	 * work for later.
+ 	 *
++	 * We make a special exception for some system glocks, such as the
++	 * system statfs inode glock, which needs to be granted before the
++	 * gfs2_quotad daemon can exit, and that exit needs to finish before
++	 * we can unmount the withdrawn file system.
++	 *
+ 	 * However, if we're just unlocking the lock (say, for unmount, when
+ 	 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
+ 	 * then it's okay to tell dlm to unlock it.
+ 	 */
+ 	if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
+ 		gfs2_withdraw_delayed(sdp);
+-	if (glock_blocked_by_withdraw(gl)) {
+-		if (target != LM_ST_UNLOCKED ||
+-		    test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) {
++	if (glock_blocked_by_withdraw(gl) &&
++	    (target != LM_ST_UNLOCKED ||
++	     test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
++		if (!is_system_glock(gl)) {
+ 			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+ 			goto out;
++		} else {
++			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+ 		}
+ 	}
+ 
+@@ -1774,6 +1792,7 @@ __acquires(&lru_lock)
+ 	while(!list_empty(list)) {
+ 		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
+ 		list_del_init(&gl->gl_lru);
++		clear_bit(GLF_LRU, &gl->gl_flags);
+ 		if (!spin_trylock(&gl->gl_lockref.lock)) {
+ add_back_to_lru:
+ 			list_add(&gl->gl_lru, &lru_list);
+@@ -1819,7 +1838,6 @@ static long gfs2_scan_glock_lru(int nr)
+ 		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
+ 			list_move(&gl->gl_lru, &dispose);
+ 			atomic_dec(&lru_count);
+-			clear_bit(GLF_LRU, &gl->gl_flags);
+ 			freed++;
+ 			continue;
+ 		}
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index 6410281546f92..47287a7056fe2 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -923,10 +923,10 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
+ }
+ 
+ /**
+- * ail_drain - drain the ail lists after a withdraw
++ * gfs2_ail_drain - drain the ail lists after a withdraw
+  * @sdp: Pointer to GFS2 superblock
+  */
+-static void ail_drain(struct gfs2_sbd *sdp)
++void gfs2_ail_drain(struct gfs2_sbd *sdp)
+ {
+ 	struct gfs2_trans *tr;
+ 
+@@ -953,6 +953,7 @@ static void ail_drain(struct gfs2_sbd *sdp)
+ 		list_del(&tr->tr_list);
+ 		gfs2_trans_free(sdp, tr);
+ 	}
++	gfs2_drain_revokes(sdp);
+ 	spin_unlock(&sdp->sd_ail_lock);
+ }
+ 
+@@ -1159,7 +1160,6 @@ out_withdraw:
+ 	if (tr && list_empty(&tr->tr_list))
+ 		list_add(&tr->tr_list, &sdp->sd_ail1_list);
+ 	spin_unlock(&sdp->sd_ail_lock);
+-	ail_drain(sdp); /* frees all transactions */
+ 	tr = NULL;
+ 	goto out_end;
+ }
+diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
+index eea58015710e7..fc905c2af53ce 100644
+--- a/fs/gfs2/log.h
++++ b/fs/gfs2/log.h
+@@ -93,5 +93,6 @@ extern int gfs2_logd(void *data);
+ extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+ extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
+ extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
++extern void gfs2_ail_drain(struct gfs2_sbd *sdp);
+ 
+ #endif /* __LOG_DOT_H__ */
+diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
+index a82f4747aa8d5..ef44d325e5182 100644
+--- a/fs/gfs2/lops.c
++++ b/fs/gfs2/lops.c
+@@ -882,7 +882,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+ 	gfs2_log_write_page(sdp, page);
+ }
+ 
+-static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
++void gfs2_drain_revokes(struct gfs2_sbd *sdp)
+ {
+ 	struct list_head *head = &sdp->sd_log_revokes;
+ 	struct gfs2_bufdata *bd;
+@@ -897,6 +897,11 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+ 	}
+ }
+ 
++static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
++{
++	gfs2_drain_revokes(sdp);
++}
++
+ static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
+ 				  struct gfs2_log_header_host *head, int pass)
+ {
+diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
+index 31b6dd0d2e5d8..f707601597dcc 100644
+--- a/fs/gfs2/lops.h
++++ b/fs/gfs2/lops.h
+@@ -20,6 +20,7 @@ extern void gfs2_log_submit_bio(struct bio **biop, int opf);
+ extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
+ extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
+ 			   struct gfs2_log_header_host *head, bool keep_cache);
++extern void gfs2_drain_revokes(struct gfs2_sbd *sdp);
+ static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
+ {
+ 	return sdp->sd_ldptrs;
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index 4f034b87b4276..cffb346fb9b33 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -130,6 +130,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ 	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
+ 		return;
+ 
++	gfs2_ail_drain(sdp); /* frees all transactions */
+ 	inode = sdp->sd_jdesc->jd_inode;
+ 	ip = GFS2_I(inode);
+ 	i_gl = ip->i_gl;
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 3e33eb14118c8..5e79a21c696f1 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -1164,8 +1164,7 @@ static inline void hid_hw_wait(struct hid_device *hdev)
+  */
+ static inline u32 hid_report_len(struct hid_report *report)
+ {
+-	/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+-	return ((report->size - 1) >> 3) + 1 + (report->id > 0);
++	return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
+ }
+ 
+ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
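The hid_report_len() rewrite above is purely cosmetic: for any report->size >= 1, ((size - 1) >> 3) + 1 and DIV_ROUND_UP(size, 8) both evaluate to (size + 7) / 8. A standalone check, using the kernel's definition of DIV_ROUND_UP:

	#include <assert.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		for (unsigned int size = 1; size <= 1 << 16; size++)
			assert(((size - 1) >> 3) + 1 == DIV_ROUND_UP(size, 8));
		return 0;
	}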
+diff --git a/include/linux/host1x.h b/include/linux/host1x.h
+index 9eb77c87a83b0..ed0005ce4285c 100644
+--- a/include/linux/host1x.h
++++ b/include/linux/host1x.h
+@@ -320,12 +320,30 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
+ int host1x_device_init(struct host1x_device *device);
+ int host1x_device_exit(struct host1x_device *device);
+ 
+-int __host1x_client_register(struct host1x_client *client,
+-			     struct lock_class_key *key);
+-#define host1x_client_register(class) \
+-	({ \
+-		static struct lock_class_key __key; \
+-		__host1x_client_register(class, &__key); \
++void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
++void host1x_client_exit(struct host1x_client *client);
++
++#define host1x_client_init(client)			\
++	({						\
++		static struct lock_class_key __key;	\
++		__host1x_client_init(client, &__key);	\
++	})
++
++int __host1x_client_register(struct host1x_client *client);
++
++/*
++ * Note that this wrapper calls __host1x_client_init() for compatibility
++ * with existing callers. Callers that want to separately initialize and
++ * register a host1x client must first initialize using either of the
++ * __host1x_client_init() or host1x_client_init() functions and then use
++ * the low-level __host1x_client_register() function to avoid the client
++ * getting reinitialized.
++ */
++#define host1x_client_register(client)			\
++	({						\
++		static struct lock_class_key __key;	\
++		__host1x_client_init(client, &__key);	\
++		__host1x_client_register(client);	\
+ 	})
+ 
+ int host1x_client_unregister(struct host1x_client *client);
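The host1x header change splits client setup into an init step and a register step, so a driver can initialize early, create whatever must exist before it becomes visible, and only then register. A hedged sketch of that lifecycle using just the functions declared above (the tegra SOR probe hunk earlier in this patch is the real caller):

	static int example_client_probe(struct host1x_client *client)
	{
		int err;

		host1x_client_init(client);	/* set up list, lock, usecount */

		/* ...resources that must exist before registration... */

		err = __host1x_client_register(client);
		if (err < 0)
			host1x_client_exit(client);	/* undo only the init */

		return err;
	}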
+diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
+index ee93428ced9a1..225ec87d4f228 100644
+--- a/include/uapi/linux/input-event-codes.h
++++ b/include/uapi/linux/input-event-codes.h
+@@ -611,6 +611,7 @@
+ #define KEY_VOICECOMMAND		0x246	/* Listening Voice Command */
+ #define KEY_ASSISTANT		0x247	/* AL Context-aware desktop assistant */
+ #define KEY_KBD_LAYOUT_NEXT	0x248	/* AC Next Keyboard Layout Select */
++#define KEY_EMOJI_PICKER	0x249	/* Show/hide emoji picker (HUTRR101) */
+ 
+ #define KEY_BRIGHTNESS_MIN		0x250	/* Set Brightness to Minimum */
+ #define KEY_BRIGHTNESS_MAX		0x251	/* Set Brightness to Maximum */
+diff --git a/net/compat.c b/net/compat.c
+index ddd15af3a2837..210fc3b4d0d83 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -177,7 +177,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
+ 	if (kcmlen > stackbuf_size)
+ 		kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
+ 	if (kcmsg == NULL)
+-		return -ENOBUFS;
++		return -ENOMEM;
+ 
+ 	/* Now copy them over neatly. */
+ 	memset(kcmsg, 0, kcmlen);
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index cd80ffed6d267..a9f9379750802 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -1168,7 +1168,7 @@ static void notify_rule_change(int event, struct fib_rule *rule,
+ {
+ 	struct net *net;
+ 	struct sk_buff *skb;
+-	int err = -ENOBUFS;
++	int err = -ENOMEM;
+ 
+ 	net = ops->fro_net;
+ 	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 3485b16a7ff32..9ad046917b340 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -4833,8 +4833,10 @@ static int rtnl_bridge_notify(struct net_device *dev)
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	if (!skb->len)
++	if (!skb->len) {
++		err = -EINVAL;
+ 		goto errout;
++	}
+ 
+ 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
+ 	return 0;
+diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
+index 05f6bd89a7dd8..0cf2374c143bd 100644
+--- a/net/ieee802154/nl802154.c
++++ b/net/ieee802154/nl802154.c
+@@ -1298,19 +1298,20 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
+ 	if (!nla || nla_parse_nested_deprecated(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla, nl802154_dev_addr_policy, NULL))
+ 		return -EINVAL;
+ 
+-	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
+-	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
+-	    !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
+-	      attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
++	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || !attrs[NL802154_DEV_ADDR_ATTR_MODE])
+ 		return -EINVAL;
+ 
+ 	addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
+ 	addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
+ 	switch (addr->mode) {
+ 	case NL802154_DEV_ADDR_SHORT:
++		if (!attrs[NL802154_DEV_ADDR_ATTR_SHORT])
++			return -EINVAL;
+ 		addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
+ 		break;
+ 	case NL802154_DEV_ADDR_EXTENDED:
++		if (!attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])
++			return -EINVAL;
+ 		addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
+ 		break;
+ 	default:
+diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
+index bc2f6ca971520..816d8aad5a684 100644
+--- a/net/ipv4/ipconfig.c
++++ b/net/ipv4/ipconfig.c
+@@ -886,7 +886,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
+ 
+ 
+ /*
+- *  Copy BOOTP-supplied string if not already set.
++ *  Copy BOOTP-supplied string
+  */
+ static int __init ic_bootp_string(char *dest, char *src, int len, int max)
+ {
+@@ -935,12 +935,15 @@ static void __init ic_do_bootp_ext(u8 *ext)
+ 		}
+ 		break;
+ 	case 12:	/* Host name */
+-		ic_bootp_string(utsname()->nodename, ext+1, *ext,
+-				__NEW_UTS_LEN);
+-		ic_host_name_set = 1;
++		if (!ic_host_name_set) {
++			ic_bootp_string(utsname()->nodename, ext+1, *ext,
++					__NEW_UTS_LEN);
++			ic_host_name_set = 1;
++		}
+ 		break;
+ 	case 15:	/* Domain name (DNS) */
+-		ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
++		if (!ic_domain[0])
++			ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
+ 		break;
+ 	case 17:	/* Root path */
+ 		if (!root_server_path[0])
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index ff687b97b2d95..401901c8ad429 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -536,7 +536,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
+ 	if (protocol)
+ 		goto out;
+ 
+-	rc = -ENOBUFS;
++	rc = -ENOMEM;
+ 	if ((sk = x25_alloc_socket(net, kern)) == NULL)
+ 		goto out;
+ 
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index ab5ff7867eb99..d8be146793eee 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -331,6 +331,10 @@ static const struct config_entry config_table[] = {
+ 		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ 		.device = 0x51c8,
+ 	},
++	{
++		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++		.device = 0x51cc,
++	},
+ #endif
+ 
+ };
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 79ade335c8a09..470753b36c8a1 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2485,6 +2485,9 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* Alderlake-P */
+ 	{ PCI_DEVICE(0x8086, 0x51c8),
+ 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++	/* Alderlake-M */
++	{ PCI_DEVICE(0x8086, 0x51cc),
++	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ 	/* Elkhart Lake */
+ 	{ PCI_DEVICE(0x8086, 0x4b55),
+ 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},

diff --git a/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
index 81f4c55..7eb05e6 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
@@ -9803,3 +9803,36 @@ index 73ef12092250..24bf8ef1249a 100644
  	};
  	struct wakeup_test_data *x = data;
  
+From b2dc217bab541a5e737b52137f1bcce0b1cc2ed5 Mon Sep 17 00:00:00 2001
+From: Piotr Gorski <lucjan.lucjanov@gmail.com>
+Date: Mon, 14 Jun 2021 15:46:03 +0200
+Subject: [PATCH] prjc: fix compilation error
+
+Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
+---
+ kernel/sched/pelt.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index bc722a476..26a33f76b 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -44,6 +44,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -61,7 +62,6 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
+ 	WRITE_ONCE(avg->util_est.enqueued, enqueued);
+ }
+ 
+-#ifndef CONFIG_SCHED_ALT
+ /*
+  * The clock_pelt scales the time to reflect the effective amount of
+  * computation done during the running delta time but then sync back to
+-- 
+2.32.0
+


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-16 12:25 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-16 12:25 UTC (permalink / raw
  To: gentoo-commits

commit:     ad4d8d2f6bac01b7e851c062743d1446efc08804
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 16 12:25:01 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 16 12:25:01 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ad4d8d2f

Linux patch 5.12.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1010_linux-5.12.11.patch | 5273 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5277 insertions(+)

diff --git a/0000_README b/0000_README
index 25657f9..d850f88 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-5.12.10.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.10
 
+Patch:  1010_linux-5.12.11.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-5.12.11.patch b/1010_linux-5.12.11.patch
new file mode 100644
index 0000000..4eb63e8
--- /dev/null
+++ b/1010_linux-5.12.11.patch
@@ -0,0 +1,5273 @@
+diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+index db61f0731a203..2e35aeaa8781d 100644
+--- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
++++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+@@ -57,7 +57,7 @@ patternProperties:
+           rate
+ 
+       sound-dai:
+-        $ref: /schemas/types.yaml#/definitions/phandle
++        $ref: /schemas/types.yaml#/definitions/phandle-array
+         description: phandle of the CPU DAI
+ 
+     patternProperties:
+@@ -71,7 +71,7 @@ patternProperties:
+ 
+         properties:
+           sound-dai:
+-            $ref: /schemas/types.yaml#/definitions/phandle
++            $ref: /schemas/types.yaml#/definitions/phandle-array
+             description: phandle of the codec DAI
+ 
+         required:
+diff --git a/Documentation/virt/kvm/mmu.rst b/Documentation/virt/kvm/mmu.rst
+index 5bfe28b0728e8..20d85daed395e 100644
+--- a/Documentation/virt/kvm/mmu.rst
++++ b/Documentation/virt/kvm/mmu.rst
+@@ -171,8 +171,8 @@ Shadow pages contain the following information:
+     shadow pages) so role.quadrant takes values in the range 0..3.  Each
+     quadrant maps 1GB virtual address space.
+   role.access:
+-    Inherited guest access permissions in the form uwx.  Note execute
+-    permission is positive, not negative.
++    Inherited guest access permissions from the parent ptes in the form uwx.
++    Note execute permission is positive, not negative.
+   role.invalid:
+     The page is invalid and should not be used.  It is a root page that is
+     currently pinned (by a cpu hardware register pointing to it); once it is
+diff --git a/Makefile b/Makefile
+index ebc02c56db03c..82ca490ce5f48 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+@@ -912,6 +912,11 @@ CC_FLAGS_LTO	+= -fvisibility=hidden
+ 
+ # Limit inlining across translation units to reduce binary size
+ KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
++
++# Check for frame size exceeding threshold during prolog/epilog insertion.
++ifneq ($(CONFIG_FRAME_WARN),0)
++KBUILD_LDFLAGS	+= -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
++endif
+ endif
+ 
+ ifdef CONFIG_LTO
+diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
+index 0d67ed682e077..bc4ffa7ca04c7 100644
+--- a/arch/arm/include/asm/cpuidle.h
++++ b/arch/arm/include/asm/cpuidle.h
+@@ -7,9 +7,11 @@
+ #ifdef CONFIG_CPU_IDLE
+ extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ 		struct cpuidle_driver *drv, int index);
++#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
+ #else
+ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ 		struct cpuidle_driver *drv, int index) { return -ENODEV; }
++#define __cpuidle_method_section __maybe_unused /* drop silently */
+ #endif
+ 
+ /* Common ARM WFI state */
+@@ -42,8 +44,7 @@ struct of_cpuidle_method {
+ 
+ #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops)			\
+ 	static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
+-	__used __section("__cpuidle_method_of_table")			\
+-	= { .method = _method, .ops = _ops }
++	__cpuidle_method_section = { .method = _method, .ops = _ops }
+ 
+ extern int arm_cpuidle_suspend(int index);
+ 
+diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
+index de03838b343b8..a9b72eacfc0b3 100644
+--- a/arch/mips/lib/mips-atomic.c
++++ b/arch/mips/lib/mips-atomic.c
+@@ -37,7 +37,7 @@
+  */
+ notrace void arch_local_irq_disable(void)
+ {
+-	preempt_disable();
++	preempt_disable_notrace();
+ 
+ 	__asm__ __volatile__(
+ 	"	.set	push						\n"
+@@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void)
+ 	: /* no inputs */
+ 	: "memory");
+ 
+-	preempt_enable();
++	preempt_enable_notrace();
+ }
+ EXPORT_SYMBOL(arch_local_irq_disable);
+ 
+@@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void)
+ {
+ 	unsigned long flags;
+ 
+-	preempt_disable();
++	preempt_disable_notrace();
+ 
+ 	__asm__ __volatile__(
+ 	"	.set	push						\n"
+@@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void)
+ 	: /* no inputs */
+ 	: "memory");
+ 
+-	preempt_enable();
++	preempt_enable_notrace();
+ 
+ 	return flags;
+ }
+@@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
+ {
+ 	unsigned long __tmp1;
+ 
+-	preempt_disable();
++	preempt_disable_notrace();
+ 
+ 	__asm__ __volatile__(
+ 	"	.set	push						\n"
+@@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
+ 	: "0" (flags)
+ 	: "memory");
+ 
+-	preempt_enable();
++	preempt_enable_notrace();
+ }
+ EXPORT_SYMBOL(arch_local_irq_restore);
+ 
+diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+index 1b4aafc1f6a27..9716a0484ecf0 100644
+--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
++++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+@@ -122,7 +122,15 @@
+ 	};
+ 
+ /include/ "pq3-i2c-0.dtsi"
++	i2c@3000 {
++		fsl,i2c-erratum-a004447;
++	};
++
+ /include/ "pq3-i2c-1.dtsi"
++	i2c@3100 {
++		fsl,i2c-erratum-a004447;
++	};
++
+ /include/ "pq3-duart-0.dtsi"
+ /include/ "pq3-espi-0.dtsi"
+ 	spi0: spi@7000 {
+diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+index 872e4485dc3f0..ddc018d42252f 100644
+--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
++++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+@@ -371,7 +371,23 @@
+ 	};
+ 
+ /include/ "qoriq-i2c-0.dtsi"
++	i2c@118000 {
++		fsl,i2c-erratum-a004447;
++	};
++
++	i2c@118100 {
++		fsl,i2c-erratum-a004447;
++	};
++
+ /include/ "qoriq-i2c-1.dtsi"
++	i2c@119000 {
++		fsl,i2c-erratum-a004447;
++	};
++
++	i2c@119100 {
++		fsl,i2c-erratum-a004447;
++	};
++
+ /include/ "qoriq-duart-0.dtsi"
+ /include/ "qoriq-duart-1.dtsi"
+ /include/ "qoriq-gpio-0.dtsi"
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 1f2e5bfb9bb03..2855a1a3d6121 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -192,8 +192,9 @@ endif
+ KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
+ 
+ ifdef CONFIG_LTO_CLANG
+-KBUILD_LDFLAGS	+= -plugin-opt=-code-model=kernel \
+-		   -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
++ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
++KBUILD_LDFLAGS	+= -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
++endif
+ endif
+ 
+ ifdef CONFIG_X86_NEED_RELOCS
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 9b89376318388..9e1f64bc988b0 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -1406,6 +1406,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
+ 						die_id = i;
+ 					else
+ 						die_id = topology_phys_to_logical_pkg(i);
++					if (die_id < 0)
++						die_id = -ENODEV;
+ 					map->pbus_to_dieid[bus] = die_id;
+ 					break;
+ 				}
+@@ -1452,14 +1454,14 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
+ 			i = -1;
+ 			if (reverse) {
+ 				for (bus = 255; bus >= 0; bus--) {
+-					if (map->pbus_to_dieid[bus] >= 0)
++					if (map->pbus_to_dieid[bus] != -1)
+ 						i = map->pbus_to_dieid[bus];
+ 					else
+ 						map->pbus_to_dieid[bus] = i;
+ 				}
+ 			} else {
+ 				for (bus = 0; bus <= 255; bus++) {
+-					if (map->pbus_to_dieid[bus] >= 0)
++					if (map->pbus_to_dieid[bus] != -1)
+ 						i = map->pbus_to_dieid[bus];
+ 					else
+ 						map->pbus_to_dieid[bus] = i;
+@@ -5103,9 +5105,10 @@ static struct intel_uncore_type icx_uncore_m2m = {
+ 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
+ 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
+ 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
++	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
+ 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
+ 	.ops		= &snr_m2m_uncore_pci_ops,
+-	.format_group	= &skx_uncore_format_group,
++	.format_group	= &snr_m2m_uncore_format_group,
+ };
+ 
+ static struct attribute *icx_upi_uncore_formats_attr[] = {
+diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
+index 3ef5868ac588a..7aecb2fc31863 100644
+--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
++++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
+@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
+ 		case 15:
+ 			return msr - MSR_P4_BPU_PERFCTR0;
+ 		}
+-		fallthrough;
++		break;
+ 	case X86_VENDOR_ZHAOXIN:
+ 	case X86_VENDOR_CENTAUR:
+ 		return msr - MSR_ARCH_PERFMON_PERFCTR0;
+@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
+ 		case 15:
+ 			return msr - MSR_P4_BSU_ESCR0;
+ 		}
+-		fallthrough;
++		break;
+ 	case X86_VENDOR_ZHAOXIN:
+ 	case X86_VENDOR_CENTAUR:
+ 		return msr - MSR_ARCH_PERFMON_EVENTSEL0;
+diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
+index 55d7b473ac447..a2becf9c2553e 100644
+--- a/arch/x86/kvm/mmu/paging_tmpl.h
++++ b/arch/x86/kvm/mmu/paging_tmpl.h
+@@ -90,8 +90,8 @@ struct guest_walker {
+ 	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
+ 	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
+ 	bool pte_writable[PT_MAX_FULL_LEVELS];
+-	unsigned pt_access;
+-	unsigned pte_access;
++	unsigned int pt_access[PT_MAX_FULL_LEVELS];
++	unsigned int pte_access;
+ 	gfn_t gfn;
+ 	struct x86_exception fault;
+ };
+@@ -418,13 +418,15 @@ retry_walk:
+ 		}
+ 
+ 		walker->ptes[walker->level - 1] = pte;
++
++		/* Convert to ACC_*_MASK flags for struct guest_walker.  */
++		walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
+ 	} while (!is_last_gpte(mmu, walker->level, pte));
+ 
+ 	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
+ 	accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
+ 
+ 	/* Convert to ACC_*_MASK flags for struct guest_walker.  */
+-	walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
+ 	walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
+ 	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
+ 	if (unlikely(errcode))
+@@ -463,7 +465,8 @@ retry_walk:
+ 	}
+ 
+ 	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
+-		 __func__, (u64)pte, walker->pte_access, walker->pt_access);
++		 __func__, (u64)pte, walker->pte_access,
++		 walker->pt_access[walker->level - 1]);
+ 	return 1;
+ 
+ error:
+@@ -642,7 +645,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
+ 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
+ 	struct kvm_mmu_page *sp = NULL;
+ 	struct kvm_shadow_walk_iterator it;
+-	unsigned direct_access, access = gw->pt_access;
++	unsigned int direct_access, access;
+ 	int top_level, level, req_level, ret;
+ 	gfn_t base_gfn = gw->gfn;
+ 
+@@ -674,6 +677,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
+ 		sp = NULL;
+ 		if (!is_shadow_present_pte(*it.sptep)) {
+ 			table_gfn = gw->table_gfn[it.level - 2];
++			access = gw->pt_access[it.level - 2];
+ 			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
+ 					      false, access);
+ 		}
+diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
+index a61c015870e33..4f839148948bc 100644
+--- a/arch/x86/kvm/trace.h
++++ b/arch/x86/kvm/trace.h
+@@ -1550,16 +1550,16 @@ TRACE_EVENT(kvm_nested_vmenter_failed,
+ 	TP_ARGS(msg, err),
+ 
+ 	TP_STRUCT__entry(
+-		__field(const char *, msg)
++		__string(msg, msg)
+ 		__field(u32, err)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__entry->msg = msg;
++		__assign_str(msg, msg);
+ 		__entry->err = err;
+ 	),
+ 
+-	TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
++	TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
+ 		__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
+ );
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 822c23c3752c5..cf37205784297 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2982,6 +2982,19 @@ static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
+ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
+ {
+ 	++vcpu->stat.tlb_flush;
++
++	if (!tdp_enabled) {
++               /*
++		 * A TLB flush on behalf of the guest is equivalent to
++		 * INVPCID(all), toggling CR4.PGE, etc., which requires
++		 * a forced sync of the shadow page tables.  Unload the
++		 * entire MMU here and the subsequent load will sync the
++		 * shadow page tables, and also flush the TLB.
++		 */
++		kvm_mmu_unload(vcpu);
++		return;
++	}
++
+ 	static_call(kvm_x86_tlb_flush_guest)(vcpu);
+ }
+ 
+diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
+index 6cd7f7025df47..d8a91521144e0 100644
+--- a/crypto/async_tx/async_xor.c
++++ b/crypto/async_tx/async_xor.c
+@@ -233,7 +233,8 @@ async_xor_offs(struct page *dest, unsigned int offset,
+ 		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
+ 			src_cnt--;
+ 			src_list++;
+-			src_offs++;
++			if (src_offs)
++				src_offs++;
+ 		}
+ 
+ 		/* wait for any prerequisite operations */
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index be7da23fad76f..a4bd673934c0a 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -330,32 +330,21 @@ static void acpi_bus_osc_negotiate_platform_control(void)
+ 	if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
+ 		return;
+ 
+-	capbuf_ret = context.ret.pointer;
+-	if (context.ret.length <= OSC_SUPPORT_DWORD) {
+-		kfree(context.ret.pointer);
+-		return;
+-	}
++	kfree(context.ret.pointer);
+ 
+-	/*
+-	 * Now run _OSC again with query flag clear and with the caps
+-	 * supported by both the OS and the platform.
+-	 */
++	/* Now run _OSC again with query flag clear */
+ 	capbuf[OSC_QUERY_DWORD] = 0;
+-	capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
+-	kfree(context.ret.pointer);
+ 
+ 	if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
+ 		return;
+ 
+ 	capbuf_ret = context.ret.pointer;
+-	if (context.ret.length > OSC_SUPPORT_DWORD) {
+-		osc_sb_apei_support_acked =
+-			capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+-		osc_pc_lpi_support_confirmed =
+-			capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+-		osc_sb_native_usb4_support_confirmed =
+-			capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
+-	}
++	osc_sb_apei_support_acked =
++		capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
++	osc_pc_lpi_support_confirmed =
++		capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
++	osc_sb_native_usb4_support_confirmed =
++		capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
+ 
+ 	kfree(context.ret.pointer);
+ }
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 09fd13757b658..4d2118c8dd6c0 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -1009,10 +1009,8 @@ static void acpi_sleep_hibernate_setup(void)
+ 		return;
+ 
+ 	acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
+-	if (facs) {
++	if (facs)
+ 		s4_hardware_signature = facs->hardware_signature;
+-		acpi_put_table((struct acpi_table_header *)facs);
+-	}
+ }
+ #else /* !CONFIG_HIBERNATION */
+ static inline void acpi_sleep_hibernate_setup(void) {}
+diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
+index ef549c695b55c..3e50a9fc4d822 100644
+--- a/drivers/bus/mhi/pci_generic.c
++++ b/drivers/bus/mhi/pci_generic.c
+@@ -505,7 +505,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
+ 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ 
+-	del_timer(&mhi_pdev->health_check_timer);
++	del_timer_sync(&mhi_pdev->health_check_timer);
+ 	cancel_work_sync(&mhi_pdev->recovery_work);
+ 
+ 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
+index 1cbce59908558..97e6caedf1f33 100644
+--- a/drivers/gpio/gpio-wcd934x.c
++++ b/drivers/gpio/gpio-wcd934x.c
+@@ -7,7 +7,7 @@
+ #include <linux/slab.h>
+ #include <linux/of_device.h>
+ 
+-#define WCD_PIN_MASK(p) BIT(p - 1)
++#define WCD_PIN_MASK(p) BIT(p)
+ #define WCD_REG_DIR_CTL_OFFSET 0x42
+ #define WCD_REG_VAL_CTL_OFFSET 0x43
+ #define WCD934X_NPINS		5
+diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
+index f2d46b7ac6f9f..232abbba36868 100644
+--- a/drivers/gpu/drm/drm_auth.c
++++ b/drivers/gpu/drm/drm_auth.c
+@@ -314,9 +314,10 @@ int drm_master_open(struct drm_file *file_priv)
+ void drm_master_release(struct drm_file *file_priv)
+ {
+ 	struct drm_device *dev = file_priv->minor->dev;
+-	struct drm_master *master = file_priv->master;
++	struct drm_master *master;
+ 
+ 	mutex_lock(&dev->master_mutex);
++	master = file_priv->master;
+ 	if (file_priv->magic)
+ 		idr_remove(&file_priv->master->magic_map, file_priv->magic);
+ 
+diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
+index d273d1a8603a9..495a4767a4430 100644
+--- a/drivers/gpu/drm/drm_ioctl.c
++++ b/drivers/gpu/drm/drm_ioctl.c
+@@ -118,17 +118,18 @@ int drm_getunique(struct drm_device *dev, void *data,
+ 		  struct drm_file *file_priv)
+ {
+ 	struct drm_unique *u = data;
+-	struct drm_master *master = file_priv->master;
++	struct drm_master *master;
+ 
+-	mutex_lock(&master->dev->master_mutex);
++	mutex_lock(&dev->master_mutex);
++	master = file_priv->master;
+ 	if (u->unique_len >= master->unique_len) {
+ 		if (copy_to_user(u->unique, master->unique, master->unique_len)) {
+-			mutex_unlock(&master->dev->master_mutex);
++			mutex_unlock(&dev->master_mutex);
+ 			return -EFAULT;
+ 		}
+ 	}
+ 	u->unique_len = master->unique_len;
+-	mutex_unlock(&master->dev->master_mutex);
++	mutex_unlock(&dev->master_mutex);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
+index b3fd3501c4127..5275b2723293b 100644
+--- a/drivers/gpu/drm/mcde/mcde_dsi.c
++++ b/drivers/gpu/drm/mcde/mcde_dsi.c
+@@ -577,7 +577,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
+ 	 * porches and sync.
+ 	 */
+ 	/* (ps/s) / (pixels/s) = ps/pixels */
+-	pclk = DIV_ROUND_UP_ULL(1000000000000, mode->clock);
++	pclk = DIV_ROUND_UP_ULL(1000000000000, (mode->clock * 1000));
+ 	dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
+ 		pclk);
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index b4d8e1b01ee4f..f6c1b62b901e2 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -157,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ 	 * GPU registers so we need to add 0x1a800 to the register value on A630
+ 	 * to get the right value from PM4.
+ 	 */
+-	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
++	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ 		rbmemptr_stats(ring, index, alwayson_start));
+ 
+ 	/* Invalidate CCU depth and color */
+@@ -187,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ 
+ 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ 		rbmemptr_stats(ring, index, cpcycles_end));
+-	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
++	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ 		rbmemptr_stats(ring, index, alwayson_end));
+ 
+ 	/* Write the fence to the scratch register */
+@@ -206,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ 	OUT_RING(ring, submit->seqno);
+ 
+ 	trace_msm_gpu_submit_flush(submit,
+-		gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
+-			REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
++		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
++			REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
+ 
+ 	a6xx_flush(gpu, ring);
+ }
+@@ -462,6 +462,113 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
+ 	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
+ }
+ 
++/* For a615, a616, a618, A619, a630, a640 and a680 */
++static const u32 a6xx_protect[] = {
++	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
++	A6XX_PROTECT_RDONLY(0x00501, 0x0005),
++	A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
++	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
++	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
++	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
++	A6XX_PROTECT_NORDWR(0x00800, 0x0082),
++	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
++	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
++	A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
++	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
++	A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
++	A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
++	A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
++	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
++	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
++	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
++	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
++	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
++	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
++	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
++	A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
++	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
++	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
++	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
++	A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
++	A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
++	A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
++	A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
++	A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
++	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
++	A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
++};
++
++/* These are for a620 and a650 */
++static const u32 a650_protect[] = {
++	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
++	A6XX_PROTECT_RDONLY(0x00501, 0x0005),
++	A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
++	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
++	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
++	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
++	A6XX_PROTECT_NORDWR(0x00800, 0x0082),
++	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
++	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
++	A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
++	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
++	A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
++	A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
++	A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
++	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
++	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
++	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
++	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
++	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
++	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
++	A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
++	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
++	A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
++	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
++	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
++	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
++	A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
++	A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
++	A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
++	A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
++	A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
++	A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
++	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
++	A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
++	A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
++	A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
++	A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
++	A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
++	A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
++};
++
++static void a6xx_set_cp_protect(struct msm_gpu *gpu)
++{
++	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
++	const u32 *regs = a6xx_protect;
++	unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
++
++	BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
++	BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
++
++	if (adreno_is_a650(adreno_gpu)) {
++		regs = a650_protect;
++		count = ARRAY_SIZE(a650_protect);
++		count_max = 48;
++	}
++
++	/*
++	 * Enable access protection to privileged registers, fault on an access
++	 * protect violation and select the last span to protect from the start
++	 * address all the way to the end of the register address space
++	 */
++	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
++
++	for (i = 0; i < count - 1; i++)
++		gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
++	/* last CP_PROTECT to have "infinite" length on the last entry */
++	gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
++}
++
+ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+ {
+ 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+@@ -489,7 +596,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+ 		rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
+ 	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
+ 	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+-		uavflagprd_inv >> 4 | lower_bit << 1);
++		uavflagprd_inv << 4 | lower_bit << 1);
+ 	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
+ }
+ 
+@@ -776,41 +883,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
+ 	}
+ 
+ 	/* Protect registers from the CP */
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
+-
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
+-		A6XX_PROTECT_RDONLY(0x600, 0x51));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
+-		A6XX_PROTECT_RDONLY(0xfc00, 0x3));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
+-		A6XX_PROTECT_RDONLY(0x0, 0x4f9));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
+-		A6XX_PROTECT_RDONLY(0x501, 0xa));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
+-		A6XX_PROTECT_RDONLY(0x511, 0x44));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
+-		A6XX_PROTECT_RW(0xbe20, 0x11f3));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
+-			A6XX_PROTECT_RDONLY(0x980, 0x4));
+-	gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
++	a6xx_set_cp_protect(gpu);
+ 
+ 	/* Enable expanded apriv for targets that support it */
+ 	if (gpu->hw_apriv) {
+@@ -1211,7 +1284,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
++	if (a6xx_gpu->shadow_bo)
+ 		for (i = 0; i < gpu->nr_rings; i++)
+ 			a6xx_gpu->shadow[i] = 0;
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+index ce0610c5256f7..bb544dfe57379 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+@@ -44,7 +44,7 @@ struct a6xx_gpu {
+  * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+  * registers starting at _reg.
+  */
+-#define A6XX_PROTECT_RW(_reg, _len) \
++#define A6XX_PROTECT_NORDWR(_reg, _len) \
+ 	((1 << 31) | \
+ 	(((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+ 
+diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
+index 99494056f4bd9..7c462726382d5 100644
+--- a/drivers/hwmon/corsair-psu.c
++++ b/drivers/hwmon/corsair-psu.c
+@@ -570,6 +570,16 @@ static int corsairpsu_raw_event(struct hid_device *hdev, struct hid_report *repo
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_PM
++static int corsairpsu_resume(struct hid_device *hdev)
++{
++	struct corsairpsu_data *priv = hid_get_drvdata(hdev);
++
++	/* some PSUs turn off the microcontroller during standby, so a reinit is required */
++	return corsairpsu_init(priv);
++}
++#endif
++
+ static const struct hid_device_id corsairpsu_idtable[] = {
+ 	{ HID_USB_DEVICE(0x1b1c, 0x1c03) }, /* Corsair HX550i */
+ 	{ HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
+@@ -592,6 +602,10 @@ static struct hid_driver corsairpsu_driver = {
+ 	.probe		= corsairpsu_probe,
+ 	.remove		= corsairpsu_remove,
+ 	.raw_event	= corsairpsu_raw_event,
++#ifdef CONFIG_PM
++	.resume		= corsairpsu_resume,
++	.reset_resume	= corsairpsu_resume,
++#endif
+ };
+ module_hid_driver(corsairpsu_driver);
+ 
+diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
+index c2484f15298b0..8bd6435c13e82 100644
+--- a/drivers/hwmon/tps23861.c
++++ b/drivers/hwmon/tps23861.c
+@@ -99,11 +99,14 @@
+ #define POWER_ENABLE			0x19
+ #define TPS23861_NUM_PORTS		4
+ 
++#define TPS23861_GENERAL_MASK_1		0x17
++#define TPS23861_CURRENT_SHUNT_MASK	BIT(0)
++
+ #define TEMPERATURE_LSB			652 /* 0.652 degrees Celsius */
+ #define VOLTAGE_LSB			3662 /* 3.662 mV */
+ #define SHUNT_RESISTOR_DEFAULT		255000 /* 255 mOhm */
+-#define CURRENT_LSB_255			62260 /* 62.260 uA */
+-#define CURRENT_LSB_250			61039 /* 61.039 uA */
++#define CURRENT_LSB_250			62260 /* 62.260 uA */
++#define CURRENT_LSB_255			61039 /* 61.039 uA */
+ #define RESISTANCE_LSB			110966 /* 11.0966 Ohm*/
+ #define RESISTANCE_LSB_LOW		157216 /* 15.7216 Ohm*/
+ 
+@@ -117,6 +120,7 @@ struct tps23861_data {
+ static struct regmap_config tps23861_regmap_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
++	.max_register = 0x6f,
+ };
+ 
+ static int tps23861_read_temp(struct tps23861_data *data, long *val)
+@@ -560,6 +564,15 @@ static int tps23861_probe(struct i2c_client *client)
+ 	else
+ 		data->shunt_resistor = SHUNT_RESISTOR_DEFAULT;
+ 
++	if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT)
++		regmap_clear_bits(data->regmap,
++				  TPS23861_GENERAL_MASK_1,
++				  TPS23861_CURRENT_SHUNT_MASK);
++	else
++		regmap_set_bits(data->regmap,
++				TPS23861_GENERAL_MASK_1,
++				TPS23861_CURRENT_SHUNT_MASK);
++
+ 	hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ 							 data, &tps23861_chip_info,
+ 							 NULL);
+diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
+index d94f05c8b8b79..af349661fd769 100644
+--- a/drivers/i2c/busses/i2c-mpc.c
++++ b/drivers/i2c/busses/i2c-mpc.c
+@@ -23,6 +23,7 @@
+ 
+ #include <linux/clk.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/fsl_devices.h>
+ #include <linux/i2c.h>
+ #include <linux/interrupt.h>
+@@ -49,6 +50,7 @@
+ #define CCR_MTX  0x10
+ #define CCR_TXAK 0x08
+ #define CCR_RSTA 0x04
++#define CCR_RSVD 0x02
+ 
+ #define CSR_MCF  0x80
+ #define CSR_MAAS 0x40
+@@ -70,6 +72,7 @@ struct mpc_i2c {
+ 	u8 fdr, dfsrr;
+ #endif
+ 	struct clk *clk_per;
++	bool has_errata_A004447;
+ };
+ 
+ struct mpc_i2c_divider {
+@@ -176,6 +179,75 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
+ 	return 0;
+ }
+ 
++static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask)
++{
++	void __iomem *addr = i2c->base + MPC_I2C_SR;
++	u8 val;
++
++	return readb_poll_timeout(addr, val, val & mask, 0, 100);
++}
++
++/*
++ * Workaround for Erratum A004447. From the P2040CE Rev Q
++ *
++ * 1.  Set up the frequency divider and sampling rate.
++ * 2.  I2CCR - a0h
++ * 3.  Poll for I2CSR[MBB] to get set.
++ * 4.  If I2CSR[MAL] is set (an indication that SDA is stuck low), then go to
++ *     step 5. If MAL is not set, then go to step 13.
++ * 5.  I2CCR - 00h
++ * 6.  I2CCR - 22h
++ * 7.  I2CCR - a2h
++ * 8.  Poll for I2CSR[MBB] to get set.
++ * 9.  Issue read to I2CDR.
++ * 10. Poll for I2CSR[MIF] to be set.
++ * 11. I2CCR - 82h
++ * 12. Workaround complete. Skip the next steps.
++ * 13. Issue read to I2CDR.
++ * 14. Poll for I2CSR[MIF] to be set.
++ * 15. I2CCR - 80h
++ */
++static void mpc_i2c_fixup_A004447(struct mpc_i2c *i2c)
++{
++	int ret;
++	u32 val;
++
++	writeccr(i2c, CCR_MEN | CCR_MSTA);
++	ret = i2c_mpc_wait_sr(i2c, CSR_MBB);
++	if (ret) {
++		dev_err(i2c->dev, "timeout waiting for CSR_MBB\n");
++		return;
++	}
++
++	val = readb(i2c->base + MPC_I2C_SR);
++
++	if (val & CSR_MAL) {
++		writeccr(i2c, 0x00);
++		writeccr(i2c, CCR_MSTA | CCR_RSVD);
++		writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSVD);
++		ret = i2c_mpc_wait_sr(i2c, CSR_MBB);
++		if (ret) {
++			dev_err(i2c->dev, "timeout waiting for CSR_MBB\n");
++			return;
++		}
++		val = readb(i2c->base + MPC_I2C_DR);
++		ret = i2c_mpc_wait_sr(i2c, CSR_MIF);
++		if (ret) {
++			dev_err(i2c->dev, "timeout waiting for CSR_MIF\n");
++			return;
++		}
++		writeccr(i2c, CCR_MEN | CCR_RSVD);
++	} else {
++		val = readb(i2c->base + MPC_I2C_DR);
++		ret = i2c_mpc_wait_sr(i2c, CSR_MIF);
++		if (ret) {
++			dev_err(i2c->dev, "timeout waiting for CSR_MIF\n");
++			return;
++		}
++		writeccr(i2c, CCR_MEN);
++	}
++}
++
+ #if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
+ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
+ 	{20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
+@@ -586,7 +658,7 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 			if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) {
+ 				writeb(status & ~CSR_MAL,
+ 				       i2c->base + MPC_I2C_SR);
+-				mpc_i2c_fixup(i2c);
++				i2c_recover_bus(&i2c->adap);
+ 			}
+ 			return -EIO;
+ 		}
+@@ -622,7 +694,7 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 			if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) {
+ 				writeb(status & ~CSR_MAL,
+ 				       i2c->base + MPC_I2C_SR);
+-				mpc_i2c_fixup(i2c);
++				i2c_recover_bus(&i2c->adap);
+ 			}
+ 			return -EIO;
+ 		}
+@@ -637,6 +709,18 @@ static u32 mpc_functionality(struct i2c_adapter *adap)
+ 	  | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
+ }
+ 
++static int fsl_i2c_bus_recovery(struct i2c_adapter *adap)
++{
++	struct mpc_i2c *i2c = i2c_get_adapdata(adap);
++
++	if (i2c->has_errata_A004447)
++		mpc_i2c_fixup_A004447(i2c);
++	else
++		mpc_i2c_fixup(i2c);
++
++	return 0;
++}
++
+ static const struct i2c_algorithm mpc_algo = {
+ 	.master_xfer = mpc_xfer,
+ 	.functionality = mpc_functionality,
+@@ -648,6 +732,10 @@ static struct i2c_adapter mpc_ops = {
+ 	.timeout = HZ,
+ };
+ 
++static struct i2c_bus_recovery_info fsl_i2c_recovery_info = {
++	.recover_bus = fsl_i2c_bus_recovery,
++};
++
+ static const struct of_device_id mpc_i2c_of_match[];
+ static int fsl_i2c_probe(struct platform_device *op)
+ {
+@@ -732,6 +820,8 @@ static int fsl_i2c_probe(struct platform_device *op)
+ 	dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ);
+ 
+ 	platform_set_drvdata(op, i2c);
++	if (of_property_read_bool(op->dev.of_node, "fsl,i2c-erratum-a004447"))
++		i2c->has_errata_A004447 = true;
+ 
+ 	i2c->adap = mpc_ops;
+ 	of_address_to_resource(op->dev.of_node, 0, &res);
+@@ -740,6 +830,7 @@ static int fsl_i2c_probe(struct platform_device *op)
+ 	i2c_set_adapdata(&i2c->adap, i2c);
+ 	i2c->adap.dev.parent = &op->dev;
+ 	i2c->adap.dev.of_node = of_node_get(op->dev.of_node);
++	i2c->adap.bus_recovery_info = &fsl_i2c_recovery_info;
+ 
+ 	result = i2c_add_adapter(&i2c->adap);
+ 	if (result < 0)
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index f5b8be3bedde9..ab55f8b3190eb 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -3247,6 +3247,11 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
+ 		goto err_free_attr;
+ 	}
+ 
++	if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
++		err = -EINVAL;
++		goto err_uobj;
++	}
++
+ 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+ 	if (!qp) {
+ 		err = -EINVAL;
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index f26a0d9208429..3b3a894f95623 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -580,12 +580,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
+ 	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
+ 	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
+ 
+-	if (!mlx4_is_slave(dev->dev))
+-		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
+-
+ 	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
+ 		resp.response_length += sizeof(resp.hca_core_clock_offset);
+-		if (!err && !mlx4_is_slave(dev->dev)) {
++		if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
+ 			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
+ 			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+ 		}
+@@ -1699,9 +1696,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
+ 	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
+ 	int is_bonded = mlx4_is_bonded(dev);
+ 
+-	if (!rdma_is_port_valid(qp->device, flow_attr->port))
+-		return ERR_PTR(-EINVAL);
+-
+ 	if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
+ 		return ERR_PTR(-EOPNOTSUPP);
+ 
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index eb92cefffd777..9ce01f7296739 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -849,15 +849,14 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
+ 	ib_umem_release(cq->buf.umem);
+ }
+ 
+-static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
+-			     struct mlx5_ib_cq_buf *buf)
++static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
+ {
+ 	int i;
+ 	void *cqe;
+ 	struct mlx5_cqe64 *cqe64;
+ 
+ 	for (i = 0; i < buf->nent; i++) {
+-		cqe = get_cqe(cq, i);
++		cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
+ 		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+ 		cqe64->op_own = MLX5_CQE_INVALID << 4;
+ 	}
+@@ -883,7 +882,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ 	if (err)
+ 		goto err_db;
+ 
+-	init_cq_frag_buf(cq, &cq->buf);
++	init_cq_frag_buf(&cq->buf);
+ 
+ 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ 		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+@@ -1184,7 +1183,7 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ 	if (err)
+ 		goto ex;
+ 
+-	init_cq_frag_buf(cq, cq->resize_buf);
++	init_cq_frag_buf(cq->resize_buf);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
+index 61475b5715312..7af4df7a68237 100644
+--- a/drivers/infiniband/hw/mlx5/doorbell.c
++++ b/drivers/infiniband/hw/mlx5/doorbell.c
+@@ -41,6 +41,7 @@ struct mlx5_ib_user_db_page {
+ 	struct ib_umem	       *umem;
+ 	unsigned long		user_virt;
+ 	int			refcnt;
++	struct mm_struct	*mm;
+ };
+ 
+ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
+@@ -53,7 +54,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
+ 	mutex_lock(&context->db_page_mutex);
+ 
+ 	list_for_each_entry(page, &context->db_page_list, list)
+-		if (page->user_virt == (virt & PAGE_MASK))
++		if ((current->mm == page->mm) &&
++		    (page->user_virt == (virt & PAGE_MASK)))
+ 			goto found;
+ 
+ 	page = kmalloc(sizeof(*page), GFP_KERNEL);
+@@ -71,6 +73,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
+ 		kfree(page);
+ 		goto out;
+ 	}
++	mmgrab(current->mm);
++	page->mm = current->mm;
+ 
+ 	list_add(&page->list, &context->db_page_list);
+ 
+@@ -91,6 +95,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
+ 
+ 	if (!--db->u.user_page->refcnt) {
+ 		list_del(&db->u.user_page->list);
++		mmdrop(db->u.user_page->mm);
+ 		ib_umem_release(db->u.user_page->umem);
+ 		kfree(db->u.user_page);
+ 	}
+diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
+index f0af3f1ae0398..d719d1225b959 100644
+--- a/drivers/infiniband/hw/mlx5/fs.c
++++ b/drivers/infiniband/hw/mlx5/fs.c
+@@ -1194,9 +1194,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
+ 		goto free_ucmd;
+ 	}
+ 
+-	if (flow_attr->port > dev->num_ports ||
+-	    (flow_attr->flags &
+-	     ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
++	if (flow_attr->flags &
++	    ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
+ 		err = -EINVAL;
+ 		goto free_ucmd;
+ 	}
+@@ -2134,6 +2133,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
+ 	if (err)
+ 		goto end;
+ 
++	if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
++	    mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
++		err = -EINVAL;
++		goto end;
++	}
++
+ 	uobj->object = obj;
+ 	obj->mdev = dev->mdev;
+ 	atomic_set(&obj->usecnt, 0);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+index d5a90a66b45cf..5b05cf3837da1 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+@@ -163,6 +163,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
+ 
+ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+ 	.kind		= "ipoib",
++	.netns_refund   = true,
+ 	.maxtype	= IFLA_IPOIB_MAX,
+ 	.policy		= ipoib_policy,
+ 	.priv_size	= sizeof(struct ipoib_dev_priv),
+diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
+index ee925b58bbcea..2a1ddd47a0968 100644
+--- a/drivers/isdn/hardware/mISDN/netjet.c
++++ b/drivers/isdn/hardware/mISDN/netjet.c
+@@ -1100,7 +1100,6 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		card->typ = NETJET_S_TJ300;
+ 
+ 	card->base = pci_resource_start(pdev, 0);
+-	card->irq = pdev->irq;
+ 	pci_set_drvdata(pdev, card);
+ 	err = setup_instance(card);
+ 	if (err)
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 848dd4db16592..75311b8378858 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -364,7 +364,6 @@ struct cached_dev {
+ 
+ 	/* The rest of this all shows up in sysfs */
+ 	unsigned int		sequential_cutoff;
+-	unsigned int		readahead;
+ 
+ 	unsigned int		io_disable:1;
+ 	unsigned int		verify:1;
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index 29c231758293e..6d1de889baeb1 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -880,9 +880,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
+ 				 struct bio *bio, unsigned int sectors)
+ {
+ 	int ret = MAP_CONTINUE;
+-	unsigned int reada = 0;
+ 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ 	struct bio *miss, *cache_bio;
++	unsigned int size_limit;
+ 
+ 	s->cache_missed = 1;
+ 
+@@ -892,14 +892,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
+ 		goto out_submit;
+ 	}
+ 
+-	if (!(bio->bi_opf & REQ_RAHEAD) &&
+-	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
+-	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
+-		reada = min_t(sector_t, dc->readahead >> 9,
+-			      get_capacity(bio->bi_bdev->bd_disk) -
+-			      bio_end_sector(bio));
+-
+-	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
++	/* Limitation for valid replace key size and cache_bio bvecs number */
++	size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
++			   (1 << KEY_SIZE_BITS) - 1);
++	s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
+ 
+ 	s->iop.replace_key = KEY(s->iop.inode,
+ 				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
+@@ -911,7 +907,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
+ 
+ 	s->iop.replace = true;
+ 
+-	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
++	miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
++			      &s->d->bio_split);
+ 
+ 	/* btree_search_recurse()'s btree iterator is no good anymore */
+ 	ret = miss == bio ? MAP_DONE : -EINTR;
+@@ -933,9 +930,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
+ 	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
+ 		goto out_put;
+ 
+-	if (reada)
+-		bch_mark_cache_readahead(s->iop.c, s->d);
+-
+ 	s->cache_miss	= miss;
+ 	s->iop.bio	= cache_bio;
+ 	bio_get(cache_bio);
+diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
+index 503aafe188dce..4c7ee5fedb9dc 100644
+--- a/drivers/md/bcache/stats.c
++++ b/drivers/md/bcache/stats.c
+@@ -46,7 +46,6 @@ read_attribute(cache_misses);
+ read_attribute(cache_bypass_hits);
+ read_attribute(cache_bypass_misses);
+ read_attribute(cache_hit_ratio);
+-read_attribute(cache_readaheads);
+ read_attribute(cache_miss_collisions);
+ read_attribute(bypassed);
+ 
+@@ -64,7 +63,6 @@ SHOW(bch_stats)
+ 		    DIV_SAFE(var(cache_hits) * 100,
+ 			     var(cache_hits) + var(cache_misses)));
+ 
+-	var_print(cache_readaheads);
+ 	var_print(cache_miss_collisions);
+ 	sysfs_hprint(bypassed,	var(sectors_bypassed) << 9);
+ #undef var
+@@ -86,7 +84,6 @@ static struct attribute *bch_stats_files[] = {
+ 	&sysfs_cache_bypass_hits,
+ 	&sysfs_cache_bypass_misses,
+ 	&sysfs_cache_hit_ratio,
+-	&sysfs_cache_readaheads,
+ 	&sysfs_cache_miss_collisions,
+ 	&sysfs_bypassed,
+ 	NULL
+@@ -113,7 +110,6 @@ void bch_cache_accounting_clear(struct cache_accounting *acc)
+ 	acc->total.cache_misses = 0;
+ 	acc->total.cache_bypass_hits = 0;
+ 	acc->total.cache_bypass_misses = 0;
+-	acc->total.cache_readaheads = 0;
+ 	acc->total.cache_miss_collisions = 0;
+ 	acc->total.sectors_bypassed = 0;
+ }
+@@ -145,7 +141,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
+ 		scale_stat(&stats->cache_misses);
+ 		scale_stat(&stats->cache_bypass_hits);
+ 		scale_stat(&stats->cache_bypass_misses);
+-		scale_stat(&stats->cache_readaheads);
+ 		scale_stat(&stats->cache_miss_collisions);
+ 		scale_stat(&stats->sectors_bypassed);
+ 	}
+@@ -168,7 +163,6 @@ static void scale_accounting(struct timer_list *t)
+ 	move_stat(cache_misses);
+ 	move_stat(cache_bypass_hits);
+ 	move_stat(cache_bypass_misses);
+-	move_stat(cache_readaheads);
+ 	move_stat(cache_miss_collisions);
+ 	move_stat(sectors_bypassed);
+ 
+@@ -209,14 +203,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ 	mark_cache_stats(&c->accounting.collector, hit, bypass);
+ }
+ 
+-void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
+-{
+-	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+-
+-	atomic_inc(&dc->accounting.collector.cache_readaheads);
+-	atomic_inc(&c->accounting.collector.cache_readaheads);
+-}
+-
+ void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
+ {
+ 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
+index abfaabf7e7fcf..ca4f435f7216a 100644
+--- a/drivers/md/bcache/stats.h
++++ b/drivers/md/bcache/stats.h
+@@ -7,7 +7,6 @@ struct cache_stat_collector {
+ 	atomic_t cache_misses;
+ 	atomic_t cache_bypass_hits;
+ 	atomic_t cache_bypass_misses;
+-	atomic_t cache_readaheads;
+ 	atomic_t cache_miss_collisions;
+ 	atomic_t sectors_bypassed;
+ };
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index cc89f3156d1aa..05ac1d6fbbf35 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -137,7 +137,6 @@ rw_attribute(io_disable);
+ rw_attribute(discard);
+ rw_attribute(running);
+ rw_attribute(label);
+-rw_attribute(readahead);
+ rw_attribute(errors);
+ rw_attribute(io_error_limit);
+ rw_attribute(io_error_halflife);
+@@ -260,7 +259,6 @@ SHOW(__bch_cached_dev)
+ 	var_printf(partial_stripes_expensive,	"%u");
+ 
+ 	var_hprint(sequential_cutoff);
+-	var_hprint(readahead);
+ 
+ 	sysfs_print(running,		atomic_read(&dc->running));
+ 	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);
+@@ -365,7 +363,6 @@ STORE(__cached_dev)
+ 	sysfs_strtoul_clamp(sequential_cutoff,
+ 			    dc->sequential_cutoff,
+ 			    0, UINT_MAX);
+-	d_strtoi_h(readahead);
+ 
+ 	if (attr == &sysfs_clear_stats)
+ 		bch_cache_accounting_clear(&dc->accounting);
+@@ -538,7 +535,6 @@ static struct attribute *bch_cached_dev_files[] = {
+ 	&sysfs_running,
+ 	&sysfs_state,
+ 	&sysfs_label,
+-	&sysfs_readahead,
+ #ifdef CONFIG_BCACHE_DEBUG
+ 	&sysfs_verify,
+ 	&sysfs_bypass_torture_test,
+diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
+index 29385dc470d5d..db61a1f43ae91 100644
+--- a/drivers/md/dm-verity-verify-sig.c
++++ b/drivers/md/dm-verity-verify-sig.c
+@@ -15,7 +15,7 @@
+ #define DM_VERITY_VERIFY_ERR(s) DM_VERITY_ROOT_HASH_VERIFICATION " " s
+ 
+ static bool require_signatures;
+-module_param(require_signatures, bool, false);
++module_param(require_signatures, bool, 0444);
+ MODULE_PARM_DESC(require_signatures,
+ 		"Verify the roothash of dm-verity hash tree");
+ 
+diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
+index a07674ed05965..4c5621b17a6fb 100644
+--- a/drivers/misc/cardreader/rtl8411.c
++++ b/drivers/misc/cardreader/rtl8411.c
+@@ -468,6 +468,7 @@ static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
+ 	pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+ 	pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+ 	pcr->aspm_en = ASPM_L1_EN;
++	pcr->aspm_mode = ASPM_MODE_CFG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
+ 	pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
+ 	pcr->ic_version = rtl8411_get_ic_version(pcr);
+diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c
+index 39a6a7ecc32e9..29f5414072bf1 100644
+--- a/drivers/misc/cardreader/rts5209.c
++++ b/drivers/misc/cardreader/rts5209.c
+@@ -255,6 +255,7 @@ void rts5209_init_params(struct rtsx_pcr *pcr)
+ 	pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+ 	pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+ 	pcr->aspm_en = ASPM_L1_EN;
++	pcr->aspm_mode = ASPM_MODE_CFG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
+ 	pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+ 
+diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
+index 8200af22b529e..4bcfbc9afbac1 100644
+--- a/drivers/misc/cardreader/rts5227.c
++++ b/drivers/misc/cardreader/rts5227.c
+@@ -358,6 +358,7 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
+ 	pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ 	pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ 	pcr->aspm_en = ASPM_L1_EN;
++	pcr->aspm_mode = ASPM_MODE_CFG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
+ 	pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
+ 
+@@ -483,6 +484,7 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
+ 
+ 	rts5227_init_params(pcr);
+ 	pcr->ops = &rts522a_pcr_ops;
++	pcr->aspm_mode = ASPM_MODE_REG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
+ 	pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
+ 
+diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
+index 781a86def59a9..ffc128278613b 100644
+--- a/drivers/misc/cardreader/rts5228.c
++++ b/drivers/misc/cardreader/rts5228.c
+@@ -718,6 +718,7 @@ void rts5228_init_params(struct rtsx_pcr *pcr)
+ 	pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ 	pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ 	pcr->aspm_en = ASPM_L1_EN;
++	pcr->aspm_mode = ASPM_MODE_REG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
+ 	pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+ 
+diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c
+index 89e6f124ca5ca..c748eaf1ec1f9 100644
+--- a/drivers/misc/cardreader/rts5229.c
++++ b/drivers/misc/cardreader/rts5229.c
+@@ -246,6 +246,7 @@ void rts5229_init_params(struct rtsx_pcr *pcr)
+ 	pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+ 	pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+ 	pcr->aspm_en = ASPM_L1_EN;
++	pcr->aspm_mode = ASPM_MODE_CFG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
+ 	pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
+ 
+diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
+index b2676e7f50271..53f3a1f45c4a7 100644
+--- a/drivers/misc/cardreader/rts5249.c
++++ b/drivers/misc/cardreader/rts5249.c
+@@ -566,6 +566,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
+ 	pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ 	pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ 	pcr->aspm_en = ASPM_L1_EN;
++	pcr->aspm_mode = ASPM_MODE_CFG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+ 	pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+ 
+@@ -729,6 +730,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
+ void rts524a_init_params(struct rtsx_pcr *pcr)
+ {
+ 	rts5249_init_params(pcr);
++	pcr->aspm_mode = ASPM_MODE_REG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
+ 	pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ 	pcr->option.ltr_l1off_snooze_sspwrgate =
+@@ -845,6 +847,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
+ void rts525a_init_params(struct rtsx_pcr *pcr)
+ {
+ 	rts5249_init_params(pcr);
++	pcr->aspm_mode = ASPM_MODE_REG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
+ 	pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ 	pcr->option.ltr_l1off_snooze_sspwrgate =
+diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
+index 080a7d67a8e1a..9b42b20a3e5ae 100644
+--- a/drivers/misc/cardreader/rts5260.c
++++ b/drivers/misc/cardreader/rts5260.c
+@@ -628,6 +628,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
+ 	pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ 	pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ 	pcr->aspm_en = ASPM_L1_EN;
++	pcr->aspm_mode = ASPM_MODE_REG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
+ 	pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+ 
+diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
+index 6c64dade8e1af..1fd4e0e507302 100644
+--- a/drivers/misc/cardreader/rts5261.c
++++ b/drivers/misc/cardreader/rts5261.c
+@@ -783,6 +783,7 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
+ 	pcr->sd30_drive_sel_1v8 = 0x00;
+ 	pcr->sd30_drive_sel_3v3 = 0x00;
+ 	pcr->aspm_en = ASPM_L1_EN;
++	pcr->aspm_mode = ASPM_MODE_REG;
+ 	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
+ 	pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+ 
+diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
+index 273311184669a..baf83594a01d3 100644
+--- a/drivers/misc/cardreader/rtsx_pcr.c
++++ b/drivers/misc/cardreader/rtsx_pcr.c
+@@ -85,12 +85,18 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
+ 	if (pcr->aspm_enabled == enable)
+ 		return;
+ 
+-	if (pcr->aspm_en & 0x02)
+-		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+-			FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+-	else
+-		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+-			FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
++	if (pcr->aspm_mode == ASPM_MODE_CFG) {
++		pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
++						PCI_EXP_LNKCTL_ASPMC,
++						enable ? pcr->aspm_en : 0);
++	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
++		if (pcr->aspm_en & 0x02)
++			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
++				FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
++		else
++			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
++				FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
++	}
+ 
+ 	if (!enable && (pcr->aspm_en & 0x02))
+ 		mdelay(10);
+@@ -1394,7 +1400,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
+ 			return err;
+ 	}
+ 
+-	rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
++	if (pcr->aspm_mode == ASPM_MODE_REG)
++		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+ 
+ 	/* No CD interrupt if probing driver with card inserted.
+ 	 * So we need to initialize pcr->card_exist here.
+@@ -1410,6 +1417,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
+ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
+ {
+ 	int err;
++	u16 cfg_val;
++	u8 val;
+ 
+ 	spin_lock_init(&pcr->lock);
+ 	mutex_init(&pcr->pcr_mutex);
+@@ -1477,6 +1486,21 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
+ 	if (!pcr->slots)
+ 		return -ENOMEM;
+ 
++	if (pcr->aspm_mode == ASPM_MODE_CFG) {
++		pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
++		if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
++			pcr->aspm_enabled = true;
++		else
++			pcr->aspm_enabled = false;
++
++	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
++		rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
++		if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
++			pcr->aspm_enabled = false;
++		else
++			pcr->aspm_enabled = true;
++	}
++
+ 	if (pcr->ops->fetch_vendor_settings)
+ 		pcr->ops->fetch_vendor_settings(pcr);
+ 
+@@ -1506,7 +1530,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
+ 	struct pcr_handle *handle;
+ 	u32 base, len;
+ 	int ret, i, bar = 0;
+-	u8 val;
+ 
+ 	dev_dbg(&(pcidev->dev),
+ 		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
+@@ -1572,11 +1595,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
+ 	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
+ 	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
+ 	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
+-	rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+-	if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+-		pcr->aspm_enabled = false;
+-	else
+-		pcr->aspm_enabled = true;
+ 	pcr->card_inserted = 0;
+ 	pcr->card_removed = 0;
+ 	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
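
The rtsx_pcr.c hunks above split ASPM handling by chip generation: ASPM_MODE_CFG parts toggle the standard ASPM Control field of the PCIe Link Control register through config space, ASPM_MODE_REG parts keep the vendor ASPM_FORCE_CTL register, and the probe-time aspm_enabled read moves into init_chip where aspm_mode is already known. Below is a compilable userspace model of just the CFG-path bit manipulation; the starting register value is made up, while PCI_EXP_LNKCTL_ASPMC is the spec-defined field the kernel constant names.

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control field, Link Control bits 1:0 */

/* Model of the ASPM_MODE_CFG branch: clear the ASPM Control field, then set
 * the chip's aspm_en bits (e.g. 0x2 for L1) when enabling -- the same
 * read-modify-write pcie_capability_clear_and_set_word() performs. */
static uint16_t lnkctl_set_aspm(uint16_t lnkctl, int enable, uint16_t aspm_en)
{
    lnkctl &= ~PCI_EXP_LNKCTL_ASPMC;
    if (enable)
        lnkctl |= aspm_en & PCI_EXP_LNKCTL_ASPMC;
    return lnkctl;
}

int main(void)
{
    uint16_t reg = 0x0040;  /* hypothetical Link Control starting value */

    printf("enable L1: %#06x\n", lnkctl_set_aspm(reg, 1, 0x2));
    printf("disable:   %#06x\n", lnkctl_set_aspm(reg, 0, 0x2));
    return 0;
}
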
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
+index 158c21e5a942f..7a4a6efb8369c 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -679,14 +679,19 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ 
+ 	/* Issue CMD19 twice for each tap */
+ 	for (i = 0; i < 2 * priv->tap_num; i++) {
++		int cmd_error;
++
+ 		/* Set sampling clock position */
+ 		sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
+ 
+-		if (mmc_send_tuning(mmc, opcode, NULL) == 0)
++		if (mmc_send_tuning(mmc, opcode, &cmd_error) == 0)
+ 			set_bit(i, priv->taps);
+ 
+ 		if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0)
+ 			set_bit(i, priv->smpcmp);
++
++		if (cmd_error)
++			mmc_abort_tuning(mmc, opcode);
+ 	}
+ 
+ 	ret = renesas_sdhi_select_tuning(host);
+@@ -926,7 +931,7 @@ static const struct soc_device_attribute sdhi_quirks_match[]  = {
+ 	{ .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
+ 	{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
+ 	{ .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
+-	{ .soc_id = "r8a7796", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps1357 },
++	{ .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 },
+ 	{ .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
+ 	{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
+ 	{ .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
+diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
+index ba8e70a8e3125..6b12ce822e51a 100644
+--- a/drivers/net/appletalk/cops.c
++++ b/drivers/net/appletalk/cops.c
+@@ -327,6 +327,8 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
+ 			break;
+ 	}
+ 
++	dev->base_addr = ioaddr;
++
+ 	/* Reserve any actual interrupt. */
+ 	if (dev->irq) {
+ 		retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
+@@ -334,8 +336,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
+ 			goto err_out;
+ 	}
+ 
+-	dev->base_addr = ioaddr;
+-
+         lp = netdev_priv(dev);
+         spin_lock_init(&lp->lock);
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 74cbbb22470b5..fa4bf727a48db 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1526,6 +1526,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
+ 
+ 	slave->bond = bond;
+ 	slave->dev = slave_dev;
++	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+ 
+ 	if (bond_kobj_init(slave))
+ 		return NULL;
+@@ -1538,7 +1539,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
+ 			return NULL;
+ 		}
+ 	}
+-	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+ 
+ 	return slave;
+ }
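
The bond_main.c hunk moves INIT_DELAYED_WORK() ahead of bond_kobj_init(): once the kobject exists, any failure frees the slave through a release path that cancels notify_work, which is only safe if the work item was already initialized. A toy rendering of that rule follows; the names are illustrative, not the driver's.

#include <stdio.h>
#include <stdlib.h>

struct slave_sim {
    int work_initialized;   /* stands in for the notify_work delayed work */
};

/* The release path unconditionally cancels the work item, so it must have
 * been initialized before the first step that can fail. */
static void slave_release(struct slave_sim *s)
{
    if (!s->work_initialized)
        printf("BUG: cancelling an uninitialized work item\n");
    free(s);
}

static struct slave_sim *alloc_slave(int kobj_init_fails)
{
    struct slave_sim *s = calloc(1, sizeof(*s));

    if (!s)
        return NULL;

    s->work_initialized = 1;    /* the fix: init before any failure point */

    if (kobj_init_fails) {      /* stands in for bond_kobj_init() failing */
        slave_release(s);
        return NULL;
    }
    return s;
}

int main(void)
{
    alloc_slave(1);     /* the error path now tears down safely */
    return 0;
}
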
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index 55e5d479acce3..854e25f43fa70 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -1530,6 +1530,7 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = {
+ 		.num_statics = 16,
+ 		.cpu_ports = 0x7F,	/* can be configured as cpu port */
+ 		.port_cnt = 7,		/* total physical port count */
++		.phy_errata_9477 = true,
+ 	},
+ };
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+index 9c2f51f230351..9108b497b3c99 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+@@ -1224,8 +1224,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ 		goto failed;
+ 
+ 	/* SR-IOV capability was enabled but there are no VFs*/
+-	if (iov->total == 0)
++	if (iov->total == 0) {
++		err = -EINVAL;
+ 		goto failed;
++	}
+ 
+ 	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
+ 
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 0f6a6cb7e98d7..51b19172d63be 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -2837,6 +2837,9 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
+ 	struct gem_stats *hwstat = &bp->hw_stats.gem;
+ 	struct net_device_stats *nstat = &bp->dev->stats;
+ 
++	if (!netif_running(bp->dev))
++		return nstat;
++
+ 	gem_update_stats(bp);
+ 
+ 	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
+diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
+index f6cfec81ccc3b..dc4ac1a2b6b67 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
+@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+ #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
+ #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
+ #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
++#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
+ #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET	0xcc
+ #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET	0xd0
+ #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET	0xd2
+@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+ 
+ 	if (mlx4_is_mfunc(dev))
+ 		disable_unsupported_roce_caps(outbox);
++	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
++	dev_cap->map_clock_to_user = field & 0x80;
+ 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
+ 	dev_cap->reserved_qps = 1 << (field & 0xf);
+ 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
+index 8f020f26ebf5f..cf64e54eecb05 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
+@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
+ 	u32 health_buffer_addrs;
+ 	struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
+ 	bool wol_port[MLX4_MAX_PORTS + 1];
++	bool map_clock_to_user;
+ };
+ 
+ struct mlx4_func_cap {
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index c326b434734e1..00c84656b2e7e 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+ 		}
+ 	}
+ 
++	dev->caps.map_clock_to_user  = dev_cap->map_clock_to_user;
+ 	dev->caps.uar_page_size	     = PAGE_SIZE;
+ 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
+ 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
+@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
+ 	if (mlx4_is_slave(dev))
+ 		return -EOPNOTSUPP;
+ 
++	if (!dev->caps.map_clock_to_user) {
++		mlx4_dbg(dev, "Map clock to user is not supported.\n");
++		return -EOPNOTSUPP;
++	}
++
+ 	if (!params)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index 214e347097a7a..2376b2729633f 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -114,7 +114,7 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
+ 		value = readl(&port_regs->CommonRegs.semaphoreReg);
+ 		if ((value & (sem_mask >> 16)) == sem_bits)
+ 			return 0;
+-		ssleep(1);
++		mdelay(1000);
+ 	} while (--seconds);
+ 	return -1;
+ }
+diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
+index d1e908846f5dd..22fbb0ae77fba 100644
+--- a/drivers/net/ethernet/sfc/nic.c
++++ b/drivers/net/ethernet/sfc/nic.c
+@@ -90,6 +90,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
+ 				  efx->pci_dev->irq);
+ 			goto fail1;
+ 		}
++		efx->irqs_hooked = true;
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 8235185540798..d3c916821b113 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -607,7 +607,8 @@ void mdiobus_unregister(struct mii_bus *bus)
+ 	struct mdio_device *mdiodev;
+ 	int i;
+ 
+-	BUG_ON(bus->state != MDIOBUS_REGISTERED);
++	if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
++		return;
+ 	bus->state = MDIOBUS_UNREGISTERED;
+ 
+ 	for (i = 0; i < PHY_MAX_ADDR; i++) {
+diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
+index a44d49d63968a..494675aeaaad7 100644
+--- a/drivers/nvme/host/Kconfig
++++ b/drivers/nvme/host/Kconfig
+@@ -71,7 +71,8 @@ config NVME_FC
+ config NVME_TCP
+ 	tristate "NVM Express over Fabrics TCP host driver"
+ 	depends on INET
+-	depends on BLK_DEV_NVME
++	depends on BLOCK
++	select NVME_CORE
+ 	select NVME_FABRICS
+ 	select CRYPTO
+ 	select CRYPTO_CRC32C
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 604ab0e5a2adb..fb02ca2e3096f 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -336,6 +336,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
+ 			cmd->connect.recfmt);
+ 		break;
+ 
++	case NVME_SC_HOST_PATH_ERROR:
++		dev_err(ctrl->device,
++			"Connect command failed: host path error\n");
++		break;
++
+ 	default:
+ 		dev_err(ctrl->device,
+ 			"Connect command failed, error wo/DNR bit: %d\n",
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 7d16cb4cd8acf..83921dab8368d 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -388,10 +388,10 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
+ {
+ 	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
+ 			struct nvmet_ctrl, ka_work);
+-	bool cmd_seen = ctrl->cmd_seen;
++	bool reset_tbkas = ctrl->reset_tbkas;
+ 
+-	ctrl->cmd_seen = false;
+-	if (cmd_seen) {
++	ctrl->reset_tbkas = false;
++	if (reset_tbkas) {
+ 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
+ 			ctrl->cntlid);
+ 		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+@@ -804,6 +804,13 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
+ 	percpu_ref_exit(&sq->ref);
+ 
+ 	if (ctrl) {
++		/*
++		 * The teardown flow may take some time, and the host may not
++		 * send us keep-alive during this period, hence reset the
++		 * traffic based keep-alive timer so we don't trigger a
++		 * controller teardown as a result of a keep-alive expiration.
++		 */
++		ctrl->reset_tbkas = true;
+ 		nvmet_ctrl_put(ctrl);
+ 		sq->ctrl = NULL; /* allows reusing the queue later */
+ 	}
+@@ -953,7 +960,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ 	}
+ 
+ 	if (sq->ctrl)
+-		sq->ctrl->cmd_seen = true;
++		sq->ctrl->reset_tbkas = true;
+ 
+ 	return true;
+ 
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 5aad34b106dc2..43a668dc8bc4d 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -166,7 +166,7 @@ struct nvmet_ctrl {
+ 	struct nvmet_subsys	*subsys;
+ 	struct nvmet_sq		**sqs;
+ 
+-	bool			cmd_seen;
++	bool			reset_tbkas;
+ 
+ 	struct mutex		lock;
+ 	u64			cap;
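
The rename from cmd_seen to reset_tbkas reflects the flag's widened meaning: it now rearms the traffic-based keep-alive timer both when a command arrives and while a queue teardown is still in flight, per the new comment in nvmet_sq_destroy(). A minimal model of the timer decision, with hypothetical names:

#include <stdio.h>
#include <stdbool.h>

static bool reset_tbkas;

/* Hedged model of nvmet_keep_alive_timer(): any activity in the last period
 * (a command seen, or a teardown in progress) rearms the timer instead of
 * declaring the host dead. */
static void keep_alive_tick(void)
{
    bool rearm = reset_tbkas;

    reset_tbkas = false;
    printf(rearm ? "rearm keep-alive timer\n"
                 : "keep-alive expired: tear down controller\n");
}

int main(void)
{
    reset_tbkas = true;   /* e.g. nvmet_sq_destroy() ran during this period */
    keep_alive_tick();    /* activity seen: rearmed */
    keep_alive_tick();    /* quiet period: expires */
    return 0;
}
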
+diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
+index 899b9eb43fad6..a39f30fa2e991 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
++++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
+@@ -78,7 +78,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
+ 	 * Other architectures (e.g., ARM) either do not support big endian, or
+ 	 * else leave I/O in little endian mode.
+ 	 */
+-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
++	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ 		return __raw_readl(addr);
+ 	else
+ 		return readl_relaxed(addr);
+@@ -87,7 +87,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
+ static inline void brcm_usb_writel(u32 val, void __iomem *addr)
+ {
+ 	/* See brcmnand_readl() comments */
+-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
++	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ 		__raw_writel(val, addr);
+ 	else
+ 		writel_relaxed(val, addr);
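
The phy-brcm-usb change works because IS_ENABLED() is only true for a macro defined to exactly 1; __BIG_ENDIAN is an endianness value (4321 in kernel and glibc headers alike), so the old test was always false and the big-endian __raw accessor path never ran. Here is a compilable rendering of the kernel's IS_ENABLED() machinery, simplified in that the real macro also checks option##_MODULE:

#include <stdio.h>
#include <endian.h> /* glibc, like the kernel, defines __BIG_ENDIAN as 4321 */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_CPU_BIG_ENDIAN 1 /* pretend Kconfig selected it */

int main(void)
{
    /* prints 0: 4321 is not 1, so the old check could never fire */
    printf("IS_ENABLED(__BIG_ENDIAN)          = %d\n", IS_ENABLED(__BIG_ENDIAN));
    /* prints 1: Kconfig symbols are defined to 1 when set */
    printf("IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) = %d\n", IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
    return 0;
}
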
+diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
+index 19f32ae877b94..acca76729c090 100644
+--- a/drivers/phy/cadence/phy-cadence-sierra.c
++++ b/drivers/phy/cadence/phy-cadence-sierra.c
+@@ -612,6 +612,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
+ 	sp->nsubnodes = node;
+ 
+ 	if (sp->num_lanes > SIERRA_MAX_LANES) {
++		ret = -EINVAL;
+ 		dev_err(dev, "Invalid lane configuration\n");
+ 		goto put_child2;
+ 	}
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index e28e25f98708c..dceac77148721 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -894,6 +894,7 @@ static int wiz_probe(struct platform_device *pdev)
+ 
+ 		if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
+ 		    wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
++			ret = -EINVAL;
+ 			dev_err(dev, "Invalid typec-dir-debounce property\n");
+ 			goto err_addr_to_resource;
+ 		}
+diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
+index 6853a896c4766..740ecf6011a9b 100644
+--- a/drivers/pinctrl/qcom/Kconfig
++++ b/drivers/pinctrl/qcom/Kconfig
+@@ -223,7 +223,7 @@ config PINCTRL_SC7280
+ config PINCTRL_SC8180X
+ 	tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
+ 	depends on GPIOLIB && OF
+-	select PINCTRL_MSM
++	depends on PINCTRL_MSM
+ 	help
+ 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
+diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
+index 5aaf57b40407f..0bb4931cec59e 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
++++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
+@@ -410,15 +410,15 @@ static const char * const gpio_groups[] = {
+ 	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ 	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ 	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+-	"gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54",
+-	"gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61",
+-	"gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
+-	"gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
+-	"gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82",
+-	"gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
+-	"gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
+-	"gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
+-	"gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
++	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
++	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
++	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
++	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
++	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
++	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
++	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
++	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
++	"gpio105", "gpio106", "gpio107",
+ };
+ 
+ static const char * const qdss_stm_groups[] = {
+diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
+index 89761d3e1a476..d68d51fb24ffd 100644
+--- a/drivers/platform/surface/aggregator/controller.c
++++ b/drivers/platform/surface/aggregator/controller.c
+@@ -1907,7 +1907,7 @@ static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
+ {
+ 	int status;
+ 
+-	status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
++	status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
+ 
+ 	if (status < 0 && status != -EINVAL) {
+ 		ssam_err(ctrl,
+diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c
+index d8b429955d33f..05147d2c38428 100644
+--- a/drivers/regulator/atc260x-regulator.c
++++ b/drivers/regulator/atc260x-regulator.c
+@@ -28,16 +28,16 @@ static const struct linear_range atc2609a_dcdc_voltage_ranges[] = {
+ 
+ static const struct linear_range atc2609a_ldo_voltage_ranges0[] = {
+ 	REGULATOR_LINEAR_RANGE(700000, 0, 15, 100000),
+-	REGULATOR_LINEAR_RANGE(2100000, 16, 28, 100000),
++	REGULATOR_LINEAR_RANGE(2100000, 0, 12, 100000),
+ };
+ 
+ static const struct linear_range atc2609a_ldo_voltage_ranges1[] = {
+ 	REGULATOR_LINEAR_RANGE(850000, 0, 15, 100000),
+-	REGULATOR_LINEAR_RANGE(2100000, 16, 27, 100000),
++	REGULATOR_LINEAR_RANGE(2100000, 0, 11, 100000),
+ };
+ 
+ static const unsigned int atc260x_ldo_voltage_range_sel[] = {
+-	0x0, 0x1,
++	0x0, 0x20,
+ };
+ 
+ static int atc260x_dcdc_set_voltage_time_sel(struct regulator_dev *rdev,
+@@ -411,7 +411,7 @@ enum atc2609a_reg_ids {
+ 	.owner = THIS_MODULE, \
+ }
+ 
+-#define atc2609a_reg_desc_ldo_range_pick(num, n_range) { \
++#define atc2609a_reg_desc_ldo_range_pick(num, n_range, n_volt) { \
+ 	.name = "LDO"#num, \
+ 	.supply_name = "ldo"#num, \
+ 	.of_match = of_match_ptr("ldo"#num), \
+@@ -421,6 +421,7 @@ enum atc2609a_reg_ids {
+ 	.type = REGULATOR_VOLTAGE, \
+ 	.linear_ranges = atc2609a_ldo_voltage_ranges##n_range, \
+ 	.n_linear_ranges = ARRAY_SIZE(atc2609a_ldo_voltage_ranges##n_range), \
++	.n_voltages = n_volt, \
+ 	.vsel_reg = ATC2609A_PMU_LDO##num##_CTL0, \
+ 	.vsel_mask = GENMASK(4, 1), \
+ 	.vsel_range_reg = ATC2609A_PMU_LDO##num##_CTL0, \
+@@ -458,12 +459,12 @@ static const struct regulator_desc atc2609a_reg[] = {
+ 	atc2609a_reg_desc_ldo_bypass(0),
+ 	atc2609a_reg_desc_ldo_bypass(1),
+ 	atc2609a_reg_desc_ldo_bypass(2),
+-	atc2609a_reg_desc_ldo_range_pick(3, 0),
+-	atc2609a_reg_desc_ldo_range_pick(4, 0),
++	atc2609a_reg_desc_ldo_range_pick(3, 0, 29),
++	atc2609a_reg_desc_ldo_range_pick(4, 0, 29),
+ 	atc2609a_reg_desc_ldo(5),
+-	atc2609a_reg_desc_ldo_range_pick(6, 1),
+-	atc2609a_reg_desc_ldo_range_pick(7, 0),
+-	atc2609a_reg_desc_ldo_range_pick(8, 0),
++	atc2609a_reg_desc_ldo_range_pick(6, 1, 28),
++	atc2609a_reg_desc_ldo_range_pick(7, 0, 29),
++	atc2609a_reg_desc_ldo_range_pick(8, 0, 29),
+ 	atc2609a_reg_desc_ldo_fixed(9),
+ };
+ 
+diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
+index 8ff47ea522d69..5b6f4d3d1a146 100644
+--- a/drivers/regulator/bd718x7-regulator.c
++++ b/drivers/regulator/bd718x7-regulator.c
+@@ -364,7 +364,7 @@ BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
+ 	    NULL);
+ 
+ BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
+-	    regulator_map_voltage_ascend, bd718xx_set_voltage_sel_restricted,
++	    regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
+ 	    regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+ 	    NULL);
+ /*
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 16114aea099a2..898acb8ae405e 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1422,6 +1422,12 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ 	 * and we have control then make sure it is enabled.
+ 	 */
+ 	if (rdev->constraints->always_on || rdev->constraints->boot_on) {
++		/* If we want to enable this regulator, make sure that we know
++		 * the supplying regulator.
++		 */
++		if (rdev->supply_name && !rdev->supply)
++			return -EPROBE_DEFER;
++
+ 		if (rdev->supply) {
+ 			ret = regulator_enable(rdev->supply);
+ 			if (ret < 0) {
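
The core.c hunk makes set_machine_constraints() return -EPROBE_DEFER when an always-on or boot-on regulator names a supply that has not been resolved yet, so the probe is retried once the parent appears instead of the regulator being enabled supply-less. A compact model of the check; the field names mirror the hunk, and EPROBE_DEFER carries the kernel's real value.

#include <stdio.h>

#define EPROBE_DEFER 517   /* as in include/linux/errno.h */

struct rdev_sim {
    const char *supply_name;  /* constraints say a parent supply is needed */
    void *supply;             /* resolved later; may still be NULL here */
    int always_on;
};

static int set_machine_constraints_sim(struct rdev_sim *rdev)
{
    if (rdev->always_on) {
        /* the fix: do not enable before the supplying regulator is known */
        if (rdev->supply_name && !rdev->supply)
            return -EPROBE_DEFER;
        /* ... enable rdev->supply, then rdev itself ... */
    }
    return 0;
}

int main(void)
{
    struct rdev_sim r = { .supply_name = "vin", .supply = NULL, .always_on = 1 };

    printf("%d\n", set_machine_constraints_sim(&r));  /* -517: probe retried */
    return 0;
}
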
+diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
+index 08cbf688e14d3..e669250902580 100644
+--- a/drivers/regulator/da9121-regulator.c
++++ b/drivers/regulator/da9121-regulator.c
+@@ -280,7 +280,7 @@ static unsigned int da9121_map_mode(unsigned int mode)
+ 	case DA9121_BUCK_MODE_FORCE_PFM:
+ 		return REGULATOR_MODE_STANDBY;
+ 	default:
+-		return -EINVAL;
++		return REGULATOR_MODE_INVALID;
+ 	}
+ }
+ 
+@@ -317,7 +317,7 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
+ {
+ 	struct da9121 *chip = rdev_get_drvdata(rdev);
+ 	int id = rdev_get_id(rdev);
+-	unsigned int val;
++	unsigned int val, mode;
+ 	int ret = 0;
+ 
+ 	ret = regmap_read(chip->regmap, da9121_mode_field[id].reg, &val);
+@@ -326,7 +326,11 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	return da9121_map_mode(val & da9121_mode_field[id].msk);
++	mode = da9121_map_mode(val & da9121_mode_field[id].msk);
++	if (mode == REGULATOR_MODE_INVALID)
++		return -EINVAL;
++
++	return mode;
+ }
+ 
+ static const struct regulator_ops da9121_buck_ops = {
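
da9121_map_mode() returns unsigned int, so the old return -EINVAL was silently converted to a large positive number a caller could mistake for a valid mode; the fix returns the regulator core's REGULATOR_MODE_INVALID sentinel and lets da9121_buck_get_mode() translate it into -EINVAL. The wraparound in isolation:

#include <stdio.h>
#include <errno.h>

static unsigned int map_mode_buggy(unsigned int hw_mode)
{
    /* wraps around: no negative error code ever reaches the caller */
    return hw_mode <= 3 ? (1u << hw_mode) : -EINVAL;
}

int main(void)
{
    /* prints 4294967274 with a 32-bit unsigned int -- a plausible-looking
     * value, not an error, which is why a sentinel is needed instead */
    printf("%u\n", map_mode_buggy(7));
    return 0;
}
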
+diff --git a/drivers/regulator/fan53880.c b/drivers/regulator/fan53880.c
+index e83eb4fb1876a..1684faf82ed25 100644
+--- a/drivers/regulator/fan53880.c
++++ b/drivers/regulator/fan53880.c
+@@ -51,6 +51,7 @@ static const struct regulator_ops fan53880_ops = {
+ 		      REGULATOR_LINEAR_RANGE(800000, 0xf, 0x73, 25000),	\
+ 		},							\
+ 		.n_linear_ranges = 2,					\
++		.n_voltages =	   0x74,				\
+ 		.vsel_reg =	   FAN53880_LDO ## _num ## VOUT,	\
+ 		.vsel_mask =	   0x7f,				\
+ 		.enable_reg =	   FAN53880_ENABLE,			\
+@@ -76,6 +77,7 @@ static const struct regulator_desc fan53880_regulators[] = {
+ 		      REGULATOR_LINEAR_RANGE(600000, 0x1f, 0xf7, 12500),
+ 		},
+ 		.n_linear_ranges = 2,
++		.n_voltages =	   0xf8,
+ 		.vsel_reg =	   FAN53880_BUCKVOUT,
+ 		.vsel_mask =	   0x7f,
+ 		.enable_reg =	   FAN53880_ENABLE,
+@@ -95,6 +97,7 @@ static const struct regulator_desc fan53880_regulators[] = {
+ 		      REGULATOR_LINEAR_RANGE(3000000, 0x4, 0x70, 25000),
+ 		},
+ 		.n_linear_ranges = 2,
++		.n_voltages =	   0x71,
+ 		.vsel_reg =	   FAN53880_BOOSTVOUT,
+ 		.vsel_mask =	   0x7f,
+ 		.enable_reg =	   FAN53880_ENABLE_BOOST,
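
Each fan53880 descriptor gains an explicit .n_voltages, which must equal the highest selector of its last linear range plus one so the regulator core can enumerate every selectable level. A quick check for the LDO case shown above:

#include <stdio.h>

int main(void)
{
    unsigned int highest_sel = 0x73;    /* top of the LDO's second range */

    /* selectors run 0 .. highest_sel inclusive, hence the +1 */
    printf("n_voltages = %#x\n", highest_sel + 1);  /* 0x74, as in the hunk */
    return 0;
}
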
+diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
+index 02ad83153e19a..34e255c235d4c 100644
+--- a/drivers/regulator/fixed.c
++++ b/drivers/regulator/fixed.c
+@@ -88,10 +88,15 @@ static int reg_domain_disable(struct regulator_dev *rdev)
+ {
+ 	struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
+ 	struct device *dev = rdev->dev.parent;
++	int ret;
++
++	ret = dev_pm_genpd_set_performance_state(dev, 0);
++	if (ret)
++		return ret;
+ 
+ 	priv->enable_counter--;
+ 
+-	return dev_pm_genpd_set_performance_state(dev, 0);
++	return 0;
+ }
+ 
+ static int reg_is_enabled(struct regulator_dev *rdev)
+diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
+index 8d9731e4052bf..5c439c850d090 100644
+--- a/drivers/regulator/max77620-regulator.c
++++ b/drivers/regulator/max77620-regulator.c
+@@ -814,6 +814,13 @@ static int max77620_regulator_probe(struct platform_device *pdev)
+ 	config.dev = dev;
+ 	config.driver_data = pmic;
+ 
++	/*
++	 * Set of_node_reuse flag to prevent driver core from attempting to
++	 * claim any pinmux resources already claimed by the parent device.
++	 * Otherwise PMIC driver will fail to re-probe.
++	 */
++	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
+ 	for (id = 0; id < MAX77620_NUM_REGS; id++) {
+ 		struct regulator_dev *rdev;
+ 		struct regulator_desc *rdesc;
+diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
+index 852fb2596ffda..5adc552dffd58 100644
+--- a/drivers/regulator/rtmv20-regulator.c
++++ b/drivers/regulator/rtmv20-regulator.c
+@@ -103,9 +103,47 @@ static int rtmv20_lsw_disable(struct regulator_dev *rdev)
+ 	return 0;
+ }
+ 
++static int rtmv20_lsw_set_current_limit(struct regulator_dev *rdev, int min_uA,
++					int max_uA)
++{
++	int sel;
++
++	if (min_uA > RTMV20_LSW_MAXUA || max_uA < RTMV20_LSW_MINUA)
++		return -EINVAL;
++
++	if (max_uA > RTMV20_LSW_MAXUA)
++		max_uA = RTMV20_LSW_MAXUA;
++
++	sel = (max_uA - RTMV20_LSW_MINUA) / RTMV20_LSW_STEPUA;
++
++	/* Ensure the selected setting is still in range */
++	if ((sel * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA) < min_uA)
++		return -EINVAL;
++
++	sel <<= ffs(rdev->desc->csel_mask) - 1;
++
++	return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg,
++				  rdev->desc->csel_mask, sel);
++}
++
++static int rtmv20_lsw_get_current_limit(struct regulator_dev *rdev)
++{
++	unsigned int val;
++	int ret;
++
++	ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
++	if (ret)
++		return ret;
++
++	val &= rdev->desc->csel_mask;
++	val >>= ffs(rdev->desc->csel_mask) - 1;
++
++	return val * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA;
++}
++
+ static const struct regulator_ops rtmv20_regulator_ops = {
+-	.set_current_limit = regulator_set_current_limit_regmap,
+-	.get_current_limit = regulator_get_current_limit_regmap,
++	.set_current_limit = rtmv20_lsw_set_current_limit,
++	.get_current_limit = rtmv20_lsw_get_current_limit,
+ 	.enable = rtmv20_lsw_enable,
+ 	.disable = rtmv20_lsw_disable,
+ 	.is_enabled = regulator_is_enabled_regmap,
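
The rtmv20 regmap current-limit helpers are replaced with open-coded ops because the load-switch current scale starts at a non-zero minimum: the selector is derived from max_uA by rounding down, then re-checked against min_uA. A runnable model of that selection follows; the LSW_* values are stand-ins, not the driver's constants.

#include <stdio.h>

#define LSW_MINUA   2000000
#define LSW_MAXUA   6000000
#define LSW_STEPUA   200000

static int pick_sel(int min_uA, int max_uA)
{
    int sel;

    if (min_uA > LSW_MAXUA || max_uA < LSW_MINUA)
        return -1;
    if (max_uA > LSW_MAXUA)
        max_uA = LSW_MAXUA;

    /* integer division rounds down, so the pick never exceeds max_uA ... */
    sel = (max_uA - LSW_MINUA) / LSW_STEPUA;

    /* ... but the resulting current must still satisfy min_uA */
    if (sel * LSW_STEPUA + LSW_MINUA < min_uA)
        return -1;
    return sel;
}

int main(void)
{
    printf("%d\n", pick_sel(2500000, 3100000)); /* (1100000/200000) = 5 */
    printf("%d\n", pick_sel(2950000, 3050000)); /* 5 -> 3000000 >= 2950000: ok */
    printf("%d\n", pick_sel(3010000, 3050000)); /* 3000000 < 3010000: -1 */
    return 0;
}
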
+diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
+index 0e8b3caa81461..66ffc2d54e979 100644
+--- a/drivers/regulator/scmi-regulator.c
++++ b/drivers/regulator/scmi-regulator.c
+@@ -176,7 +176,7 @@ scmi_config_linear_regulator_mappings(struct scmi_regulator *sreg,
+ 		sreg->desc.uV_step =
+ 			vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_STEP];
+ 		sreg->desc.linear_min_sel = 0;
+-		sreg->desc.n_voltages = delta_uV / sreg->desc.uV_step;
++		sreg->desc.n_voltages = (delta_uV / sreg->desc.uV_step) + 1;
+ 		sreg->desc.ops = &scmi_reg_linear_ops;
+ 	}
+ 
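
The scmi one-liner is the classic inclusive-range count: a linear scale from min to max has delta/step + 1 selectable levels, because both endpoints are valid. With made-up voltages:

#include <stdio.h>

int main(void)
{
    unsigned int min_uV = 700000, max_uV = 900000, step_uV = 100000;
    unsigned int delta = max_uV - min_uV;

    printf("without +1: %u levels\n", delta / step_uV);     /* 2: max_uV unreachable */
    printf("with +1:    %u levels\n", delta / step_uV + 1); /* 3: 700000, 800000, 900000 */
    return 0;
}
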
+diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
+index 8c625b530035f..9b61e9b131ade 100644
+--- a/drivers/s390/cio/vfio_ccw_drv.c
++++ b/drivers/s390/cio/vfio_ccw_drv.c
+@@ -86,6 +86,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
+ 	struct vfio_ccw_private *private;
+ 	struct irb *irb;
+ 	bool is_final;
++	bool cp_is_finished = false;
+ 
+ 	private = container_of(work, struct vfio_ccw_private, io_work);
+ 	irb = &private->irb;
+@@ -94,14 +95,21 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
+ 		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
+ 	if (scsw_is_solicited(&irb->scsw)) {
+ 		cp_update_scsw(&private->cp, &irb->scsw);
+-		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
++		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
+ 			cp_free(&private->cp);
++			cp_is_finished = true;
++		}
+ 	}
+ 	mutex_lock(&private->io_mutex);
+ 	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
+ 	mutex_unlock(&private->io_mutex);
+ 
+-	if (private->mdev && is_final)
++	/*
++	 * Reset to IDLE only if processing of a channel program
++	 * has finished. Do not overwrite a possible processing
++	 * state if the final interrupt was for HSCH or CSCH.
++	 */
++	if (private->mdev && cp_is_finished)
+ 		private->state = VFIO_CCW_STATE_IDLE;
+ 
+ 	if (private->io_trigger)
+diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
+index 23e61aa638e4e..e435a9cd92dac 100644
+--- a/drivers/s390/cio/vfio_ccw_fsm.c
++++ b/drivers/s390/cio/vfio_ccw_fsm.c
+@@ -318,6 +318,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
+ 	}
+ 
+ err_out:
++	private->state = VFIO_CCW_STATE_IDLE;
+ 	trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
+ 				      io_region->ret_code, errstr);
+ }
+diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
+index 767ac41686fe2..5971641964c6f 100644
+--- a/drivers/s390/cio/vfio_ccw_ops.c
++++ b/drivers/s390/cio/vfio_ccw_ops.c
+@@ -276,8 +276,6 @@ static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
+ 	}
+ 
+ 	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
+-	if (region->ret_code != 0)
+-		private->state = VFIO_CCW_STATE_IDLE;
+ 	ret = (region->ret_code != 0) ? region->ret_code : count;
+ 
+ out_unlock:
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
+index 1a0dc18d69155..ed300a279a387 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
+@@ -1220,6 +1220,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
+ 		   was a result from the ABTS request rather than the CLEANUP
+ 		   request */
+ 		set_bit(BNX2FC_FLAG_IO_CLEANUP,	&io_req->req_flags);
++		rc = FAILED;
+ 		goto done;
+ 	}
+ 
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 4580e081e4894..b21246b1ba99c 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -4799,14 +4799,14 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
+ {
+ 	int i;
+ 
+-	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
+-	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
+-	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
++	devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba);
++	devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba);
++	devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba);
+ 	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
+ 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ 		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
+ 
+-		free_irq(pci_irq_vector(pdev, nr), cq);
++		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
+ 	}
+ 	pci_free_irq_vectors(pdev);
+ }
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 2f162603876f9..b93dd8ef4ac82 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -254,12 +254,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
+ 
+ 	device_enable_async_suspend(&shost->shost_dev);
+ 
++	get_device(&shost->shost_gendev);
+ 	error = device_add(&shost->shost_dev);
+ 	if (error)
+ 		goto out_del_gendev;
+ 
+-	get_device(&shost->shost_gendev);
+-
+ 	if (shost->transportt->host_size) {
+ 		shost->shost_data = kzalloc(shost->transportt->host_size,
+ 					 GFP_KERNEL);
+@@ -278,33 +277,36 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
+ 
+ 		if (!shost->work_q) {
+ 			error = -EINVAL;
+-			goto out_free_shost_data;
++			goto out_del_dev;
+ 		}
+ 	}
+ 
+ 	error = scsi_sysfs_add_host(shost);
+ 	if (error)
+-		goto out_destroy_host;
++		goto out_del_dev;
+ 
+ 	scsi_proc_host_add(shost);
+ 	scsi_autopm_put_host(shost);
+ 	return error;
+ 
+- out_destroy_host:
+-	if (shost->work_q)
+-		destroy_workqueue(shost->work_q);
+- out_free_shost_data:
+-	kfree(shost->shost_data);
++	/*
++	 * Any host allocation in this function will be freed in
++	 * scsi_host_dev_release().
++	 */
+  out_del_dev:
+ 	device_del(&shost->shost_dev);
+  out_del_gendev:
++	/*
++	 * Host state is SHOST_RUNNING so we have to explicitly release
++	 * ->shost_dev.
++	 */
++	put_device(&shost->shost_dev);
+ 	device_del(&shost->shost_gendev);
+  out_disable_runtime_pm:
+ 	device_disable_async_suspend(&shost->shost_gendev);
+ 	pm_runtime_disable(&shost->shost_gendev);
+ 	pm_runtime_set_suspended(&shost->shost_gendev);
+ 	pm_runtime_put_noidle(&shost->shost_gendev);
+-	scsi_mq_destroy_tags(shost);
+  fail:
+ 	return error;
+ }
+@@ -345,7 +347,7 @@ static void scsi_host_dev_release(struct device *dev)
+ 
+ 	ida_simple_remove(&host_index_ida, shost->host_no);
+ 
+-	if (parent)
++	if (shost->shost_state != SHOST_CREATED)
+ 		put_device(parent);
+ 	kfree(shost);
+ }
+@@ -392,8 +394,10 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+ 	mutex_init(&shost->scan_mutex);
+ 
+ 	index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
+-	if (index < 0)
+-		goto fail_kfree;
++	if (index < 0) {
++		kfree(shost);
++		return NULL;
++	}
+ 	shost->host_no = index;
+ 
+ 	shost->dma_channel = 0xff;
+@@ -486,7 +490,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+ 		shost_printk(KERN_WARNING, shost,
+ 			"error handler thread failed to spawn, error = %ld\n",
+ 			PTR_ERR(shost->ehandler));
+-		goto fail_index_remove;
++		goto fail;
+ 	}
+ 
+ 	shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
+@@ -495,17 +499,18 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+ 	if (!shost->tmf_work_q) {
+ 		shost_printk(KERN_WARNING, shost,
+ 			     "failed to create tmf workq\n");
+-		goto fail_kthread;
++		goto fail;
+ 	}
+ 	scsi_proc_hostdir_add(shost->hostt);
+ 	return shost;
++ fail:
++	/*
++	 * Host state is still SHOST_CREATED and that is enough to release
++	 * ->shost_gendev. scsi_host_dev_release() will free
++	 * dev_name(&shost->shost_dev).
++	 */
++	put_device(&shost->shost_gendev);
+ 
+- fail_kthread:
+-	kthread_stop(shost->ehandler);
+- fail_index_remove:
+-	ida_simple_remove(&host_index_ida, shost->host_no);
+- fail_kfree:
+-	kfree(shost);
+ 	return NULL;
+ }
+ EXPORT_SYMBOL(scsi_host_alloc);
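
The hosts.c rework converges every failure after host allocation onto put_device(), trusting scsi_host_dev_release() to free whatever had been set up (work queues, shost_data, the ida index), instead of per-label kfree/destroy calls that had drifted out of sync with the release routine. A sketch of the idiom's shape, with toy types only:

#include <stdio.h>
#include <stdlib.h>

struct host_sim {
    int refs;
    char *data;     /* stands in for shost_data, work queues, the ida slot */
};

/* release() owns all teardown, so every error path stays a single put */
static void host_release(struct host_sim *h)
{
    free(h->data);
    free(h);
    printf("released\n");
}

static void host_put(struct host_sim *h)
{
    if (--h->refs == 0)
        host_release(h);
}

static int host_add(struct host_sim *h, int fail)
{
    h->data = malloc(32);
    if (fail || !h->data) {
        host_put(h);    /* one unwind path; nothing freed by hand here */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct host_sim *h = calloc(1, sizeof(*h));

    if (!h)
        return 1;
    h->refs = 1;

    if (host_add(h, 1))     /* simulated failure: unwinds via release() */
        return 0;

    host_put(h);            /* normal lifetime: last ref runs release() */
    return 0;
}
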
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 480e7d2dcf3e6..745d6d98c02e2 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1558,10 +1558,12 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
+ 		return;
+ 	}
+ 
++	mutex_lock(&tgt->ha->optrom_mutex);
+ 	mutex_lock(&vha->vha_tgt.tgt_mutex);
+ 	tgt->tgt_stop = 0;
+ 	tgt->tgt_stopped = 1;
+ 	mutex_unlock(&vha->vha_tgt.tgt_mutex);
++	mutex_unlock(&tgt->ha->optrom_mutex);
+ 
+ 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
+ 	    tgt);
+diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
+index 8a79605d96521..b9969fce6b4d1 100644
+--- a/drivers/scsi/vmw_pvscsi.c
++++ b/drivers/scsi/vmw_pvscsi.c
+@@ -585,7 +585,13 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
+ 		case BTSTAT_SUCCESS:
+ 		case BTSTAT_LINKED_COMMAND_COMPLETED:
+ 		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
+-			/* If everything went fine, let's move on..  */
++			/*
++			 * Commands like INQUIRY may transfer less data than
++			 * requested by the initiator via bufflen. Set residual
++			 * count to make upper layer aware of the actual amount
++			 * of data returned.
++			 */
++			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
+ 			cmd->result = (DID_OK << 16);
+ 			break;
+ 
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index 8965fe61c8b44..fe40626e45aa8 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -68,7 +68,7 @@
+ #define BCM2835_SPI_FIFO_SIZE		64
+ #define BCM2835_SPI_FIFO_SIZE_3_4	48
+ #define BCM2835_SPI_DMA_MIN_LENGTH	96
+-#define BCM2835_SPI_NUM_CS		4   /* raise as necessary */
++#define BCM2835_SPI_NUM_CS		24  /* raise as necessary */
+ #define BCM2835_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
+ 				| SPI_NO_CS | SPI_3WIRE)
+ 
+@@ -1195,6 +1195,12 @@ static int bcm2835_spi_setup(struct spi_device *spi)
+ 	struct gpio_chip *chip;
+ 	u32 cs;
+ 
++	if (spi->chip_select >= BCM2835_SPI_NUM_CS) {
++		dev_err(&spi->dev, "only %d chip-selects supported\n",
++			BCM2835_SPI_NUM_CS - 1);
++		return -EINVAL;
++	}
++
+ 	/*
+ 	 * Precalculate SPI slave's CS register value for ->prepare_message():
+ 	 * The driver always uses software-controlled GPIO chip select, hence
+@@ -1288,7 +1294,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
+ 	ctlr->use_gpio_descriptors = true;
+ 	ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
+ 	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+-	ctlr->num_chipselect = BCM2835_SPI_NUM_CS;
++	ctlr->num_chipselect = 3;
+ 	ctlr->setup = bcm2835_spi_setup;
+ 	ctlr->transfer_one = bcm2835_spi_transfer_one;
+ 	ctlr->handle_err = bcm2835_spi_handle_err;
+diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
+index 1a7352abd8786..3d8948a17095b 100644
+--- a/drivers/spi/spi-bitbang.c
++++ b/drivers/spi/spi-bitbang.c
+@@ -181,6 +181,8 @@ int spi_bitbang_setup(struct spi_device *spi)
+ {
+ 	struct spi_bitbang_cs	*cs = spi->controller_state;
+ 	struct spi_bitbang	*bitbang;
++	bool			initial_setup = false;
++	int			retval;
+ 
+ 	bitbang = spi_master_get_devdata(spi->master);
+ 
+@@ -189,22 +191,30 @@ int spi_bitbang_setup(struct spi_device *spi)
+ 		if (!cs)
+ 			return -ENOMEM;
+ 		spi->controller_state = cs;
++		initial_setup = true;
+ 	}
+ 
+ 	/* per-word shift register access, in hardware or bitbanging */
+ 	cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
+-	if (!cs->txrx_word)
+-		return -EINVAL;
++	if (!cs->txrx_word) {
++		retval = -EINVAL;
++		goto err_free;
++	}
+ 
+ 	if (bitbang->setup_transfer) {
+-		int retval = bitbang->setup_transfer(spi, NULL);
++		retval = bitbang->setup_transfer(spi, NULL);
+ 		if (retval < 0)
+-			return retval;
++			goto err_free;
+ 	}
+ 
+ 	dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
+ 
+ 	return 0;
++
++err_free:
++	if (initial_setup)
++		kfree(cs);
++	return retval;
+ }
+ EXPORT_SYMBOL_GPL(spi_bitbang_setup);
+ 
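
The spi-bitbang hunk, and the matching fsl-spi, omap-uwire and omap2-mcspi hunks that follow, plug one recurring leak: setup() can run again on an already-configured device, so a failing call may free controller_state only if this call allocated it. A self-contained sketch of the pattern, using placeholder types rather than the SPI core's:

#include <errno.h>
#include <stdlib.h>

struct spi_dev_sim {
    void *controller_state;
};

static int configure_fail(struct spi_dev_sim *spi)
{
    (void)spi;
    return -EINVAL;     /* simulate setup_transfer() failing */
}

static int setup_sim(struct spi_dev_sim *spi,
                     int (*configure)(struct spi_dev_sim *))
{
    int initial_setup = 0;
    int ret;

    if (!spi->controller_state) {
        spi->controller_state = calloc(1, 64);
        if (!spi->controller_state)
            return -ENOMEM;
        initial_setup = 1;  /* we own this allocation on the error path */
    }

    ret = configure(spi);
    if (ret && initial_setup) {
        /* free only what this call allocated; a failed re-setup must not
         * free state the live device still points at */
        free(spi->controller_state);
        spi->controller_state = NULL;
    }
    return ret;
}

int main(void)
{
    struct spi_dev_sim spi = { 0 };

    return setup_sim(&spi, configure_fail) ? 0 : 1;
}
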
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index d0e5aa18b7bad..bdf94cc7be1af 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -440,6 +440,7 @@ static int fsl_spi_setup(struct spi_device *spi)
+ {
+ 	struct mpc8xxx_spi *mpc8xxx_spi;
+ 	struct fsl_spi_reg __iomem *reg_base;
++	bool initial_setup = false;
+ 	int retval;
+ 	u32 hw_mode;
+ 	struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+@@ -452,6 +453,7 @@ static int fsl_spi_setup(struct spi_device *spi)
+ 		if (!cs)
+ 			return -ENOMEM;
+ 		spi_set_ctldata(spi, cs);
++		initial_setup = true;
+ 	}
+ 	mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ 
+@@ -475,6 +477,8 @@ static int fsl_spi_setup(struct spi_device *spi)
+ 	retval = fsl_spi_setup_transfer(spi, NULL);
+ 	if (retval < 0) {
+ 		cs->hw_mode = hw_mode; /* Restore settings */
++		if (initial_setup)
++			kfree(cs);
+ 		return retval;
+ 	}
+ 
+diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
+index 71402f71ddd85..df28c6664aba6 100644
+--- a/drivers/spi/spi-omap-uwire.c
++++ b/drivers/spi/spi-omap-uwire.c
+@@ -424,15 +424,22 @@ done:
+ static int uwire_setup(struct spi_device *spi)
+ {
+ 	struct uwire_state *ust = spi->controller_state;
++	bool initial_setup = false;
++	int status;
+ 
+ 	if (ust == NULL) {
+ 		ust = kzalloc(sizeof(*ust), GFP_KERNEL);
+ 		if (ust == NULL)
+ 			return -ENOMEM;
+ 		spi->controller_state = ust;
++		initial_setup = true;
+ 	}
+ 
+-	return uwire_setup_transfer(spi, NULL);
++	status = uwire_setup_transfer(spi, NULL);
++	if (status && initial_setup)
++		kfree(ust);
++
++	return status;
+ }
+ 
+ static void uwire_cleanup(struct spi_device *spi)
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index d4c9510af3931..3596bbe4b7760 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -1032,8 +1032,22 @@ static void omap2_mcspi_release_dma(struct spi_master *master)
+ 	}
+ }
+ 
++static void omap2_mcspi_cleanup(struct spi_device *spi)
++{
++	struct omap2_mcspi_cs	*cs;
++
++	if (spi->controller_state) {
++		/* Unlink controller state from context save list */
++		cs = spi->controller_state;
++		list_del(&cs->node);
++
++		kfree(cs);
++	}
++}
++
+ static int omap2_mcspi_setup(struct spi_device *spi)
+ {
++	bool			initial_setup = false;
+ 	int			ret;
+ 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(spi->master);
+ 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
+@@ -1051,35 +1065,28 @@ static int omap2_mcspi_setup(struct spi_device *spi)
+ 		spi->controller_state = cs;
+ 		/* Link this to context save list */
+ 		list_add_tail(&cs->node, &ctx->cs);
++		initial_setup = true;
+ 	}
+ 
+ 	ret = pm_runtime_get_sync(mcspi->dev);
+ 	if (ret < 0) {
+ 		pm_runtime_put_noidle(mcspi->dev);
++		if (initial_setup)
++			omap2_mcspi_cleanup(spi);
+ 
+ 		return ret;
+ 	}
+ 
+ 	ret = omap2_mcspi_setup_transfer(spi, NULL);
++	if (ret && initial_setup)
++		omap2_mcspi_cleanup(spi);
++
+ 	pm_runtime_mark_last_busy(mcspi->dev);
+ 	pm_runtime_put_autosuspend(mcspi->dev);
+ 
+ 	return ret;
+ }
+ 
+-static void omap2_mcspi_cleanup(struct spi_device *spi)
+-{
+-	struct omap2_mcspi_cs	*cs;
+-
+-	if (spi->controller_state) {
+-		/* Unlink controller state from context save list */
+-		cs = spi->controller_state;
+-		list_del(&cs->node);
+-
+-		kfree(cs);
+-	}
+-}
+-
+ static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
+ {
+ 	struct omap2_mcspi *mcspi = data;
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index 0cc767283674b..825cc5b2ab2fb 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1254,6 +1254,8 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
+ 		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
+ 
+ 		err = gpiod_direction_output(gpiod, !chip->gpio_cs_inverted);
++		if (err)
++			gpiod_put(chip->gpiod_cs);
+ 	}
+ 
+ 	return err;
+@@ -1267,6 +1269,7 @@ static int setup(struct spi_device *spi)
+ 	struct driver_data *drv_data =
+ 		spi_controller_get_devdata(spi->controller);
+ 	uint tx_thres, tx_hi_thres, rx_thres;
++	int err;
+ 
+ 	switch (drv_data->ssp_type) {
+ 	case QUARK_X1000_SSP:
+@@ -1413,7 +1416,11 @@ static int setup(struct spi_device *spi)
+ 	if (drv_data->ssp_type == CE4100_SSP)
+ 		return 0;
+ 
+-	return setup_cs(spi, chip, chip_info);
++	err = setup_cs(spi, chip, chip_info);
++	if (err)
++		kfree(chip);
++
++	return err;
+ }
+ 
+ static void cleanup(struct spi_device *spi)
+diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
+index b41a75749b498..28e70db9bbba8 100644
+--- a/drivers/spi/spi-sprd.c
++++ b/drivers/spi/spi-sprd.c
+@@ -1068,6 +1068,7 @@ static const struct of_device_id sprd_spi_of_match[] = {
+ 	{ .compatible = "sprd,sc9860-spi", },
+ 	{ /* sentinel */ }
+ };
++MODULE_DEVICE_TABLE(of, sprd_spi_of_match);
+ 
+ static struct platform_driver sprd_spi_driver = {
+ 	.driver = {
+diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
+index 5d8a5ee62fa23..2765289028fae 100644
+--- a/drivers/spi/spi-zynq-qspi.c
++++ b/drivers/spi/spi-zynq-qspi.c
+@@ -528,18 +528,17 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
+ 	struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
+ 	int err = 0, i;
+ 	u8 *tmpbuf;
+-	u8 opcode = op->cmd.opcode;
+ 
+ 	dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
+-		opcode, op->cmd.buswidth, op->addr.buswidth,
++		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ 		op->dummy.buswidth, op->data.buswidth);
+ 
+ 	zynq_qspi_chipselect(mem->spi, true);
+ 	zynq_qspi_config_op(xqspi, mem->spi);
+ 
+-	if (op->cmd.nbytes) {
++	if (op->cmd.opcode) {
+ 		reinit_completion(&xqspi->data_completion);
+-		xqspi->txbuf = &opcode;
++		xqspi->txbuf = (u8 *)&op->cmd.opcode;
+ 		xqspi->rxbuf = NULL;
+ 		xqspi->tx_bytes = op->cmd.nbytes;
+ 		xqspi->rx_bytes = op->cmd.nbytes;
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 6ae7418f648ca..e067c54e87dd7 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -47,10 +47,6 @@ static void spidev_release(struct device *dev)
+ {
+ 	struct spi_device	*spi = to_spi_device(dev);
+ 
+-	/* spi controllers may cleanup for released devices */
+-	if (spi->controller->cleanup)
+-		spi->controller->cleanup(spi);
+-
+ 	spi_controller_put(spi->controller);
+ 	kfree(spi->driver_override);
+ 	kfree(spi);
+@@ -558,6 +554,12 @@ static int spi_dev_check(struct device *dev, void *data)
+ 	return 0;
+ }
+ 
++static void spi_cleanup(struct spi_device *spi)
++{
++	if (spi->controller->cleanup)
++		spi->controller->cleanup(spi);
++}
++
+ /**
+  * spi_add_device - Add spi_device allocated with spi_alloc_device
+  * @spi: spi_device to register
+@@ -622,11 +624,13 @@ int spi_add_device(struct spi_device *spi)
+ 
+ 	/* Device may be bound to an active driver when this returns */
+ 	status = device_add(&spi->dev);
+-	if (status < 0)
++	if (status < 0) {
+ 		dev_err(dev, "can't add %s, status %d\n",
+ 				dev_name(&spi->dev), status);
+-	else
++		spi_cleanup(spi);
++	} else {
+ 		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
++	}
+ 
+ done:
+ 	mutex_unlock(&spi_add_lock);
+@@ -719,7 +723,9 @@ void spi_unregister_device(struct spi_device *spi)
+ 	}
+ 	if (ACPI_COMPANION(&spi->dev))
+ 		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
+-	device_unregister(&spi->dev);
++	device_del(&spi->dev);
++	spi_cleanup(spi);
++	put_device(&spi->dev);
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_device);
+ 
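
In spi.c, the controller's cleanup() hook moves out of spidev_release() and into spi_unregister_device() and the device_add() failure path, so it now runs between device_del() and the final put_device(): after the device stops being visible, but while the controller is certainly still around. A toy of the required ordering, with stub functions standing in for the real calls:

#include <stdio.h>

static void device_del_sim(void)  { printf("del: no new users can attach\n"); }
static void cleanup_sim(void)     { printf("cleanup: controller still valid\n"); }
static void put_device_sim(void)  { printf("put: last ref, memory may go away\n"); }

int main(void)
{
    /* the fix is exactly this order; cleanup after the final put would
     * race with the object being freed */
    device_del_sim();
    cleanup_sim();
    put_device_sim();
    return 0;
}
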
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+index ff164a8c8679a..cbec65e5a4645 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+@@ -2359,7 +2359,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame,
+ 	DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+ 
+ 	{
+-		struct station_info sinfo;
++		struct station_info sinfo = {};
+ 		u8 ie_offset;
+ 		if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
+ 			ie_offset = _ASOCREQ_IE_OFFSET_;
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index 582bfeceedb47..e72ec9c787f98 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -2006,7 +2006,7 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
+ 		else
+ 			mask = BIT(priv_ep->num);
+ 
+-		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
++		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir) {
+ 			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
+ 			cdns3_set_register_bit(&regs->tdl_beh, mask);
+ 			cdns3_set_register_bit(&regs->tdl_beh2, mask);
+@@ -2045,15 +2045,13 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ 	case USB_ENDPOINT_XFER_INT:
+ 		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
+ 
+-		if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
+-		    priv_dev->dev_ver > DEV_VER_V2)
++		if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
+ 			ep_cfg |= EP_CFG_TDL_CHK;
+ 		break;
+ 	case USB_ENDPOINT_XFER_BULK:
+ 		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
+ 
+-		if ((priv_dev->dev_ver == DEV_VER_V2  && !priv_ep->dir) ||
+-		    priv_dev->dev_ver > DEV_VER_V2)
++		if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
+ 			ep_cfg |= EP_CFG_TDL_CHK;
+ 		break;
+ 	default:
+@@ -3255,8 +3253,10 @@ static int __cdns3_gadget_init(struct cdns *cdns)
+ 	pm_runtime_get_sync(cdns->dev);
+ 
+ 	ret = cdns3_gadget_start(cdns);
+-	if (ret)
++	if (ret) {
++		pm_runtime_put_sync(cdns->dev);
+ 		return ret;
++	}
+ 
+ 	/*
+ 	 * Because interrupt line can be shared with other components in
+diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
+index 5f0513c96c04e..68972746e3636 100644
+--- a/drivers/usb/cdns3/cdnsp-ring.c
++++ b/drivers/usb/cdns3/cdnsp-ring.c
+@@ -1517,13 +1517,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ {
+ 	struct cdnsp_device *pdev = (struct cdnsp_device *)data;
+ 	union cdnsp_trb *event_ring_deq;
++	unsigned long flags;
+ 	int counter = 0;
+ 
+-	spin_lock(&pdev->lock);
++	spin_lock_irqsave(&pdev->lock, flags);
+ 
+ 	if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
+ 		cdnsp_died(pdev);
+-		spin_unlock(&pdev->lock);
++		spin_unlock_irqrestore(&pdev->lock, flags);
+ 		return IRQ_HANDLED;
+ 	}
+ 
+@@ -1539,7 +1540,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ 
+ 	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
+ 
+-	spin_unlock(&pdev->lock);
++	spin_unlock_irqrestore(&pdev->lock, flags);
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index c16d900cdaee3..393f216b91615 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -2061,6 +2061,7 @@ static int udc_start(struct ci_hdrc *ci)
+ 	ci->gadget.name         = ci->platdata->name;
+ 	ci->gadget.otg_caps	= otg_caps;
+ 	ci->gadget.sg_supported = 1;
++	ci->gadget.irq		= ci->irq;
+ 
+ 	if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
+ 		ci->gadget.quirk_avoids_skb_reserve = 1;
+diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
+index bdf1f98dfad8c..ffe301d6ea359 100644
+--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
++++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
+@@ -651,7 +651,7 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
+ 		return PTR_ERR(priv->usb_glue_regmap);
+ 
+ 	/* Create a regmap for each USB2 PHY control register set */
+-	for (i = 0; i < priv->usb2_ports; i++) {
++	for (i = 0; i < priv->drvdata->num_phys; i++) {
+ 		struct regmap_config u2p_regmap_config = {
+ 			.reg_bits = 8,
+ 			.val_bits = 32,
+@@ -659,6 +659,9 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
+ 			.max_register = U2P_R1,
+ 		};
+ 
++		if (!strstr(priv->drvdata->phy_names[i], "usb2"))
++			continue;
++
+ 		u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
+ 							"u2p-%d", i);
+ 		if (!u2p_regmap_config.name)
+@@ -772,13 +775,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
+ 
+ 	ret = priv->drvdata->usb_init(priv);
+ 	if (ret)
+-		goto err_disable_clks;
++		goto err_disable_regulator;
+ 
+ 	/* Init PHYs */
+ 	for (i = 0 ; i < PHY_COUNT ; ++i) {
+ 		ret = phy_init(priv->phys[i]);
+ 		if (ret)
+-			goto err_disable_clks;
++			goto err_disable_regulator;
+ 	}
+ 
+ 	/* Set PHY Power */
+@@ -816,6 +819,10 @@ err_phys_exit:
+ 	for (i = 0 ; i < PHY_COUNT ; ++i)
+ 		phy_exit(priv->phys[i]);
+ 
++err_disable_regulator:
++	if (priv->vbus)
++		regulator_disable(priv->vbus);
++
+ err_disable_clks:
+ 	clk_bulk_disable_unprepare(priv->drvdata->num_clks,
+ 				   priv->drvdata->clks);
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 8b668ef46f7f1..3cd2942643725 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -292,6 +292,9 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
+ 		epnum |= 1;
+ 
+ 	dep = dwc->eps[epnum];
++	if (dep == NULL)
++		return NULL;
++
+ 	if (dep->flags & DWC3_EP_ENABLED)
+ 		return dep;
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index a721d0e4d9994..1f9454e0d447b 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2240,13 +2240,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ 	}
+ 
+ 	/*
+-	 * Synchronize any pending event handling before executing the controller
+-	 * halt routine.
++	 * Synchronize and disable any further event handling while controller
++	 * is being enabled/disabled.
+ 	 */
+-	if (!is_on) {
+-		dwc3_gadget_disable_irq(dwc);
+-		synchronize_irq(dwc->irq_gadget);
+-	}
++	disable_irq(dwc->irq_gadget);
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 
+@@ -2284,6 +2281,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ 
+ 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
++	enable_irq(dwc->irq_gadget);
++
+ 	pm_runtime_put(dwc->dev);
+ 
+ 	return ret;
+@@ -4012,6 +4011,7 @@ err5:
+ 	dwc3_gadget_free_endpoints(dwc);
+ err4:
+ 	usb_put_gadget(dwc->gadget);
++	dwc->gadget = NULL;
+ err3:
+ 	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
+ 			dwc->bounce_addr);
+@@ -4031,6 +4031,9 @@ err0:
+ 
+ void dwc3_gadget_exit(struct dwc3 *dwc)
+ {
++	if (!dwc->gadget)
++		return;
++
+ 	usb_del_gadget(dwc->gadget);
+ 	dwc3_gadget_free_endpoints(dwc);
+ 	usb_put_gadget(dwc->gadget);
+diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
+index 8bb25773b61e9..05507606b2b42 100644
+--- a/drivers/usb/gadget/config.c
++++ b/drivers/usb/gadget/config.c
+@@ -164,6 +164,14 @@ int usb_assign_descriptors(struct usb_function *f,
+ {
+ 	struct usb_gadget *g = f->config->cdev->gadget;
+ 
++	/* The super-speed-plus descriptor falls back to the super-speed
++	 * one if such a descriptor was provided, thus avoiding a NULL
++	 * pointer dereference if a 5gbps capable gadget is used with
++	 * a 10gbps capable config (device port + cable + host port).
++	 */
++	if (!ssp)
++		ssp = ss;
++
+ 	if (fs) {
+ 		f->fs_descriptors = usb_copy_descriptors(fs);
+ 		if (!f->fs_descriptors)
+diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
+index 7f5cf488b2b1e..ffe2486fce71c 100644
+--- a/drivers/usb/gadget/function/f_ecm.c
++++ b/drivers/usb/gadget/function/f_ecm.c
+@@ -791,7 +791,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
+ 		fs_ecm_notify_desc.bEndpointAddress;
+ 
+ 	status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
+-			ecm_ss_function, NULL);
++			ecm_ss_function, ecm_ss_function);
+ 	if (status)
+ 		goto fail;
+ 
+diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
+index cfcc4e81fb776..2cd9942707b46 100644
+--- a/drivers/usb/gadget/function/f_eem.c
++++ b/drivers/usb/gadget/function/f_eem.c
+@@ -302,7 +302,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
+ 	eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;
+ 
+ 	status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function,
+-			eem_ss_function, NULL);
++			eem_ss_function, eem_ss_function);
+ 	if (status)
+ 		goto fail;
+ 
+@@ -495,7 +495,7 @@ static int eem_unwrap(struct gether *port,
+ 			skb2 = skb_clone(skb, GFP_ATOMIC);
+ 			if (unlikely(!skb2)) {
+ 				DBG(cdev, "unable to unframe EEM packet\n");
+-				continue;
++				goto next;
+ 			}
+ 			skb_trim(skb2, len - ETH_FCS_LEN);
+ 
+@@ -505,7 +505,7 @@ static int eem_unwrap(struct gether *port,
+ 						GFP_ATOMIC);
+ 			if (unlikely(!skb3)) {
+ 				dev_kfree_skb_any(skb2);
+-				continue;
++				goto next;
+ 			}
+ 			dev_kfree_skb_any(skb2);
+ 			skb_queue_tail(list, skb3);
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 10a5d9f0f2b90..f29abc7867d59 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -3567,6 +3567,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
+ 		ffs->func = NULL;
+ 	}
+ 
++	/* Drain any pending AIO completions */
++	drain_workqueue(ffs->io_completion_wq);
++
+ 	if (!--opts->refcnt)
+ 		functionfs_unbind(ffs);
+ 
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index 1125f4715830d..e556993081170 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -802,7 +802,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ 		hidg_fs_out_ep_desc.bEndpointAddress;
+ 
+ 	status = usb_assign_descriptors(f, hidg_fs_descriptors,
+-			hidg_hs_descriptors, hidg_ss_descriptors, NULL);
++			hidg_hs_descriptors, hidg_ss_descriptors,
++			hidg_ss_descriptors);
+ 	if (status)
+ 		goto fail;
+ 
+diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
+index b56ad7c3838b8..ae41f556eb752 100644
+--- a/drivers/usb/gadget/function/f_loopback.c
++++ b/drivers/usb/gadget/function/f_loopback.c
+@@ -207,7 +207,7 @@ autoconf_fail:
+ 	ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
+ 
+ 	ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs,
+-			ss_loopback_descs, NULL);
++			ss_loopback_descs, ss_loopback_descs);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 019bea8e09cce..855127249f242 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -583,7 +583,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
+ 		data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
+ 		data[1] = data[0];
+ 
+-		DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
++		DBG(cdev, "notify speed %u\n", ncm_bitrate(cdev->gadget));
+ 		ncm->notify_state = NCM_NOTIFY_CONNECT;
+ 		break;
+ 	}
+@@ -1101,11 +1101,11 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
+ 			ncm->ndp_dgram_count = 1;
+ 
+ 			/* Note: we skip opts->next_ndp_index */
+-		}
+ 
+-		/* Delay the timer. */
+-		hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
+-			      HRTIMER_MODE_REL_SOFT);
++			/* Start the timer. */
++			hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
++				      HRTIMER_MODE_REL_SOFT);
++		}
+ 
+ 		/* Add the datagram position entries */
+ 		ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
+diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
+index 61ce8e68f7a30..61bbf27b52037 100644
+--- a/drivers/usb/gadget/function/f_printer.c
++++ b/drivers/usb/gadget/function/f_printer.c
+@@ -1101,7 +1101,8 @@ autoconf_fail:
+ 	ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
+ 
+ 	ret = usb_assign_descriptors(f, fs_printer_function,
+-			hs_printer_function, ss_printer_function, NULL);
++			hs_printer_function, ss_printer_function,
++			ss_printer_function);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
+index 0739b05a0ef7b..ee95e8f5f9d48 100644
+--- a/drivers/usb/gadget/function/f_rndis.c
++++ b/drivers/usb/gadget/function/f_rndis.c
+@@ -789,7 +789,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ 	ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
+ 
+ 	status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
+-			eth_ss_function, NULL);
++			eth_ss_function, eth_ss_function);
+ 	if (status)
+ 		goto fail;
+ 
+diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
+index e627138463504..1ed8ff0ac2d31 100644
+--- a/drivers/usb/gadget/function/f_serial.c
++++ b/drivers/usb/gadget/function/f_serial.c
+@@ -233,7 +233,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
+ 	gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
+ 
+ 	status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
+-			gser_ss_function, NULL);
++			gser_ss_function, gser_ss_function);
+ 	if (status)
+ 		goto fail;
+ 	dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
+diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
+index 5a201ba7b155b..1abf08e5164af 100644
+--- a/drivers/usb/gadget/function/f_sourcesink.c
++++ b/drivers/usb/gadget/function/f_sourcesink.c
+@@ -431,7 +431,8 @@ no_iso:
+ 	ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
+ 
+ 	ret = usb_assign_descriptors(f, fs_source_sink_descs,
+-			hs_source_sink_descs, ss_source_sink_descs, NULL);
++			hs_source_sink_descs, ss_source_sink_descs,
++			ss_source_sink_descs);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c
+index 4d945254905d9..51c1cae162d9b 100644
+--- a/drivers/usb/gadget/function/f_subset.c
++++ b/drivers/usb/gadget/function/f_subset.c
+@@ -358,7 +358,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
+ 		fs_subset_out_desc.bEndpointAddress;
+ 
+ 	status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function,
+-			ss_eth_function, NULL);
++			ss_eth_function, ss_eth_function);
+ 	if (status)
+ 		goto fail;
+ 
+diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
+index 410fa89eae8f6..5a2e9ce2bc352 100644
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -2061,7 +2061,8 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+ 
+ 	ret = usb_assign_descriptors(f, uasp_fs_function_desc,
+-			uasp_hs_function_desc, uasp_ss_function_desc, NULL);
++			uasp_hs_function_desc, uasp_ss_function_desc,
++			uasp_ss_function_desc);
+ 	if (ret)
+ 		goto ep_fail;
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 7bc18cf8042cc..18c2bbddf080b 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -59,6 +59,7 @@
+ #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI		0x1138
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI		0x461e
+ 
++#define PCI_DEVICE_ID_AMD_RENOIR_XHCI			0x1639
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_2			0x43bb
+@@ -182,6 +183,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
+ 		xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+ 
++	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
++		pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
++		xhci->quirks |= XHCI_BROKEN_D3COLD;
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
+ 		xhci->quirks |= XHCI_INTEL_HOST;
+@@ -539,7 +544,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ 	 * Systems with the TI redriver that loses port status change events
+ 	 * need to have the registers polled during D3, so avoid D3cold.
+ 	 */
+-	if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
++	if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
+ 		pci_d3cold_disable(pdev);
+ 
+ 	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index ca822ad3b65b0..4e171099d2cbd 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1892,6 +1892,7 @@ struct xhci_hcd {
+ #define XHCI_DISABLE_SPARSE	BIT_ULL(38)
+ #define XHCI_SG_TRB_CACHE_SIZE_QUIRK	BIT_ULL(39)
+ #define XHCI_NO_SOFT_RETRY	BIT_ULL(40)
++#define XHCI_BROKEN_D3COLD	BIT_ULL(41)
+ 
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+diff --git a/drivers/usb/misc/brcmstb-usb-pinmap.c b/drivers/usb/misc/brcmstb-usb-pinmap.c
+index b3cfe8666ea7d..336653091e3b3 100644
+--- a/drivers/usb/misc/brcmstb-usb-pinmap.c
++++ b/drivers/usb/misc/brcmstb-usb-pinmap.c
+@@ -263,6 +263,8 @@ static int __init brcmstb_usb_pinmap_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 
+ 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!r)
++		return -EINVAL;
+ 
+ 	pdata = devm_kzalloc(&pdev->dev,
+ 			     sizeof(*pdata) +
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 8f09a387b7738..4c8f0112481f3 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2009,9 +2009,8 @@ static void musb_pm_runtime_check_session(struct musb *musb)
+ 			schedule_delayed_work(&musb->irq_work,
+ 					      msecs_to_jiffies(1000));
+ 			musb->quirk_retries--;
+-			break;
+ 		}
+-		fallthrough;
++		break;
+ 	case MUSB_QUIRK_B_INVALID_VBUS_91:
+ 		if (musb->quirk_retries && !musb->flush_irq_work) {
+ 			musb_dbg(musb,
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index a373cd63b3a44..f6e1ce0a47edb 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -252,9 +252,11 @@ struct cp210x_serial_private {
+ 	u8			gpio_input;
+ #endif
+ 	u8			partnum;
++	u32			fw_version;
+ 	speed_t			min_speed;
+ 	speed_t			max_speed;
+ 	bool			use_actual_rate;
++	bool			no_flow_control;
+ };
+ 
+ enum cp210x_event_state {
+@@ -398,6 +400,7 @@ struct cp210x_special_chars {
+ 
+ /* CP210X_VENDOR_SPECIFIC values */
+ #define CP210X_READ_2NCONFIG	0x000E
++#define CP210X_GET_FW_VER_2N	0x0010
+ #define CP210X_READ_LATCH	0x00C2
+ #define CP210X_GET_PARTNUM	0x370B
+ #define CP210X_GET_PORTCONFIG	0x370C
+@@ -537,6 +540,12 @@ struct cp210x_single_port_config {
+ #define CP210X_2NCONFIG_GPIO_RSTLATCH_IDX	587
+ #define CP210X_2NCONFIG_GPIO_CONTROL_IDX	600
+ 
++/* CP2102N QFN20 port configuration values */
++#define CP2102N_QFN20_GPIO2_TXLED_MODE		BIT(2)
++#define CP2102N_QFN20_GPIO3_RXLED_MODE		BIT(3)
++#define CP2102N_QFN20_GPIO1_RS485_MODE		BIT(4)
++#define CP2102N_QFN20_GPIO0_CLK_MODE		BIT(6)
++
+ /* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
+ struct cp210x_gpio_write {
+ 	u8	mask;
+@@ -1122,6 +1131,7 @@ static bool cp210x_termios_change(const struct ktermios *a, const struct ktermio
+ static void cp210x_set_flow_control(struct tty_struct *tty,
+ 		struct usb_serial_port *port, struct ktermios *old_termios)
+ {
++	struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
+ 	struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ 	struct cp210x_special_chars chars;
+ 	struct cp210x_flow_ctl flow_ctl;
+@@ -1129,6 +1139,15 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
+ 	u32 ctl_hs;
+ 	int ret;
+ 
++	/*
++	 * Some CP2102N interpret ulXonLimit as ulFlowReplace (erratum
++	 * CP2102N_E104). Report back that flow control is not supported.
++	 */
++	if (priv->no_flow_control) {
++		tty->termios.c_cflag &= ~CRTSCTS;
++		tty->termios.c_iflag &= ~(IXON | IXOFF);
++	}
++
+ 	if (old_termios &&
+ 			C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) &&
+ 			I_IXON(tty) == (old_termios->c_iflag & IXON) &&
+@@ -1185,19 +1204,20 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
+ 		port_priv->crtscts = false;
+ 	}
+ 
+-	if (I_IXOFF(tty))
++	if (I_IXOFF(tty)) {
+ 		flow_repl |= CP210X_SERIAL_AUTO_RECEIVE;
+-	else
++
++		flow_ctl.ulXonLimit = cpu_to_le32(128);
++		flow_ctl.ulXoffLimit = cpu_to_le32(128);
++	} else {
+ 		flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE;
++	}
+ 
+ 	if (I_IXON(tty))
+ 		flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT;
+ 	else
+ 		flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT;
+ 
+-	flow_ctl.ulXonLimit = cpu_to_le32(128);
+-	flow_ctl.ulXoffLimit = cpu_to_le32(128);
+-
+ 	dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__,
+ 			ctl_hs, flow_repl);
+ 
+@@ -1726,7 +1746,19 @@ static int cp2102n_gpioconf_init(struct usb_serial *serial)
+ 	priv->gpio_pushpull = (gpio_pushpull >> 3) & 0x0f;
+ 
+ 	/* 0 indicates GPIO mode, 1 is alternate function */
+-	priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
++	if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN20) {
++		/* QFN20 is special... */
++		if (gpio_ctrl & CP2102N_QFN20_GPIO0_CLK_MODE)   /* GPIO 0 */
++			priv->gpio_altfunc |= BIT(0);
++		if (gpio_ctrl & CP2102N_QFN20_GPIO1_RS485_MODE) /* GPIO 1 */
++			priv->gpio_altfunc |= BIT(1);
++		if (gpio_ctrl & CP2102N_QFN20_GPIO2_TXLED_MODE) /* GPIO 2 */
++			priv->gpio_altfunc |= BIT(2);
++		if (gpio_ctrl & CP2102N_QFN20_GPIO3_RXLED_MODE) /* GPIO 3 */
++			priv->gpio_altfunc |= BIT(3);
++	} else {
++		priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
++	}
+ 
+ 	if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN28) {
+ 		/*
+@@ -1901,6 +1933,45 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
+ 	priv->use_actual_rate = use_actual_rate;
+ }
+ 
++static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
++{
++	struct cp210x_serial_private *priv = usb_get_serial_data(serial);
++	u8 ver[3];
++	int ret;
++
++	ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, value,
++			ver, sizeof(ver));
++	if (ret)
++		return ret;
++
++	dev_dbg(&serial->interface->dev, "%s - %d.%d.%d\n", __func__,
++			ver[0], ver[1], ver[2]);
++
++	priv->fw_version = ver[0] << 16 | ver[1] << 8 | ver[2];
++
++	return 0;
++}
++
++static void cp210x_determine_quirks(struct usb_serial *serial)
++{
++	struct cp210x_serial_private *priv = usb_get_serial_data(serial);
++	int ret;
++
++	switch (priv->partnum) {
++	case CP210X_PARTNUM_CP2102N_QFN28:
++	case CP210X_PARTNUM_CP2102N_QFN24:
++	case CP210X_PARTNUM_CP2102N_QFN20:
++		ret = cp210x_get_fw_version(serial, CP210X_GET_FW_VER_2N);
++		if (ret)
++			break;
++		if (priv->fw_version <= 0x10004)
++			priv->no_flow_control = true;
++		break;
++	default:
++		break;
++	}
++}
++
+ static int cp210x_attach(struct usb_serial *serial)
+ {
+ 	int result;
+@@ -1921,6 +1992,7 @@ static int cp210x_attach(struct usb_serial *serial)
+ 
+ 	usb_set_serial_data(serial, priv);
+ 
++	cp210x_determine_quirks(serial);
+ 	cp210x_init_max_speed(serial);
+ 
+ 	result = cp210x_gpio_init(serial);
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 5a725ffb76756..41e81ab7f64a9 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -611,6 +611,7 @@ static const struct usb_device_id id_table_combined[] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index d854e04a4286e..add602bebd820 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -581,6 +581,7 @@
+ #define FTDI_NT_ORIONLXM_PID		0x7c90	/* OrionLXm Substation Automation Platform */
+ #define FTDI_NT_ORIONLX_PLUS_PID	0x7c91	/* OrionLX+ Substation Automation Platform */
+ #define FTDI_NT_ORION_IO_PID		0x7c92	/* Orion I/O */
++#define FTDI_NT_ORIONMX_PID		0x7c93	/* OrionMX */
+ 
+ /*
+  * Synapse Wireless product ids (FTDI_VID)
+diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
+index 83c62f920c501..c2ece584724e9 100644
+--- a/drivers/usb/serial/omninet.c
++++ b/drivers/usb/serial/omninet.c
+@@ -26,6 +26,7 @@
+ 
+ #define ZYXEL_VENDOR_ID		0x0586
+ #define ZYXEL_OMNINET_ID	0x1000
++#define ZYXEL_OMNI_56K_PLUS_ID	0x1500
+ /* This one seems to be a re-branded ZyXEL device */
+ #define BT_IGNITIONPRO_ID	0x2000
+ 
+@@ -40,6 +41,7 @@ static void omninet_port_remove(struct usb_serial_port *port);
+ 
+ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
++	{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNI_56K_PLUS_ID) },
+ 	{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
+ 	{ }						/* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
+index 599dcb2e374d0..527029ab23016 100644
+--- a/drivers/usb/serial/quatech2.c
++++ b/drivers/usb/serial/quatech2.c
+@@ -416,7 +416,7 @@ static void qt2_close(struct usb_serial_port *port)
+ 
+ 	/* flush the port transmit buffer */
+ 	i = usb_control_msg(serial->dev,
+-			    usb_rcvctrlpipe(serial->dev, 0),
++			    usb_sndctrlpipe(serial->dev, 0),
+ 			    QT2_FLUSH_DEVICE, 0x40, 1,
+ 			    port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
+ 
+@@ -426,7 +426,7 @@ static void qt2_close(struct usb_serial_port *port)
+ 
+ 	/* flush the port receive buffer */
+ 	i = usb_control_msg(serial->dev,
+-			    usb_rcvctrlpipe(serial->dev, 0),
++			    usb_sndctrlpipe(serial->dev, 0),
+ 			    QT2_FLUSH_DEVICE, 0x40, 0,
+ 			    port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
+ 
+@@ -654,7 +654,7 @@ static int qt2_attach(struct usb_serial *serial)
+ 	int status;
+ 
+ 	/* power on unit */
+-	status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
++	status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ 				 0xc2, 0x40, 0x8000, 0, NULL, 0,
+ 				 QT2_USB_TIMEOUT);
+ 	if (status < 0) {
+diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
+index 42acdc8b684fe..b9035c3407b56 100644
+--- a/drivers/usb/typec/mux.c
++++ b/drivers/usb/typec/mux.c
+@@ -239,7 +239,7 @@ find_mux:
+ 	dev = class_find_device(&typec_mux_class, NULL, fwnode,
+ 				mux_fwnode_match);
+ 
+-	return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
++	return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
+ }
+ 
+ /**
+diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
+index 46a25b8db72e5..31b1b3b555c77 100644
+--- a/drivers/usb/typec/mux/intel_pmc_mux.c
++++ b/drivers/usb/typec/mux/intel_pmc_mux.c
+@@ -586,6 +586,11 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
+ 		return -ENOMEM;
+ 	}
+ 
++	if (IS_ERR(pmc->iom_base)) {
++		put_device(&adev->dev);
++		return PTR_ERR(pmc->iom_base);
++	}
++
+ 	pmc->iom_adev = adev;
+ 
+ 	return 0;
+@@ -636,8 +641,10 @@ static int pmc_usb_probe(struct platform_device *pdev)
+ 			break;
+ 
+ 		ret = pmc_usb_register_port(pmc, i, fwnode);
+-		if (ret)
++		if (ret) {
++			fwnode_handle_put(fwnode);
+ 			goto err_remove_ports;
++		}
+ 	}
+ 
+ 	platform_set_drvdata(pdev, pmc);
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index b3e3dff464917..6133c0679c273 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1531,19 +1531,25 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
+ 			if (PD_VDO_VID(p[0]) != USB_SID_PD)
+ 				break;
+ 
+-			if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
++			if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
+ 				typec_partner_set_svdm_version(port->partner,
+ 							       PD_VDO_SVDM_VER(p[0]));
++				svdm_version = PD_VDO_SVDM_VER(p[0]);
++			}
+ 
+-			tcpm_ams_start(port, DISCOVER_IDENTITY);
+-			/* 6.4.4.3.1: Only respond as UFP (device) */
+-			if (port->data_role == TYPEC_DEVICE &&
++			port->ams = DISCOVER_IDENTITY;
++			/*
++			 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
++			 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
++			 * "wrong configuation" or "Unrecognized"
++			 */
++			if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
+ 			    port->nr_snk_vdo) {
+ 				/*
+ 				 * Product Type DFP and Connector Type are not defined in SVDM
+ 				 * version 1.0 and shall be set to zero.
+ 				 */
+-				if (typec_get_negotiated_svdm_version(typec) < SVDM_VER_2_0)
++				if (svdm_version < SVDM_VER_2_0)
+ 					response[1] = port->snk_vdo[0] & ~IDH_DFP_MASK
+ 						      & ~IDH_CONN_MASK;
+ 				else
+@@ -1554,19 +1560,18 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
+ 			}
+ 			break;
+ 		case CMD_DISCOVER_SVID:
+-			tcpm_ams_start(port, DISCOVER_SVIDS);
++			port->ams = DISCOVER_SVIDS;
+ 			break;
+ 		case CMD_DISCOVER_MODES:
+-			tcpm_ams_start(port, DISCOVER_MODES);
++			port->ams = DISCOVER_MODES;
+ 			break;
+ 		case CMD_ENTER_MODE:
+-			tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
++			port->ams = DFP_TO_UFP_ENTER_MODE;
+ 			break;
+ 		case CMD_EXIT_MODE:
+-			tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
++			port->ams = DFP_TO_UFP_EXIT_MODE;
+ 			break;
+ 		case CMD_ATTENTION:
+-			tcpm_ams_start(port, ATTENTION);
+ 			/* Attention command does not have response */
+ 			*adev_action = ADEV_ATTENTION;
+ 			return 0;
+@@ -1917,6 +1922,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
+ 			tcpm_log(port, "VDM Tx error, retry");
+ 			port->vdm_retries++;
+ 			port->vdm_state = VDM_STATE_READY;
++			if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
++				tcpm_ams_finish(port);
++		} else {
+ 			tcpm_ams_finish(port);
+ 		}
+ 		break;
+@@ -2163,20 +2171,25 @@ static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
+ 
+ 	if (!type) {
+ 		tcpm_log(port, "Alert message received with no type");
++		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ 		return;
+ 	}
+ 
+ 	/* Just handling non-battery alerts for now */
+ 	if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
+-		switch (port->state) {
+-		case SRC_READY:
+-		case SNK_READY:
++		if (port->pwr_role == TYPEC_SOURCE) {
++			port->upcoming_state = GET_STATUS_SEND;
++			tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
++		} else {
++			/*
++			 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
++			 * SinkTxOk in time.
++			 */
++			port->ams = GETTING_SOURCE_SINK_STATUS;
+ 			tcpm_set_state(port, GET_STATUS_SEND, 0);
+-			break;
+-		default:
+-			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+-			break;
+ 		}
++	} else {
++		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ 	}
+ }
+ 
+@@ -2420,7 +2433,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
+ 		tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
+ 		break;
+ 	case PD_DATA_ALERT:
+-		tcpm_handle_alert(port, msg->payload, cnt);
++		if (port->state != SRC_READY && port->state != SNK_READY)
++			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
++					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
++					     NONE_AMS, 0);
++		else
++			tcpm_handle_alert(port, msg->payload, cnt);
+ 		break;
+ 	case PD_DATA_BATT_STATUS:
+ 	case PD_DATA_GET_COUNTRY_INFO:
+@@ -2744,24 +2762,16 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
+ 
+ 	switch (type) {
+ 	case PD_EXT_STATUS:
+-		/*
+-		 * If PPS related events raised then get PPS status to clear
+-		 * (see USB PD 3.0 Spec, 6.5.2.4)
+-		 */
+-		if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
+-		    USB_PD_EXT_SDB_PPS_EVENTS)
+-			tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND,
+-					     GETTING_SOURCE_SINK_STATUS, 0);
+-
+-		else
+-			tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+-		break;
+ 	case PD_EXT_PPS_STATUS:
+-		/*
+-		 * For now the PPS status message is used to clear events
+-		 * and nothing more.
+-		 */
+-		tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
++		if (port->ams == GETTING_SOURCE_SINK_STATUS) {
++			tcpm_ams_finish(port);
++			tcpm_set_state(port, ready_state(port), 0);
++		} else {
++			/* unexpected Status or PPS_Status Message */
++			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
++					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
++					     NONE_AMS, 0);
++		}
+ 		break;
+ 	case PD_EXT_SOURCE_CAP_EXT:
+ 	case PD_EXT_GET_BATT_CAP:
+@@ -6226,6 +6236,10 @@ void tcpm_unregister_port(struct tcpm_port *port)
+ {
+ 	int i;
+ 
++	hrtimer_cancel(&port->enable_frs_timer);
++	hrtimer_cancel(&port->vdm_state_machine_timer);
++	hrtimer_cancel(&port->state_machine_timer);
++
+ 	tcpm_reset_port(port);
+ 	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
+ 		typec_unregister_altmode(port->port_altmode[i]);
+diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
+index 79ae63950050c..5d125339687a3 100644
+--- a/drivers/usb/typec/tcpm/wcove.c
++++ b/drivers/usb/typec/tcpm/wcove.c
+@@ -378,7 +378,7 @@ static int wcove_pd_transmit(struct tcpc_dev *tcpc,
+ 		const u8 *data = (void *)msg;
+ 		int i;
+ 
+-		for (i = 0; i < pd_header_cnt(msg->header) * 4 + 2; i++) {
++		for (i = 0; i < pd_header_cnt_le(msg->header) * 4 + 2; i++) {
+ 			ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
+ 					   data[i]);
+ 			if (ret)
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 7104ddb9696d6..0c4a0f1ac0446 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1253,6 +1253,7 @@ err_unregister:
+ 	}
+ 
+ err_reset:
++	memset(&ucsi->cap, 0, sizeof(ucsi->cap));
+ 	ucsi_reset_ppm(ucsi);
+ err:
+ 	return ret;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 289f1f09481d7..da6bf9a70fc68 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2558,6 +2558,24 @@ static int validate_super(struct btrfs_fs_info *fs_info,
+ 		ret = -EINVAL;
+ 	}
+ 
++	if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
++		   BTRFS_FSID_SIZE)) {
++		btrfs_err(fs_info,
++		"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
++			fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
++		ret = -EINVAL;
++	}
++
++	if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
++	    memcmp(fs_info->fs_devices->metadata_uuid,
++		   fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
++		btrfs_err(fs_info,
++"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
++			fs_info->super_copy->metadata_uuid,
++			fs_info->fs_devices->metadata_uuid);
++		ret = -EINVAL;
++	}
++
+ 	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
+ 		   BTRFS_FSID_SIZE) != 0) {
+ 		btrfs_err(fs_info,
+@@ -3185,14 +3203,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
+ 
+ 	disk_super = fs_info->super_copy;
+ 
+-	ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
+-		       BTRFS_FSID_SIZE));
+-
+-	if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
+-		ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
+-				fs_info->super_copy->metadata_uuid,
+-				BTRFS_FSID_SIZE));
+-	}
+ 
+ 	features = btrfs_super_flags(disk_super);
+ 	if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index abee4b62741da..f21d98c738785 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1094,7 +1094,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ 	int del_nr = 0;
+ 	int del_slot = 0;
+ 	int recow;
+-	int ret;
++	int ret = 0;
+ 	u64 ino = btrfs_ino(inode);
+ 
+ 	path = btrfs_alloc_path();
+@@ -1315,7 +1315,7 @@ again:
+ 	}
+ out:
+ 	btrfs_free_path(path);
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index faae6ebd8a279..276b5511ff809 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3302,6 +3302,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 	 *    begins and releases it only after writing its superblock.
+ 	 */
+ 	mutex_lock(&fs_info->tree_log_mutex);
++
++	/*
++	 * The previous transaction writeout phase could have failed, and thus
++	 * marked the fs in an error state.  We must not commit here, as we
++	 * could have updated our generation in the super_for_commit and
++	 * writing the super here would result in transid mismatches.  If there
++	 * is an error here just bail.
++	 */
++	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
++		ret = -EIO;
++		btrfs_set_log_full_commit(trans);
++		btrfs_abort_transaction(trans, ret);
++		mutex_unlock(&fs_info->tree_log_mutex);
++		goto out_wake_log_root;
++	}
++
+ 	btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
+ 	btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
+ 	ret = write_all_supers(fs_info, 1);
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 304ce64c70a44..cfff29718b840 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -150,6 +150,18 @@ static inline u32 sb_zone_number(int shift, int mirror)
+ 	return (u32)zone;
+ }
+ 
++static inline sector_t zone_start_sector(u32 zone_number,
++					 struct block_device *bdev)
++{
++	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
++}
++
++static inline u64 zone_start_physical(u32 zone_number,
++				      struct btrfs_zoned_device_info *zone_info)
++{
++	return (u64)zone_number << zone_info->zone_size_shift;
++}
++
+ /*
+  * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
+  * device into static sized chunks and fake a conventional zone on each of
+@@ -405,8 +417,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
+ 		if (sb_zone + 1 >= zone_info->nr_zones)
+ 			continue;
+ 
+-		sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
+-		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
++		ret = btrfs_get_dev_zones(device,
++					  zone_start_physical(sb_zone, zone_info),
+ 					  &zone_info->sb_zones[sb_pos],
+ 					  &nr_zones);
+ 		if (ret)
+@@ -721,7 +733,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
+ 	if (sb_zone + 1 >= nr_zones)
+ 		return -ENOENT;
+ 
+-	ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
++	ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
+ 				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
+ 				  zones);
+ 	if (ret < 0)
+@@ -826,7 +838,7 @@ int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
+ 		return -ENOENT;
+ 
+ 	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
+-				sb_zone << zone_sectors_shift,
++				zone_start_sector(sb_zone, bdev),
+ 				zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
+ }
+ 
+@@ -878,7 +890,8 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
+ 			if (!(end <= sb_zone ||
+ 			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
+ 				have_sb = true;
+-				pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
++				pos = zone_start_physical(
++					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
+ 				break;
+ 			}
+ 
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 1c0fdc1aa70b4..f559dc5b326ae 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -519,7 +519,7 @@ static bool dump_interrupted(void)
+ 	 * but then we need to teach dump_write() to restart and clear
+ 	 * TIF_SIGPENDING.
+ 	 */
+-	return signal_pending(current);
++	return fatal_signal_pending(current) || freezing(current);
+ }
+ 
+ static void wait_for_dump_helpers(struct file *file)
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index ff5c4d0d6d131..686b211342a87 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -406,7 +406,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
+ 
+ 	if (cl_init->hostname == NULL) {
+ 		WARN_ON(1);
+-		return NULL;
++		return ERR_PTR(-EINVAL);
+ 	}
+ 
+ 	/* see if the client already exists */
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index 065cb04222a1b..543d916f79abb 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -205,6 +205,7 @@ struct nfs4_exception {
+ 	struct inode *inode;
+ 	nfs4_stateid *stateid;
+ 	long timeout;
++	unsigned char task_is_privileged : 1;
+ 	unsigned char delay : 1,
+ 		      recovering : 1,
+ 		      retry : 1;
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 889a9f4c0310d..42719384e25fe 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -435,8 +435,8 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+ 		 */
+ 		nfs_mark_client_ready(clp, -EPERM);
+ 	}
+-	nfs_put_client(clp);
+ 	clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
++	nfs_put_client(clp);
+ 	return old;
+ 
+ error:
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 0b809cc6ad1df..64153ef18dd08 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -590,6 +590,8 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
+ 		goto out_retry;
+ 	}
+ 	if (exception->recovering) {
++		if (exception->task_is_privileged)
++			return -EDEADLOCK;
+ 		ret = nfs4_wait_clnt_recover(clp);
+ 		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
+ 			return -EIO;
+@@ -615,6 +617,8 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
+ 		goto out_retry;
+ 	}
+ 	if (exception->recovering) {
++		if (exception->task_is_privileged)
++			return -EDEADLOCK;
+ 		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
+ 		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
+ 			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
+@@ -5940,6 +5944,14 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
+ 	do {
+ 		err = __nfs4_proc_set_acl(inode, buf, buflen);
+ 		trace_nfs4_set_acl(inode, err);
++		if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
++			/*
++			 * no need to retry since the kernel
++			 * isn't involved in encoding the ACEs.
++			 */
++			err = -EINVAL;
++			break;
++		}
+ 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
+ 				&exception);
+ 	} while (exception.retry);
+@@ -6381,6 +6393,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
+ 	struct nfs4_exception exception = {
+ 		.inode = data->inode,
+ 		.stateid = &data->stateid,
++		.task_is_privileged = data->args.seq_args.sa_privileged,
+ 	};
+ 
+ 	if (!nfs4_sequence_done(task, &data->res.seq_res))
+@@ -6504,7 +6517,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
+ 	data = kzalloc(sizeof(*data), GFP_NOFS);
+ 	if (data == NULL)
+ 		return -ENOMEM;
+-	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
+ 
+ 	nfs4_state_protect(server->nfs_client,
+ 			NFS_SP4_MACH_CRED_CLEANUP,
+@@ -6535,6 +6547,12 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
+ 		}
+ 	}
+ 
++	if (!data->inode)
++		nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
++				   1);
++	else
++		nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
++				   0);
+ 	task_setup_data.callback_data = data;
+ 	msg.rpc_argp = &data->args;
+ 	msg.rpc_resp = &data->res;
+@@ -9620,15 +9638,20 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
+ 			&task_setup_data.rpc_client, &msg);
+ 
+ 	dprintk("--> %s\n", __func__);
++	lrp->inode = nfs_igrab_and_active(lrp->args.inode);
+ 	if (!sync) {
+-		lrp->inode = nfs_igrab_and_active(lrp->args.inode);
+ 		if (!lrp->inode) {
+ 			nfs4_layoutreturn_release(lrp);
+ 			return -EAGAIN;
+ 		}
+ 		task_setup_data.flags |= RPC_TASK_ASYNC;
+ 	}
+-	nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 0);
++	if (!lrp->inode)
++		nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
++				   1);
++	else
++		nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
++				   0);
+ 	task = rpc_run_task(&task_setup_data);
+ 	if (IS_ERR(task))
+ 		return PTR_ERR(task);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 58bbf334265b7..9cbd915025ad7 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -2674,6 +2674,13 @@ out:
+ }
+ 
+ #ifdef CONFIG_SECURITY
++static int proc_pid_attr_open(struct inode *inode, struct file *file)
++{
++	file->private_data = NULL;
++	__mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
++	return 0;
++}
++
+ static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
+ 				  size_t count, loff_t *ppos)
+ {
+@@ -2704,7 +2711,7 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
+ 	int rv;
+ 
+ 	/* A task may only write when it was the opener. */
+-	if (file->f_cred != current_real_cred())
++	if (file->private_data != current->mm)
+ 		return -EPERM;
+ 
+ 	rcu_read_lock();
+@@ -2754,9 +2761,11 @@ out:
+ }
+ 
+ static const struct file_operations proc_pid_attr_operations = {
++	.open		= proc_pid_attr_open,
+ 	.read		= proc_pid_attr_read,
+ 	.write		= proc_pid_attr_write,
+ 	.llseek		= generic_file_llseek,
++	.release	= mem_release,
+ };
+ 
+ #define LSM_DIR_OPS(LSM) \
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 0331d5d49551a..3c751cdd8594c 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -943,6 +943,7 @@
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ #define PERCPU_DECRYPTED_SECTION					\
+ 	. = ALIGN(PAGE_SIZE);						\
++	*(.data..decrypted)						\
+ 	*(.data..percpu..decrypted)					\
+ 	. = ALIGN(PAGE_SIZE);
+ #else
+diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
+index fef3ef65967fa..cb70b4ceeddef 100644
+--- a/include/dt-bindings/usb/pd.h
++++ b/include/dt-bindings/usb/pd.h
+@@ -163,10 +163,10 @@
+ #define UFP_VDO_VER1_2		2
+ 
+ /* Device Capability */
+-#define DEV_USB2_CAPABLE	BIT(0)
+-#define DEV_USB2_BILLBOARD	BIT(1)
+-#define DEV_USB3_CAPABLE	BIT(2)
+-#define DEV_USB4_CAPABLE	BIT(3)
++#define DEV_USB2_CAPABLE	(1 << 0)
++#define DEV_USB2_BILLBOARD	(1 << 1)
++#define DEV_USB3_CAPABLE	(1 << 2)
++#define DEV_USB4_CAPABLE	(1 << 3)
+ 
+ /* Connector Type */
+ #define UFP_RECEPTACLE		2
+@@ -191,9 +191,9 @@
+ 
+ /* Alternate Modes */
+ #define UFP_ALTMODE_NOT_SUPP	0
+-#define UFP_ALTMODE_TBT3	BIT(0)
+-#define UFP_ALTMODE_RECFG	BIT(1)
+-#define UFP_ALTMODE_NO_RECFG	BIT(2)
++#define UFP_ALTMODE_TBT3	(1 << 0)
++#define UFP_ALTMODE_RECFG	(1 << 1)
++#define UFP_ALTMODE_NO_RECFG	(1 << 2)
+ 
+ /* USB Highest Speed */
+ #define UFP_USB2_ONLY		0
+@@ -217,9 +217,9 @@
+  * <4:0>   :: Port number
+  */
+ #define DFP_VDO_VER1_1		1
+-#define HOST_USB2_CAPABLE	BIT(0)
+-#define HOST_USB3_CAPABLE	BIT(1)
+-#define HOST_USB4_CAPABLE	BIT(2)
++#define HOST_USB2_CAPABLE	(1 << 0)
++#define HOST_USB3_CAPABLE	(1 << 1)
++#define HOST_USB4_CAPABLE	(1 << 2)
+ #define DFP_RECEPTACLE		2
+ #define DFP_CAPTIVE		3
+ 
+diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
+index 8b2b1d68b9545..136b8d97d8c01 100644
+--- a/include/linux/entry-kvm.h
++++ b/include/linux/entry-kvm.h
+@@ -3,6 +3,7 @@
+ #define __LINUX_ENTRYKVM_H
+ 
+ #include <linux/entry-common.h>
++#include <linux/tick.h>
+ 
+ /* Transfer to guest mode work */
+ #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
+ static inline void xfer_to_guest_mode_prepare(void)
+ {
+ 	lockdep_assert_irqs_disabled();
+-	rcu_nocb_flush_deferred_wakeup();
++	tick_nohz_user_enter_prepare();
+ }
+ 
+ /**
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 99dccea4293c6..6e884bf58c0a9 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -1118,7 +1118,15 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+ static inline unsigned long
+ __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+ {
+-	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
++	/*
++	 * The index was checked originally in search_memslots.  To prevent
++	 * a malicious guest from building a Spectre gadget out of e.g. page
++	 * table walks, do not let the processor speculate loads outside
++	 * the guest's registered memslots.
++	 */
++	unsigned long offset = gfn - slot->base_gfn;
++	offset = array_index_nospec(offset, slot->npages);
++	return slot->userspace_addr + offset * PAGE_SIZE;
+ }
+ 
+ static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
+diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
+index 017a4c01cb315..61f0974c33d72 100644
+--- a/include/linux/mfd/rohm-bd71828.h
++++ b/include/linux/mfd/rohm-bd71828.h
+@@ -26,11 +26,11 @@ enum {
+ 	BD71828_REGULATOR_AMOUNT,
+ };
+ 
+-#define BD71828_BUCK1267_VOLTS		0xEF
+-#define BD71828_BUCK3_VOLTS		0x10
+-#define BD71828_BUCK4_VOLTS		0x20
+-#define BD71828_BUCK5_VOLTS		0x10
+-#define BD71828_LDO_VOLTS		0x32
++#define BD71828_BUCK1267_VOLTS		0x100
++#define BD71828_BUCK3_VOLTS		0x20
++#define BD71828_BUCK4_VOLTS		0x40
++#define BD71828_BUCK5_VOLTS		0x20
++#define BD71828_LDO_VOLTS		0x40
+ /* LDO6 is fixed 1.8V voltage */
+ #define BD71828_LDO_6_VOLTAGE		1800000
+ 
+diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
+index 236a7d04f891e..30bb59fe970cb 100644
+--- a/include/linux/mlx4/device.h
++++ b/include/linux/mlx4/device.h
+@@ -630,6 +630,7 @@ struct mlx4_caps {
+ 	bool			wol_port[MLX4_MAX_PORTS + 1];
+ 	struct mlx4_rate_limit_caps rl_caps;
+ 	u32			health_buffer_addrs;
++	bool			map_clock_to_user;
+ };
+ 
+ struct mlx4_buf_list {
+diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
+index 6f155f99aa160..4ab7bfc675f11 100644
+--- a/include/linux/rtsx_pci.h
++++ b/include/linux/rtsx_pci.h
+@@ -1109,6 +1109,7 @@ struct pcr_ops {
+ };
+ 
+ enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
++enum ASPM_MODE  {ASPM_MODE_CFG, ASPM_MODE_REG};
+ 
+ #define ASPM_L1_1_EN			BIT(0)
+ #define ASPM_L1_2_EN			BIT(1)
+@@ -1234,6 +1235,7 @@ struct rtsx_pcr {
+ 	u8				card_drive_sel;
+ #define ASPM_L1_EN			0x02
+ 	u8				aspm_en;
++	enum ASPM_MODE			aspm_mode;
+ 	bool				aspm_enabled;
+ 
+ #define PCR_MS_PMOS			(1 << 0)
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index ef00bb22164cd..edc01bcefbfd8 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -350,11 +350,19 @@ struct load_weight {
+  * Only for tasks we track a moving average of the past instantaneous
+  * estimated utilization. This allows to absorb sporadic drops in utilization
+  * of an otherwise almost periodic task.
++ *
++ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
++ * updates. When a task is dequeued, its util_est should not be updated if its
++ * util_avg has not been updated in the meantime.
++ * This information is mapped into the MSB bit of util_est.enqueued at dequeue
++ * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
++ * for a task) it is safe to use MSB.
+  */
+ struct util_est {
+ 	unsigned int			enqueued;
+ 	unsigned int			ewma;
+ #define UTIL_EST_WEIGHT_SHIFT		2
++#define UTIL_AVG_UNCHANGED		0x80000000
+ } __attribute__((__aligned__(sizeof(u64))));
+ 
+ /*
+diff --git a/include/linux/tick.h b/include/linux/tick.h
+index 7340613c7eff7..1a0ff88fa107b 100644
+--- a/include/linux/tick.h
++++ b/include/linux/tick.h
+@@ -11,6 +11,7 @@
+ #include <linux/context_tracking_state.h>
+ #include <linux/cpumask.h>
+ #include <linux/sched.h>
++#include <linux/rcupdate.h>
+ 
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS
+ extern void __init tick_init(void);
+@@ -300,4 +301,10 @@ static inline void tick_nohz_task_switch(void)
+ 		__tick_nohz_task_switch();
+ }
+ 
++static inline void tick_nohz_user_enter_prepare(void)
++{
++	if (tick_nohz_full_cpu(smp_processor_id()))
++		rcu_nocb_flush_deferred_wakeup();
++}
++
+ #endif
+diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
+index bf00259493e07..96b7ff66f074b 100644
+--- a/include/linux/usb/pd.h
++++ b/include/linux/usb/pd.h
+@@ -460,7 +460,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
+ #define PD_T_RECEIVER_RESPONSE	15	/* 15ms max */
+ #define PD_T_SOURCE_ACTIVITY	45
+ #define PD_T_SINK_ACTIVITY	135
+-#define PD_T_SINK_WAIT_CAP	240
++#define PD_T_SINK_WAIT_CAP	310	/* 310 - 620 ms */
+ #define PD_T_PS_TRANSITION	500
+ #define PD_T_SRC_TRANSITION	35
+ #define PD_T_DRP_SNK		40
+diff --git a/include/linux/usb/pd_ext_sdb.h b/include/linux/usb/pd_ext_sdb.h
+index 0eb83ce195970..b517ebc8f0ff2 100644
+--- a/include/linux/usb/pd_ext_sdb.h
++++ b/include/linux/usb/pd_ext_sdb.h
+@@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields {
+ #define USB_PD_EXT_SDB_EVENT_OVP		BIT(3)
+ #define USB_PD_EXT_SDB_EVENT_CF_CV_MODE		BIT(4)
+ 
+-#define USB_PD_EXT_SDB_PPS_EVENTS	(USB_PD_EXT_SDB_EVENT_OCP |	\
+-					 USB_PD_EXT_SDB_EVENT_OTP |	\
+-					 USB_PD_EXT_SDB_EVENT_OVP)
+-
+ #endif /* __LINUX_USB_PD_EXT_SDB_H */
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index b1a76fe046cba..6bd003568fa5c 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -5126,6 +5126,12 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
+ 	m->ret_size = ret;
+ 
+ 	for (i = 0; i < nargs; i++) {
++		if (i == nargs - 1 && args[i].type == 0) {
++			bpf_log(log,
++				"The function %s with variable args is unsupported.\n",
++				tname);
++			return -EINVAL;
++		}
+ 		ret = __get_type_size(btf, args[i].type, &t);
+ 		if (ret < 0) {
+ 			bpf_log(log,
+@@ -5133,6 +5139,12 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
+ 				tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
+ 			return -EINVAL;
+ 		}
++		if (ret == 0) {
++			bpf_log(log,
++				"The function %s has malformed void argument.\n",
++				tname);
++			return -EINVAL;
++		}
+ 		m->arg_size[i] = ret;
+ 	}
+ 	m->nr_args = nargs;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index b186d852fe3df..9e600767803b5 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -12556,6 +12556,17 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
+ 	return 0;
+ }
+ 
++BTF_SET_START(btf_id_deny)
++BTF_ID_UNUSED
++#ifdef CONFIG_SMP
++BTF_ID(func, migrate_disable)
++BTF_ID(func, migrate_enable)
++#endif
++#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
++BTF_ID(func, rcu_read_unlock_strict)
++#endif
++BTF_SET_END(btf_id_deny)
++
+ static int check_attach_btf_id(struct bpf_verifier_env *env)
+ {
+ 	struct bpf_prog *prog = env->prog;
+@@ -12615,6 +12626,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
+ 		ret = bpf_lsm_verify_prog(&env->log, prog);
+ 		if (ret < 0)
+ 			return ret;
++	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
++		   btf_id_set_contains(&btf_id_deny, btf_id)) {
++		return -EINVAL;
+ 	}
+ 
+ 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index a5751784ad740..f6dddb3a8f4a2 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -820,6 +820,10 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
+ 	struct cgroup *cgrp = kn->priv;
+ 	int ret;
+ 
++	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
++	if (strchr(new_name_str, '\n'))
++		return -EINVAL;
++
+ 	if (kernfs_type(kn) != KERNFS_DIR)
+ 		return -ENOTDIR;
+ 	if (kn->parent != new_parent)
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 9153b20e5cc65..2529d1f883305 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -5626,8 +5626,6 @@ int __init cgroup_init_early(void)
+ 	return 0;
+ }
+ 
+-static u16 cgroup_disable_mask __initdata;
+-
+ /**
+  * cgroup_init - cgroup initialization
+  *
+@@ -5686,12 +5684,8 @@ int __init cgroup_init(void)
+ 		 * disabled flag and cftype registration needs kmalloc,
+ 		 * both of which aren't available during early_init.
+ 		 */
+-		if (cgroup_disable_mask & (1 << ssid)) {
+-			static_branch_disable(cgroup_subsys_enabled_key[ssid]);
+-			printk(KERN_INFO "Disabling %s control group subsystem\n",
+-			       ss->name);
++		if (!cgroup_ssid_enabled(ssid))
+ 			continue;
+-		}
+ 
+ 		if (cgroup1_ssid_disabled(ssid))
+ 			printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
+@@ -6206,7 +6200,10 @@ static int __init cgroup_disable(char *str)
+ 			if (strcmp(token, ss->name) &&
+ 			    strcmp(token, ss->legacy_name))
+ 				continue;
+-			cgroup_disable_mask |= 1 << i;
++
++			static_branch_disable(cgroup_subsys_enabled_key[i]);
++			pr_info("Disabling %s control group subsystem\n",
++				ss->name);
+ 		}
+ 	}
+ 	return 1;
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index 2003d69bd6d54..e9d21e939ec00 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -5,6 +5,7 @@
+ #include <linux/highmem.h>
+ #include <linux/livepatch.h>
+ #include <linux/audit.h>
++#include <linux/tick.h>
+ 
+ #include "common.h"
+ 
+@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ 		local_irq_disable_exit_to_user();
+ 
+ 		/* Check if any of the above work has queued a deferred wakeup */
+-		rcu_nocb_flush_deferred_wakeup();
++		tick_nohz_user_enter_prepare();
+ 
+ 		ti_work = READ_ONCE(current_thread_info()->flags);
+ 	}
+@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
+ 	lockdep_assert_irqs_disabled();
+ 
+ 	/* Flush pending rcuog wakeup before the last need_resched() check */
+-	rcu_nocb_flush_deferred_wakeup();
++	tick_nohz_user_enter_prepare();
+ 
+ 	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ 		ti_work = exit_to_user_mode_loop(regs, ti_work);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index c24ea952e7ae4..67237fc439236 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4542,7 +4542,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
+ 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+ 		ctx = &cpuctx->ctx;
+ 		get_ctx(ctx);
++		raw_spin_lock_irqsave(&ctx->lock, flags);
+ 		++ctx->pin_count;
++		raw_spin_unlock_irqrestore(&ctx->lock, flags);
+ 
+ 		return ctx;
+ 	}
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 9c8b3ed2199a8..9e0a915e6eb83 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -888,6 +888,7 @@ __initcall(init_sched_debug_procfs);
+ #define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
+ #define __P(F) __PS(#F, F)
+ #define   P(F) __PS(#F, p->F)
++#define   PM(F, M) __PS(#F, p->F & (M))
+ #define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
+ #define __PN(F) __PSN(#F, F)
+ #define   PN(F) __PSN(#F, p->F)
+@@ -1014,7 +1015,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+ 	P(se.avg.util_avg);
+ 	P(se.avg.last_update_time);
+ 	P(se.avg.util_est.ewma);
+-	P(se.avg.util_est.enqueued);
++	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
+ #endif
+ #ifdef CONFIG_UCLAMP_TASK
+ 	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index a073a839cd065..487312a5ceabb 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3494,10 +3494,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
+ static inline void
+ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
+ {
+-	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
++	long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+ 	unsigned long load_avg;
+ 	u64 load_sum = 0;
+-	s64 delta_sum;
+ 	u32 divider;
+ 
+ 	if (!runnable_sum)
+@@ -3544,13 +3543,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
+ 	load_sum = (s64)se_weight(se) * runnable_sum;
+ 	load_avg = div_s64(load_sum, divider);
+ 
+-	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
+-	delta_avg = load_avg - se->avg.load_avg;
++	delta = load_avg - se->avg.load_avg;
+ 
+ 	se->avg.load_sum = runnable_sum;
+ 	se->avg.load_avg = load_avg;
+-	add_positive(&cfs_rq->avg.load_avg, delta_avg);
+-	add_positive(&cfs_rq->avg.load_sum, delta_sum);
++
++	add_positive(&cfs_rq->avg.load_avg, delta);
++	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+ }
+ 
+ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
+@@ -3897,7 +3896,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
+ {
+ 	struct util_est ue = READ_ONCE(p->se.avg.util_est);
+ 
+-	return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
++	return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
+ }
+ 
+ static inline unsigned long task_util_est(struct task_struct *p)
+@@ -3997,7 +3996,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
+ 	 * Reset EWMA on utilization increases, the moving average is used only
+ 	 * to smooth utilization decreases.
+ 	 */
+-	ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
++	ue.enqueued = task_util(p);
+ 	if (sched_feat(UTIL_EST_FASTUP)) {
+ 		if (ue.ewma < ue.enqueued) {
+ 			ue.ewma = ue.enqueued;
+@@ -4046,6 +4045,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
+ 	ue.ewma  += last_ewma_diff;
+ 	ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
+ done:
++	ue.enqueued |= UTIL_AVG_UNCHANGED;
+ 	WRITE_ONCE(p->se.avg.util_est, ue);
+ 
+ 	trace_sched_util_est_se_tp(&p->se);
+@@ -8006,7 +8006,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
+ 		/* Propagate pending load changes to the parent, if any: */
+ 		se = cfs_rq->tg->se[cpu];
+ 		if (se && !skip_blocked_update(se))
+-			update_load_avg(cfs_rq_of(se), se, 0);
++			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
+ 
+ 		/*
+ 		 * There can be a lot of idle CPU cgroups.  Don't let fully
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 795e43e02afc6..0b9aeebb9c325 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
+ }
+ 
+-/*
+- * When a task is dequeued, its estimated utilization should not be update if
+- * its util_avg has not been updated at least once.
+- * This flag is used to synchronize util_avg updates with util_est updates.
+- * We map this information into the LSB bit of the utilization saved at
+- * dequeue time (i.e. util_est.dequeued).
+- */
+-#define UTIL_AVG_UNCHANGED 0x1
+-
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -58,7 +49,7 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
+ 	if (!sched_feat(UTIL_EST))
+ 		return;
+ 
+-	/* Avoid store if the flag has been already set */
++	/* Avoid store if the flag has been already reset */
+ 	enqueued = avg->util_est.enqueued;
+ 	if (!(enqueued & UTIL_AVG_UNCHANGED))
+ 		return;
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index e10a4af887373..538c9b7ffaf14 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
+ 
+ #ifdef CONFIG_NO_HZ_FULL
+ cpumask_var_t tick_nohz_full_mask;
++EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
+ bool tick_nohz_full_running;
+ EXPORT_SYMBOL_GPL(tick_nohz_full_running);
+ static atomic_t tick_dep_mask;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 826b88b727a62..433a84d442eeb 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1967,12 +1967,18 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
+ 
+ static void print_ip_ins(const char *fmt, const unsigned char *p)
+ {
++	char ins[MCOUNT_INSN_SIZE];
+ 	int i;
+ 
++	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
++		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
++		return;
++	}
++
+ 	printk(KERN_CONT "%s", fmt);
+ 
+ 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
+-		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
++		printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
+ }
+ 
+ enum ftrace_bug_type ftrace_bug_type;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 67c01dc5cdeb7..f2d4ee80feb34 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2735,7 +2735,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
+ 	    (entry = this_cpu_read(trace_buffered_event))) {
+ 		/* Try to use the per cpu buffer first */
+ 		val = this_cpu_inc_return(trace_buffered_event_cnt);
+-		if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
++		if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
+ 			trace_event_setup(entry, type, trace_ctx);
+ 			entry->array[0] = len;
+ 			return entry;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 79f2319543ced..994eafd25d648 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -50,6 +50,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/sched/isolation.h>
+ #include <linux/nmi.h>
++#include <linux/kvm_para.h>
+ 
+ #include "workqueue_internal.h"
+ 
+@@ -5772,6 +5773,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
+ {
+ 	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
+ 	bool lockup_detected = false;
++	unsigned long now = jiffies;
+ 	struct worker_pool *pool;
+ 	int pi;
+ 
+@@ -5786,6 +5788,12 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
+ 		if (list_empty(&pool->worklist))
+ 			continue;
+ 
++		/*
++		 * If a virtual machine is stopped by the host it can look to
++		 * the watchdog like a stall.
++		 */
++		kvm_check_and_clear_guest_paused();
++
+ 		/* get the latest of pool and touched timestamps */
+ 		if (pool->cpu >= 0)
+ 			touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
+@@ -5799,12 +5807,12 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
+ 			ts = touched;
+ 
+ 		/* did we stall? */
+-		if (time_after(jiffies, ts + thresh)) {
++		if (time_after(now, ts + thresh)) {
+ 			lockup_detected = true;
+ 			pr_emerg("BUG: workqueue lockup - pool");
+ 			pr_cont_pool_info(pool);
+ 			pr_cont(" stuck for %us!\n",
+-				jiffies_to_msecs(jiffies - pool_ts) / 1000);
++				jiffies_to_msecs(now - pool_ts) / 1000);
+ 		}
+ 	}
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 3a62f97acf39d..6133e412b948c 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -461,11 +461,13 @@ void netlink_table_ungrab(void)
+ static inline void
+ netlink_lock_table(void)
+ {
++	unsigned long flags;
++
+ 	/* read_lock() synchronizes us to netlink_table_grab */
+ 
+-	read_lock(&nl_table_lock);
++	read_lock_irqsave(&nl_table_lock, flags);
+ 	atomic_inc(&nl_table_users);
+-	read_unlock(&nl_table_lock);
++	read_unlock_irqrestore(&nl_table_lock, flags);
+ }
+ 
+ static inline void
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 9c7eb8455ba8e..5f1d438a0a23f 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -329,7 +329,7 @@ static int rawsock_create(struct net *net, struct socket *sock,
+ 		return -ESOCKTNOSUPPORT;
+ 
+ 	if (sock->type == SOCK_RAW) {
+-		if (!capable(CAP_NET_RAW))
++		if (!ns_capable(net->user_ns, CAP_NET_RAW))
+ 			return -EPERM;
+ 		sock->ops = &rawsock_raw_ops;
+ 	} else {
+diff --git a/net/rds/connection.c b/net/rds/connection.c
+index f2fcab182095c..a3bc4b54d4910 100644
+--- a/net/rds/connection.c
++++ b/net/rds/connection.c
+@@ -240,12 +240,23 @@ static struct rds_connection *__rds_conn_create(struct net *net,
+ 	if (loop_trans) {
+ 		rds_trans_put(loop_trans);
+ 		conn->c_loopback = 1;
+-		if (is_outgoing && trans->t_prefer_loopback) {
+-			/* "outgoing" connection - and the transport
+-			 * says it wants the connection handled by the
+-			 * loopback transport. This is what TCP does.
+-			 */
+-			trans = &rds_loop_transport;
++		if (trans->t_prefer_loopback) {
++			if (likely(is_outgoing)) {
++				/* "outgoing" connection to local address.
++				 * Protocol says it wants the connection
++				 * handled by the loopback transport.
++				 * This is what TCP does.
++				 */
++				trans = &rds_loop_transport;
++			} else {
++				/* No transport currently in use
++				 * should end up here, but if it
++				 * does, reset/destroy the connection.
++				 */
++				kmem_cache_free(rds_conn_slab, conn);
++				conn = ERR_PTR(-EOPNOTSUPP);
++				goto out;
++			}
+ 		}
+ 	}
+ 
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 43db0eca911fa..abf19c0e3ba0b 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -313,8 +313,8 @@ out:
+ }
+ #endif
+ 
+-static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
+-			       __u32 scope_id)
++int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
++			__u32 scope_id)
+ {
+ 	struct net_device *dev = NULL;
+ #if IS_ENABLED(CONFIG_IPV6)
+diff --git a/net/rds/tcp.h b/net/rds/tcp.h
+index bad9cf49d5657..dc8d745d68575 100644
+--- a/net/rds/tcp.h
++++ b/net/rds/tcp.h
+@@ -59,7 +59,8 @@ u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
+ u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
+ extern struct rds_transport rds_tcp_transport;
+ void rds_tcp_accept_work(struct sock *sk);
+-
++int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
++			__u32 scope_id);
+ /* tcp_connect.c */
+ int rds_tcp_conn_path_connect(struct rds_conn_path *cp);
+ void rds_tcp_conn_path_shutdown(struct rds_conn_path *conn);
+diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
+index 101cf14215a0b..09cadd556d1e1 100644
+--- a/net/rds/tcp_listen.c
++++ b/net/rds/tcp_listen.c
+@@ -167,6 +167,12 @@ int rds_tcp_accept_one(struct socket *sock)
+ 	}
+ #endif
+ 
++	if (!rds_tcp_laddr_check(sock_net(sock->sk), peer_addr, dev_if)) {
++		/* local address connection is only allowed via loopback */
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
++
+ 	conn = rds_conn_create(sock_net(sock->sk),
+ 			       my_addr, peer_addr,
+ 			       &rds_tcp_transport, 0, GFP_KERNEL, dev_if);
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index 1645e4142e302..9863be6fd43e1 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -297,8 +297,16 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
+ 		return err;
+ 	}
+ 	spin_lock_irq(&tmr->lock);
+-	tmr->timeri = t;
++	if (tmr->timeri)
++		err = -EBUSY;
++	else
++		tmr->timeri = t;
+ 	spin_unlock_irq(&tmr->lock);
++	if (err < 0) {
++		snd_timer_close(t);
++		snd_timer_instance_free(t);
++		return err;
++	}
+ 	return 0;
+ }
+ 
+diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
+index e0faa6601966c..5805c5de39fbf 100644
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -804,7 +804,7 @@ static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
+ static inline void cancel_stream(struct amdtp_stream *s)
+ {
+ 	s->packet_index = -1;
+-	if (current_work() == &s->period_work)
++	if (in_interrupt())
+ 		amdtp_stream_pcm_abort(s);
+ 	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index cc13a68197f3c..e46e43dac6bfd 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6560,6 +6560,7 @@ enum {
+ 	ALC285_FIXUP_HP_SPECTRE_X360,
+ 	ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
+ 	ALC623_FIXUP_LENOVO_THINKSTATION_P340,
++	ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -8132,6 +8133,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC283_FIXUP_HEADSET_MIC,
+ 	},
++	[ALC255_FIXUP_ACER_HEADPHONE_AND_MIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x21, 0x03211030 }, /* Change the Headphone location to Left */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -8168,6 +8178,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
+ 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+ 	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
+ 	SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
+@@ -8296,6 +8307,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
++	SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+@@ -8314,10 +8327,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ 	SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
++	SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+@@ -8722,6 +8737,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+ 	{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
+ 	{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
++	{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
+index 417cda24030cd..2447a1e6e913f 100644
+--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
++++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
+@@ -237,10 +237,6 @@ static int acp3x_dma_open(struct snd_soc_component *component,
+ 		return ret;
+ 	}
+ 
+-	if (!adata->play_stream && !adata->capture_stream &&
+-	    !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
+-		rv_writel(1, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
+-
+ 	i2s_data->acp3x_base = adata->acp3x_base;
+ 	runtime->private_data = i2s_data;
+ 	return ret;
+@@ -367,12 +363,6 @@ static int acp3x_dma_close(struct snd_soc_component *component,
+ 		}
+ 	}
+ 
+-	/* Disable ACP irq, when the current stream is being closed and
+-	 * another stream is also not active.
+-	 */
+-	if (!adata->play_stream && !adata->capture_stream &&
+-		!adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
+-		rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/amd/raven/acp3x.h b/sound/soc/amd/raven/acp3x.h
+index 03fe93913e12e..c3f0c8b7545db 100644
+--- a/sound/soc/amd/raven/acp3x.h
++++ b/sound/soc/amd/raven/acp3x.h
+@@ -77,6 +77,7 @@
+ #define ACP_POWER_OFF_IN_PROGRESS	0x03
+ 
+ #define ACP3x_ITER_IRER_SAMP_LEN_MASK	0x38
++#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
+ 
+ struct acp3x_platform_info {
+ 	u16 play_i2s_instance;
+diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c
+index d3536fd6a1240..a013a607b3d47 100644
+--- a/sound/soc/amd/raven/pci-acp3x.c
++++ b/sound/soc/amd/raven/pci-acp3x.c
+@@ -76,6 +76,19 @@ static int acp3x_reset(void __iomem *acp3x_base)
+ 	return -ETIMEDOUT;
+ }
+ 
++static void acp3x_enable_interrupts(void __iomem *acp_base)
++{
++	rv_writel(0x01, acp_base + mmACP_EXTERNAL_INTR_ENB);
++}
++
++static void acp3x_disable_interrupts(void __iomem *acp_base)
++{
++	rv_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base +
++		  mmACP_EXTERNAL_INTR_STAT);
++	rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_CNTL);
++	rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_ENB);
++}
++
+ static int acp3x_init(struct acp3x_dev_data *adata)
+ {
+ 	void __iomem *acp3x_base = adata->acp3x_base;
+@@ -93,6 +106,7 @@ static int acp3x_init(struct acp3x_dev_data *adata)
+ 		pr_err("ACP3x reset failed\n");
+ 		return ret;
+ 	}
++	acp3x_enable_interrupts(acp3x_base);
+ 	return 0;
+ }
+ 
+@@ -100,6 +114,7 @@ static int acp3x_deinit(void __iomem *acp3x_base)
+ {
+ 	int ret;
+ 
++	acp3x_disable_interrupts(acp3x_base);
+ 	/* Reset */
+ 	ret = acp3x_reset(acp3x_base);
+ 	if (ret) {
+diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
+index 7878da89d8e09..b7b9c891e2f04 100644
+--- a/sound/soc/codecs/lpass-rx-macro.c
++++ b/sound/soc/codecs/lpass-rx-macro.c
+@@ -3581,6 +3581,7 @@ static const struct of_device_id rx_macro_dt_match[] = {
+ 	{ .compatible = "qcom,sm8250-lpass-rx-macro" },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, rx_macro_dt_match);
+ 
+ static struct platform_driver rx_macro_driver = {
+ 	.driver = {
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index e8c6c738bbaa0..5341ca02951c6 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -1846,6 +1846,7 @@ static const struct of_device_id tx_macro_dt_match[] = {
+ 	{ .compatible = "qcom,sm8250-lpass-tx-macro" },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, tx_macro_dt_match);
+ static struct platform_driver tx_macro_driver = {
+ 	.driver = {
+ 		.name = "tx_macro",
+diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
+index 4be24e7f51c89..f8e49e45ce33f 100644
+--- a/sound/soc/codecs/max98088.c
++++ b/sound/soc/codecs/max98088.c
+@@ -41,6 +41,7 @@ struct max98088_priv {
+ 	enum max98088_type devtype;
+ 	struct max98088_pdata *pdata;
+ 	struct clk *mclk;
++	unsigned char mclk_prescaler;
+ 	unsigned int sysclk;
+ 	struct max98088_cdata dai[2];
+ 	int eq_textcnt;
+@@ -998,13 +999,16 @@ static int max98088_dai1_hw_params(struct snd_pcm_substream *substream,
+        /* Configure NI when operating as master */
+        if (snd_soc_component_read(component, M98088_REG_14_DAI1_FORMAT)
+                & M98088_DAI_MAS) {
++               unsigned long pclk;
++
+                if (max98088->sysclk == 0) {
+                        dev_err(component->dev, "Invalid system clock frequency\n");
+                        return -EINVAL;
+                }
+                ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
+                                * (unsigned long long int)rate;
+-               do_div(ni, (unsigned long long int)max98088->sysclk);
++               pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
++               ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
+                snd_soc_component_write(component, M98088_REG_12_DAI1_CLKCFG_HI,
+                        (ni >> 8) & 0x7F);
+                snd_soc_component_write(component, M98088_REG_13_DAI1_CLKCFG_LO,
+@@ -1065,13 +1069,16 @@ static int max98088_dai2_hw_params(struct snd_pcm_substream *substream,
+        /* Configure NI when operating as master */
+        if (snd_soc_component_read(component, M98088_REG_1C_DAI2_FORMAT)
+                & M98088_DAI_MAS) {
++               unsigned long pclk;
++
+                if (max98088->sysclk == 0) {
+                        dev_err(component->dev, "Invalid system clock frequency\n");
+                        return -EINVAL;
+                }
+                ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
+                                * (unsigned long long int)rate;
+-               do_div(ni, (unsigned long long int)max98088->sysclk);
++               pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
++               ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
+                snd_soc_component_write(component, M98088_REG_1A_DAI2_CLKCFG_HI,
+                        (ni >> 8) & 0x7F);
+                snd_soc_component_write(component, M98088_REG_1B_DAI2_CLKCFG_LO,
+@@ -1113,8 +1120,10 @@ static int max98088_dai_set_sysclk(struct snd_soc_dai *dai,
+         */
+        if ((freq >= 10000000) && (freq < 20000000)) {
+                snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x10);
++               max98088->mclk_prescaler = 1;
+        } else if ((freq >= 20000000) && (freq < 30000000)) {
+                snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x20);
++               max98088->mclk_prescaler = 2;
+        } else {
+                dev_err(component->dev, "Invalid master clock frequency\n");
+                return -EINVAL;
+diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
+index ec9933b054ad3..423daac9d5a9f 100644
+--- a/sound/soc/codecs/sti-sas.c
++++ b/sound/soc/codecs/sti-sas.c
+@@ -411,6 +411,7 @@ static const struct of_device_id sti_sas_dev_match[] = {
+ 	},
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, sti_sas_dev_match);
+ 
+ static int sti_sas_driver_probe(struct platform_device *pdev)
+ {
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 22912cab5e638..cc24e89a7f8d7 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -574,6 +574,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{	/* Glavey TM800A550L */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++			/* Above strings are too generic, also match on BIOS version */
++			DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
++		},
++		.driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+@@ -652,6 +663,20 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_MONO_SPEAKER |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{	/* Lenovo Miix 3-830 */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 3-830"),
++		},
++		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
++					BYT_RT5640_JD_SRC_JD2_IN4N |
++					BYT_RT5640_OVCD_TH_2000UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_MONO_SPEAKER |
++					BYT_RT5640_DIFF_MIC |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{	/* Linx Linx7 tablet */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 0cffc9527e289..6680c12716d44 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -2223,6 +2223,8 @@ static char *fmt_single_name(struct device *dev, int *id)
+ 		return NULL;
+ 
+ 	name = devm_kstrdup(dev, devname, GFP_KERNEL);
++	if (!name)
++		return NULL;
+ 
+ 	/* are we a "%s.%d" name (platform and SPI components) */
+ 	found = strstr(name, dev->driver->name);
+diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
+index fd265803f7bc7..c83fb62559616 100644
+--- a/sound/soc/sof/pm.c
++++ b/sound/soc/sof/pm.c
+@@ -256,6 +256,7 @@ suspend:
+ 
+ 	/* reset FW state */
+ 	sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
++	sdev->enabled_cores_mask = 0;
+ 
+ 	return ret;
+ }
+diff --git a/tools/bootconfig/include/linux/bootconfig.h b/tools/bootconfig/include/linux/bootconfig.h
+index 078cbd2ba651d..de7f30f99af38 100644
+--- a/tools/bootconfig/include/linux/bootconfig.h
++++ b/tools/bootconfig/include/linux/bootconfig.h
+@@ -4,4 +4,8 @@
+ 
+ #include "../../../../include/linux/bootconfig.h"
+ 
++#ifndef fallthrough
++# define fallthrough
++#endif
++
+ #endif
+diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
+index 7362bef1a3683..6cd6080cac04c 100644
+--- a/tools/bootconfig/main.c
++++ b/tools/bootconfig/main.c
+@@ -399,6 +399,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
+ 	}
+ 	/* TODO: Ensure the @path is initramfs/initrd image */
+ 	if (fstat(fd, &stat) < 0) {
++		ret = -errno;
+ 		pr_err("Failed to get the size of %s\n", path);
+ 		goto out;
+ 	}
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index e9d4e6f4bdf3e..b7cfdbf207b70 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -1710,6 +1710,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
+ 	if (event->header.size < hdr_sz || event->header.size > buf_sz)
+ 		return -1;
+ 
++	buf += hdr_sz;
+ 	rest = event->header.size - hdr_sz;
+ 
+ 	if (readn(fd, buf, rest) != (ssize_t)rest)
+diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c
+index 07eaa04412ae4..8ab94d65f3d54 100644
+--- a/tools/testing/selftests/bpf/verifier/stack_ptr.c
++++ b/tools/testing/selftests/bpf/verifier/stack_ptr.c
+@@ -295,8 +295,6 @@
+ 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.result_unpriv = REJECT,
+-	.errstr_unpriv = "invalid write to stack R1 off=0 size=1",
+ 	.result = ACCEPT,
+ 	.retval = 42,
+ },
+diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+index e5913fd3b9030..7ae2859d495c5 100644
+--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
++++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+@@ -300,8 +300,6 @@
+ 	},
+ 	.fixup_map_array_48b = { 3 },
+ 	.result = ACCEPT,
+-	.result_unpriv = REJECT,
+-	.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ 	.retval = 1,
+ },
+ {
+@@ -371,8 +369,6 @@
+ 	},
+ 	.fixup_map_array_48b = { 3 },
+ 	.result = ACCEPT,
+-	.result_unpriv = REJECT,
+-	.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ 	.retval = 1,
+ },
+ {
+@@ -472,8 +468,6 @@
+ 	},
+ 	.fixup_map_array_48b = { 3 },
+ 	.result = ACCEPT,
+-	.result_unpriv = REJECT,
+-	.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ 	.retval = 1,
+ },
+ {
+@@ -766,8 +760,6 @@
+ 	},
+ 	.fixup_map_array_48b = { 3 },
+ 	.result = ACCEPT,
+-	.result_unpriv = REJECT,
+-	.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ 	.retval = 1,
+ },
+ {


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-11 13:21 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-11 13:21 UTC (permalink / raw
  To: gentoo-commits

commit:     13e766e98b60fdcbf537b4cb5742ba977a26d9fb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 11 13:19:50 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 11 13:19:50 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=13e766e9

Fix CONFIG_GCC_PLUGINS typo

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 4eee26b..b671313 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -214,7 +214,7 @@
 +	select FORTIFY_SOURCE
 +	select SECURITY_DMESG_RESTRICT
 +	select PANIC_ON_OOPS
-+	select CONFIG_GCC_PLUGINS=y
++	select CONFIG_GCC_PLUGINS
 +	select GCC_PLUGIN_LATENT_ENTROPY
 +	select GCC_PLUGIN_STRUCTLEAK
 +	select GCC_PLUGIN_STRUCTLEAK_BYREF_ALL


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-10 12:14 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-10 12:14 UTC (permalink / raw
  To: gentoo-commits

commit:     b2e817b8ca49469e758b8db4e5985605cb670c1b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun 10 12:14:48 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun 10 12:14:48 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b2e817b8

Linux patch 5.12.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1009_linux-5.12.10.patch | 6796 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6800 insertions(+)

diff --git a/0000_README b/0000_README
index f16429c..25657f9 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-5.12.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.9
 
+Patch:  1009_linux-5.12.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-5.12.10.patch b/1009_linux-5.12.10.patch
new file mode 100644
index 0000000..d201785
--- /dev/null
+++ b/1009_linux-5.12.10.patch
@@ -0,0 +1,6796 @@
+diff --git a/Makefile b/Makefile
+index d53577db10858..ebc02c56db03c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+index 7d2c72562c735..9148a01ed6d9f 100644
+--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+@@ -105,9 +105,13 @@
+ 	phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+ 	phy-reset-duration = <20>;
+ 	phy-supply = <&sw2_reg>;
+-	phy-handle = <&ethphy0>;
+ 	status = "okay";
+ 
++	fixed-link {
++		speed = <1000>;
++		full-duplex;
++	};
++
+ 	mdio {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+index 236fc205c3890..d0768ae429faa 100644
+--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+@@ -406,6 +406,18 @@
+ 	vin-supply = <&sw1_reg>;
+ };
+ 
++&reg_pu {
++	vin-supply = <&sw1_reg>;
++};
++
++&reg_vdd1p1 {
++	vin-supply = <&sw2_reg>;
++};
++
++&reg_vdd2p5 {
++	vin-supply = <&sw2_reg>;
++};
++
+ &uart1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_uart1>;
+diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+index 828cf3e39784a..c4e146f3341bb 100644
+--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+@@ -126,7 +126,7 @@
+ 		compatible = "nxp,pca8574";
+ 		reg = <0x3a>;
+ 		gpio-controller;
+-		#gpio-cells = <1>;
++		#gpio-cells = <2>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts
+index 5339210b63d0f..dd8003bd1fc09 100644
+--- a/arch/arm/boot/dts/imx7d-meerkat96.dts
++++ b/arch/arm/boot/dts/imx7d-meerkat96.dts
+@@ -193,7 +193,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc1>;
+ 	keep-power-in-suspend;
+-	tuning-step = <2>;
++	fsl,tuning-step = <2>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	no-1-8-v;
+ 	broken-cd;
+diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
+index e57da0d32b98d..e519897fae082 100644
+--- a/arch/arm/boot/dts/imx7d-pico.dtsi
++++ b/arch/arm/boot/dts/imx7d-pico.dtsi
+@@ -351,7 +351,7 @@
+ 	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ 	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ 	bus-width = <4>;
+-	tuning-step = <2>;
++	fsl,tuning-step = <2>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	wakeup-source;
+ 	no-1-8-v;
+diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
+index c40cf5ef86079..977b0b744c22a 100644
+--- a/arch/arm/mach-omap1/board-h2.c
++++ b/arch/arm/mach-omap1/board-h2.c
+@@ -320,7 +320,7 @@ static int tps_setup(struct i2c_client *client, void *context)
+ {
+ 	if (!IS_BUILTIN(CONFIG_TPS65010))
+ 		return -ENOSYS;
+-	
++
+ 	tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V |
+ 				TPS_LDO1_ENABLE | TPS_VLDO1_3_0V);
+ 
+@@ -394,6 +394,8 @@ static void __init h2_init(void)
+ 	BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0);
+ 	gpio_direction_input(H2_NAND_RB_GPIO_PIN);
+ 
++	gpiod_add_lookup_table(&isp1301_gpiod_table);
++
+ 	omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
+ 	omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
+ 
+diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
+index cdfd5fed457ff..a3fdffcd1ce8b 100644
+--- a/arch/arm64/Kconfig.platforms
++++ b/arch/arm64/Kconfig.platforms
+@@ -168,6 +168,7 @@ config ARCH_MEDIATEK
+ 
+ config ARCH_MESON
+ 	bool "Amlogic Platforms"
++	select COMMON_CLK
+ 	select MESON_IRQ_GPIO
+ 	help
+ 	  This enables support for the arm64 based Amlogic SoCs
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
+index 6c309b97587df..e8d31279b7a34 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
+@@ -46,7 +46,8 @@
+ 			eee-broken-100tx;
+ 			qca,clk-out-frequency = <125000000>;
+ 			qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
+-			vddio-supply = <&vddh>;
++			qca,keep-pll-enabled;
++			vddio-supply = <&vddio>;
+ 
+ 			vddio: vddio-regulator {
+ 				regulator-name = "VDDIO";
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+index df212ed5bb942..e65d1c477e2ce 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+@@ -31,11 +31,10 @@
+ 			reg = <0x4>;
+ 			eee-broken-1000t;
+ 			eee-broken-100tx;
+-
+ 			qca,clk-out-frequency = <125000000>;
+ 			qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
+-
+-			vddio-supply = <&vddh>;
++			qca,keep-pll-enabled;
++			vddio-supply = <&vddio>;
+ 
+ 			vddio: vddio-regulator {
+ 				regulator-name = "VDDIO";
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+index 262fbad8f0ec5..1b264e5e947ac 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+@@ -201,8 +201,8 @@
+ 		ddr: memory-controller@1080000 {
+ 			compatible = "fsl,qoriq-memory-controller";
+ 			reg = <0x0 0x1080000 0x0 0x1000>;
+-			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+-			big-endian;
++			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
++			little-endian;
+ 		};
+ 
+ 		dcfg: syscon@1e00000 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
+index 631e01c1b9fd4..be1e7d6f0ecb5 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
+@@ -88,11 +88,11 @@
+ 		pinctrl-0 = <&pinctrl_codec2>;
+ 		reg = <0x18>;
+ 		#sound-dai-cells = <0>;
+-		HPVDD-supply = <&reg_3p3v>;
+-		SPRVDD-supply = <&reg_3p3v>;
+-		SPLVDD-supply = <&reg_3p3v>;
+-		AVDD-supply = <&reg_3p3v>;
+-		IOVDD-supply = <&reg_3p3v>;
++		HPVDD-supply = <&reg_gen_3p3>;
++		SPRVDD-supply = <&reg_gen_3p3>;
++		SPLVDD-supply = <&reg_gen_3p3>;
++		AVDD-supply = <&reg_gen_3p3>;
++		IOVDD-supply = <&reg_gen_3p3>;
+ 		DVDD-supply = <&vgen4_reg>;
+ 		reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
+ 	};
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+index 4dc8383478ee2..a08a568c31d92 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+@@ -45,8 +45,8 @@
+ 	reg_12p0_main: regulator-12p0-main {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "12V_MAIN";
+-		regulator-min-microvolt = <5000000>;
+-		regulator-max-microvolt = <5000000>;
++		regulator-min-microvolt = <12000000>;
++		regulator-max-microvolt = <12000000>;
+ 		regulator-always-on;
+ 	};
+ 
+@@ -77,15 +77,6 @@
+ 		regulator-always-on;
+ 	};
+ 
+-	reg_3p3v: regulator-3p3v {
+-		compatible = "regulator-fixed";
+-		vin-supply = <&reg_3p3_main>;
+-		regulator-name = "GEN_3V3";
+-		regulator-min-microvolt = <3300000>;
+-		regulator-max-microvolt = <3300000>;
+-		regulator-always-on;
+-	};
+-
+ 	reg_usdhc2_vmmc: regulator-vsd-3v3 {
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_reg_usdhc2>;
+@@ -415,11 +406,11 @@
+ 		pinctrl-0 = <&pinctrl_codec1>;
+ 		reg = <0x18>;
+ 		#sound-dai-cells = <0>;
+-		HPVDD-supply = <&reg_3p3v>;
+-		SPRVDD-supply = <&reg_3p3v>;
+-		SPLVDD-supply = <&reg_3p3v>;
+-		AVDD-supply = <&reg_3p3v>;
+-		IOVDD-supply = <&reg_3p3v>;
++		HPVDD-supply = <&reg_gen_3p3>;
++		SPRVDD-supply = <&reg_gen_3p3>;
++		SPLVDD-supply = <&reg_gen_3p3>;
++		AVDD-supply = <&reg_gen_3p3>;
++		IOVDD-supply = <&reg_gen_3p3>;
+ 		DVDD-supply = <&vgen4_reg>;
+ 		reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>;
+ 	};
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+index 17477ab0fd8e1..3398f174f09b3 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+@@ -85,6 +85,8 @@
+ 		#size-cells = <2>;
+ 		ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
+ 		ti,sci-dev-id = <199>;
++		dma-coherent;
++		dma-ranges;
+ 
+ 		main_navss_intr: interrupt-controller1 {
+ 			compatible = "ti,sci-intr";
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
+index a8578d650bb67..f362f72bcb508 100644
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -57,6 +57,7 @@
+ #define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		12
+ #define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		13
+ #define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		14
++#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc			15
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 84b5f79c9eab4..7730b81aad6d1 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -715,11 +715,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 			return ret;
+ 	}
+ 
+-	if (run->immediate_exit)
+-		return -EINTR;
+-
+ 	vcpu_load(vcpu);
+ 
++	if (run->immediate_exit) {
++		ret = -EINTR;
++		goto out;
++	}
++
+ 	kvm_sigset_activate(vcpu);
+ 
+ 	ret = 1;
+@@ -892,6 +894,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 
+ 	kvm_sigset_deactivate(vcpu);
+ 
++out:
++	/*
++	 * In the unlikely event that we are returning to userspace
++	 * with pending exceptions or PC adjustment, commit these
++	 * adjustments in order to give userspace a consistent view of
++	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
++	 * being preempt-safe on VHE.
++	 */
++	if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
++					 KVM_ARM64_INCREMENT_PC)))
++		kvm_call_hyp(__kvm_adjust_pc, vcpu);
++
+ 	vcpu_put(vcpu);
+ 	return ret;
+ }
+diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
+index 0812a496725f6..11541b94b328f 100644
+--- a/arch/arm64/kvm/hyp/exception.c
++++ b/arch/arm64/kvm/hyp/exception.c
+@@ -331,8 +331,8 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+ }
+ 
+ /*
+- * Adjust the guest PC on entry, depending on flags provided by EL1
+- * for the purpose of emulation (MMIO, sysreg) or exception injection.
++ * Adjust the guest PC (and potentially exception state) depending on
++ * flags provided by the emulation code.
+  */
+ void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+ {
+diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+index 936328207bde0..e52582e140873 100644
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+@@ -25,6 +25,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
+ 	cpu_reg(host_ctxt, 1) =  __kvm_vcpu_run(kern_hyp_va(vcpu));
+ }
+ 
++static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
++{
++	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
++
++	__kvm_adjust_pc(kern_hyp_va(vcpu));
++}
++
+ static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
+ {
+ 	__kvm_flush_vm_context();
+@@ -112,6 +119,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
+ 
+ static const hcall_t host_hcall[] = {
+ 	HANDLE_FUNC(__kvm_vcpu_run),
++	HANDLE_FUNC(__kvm_adjust_pc),
+ 	HANDLE_FUNC(__kvm_flush_vm_context),
+ 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
+ 	HANDLE_FUNC(__kvm_tlb_flush_vmid),
+diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
+index 7719d632df8df..1754498b07174 100644
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -157,31 +157,29 @@ unsigned long _page_cachable_default;
+ EXPORT_SYMBOL(_page_cachable_default);
+ 
+ #define PM(p)	__pgprot(_page_cachable_default | (p))
+-#define PVA(p)	PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
+ 
+ static inline void setup_protection_map(void)
+ {
+ 	protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+-	protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+-	protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+-	protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+-	protection_map[4]  = PVA(_PAGE_PRESENT);
+-	protection_map[5]  = PVA(_PAGE_PRESENT);
+-	protection_map[6]  = PVA(_PAGE_PRESENT);
+-	protection_map[7]  = PVA(_PAGE_PRESENT);
++	protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
++	protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
++	protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
++	protection_map[4]  = PM(_PAGE_PRESENT);
++	protection_map[5]  = PM(_PAGE_PRESENT);
++	protection_map[6]  = PM(_PAGE_PRESENT);
++	protection_map[7]  = PM(_PAGE_PRESENT);
+ 
+ 	protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+-	protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+-	protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
++	protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
++	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+ 				_PAGE_NO_READ);
+-	protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+-	protection_map[12] = PVA(_PAGE_PRESENT);
+-	protection_map[13] = PVA(_PAGE_PRESENT);
+-	protection_map[14] = PVA(_PAGE_PRESENT);
+-	protection_map[15] = PVA(_PAGE_PRESENT);
++	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
++	protection_map[12] = PM(_PAGE_PRESENT);
++	protection_map[13] = PM(_PAGE_PRESENT);
++	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
++	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+ }
+ 
+-#undef _PVA
+ #undef PM
+ 
+ void cpu_cache_init(void)
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index 01ab2163659e4..e8c2a6373157d 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p)
+ 	int ret = 0;
+ 	struct kprobe *prev;
+ 	struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
+-	struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
+ 
+ 	if ((unsigned long)p->addr & 0x03) {
+ 		printk("Attempt to register kprobe at an unaligned address\n");
+@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p)
+ 	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
+ 		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
+ 		ret = -EINVAL;
+-	} else if (ppc_inst_prefixed(prefix)) {
++	} else if ((unsigned long)p->addr & ~PAGE_MASK &&
++		   ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
+ 		printk("Cannot register a kprobe on the second word of prefixed instruction\n");
+ 		ret = -EINVAL;
+ 	}
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 208a053c9adfd..60c5bc0c130cf 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -4418,7 +4418,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
+ 		mtspr(SPRN_EBBRR, ebb_regs[1]);
+ 		mtspr(SPRN_BESCR, ebb_regs[2]);
+ 		mtspr(SPRN_TAR, user_tar);
+-		mtspr(SPRN_FSCR, current->thread.fscr);
+ 	}
+ 	mtspr(SPRN_VRSAVE, user_vrsave);
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 5e634db4809bf..004f0d4e665f8 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -59,6 +59,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+ #define STACK_SLOT_UAMOR	(SFS-88)
+ #define STACK_SLOT_DAWR1	(SFS-96)
+ #define STACK_SLOT_DAWRX1	(SFS-104)
++#define STACK_SLOT_FSCR		(SFS-112)
+ /* the following is used by the P9 short path */
+ #define STACK_SLOT_NVGPRS	(SFS-152)	/* 18 gprs */
+ 
+@@ -686,6 +687,8 @@ BEGIN_FTR_SECTION
+ 	std	r6, STACK_SLOT_DAWR0(r1)
+ 	std	r7, STACK_SLOT_DAWRX0(r1)
+ 	std	r8, STACK_SLOT_IAMR(r1)
++	mfspr	r5, SPRN_FSCR
++	std	r5, STACK_SLOT_FSCR(r1)
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ BEGIN_FTR_SECTION
+ 	mfspr	r6, SPRN_DAWR1
+@@ -1663,6 +1666,10 @@ FTR_SECTION_ELSE
+ 	ld	r7, STACK_SLOT_HFSCR(r1)
+ 	mtspr	SPRN_HFSCR, r7
+ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
++BEGIN_FTR_SECTION
++	ld	r5, STACK_SLOT_FSCR(r1)
++	mtspr	SPRN_FSCR, r5
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ 	/*
+ 	 * Restore various registers to 0, where non-zero values
+ 	 * set by the guest could disrupt the host.
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index ca2b40dfd24b8..24d936c147cdf 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -23,7 +23,7 @@ ifneq ($(c-gettimeofday-y),)
+ endif
+ 
+ # Build rules
+-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
++targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S
+ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+ 
+ obj-y += vdso.o vdso-syms.o
+@@ -41,7 +41,7 @@ KASAN_SANITIZE := n
+ $(obj)/vdso.o: $(obj)/vdso.so
+ 
+ # link rule for the .so file, .lds has to be first
+-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
++$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ 	$(call if_changed,vdsold)
+ LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
+ 	--build-id=sha1 --hash-style=both --eh-frame-hdr
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 412b51e059c80..48067af946785 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
+ extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
+ extern void lapic_assign_system_vectors(void);
+ extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
++extern void lapic_update_legacy_vectors(void);
+ extern void lapic_online(void);
+ extern void lapic_offline(void);
+ extern bool apic_needs_pit(void);
+diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
+index b7dd944dc8673..8f28fafa98b32 100644
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -56,11 +56,8 @@
+ # define DISABLE_PTI		(1 << (X86_FEATURE_PTI & 31))
+ #endif
+ 
+-#ifdef CONFIG_IOMMU_SUPPORT
+-# define DISABLE_ENQCMD	0
+-#else
+-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
+-#endif
++/* Force disable because it's broken beyond repair */
++#define DISABLE_ENQCMD		(1 << (X86_FEATURE_ENQCMD & 31))
+ 
+ #ifdef CONFIG_X86_SGX
+ # define DISABLE_SGX	0
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index ed33a14188f66..23bef08a83880 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -106,10 +106,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
+  */
+ #define PASID_DISABLED	0
+ 
+-#ifdef CONFIG_IOMMU_SUPPORT
+-/* Update current's PASID MSR/state by mm's PASID. */
+-void update_pasid(void);
+-#else
+ static inline void update_pasid(void) { }
+-#endif
++
+ #endif /* _ASM_X86_FPU_API_H */
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 8d33ad80704f2..ceeba9f631722 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -584,13 +584,6 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
+ 			pkru_val = pk->pkru;
+ 	}
+ 	__write_pkru(pkru_val);
+-
+-	/*
+-	 * Expensive PASID MSR write will be avoided in update_pasid() because
+-	 * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
+-	 * unless it's different from mm->pasid to reduce overhead.
+-	 */
+-	update_pasid();
+ }
+ 
+ #endif /* _ASM_X86_FPU_INTERNAL_H */
+diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
+index 3381198525126..69299878b200a 100644
+--- a/arch/x86/include/asm/kvm_para.h
++++ b/arch/x86/include/asm/kvm_para.h
+@@ -7,8 +7,6 @@
+ #include <linux/interrupt.h>
+ #include <uapi/asm/kvm_para.h>
+ 
+-extern void kvmclock_init(void);
+-
+ #ifdef CONFIG_KVM_GUEST
+ bool kvm_check_and_clear_guest_paused(void);
+ #else
+@@ -86,13 +84,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ }
+ 
+ #ifdef CONFIG_KVM_GUEST
++void kvmclock_init(void);
++void kvmclock_disable(void);
+ bool kvm_para_available(void);
+ unsigned int kvm_arch_para_features(void);
+ unsigned int kvm_arch_para_hints(void);
+ void kvm_async_pf_task_wait_schedule(u32 token);
+ void kvm_async_pf_task_wake(u32 token);
+ u32 kvm_read_and_reset_apf_flags(void);
+-void kvm_disable_steal_time(void);
+ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
+ 
+ DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+@@ -137,11 +136,6 @@ static inline u32 kvm_read_and_reset_apf_flags(void)
+ 	return 0;
+ }
+ 
+-static inline void kvm_disable_steal_time(void)
+-{
+-	return;
+-}
+-
+ static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+ {
+ 	return false;
+diff --git a/arch/x86/include/asm/thermal.h b/arch/x86/include/asm/thermal.h
+index ddbdefd5b94f1..91a7b6687c3b9 100644
+--- a/arch/x86/include/asm/thermal.h
++++ b/arch/x86/include/asm/thermal.h
+@@ -3,11 +3,13 @@
+ #define _ASM_X86_THERMAL_H
+ 
+ #ifdef CONFIG_X86_THERMAL_VECTOR
++void therm_lvt_init(void);
+ void intel_init_thermal(struct cpuinfo_x86 *c);
+ bool x86_thermal_enabled(void);
+ void intel_thermal_interrupt(void);
+ #else
+-static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
++static inline void therm_lvt_init(void)				{ }
++static inline void intel_init_thermal(struct cpuinfo_x86 *c)	{ }
+ #endif
+ 
+ #endif /* _ASM_X86_THERMAL_H */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 4f26700f314d9..b967a2ba7494a 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2604,6 +2604,7 @@ static void __init apic_bsp_setup(bool upmode)
+ 	end_local_APIC_setup();
+ 	irq_remap_enable_fault_handling();
+ 	setup_IO_APIC();
++	lapic_update_legacy_vectors();
+ }
+ 
+ #ifdef CONFIG_UP_LATE_INIT
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 3c9c7492252f8..8c97a3468affa 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -730,6 +730,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
+ 	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
+ }
+ 
++void __init lapic_update_legacy_vectors(void)
++{
++	unsigned int i;
++
++	if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
++		return;
++
++	/*
++	 * If the IO/APIC is disabled via config, kernel command line or
++	 * lack of enumeration then all legacy interrupts are routed
++	 * through the PIC. Make sure that they are marked as legacy
++	 * vectors. PIC_CASCADE_IRQ has already been marked in
++	 * lapic_assign_system_vectors().
++	 */
++	for (i = 0; i < nr_legacy_irqs(); i++) {
++		if (i != PIC_CASCADE_IR)
++			lapic_assign_legacy_vector(i, true);
++	}
++}
++
+ void __init lapic_assign_system_vectors(void)
+ {
+ 	unsigned int i, vector = 0;
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 683749b80ae28..2ad57cc14b83f 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1402,60 +1402,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
+ 	return 0;
+ }
+ #endif /* CONFIG_PROC_PID_ARCH_STATUS */
+-
+-#ifdef CONFIG_IOMMU_SUPPORT
+-void update_pasid(void)
+-{
+-	u64 pasid_state;
+-	u32 pasid;
+-
+-	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
+-		return;
+-
+-	if (!current->mm)
+-		return;
+-
+-	pasid = READ_ONCE(current->mm->pasid);
+-	/* Set the valid bit in the PASID MSR/state only for valid pasid. */
+-	pasid_state = pasid == PASID_DISABLED ?
+-		      pasid : pasid | MSR_IA32_PASID_VALID;
+-
+-	/*
+-	 * No need to hold fregs_lock() since the task's fpstate won't
+-	 * be changed by others (e.g. ptrace) while the task is being
+-	 * switched to or is in IPI.
+-	 */
+-	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
+-		/* The MSR is active and can be directly updated. */
+-		wrmsrl(MSR_IA32_PASID, pasid_state);
+-	} else {
+-		struct fpu *fpu = &current->thread.fpu;
+-		struct ia32_pasid_state *ppasid_state;
+-		struct xregs_state *xsave;
+-
+-		/*
+-		 * The CPU's xstate registers are not currently active. Just
+-		 * update the PASID state in the memory buffer here. The
+-		 * PASID MSR will be loaded when returning to user mode.
+-		 */
+-		xsave = &fpu->state.xsave;
+-		xsave->header.xfeatures |= XFEATURE_MASK_PASID;
+-		ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
+-		/*
+-		 * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
+-		 * won't be NULL and no need to check its value.
+-		 *
+-		 * Only update the task's PASID state when it's different
+-		 * from the mm's pasid.
+-		 */
+-		if (ppasid_state->pasid != pasid_state) {
+-			/*
+-			 * Invalid fpregs so that state restoring will pick up
+-			 * the PASID state.
+-			 */
+-			__fpu_invalidate_fpregs_state(fpu);
+-			ppasid_state->pasid = pasid_state;
+-		}
+-	}
+-}
+-#endif /* CONFIG_IOMMU_SUPPORT */
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 78bb0fae39826..919411a0117df 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -26,6 +26,7 @@
+ #include <linux/kprobes.h>
+ #include <linux/nmi.h>
+ #include <linux/swait.h>
++#include <linux/syscore_ops.h>
+ #include <asm/timer.h>
+ #include <asm/cpu.h>
+ #include <asm/traps.h>
+@@ -37,6 +38,7 @@
+ #include <asm/tlb.h>
+ #include <asm/cpuidle_haltpoll.h>
+ #include <asm/ptrace.h>
++#include <asm/reboot.h>
+ #include <asm/svm.h>
+ 
+ DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+@@ -374,6 +376,14 @@ static void kvm_pv_disable_apf(void)
+ 	pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
+ }
+ 
++static void kvm_disable_steal_time(void)
++{
++	if (!has_steal_clock)
++		return;
++
++	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
++}
++
+ static void kvm_pv_guest_cpu_reboot(void *unused)
+ {
+ 	/*
+@@ -416,14 +426,6 @@ static u64 kvm_steal_clock(int cpu)
+ 	return steal;
+ }
+ 
+-void kvm_disable_steal_time(void)
+-{
+-	if (!has_steal_clock)
+-		return;
+-
+-	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+-}
+-
+ static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
+ {
+ 	early_set_memory_decrypted((unsigned long) ptr, size);
+@@ -460,6 +462,27 @@ static bool pv_tlb_flush_supported(void)
+ 
+ static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+ 
++static void kvm_guest_cpu_offline(bool shutdown)
++{
++	kvm_disable_steal_time();
++	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
++		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
++	kvm_pv_disable_apf();
++	if (!shutdown)
++		apf_task_wake_all();
++	kvmclock_disable();
++}
++
++static int kvm_cpu_online(unsigned int cpu)
++{
++	unsigned long flags;
++
++	local_irq_save(flags);
++	kvm_guest_cpu_init();
++	local_irq_restore(flags);
++	return 0;
++}
++
+ #ifdef CONFIG_SMP
+ 
+ static bool pv_ipi_supported(void)
+@@ -587,29 +610,46 @@ static void __init kvm_smp_prepare_boot_cpu(void)
+ 	kvm_spinlock_init();
+ }
+ 
+-static void kvm_guest_cpu_offline(void)
++static int kvm_cpu_down_prepare(unsigned int cpu)
+ {
+-	kvm_disable_steal_time();
+-	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+-		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+-	kvm_pv_disable_apf();
+-	apf_task_wake_all();
++	unsigned long flags;
++
++	local_irq_save(flags);
++	kvm_guest_cpu_offline(false);
++	local_irq_restore(flags);
++	return 0;
+ }
+ 
+-static int kvm_cpu_online(unsigned int cpu)
++#endif
++
++static int kvm_suspend(void)
+ {
+-	local_irq_disable();
+-	kvm_guest_cpu_init();
+-	local_irq_enable();
++	kvm_guest_cpu_offline(false);
++
+ 	return 0;
+ }
+ 
+-static int kvm_cpu_down_prepare(unsigned int cpu)
++static void kvm_resume(void)
+ {
+-	local_irq_disable();
+-	kvm_guest_cpu_offline();
+-	local_irq_enable();
+-	return 0;
++	kvm_cpu_online(raw_smp_processor_id());
++}
++
++static struct syscore_ops kvm_syscore_ops = {
++	.suspend	= kvm_suspend,
++	.resume		= kvm_resume,
++};
++
++/*
++ * After a PV feature is registered, the host will keep writing to the
++ * registered memory location. If the guest happens to shutdown, this memory
++ * won't be valid. In cases like kexec, in which you install a new kernel, this
++ * means a random memory location will be kept being written.
++ */
++#ifdef CONFIG_KEXEC_CORE
++static void kvm_crash_shutdown(struct pt_regs *regs)
++{
++	kvm_guest_cpu_offline(true);
++	native_machine_crash_shutdown(regs);
+ }
+ #endif
+ 
+@@ -681,6 +721,12 @@ static void __init kvm_guest_init(void)
+ 	kvm_guest_cpu_init();
+ #endif
+ 
++#ifdef CONFIG_KEXEC_CORE
++	machine_ops.crash_shutdown = kvm_crash_shutdown;
++#endif
++
++	register_syscore_ops(&kvm_syscore_ops);
++
+ 	/*
+ 	 * Hard lockup detection is enabled by default. Disable it, as guests
+ 	 * can get false positives too easily, for example if the host is
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 1fc0962c89c08..b825c87c12ef7 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -20,7 +20,6 @@
+ #include <asm/hypervisor.h>
+ #include <asm/mem_encrypt.h>
+ #include <asm/x86_init.h>
+-#include <asm/reboot.h>
+ #include <asm/kvmclock.h>
+ 
+ static int kvmclock __initdata = 1;
+@@ -203,28 +202,9 @@ static void kvm_setup_secondary_clock(void)
+ }
+ #endif
+ 
+-/*
+- * After the clock is registered, the host will keep writing to the
+- * registered memory location. If the guest happens to shutdown, this memory
+- * won't be valid. In cases like kexec, in which you install a new kernel, this
+- * means a random memory location will be kept being written. So before any
+- * kind of shutdown from our side, we unregister the clock by writing anything
+- * that does not have the 'enable' bit set in the msr
+- */
+-#ifdef CONFIG_KEXEC_CORE
+-static void kvm_crash_shutdown(struct pt_regs *regs)
+-{
+-	native_write_msr(msr_kvm_system_time, 0, 0);
+-	kvm_disable_steal_time();
+-	native_machine_crash_shutdown(regs);
+-}
+-#endif
+-
+-static void kvm_shutdown(void)
++void kvmclock_disable(void)
+ {
+ 	native_write_msr(msr_kvm_system_time, 0, 0);
+-	kvm_disable_steal_time();
+-	native_machine_shutdown();
+ }
+ 
+ static void __init kvmclock_init_mem(void)
+@@ -351,10 +331,6 @@ void __init kvmclock_init(void)
+ #endif
+ 	x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
+ 	x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
+-	machine_ops.shutdown  = kvm_shutdown;
+-#ifdef CONFIG_KEXEC_CORE
+-	machine_ops.crash_shutdown  = kvm_crash_shutdown;
+-#endif
+ 	kvm_get_preset_lpj();
+ 
+ 	/*
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index ccab6cf91283d..e79f21d13a0d7 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -44,6 +44,7 @@
+ #include <asm/pci-direct.h>
+ #include <asm/prom.h>
+ #include <asm/proto.h>
++#include <asm/thermal.h>
+ #include <asm/unwind.h>
+ #include <asm/vsyscall.h>
+ #include <linux/vmalloc.h>
+@@ -1220,6 +1221,14 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	x86_init.timers.wallclock_init();
+ 
++	/*
++	 * This needs to run before setup_local_APIC() which soft-disables the
++	 * local APIC temporarily and that masks the thermal LVT interrupt,
++	 * leading to softlockups on machines which have configured SMI
++	 * interrupt delivery.
++	 */
++	therm_lvt_init();
++
+ 	mcheck_init();
+ 
+ 	register_refined_jiffies(CLOCK_TICK_RATE);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9a6825feaf53f..30569bbbca9ac 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2532,7 +2532,7 @@ static int cr_interception(struct vcpu_svm *svm)
+ 	err = 0;
+ 	if (cr >= 16) { /* mov to cr */
+ 		cr -= 16;
+-		val = kvm_register_read(&svm->vcpu, reg);
++		val = kvm_register_readl(&svm->vcpu, reg);
+ 		trace_kvm_cr_write(cr, val);
+ 		switch (cr) {
+ 		case 0:
+@@ -2578,7 +2578,7 @@ static int cr_interception(struct vcpu_svm *svm)
+ 			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ 			return 1;
+ 		}
+-		kvm_register_write(&svm->vcpu, reg, val);
++		kvm_register_writel(&svm->vcpu, reg, val);
+ 		trace_kvm_cr_read(cr, val);
+ 	}
+ 	return kvm_complete_insn_gp(&svm->vcpu, err);
+@@ -2643,11 +2643,11 @@ static int dr_interception(struct vcpu_svm *svm)
+ 	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
+ 	if (dr >= 16) { /* mov to DRn  */
+ 		dr -= 16;
+-		val = kvm_register_read(&svm->vcpu, reg);
++		val = kvm_register_readl(&svm->vcpu, reg);
+ 		err = kvm_set_dr(&svm->vcpu, dr, val);
+ 	} else {
+ 		kvm_get_dr(&svm->vcpu, dr, &val);
+-		kvm_register_write(&svm->vcpu, reg, val);
++		kvm_register_writel(&svm->vcpu, reg, val);
+ 	}
+ 
+ 	return kvm_complete_insn_gp(&svm->vcpu, err);
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index a73347e2cdfc5..ea3d0b73731bc 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -836,8 +836,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ 
+ 	if (si_code == SEGV_PKUERR)
+ 		force_sig_pkuerr((void __user *)address, pkey);
+-
+-	force_sig_fault(SIGSEGV, si_code, (void __user *)address);
++	else
++		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ 
+ 	local_irq_disable();
+ }
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index a19374d261013..65f599e9075bc 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp)
+ #define AMD_SME_BIT	BIT(0)
+ #define AMD_SEV_BIT	BIT(1)
+ 
+-	/* Check the SEV MSR whether SEV or SME is enabled */
+-	sev_status   = __rdmsr(MSR_AMD64_SEV);
+-	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+-
+ 	/*
+ 	 * Check for the SME/SEV feature:
+ 	 *   CPUID Fn8000_001F[EAX]
+@@ -519,11 +515,16 @@ void __init sme_enable(struct boot_params *bp)
+ 	eax = 0x8000001f;
+ 	ecx = 0;
+ 	native_cpuid(&eax, &ebx, &ecx, &edx);
+-	if (!(eax & feature_mask))
++	/* Check whether SEV or SME is supported */
++	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
+ 		return;
+ 
+ 	me_mask = 1UL << (ebx & 0x3f);
+ 
++	/* Check the SEV MSR whether SEV or SME is enabled */
++	sev_status   = __rdmsr(MSR_AMD64_SEV);
++	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
++
+ 	/* Check if memory encryption is enabled */
+ 	if (feature_mask == AMD_SME_BIT) {
+ 		/*
+diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
+index 624a26794d558..e5ba9795ec696 100644
+--- a/drivers/acpi/acpica/utdelete.c
++++ b/drivers/acpi/acpica/utdelete.c
+@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
+ 		}
+ 		break;
+ 
++	case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
++
++		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
++				  "***** Address handler %p\n", object));
++
++		acpi_os_delete_mutex(object->address_space.context_mutex);
++		break;
++
+ 	default:
+ 
+ 		break;
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 68145e326eb90..30e9b700273e1 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1334,6 +1334,34 @@ err_allow_idle:
+ 	return error;
+ }
+ 
++static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
++{
++	struct device *dev = ddata->dev;
++	int error;
++
++	/* Disable target module if it is enabled */
++	if (ddata->enabled) {
++		error = sysc_runtime_suspend(dev);
++		if (error)
++			dev_warn(dev, "reinit suspend failed: %i\n", error);
++	}
++
++	/* Enable target module */
++	error = sysc_runtime_resume(dev);
++	if (error)
++		dev_warn(dev, "reinit resume failed: %i\n", error);
++
++	if (leave_enabled)
++		return error;
++
++	/* Disable target module if leave_enabled was not set */
++	error = sysc_runtime_suspend(dev);
++	if (error)
++		dev_warn(dev, "reinit suspend failed: %i\n", error);
++
++	return error;
++}
++
+ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
+ {
+ 	struct sysc *ddata;
+@@ -1344,12 +1372,18 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
+ 	    (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ 		return 0;
+ 
+-	return pm_runtime_force_suspend(dev);
++	if (!ddata->enabled)
++		return 0;
++
++	ddata->needs_resume = 1;
++
++	return sysc_runtime_suspend(dev);
+ }
+ 
+ static int __maybe_unused sysc_noirq_resume(struct device *dev)
+ {
+ 	struct sysc *ddata;
++	int error = 0;
+ 
+ 	ddata = dev_get_drvdata(dev);
+ 
+@@ -1357,7 +1391,19 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
+ 	    (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ 		return 0;
+ 
+-	return pm_runtime_force_resume(dev);
++	if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
++		error = sysc_reinit_module(ddata, ddata->needs_resume);
++		if (error)
++			dev_warn(dev, "noirq_resume failed: %i\n", error);
++	} else if (ddata->needs_resume) {
++		error = sysc_runtime_resume(dev);
++		if (error)
++			dev_warn(dev, "noirq_resume failed: %i\n", error);
++	}
++
++	ddata->needs_resume = 0;
++
++	return error;
+ }
+ 
+ static const struct dev_pm_ops sysc_pm_ops = {
+@@ -1408,9 +1454,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 	/* Uarts on omap4 and later */
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+-		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 
+ 	/* Quirks that need to be set based on the module address */
+ 	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+@@ -1466,7 +1512,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ 	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
+ 		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ 	SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
++		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
++		   SYSC_QUIRK_REINIT_ON_RESUME),
+ 	SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ 		   SYSC_MODULE_QUIRK_WDT),
+ 	/* PRUSS on am3, am4 and am5 */
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 07cf7977a0450..59f2104ffc771 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -675,12 +675,12 @@ static int __init idxd_init_module(void)
+ 	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
+ 	 * enumerating the device. We can not utilize it.
+ 	 */
+-	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
++	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
+ 		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
+ 		return -ENODEV;
+ 	}
+ 
+-	if (!boot_cpu_has(X86_FEATURE_ENQCMD))
++	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
+ 		pr_warn("Platform does not have ENQCMD(S) support.\n");
+ 	else
+ 		support_enqcmd = true;
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index e15d484b6a5a7..ea7ca74fc1730 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+ 	if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
+ 		return 0;
+ 
+-	n = 0;
+-	len = CPER_REC_LEN - 1;
++	len = CPER_REC_LEN;
+ 	dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+ 	if (bank && device)
+ 		n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
+@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+ 			     "DIMM location: not present. DMI handle: 0x%.4x ",
+ 			     mem->mem_dev_handle);
+ 
+-	msg[n] = '\0';
+ 	return n;
+ }
+ 
+diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c
+index bb042ab7c2be6..e901f8564ca0c 100644
+--- a/drivers/firmware/efi/fdtparams.c
++++ b/drivers/firmware/efi/fdtparams.c
+@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
+ 	BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
+ 	BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
+ 
++	if (!fdt)
++		return 0;
++
+ 	for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
+ 		node = fdt_path_offset(fdt, dt_params[i].path);
+ 		if (node < 0)
+diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
+index 4e81c6077188e..dd95f330fe6e1 100644
+--- a/drivers/firmware/efi/libstub/file.c
++++ b/drivers/firmware/efi/libstub/file.c
+@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
+ 		return 0;
+ 
+ 	/* Skip any leading slashes */
+-	while (cmdline[i] == L'/' || cmdline[i] == L'\\')
++	while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
+ 		i++;
+ 
+ 	while (--result_len > 0 && i < cmdline_len) {
+diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
+index 5737cb0fcd44e..0a9aba5f9ceff 100644
+--- a/drivers/firmware/efi/memattr.c
++++ b/drivers/firmware/efi/memattr.c
+@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+ 		return false;
+ 	}
+ 
+-	if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+-		pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+-		return false;
+-	}
+-
+ 	if (PAGE_SIZE > EFI_PAGE_SIZE &&
+ 	    (!PAGE_ALIGNED(in->phys_addr) ||
+ 	     !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 0350205c48974..6819fe5612d9e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ {
+ 	struct amdgpu_ctx *ctx;
+ 	struct amdgpu_ctx_mgr *mgr;
+-	unsigned long ras_counter;
+ 
+ 	if (!fpriv)
+ 		return -EINVAL;
+@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ 	if (atomic_read(&ctx->guilty))
+ 		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+ 
+-	/*query ue count*/
+-	ras_counter = amdgpu_ras_query_error_count(adev, false);
+-	/*ras counter is monotonic increasing*/
+-	if (ras_counter != ctx->ras_counter_ue) {
+-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+-		ctx->ras_counter_ue = ras_counter;
+-	}
+-
+-	/*query ce count*/
+-	ras_counter = amdgpu_ras_query_error_count(adev, true);
+-	if (ras_counter != ctx->ras_counter_ce) {
+-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+-		ctx->ras_counter_ce = ras_counter;
+-	}
+-
+ 	mutex_unlock(&mgr->lock);
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index a2ac44cc2a6da..e80cc2928b583 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -944,6 +944,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+ 	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+ 	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ 		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
++		drm_gem_object_put(obj);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+index dc947c8ffe213..e6c4a36eaf9ae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+@@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle)
+ static int jpeg_v2_5_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-	struct amdgpu_ring *ring;
+ 	int i;
+ 
++	cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ 		if (adev->jpeg.harvest_config & (1 << i))
+ 			continue;
+ 
+-		ring = &adev->jpeg.inst[i].ring_dec;
+ 		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ 		      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
+ 			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+index 1d354245678d5..2ea68c84e6b48 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+@@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle)
+ static int jpeg_v3_0_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-	struct amdgpu_ring *ring;
+ 
+-	ring = &adev->jpeg.inst->ring_dec;
++	cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ 	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ 	      RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
+ 		jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 760859880c1ed..4eebf973a0658 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ 
+ error:
+ 	dma_fence_put(fence);
++	amdgpu_bo_unpin(bo);
+ 	amdgpu_bo_unreserve(bo);
+ 	amdgpu_bo_unref(&bo);
+ 	return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index ebbc04ff5da06..90138469648a9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -367,15 +367,14 @@ done:
+ static int vcn_v3_0_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-	struct amdgpu_ring *ring;
+ 	int i;
+ 
++	cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ 		if (adev->vcn.harvest_config & (1 << i))
+ 			continue;
+ 
+-		ring = &adev->vcn.inst[i].ring_dec;
+-
+ 		if (!amdgpu_sriov_vf(adev)) {
+ 			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ 					(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
+index d2a678a2497e4..411494005f0ec 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_request.c
++++ b/drivers/gpu/drm/i915/selftests/i915_request.c
+@@ -1392,8 +1392,8 @@ static int live_breadcrumbs_smoketest(void *arg)
+ 
+ 	for (n = 0; n < smoke[0].ncontexts; n++) {
+ 		smoke[0].contexts[n] = live_context(i915, file);
+-		if (!smoke[0].contexts[n]) {
+-			ret = -ENOMEM;
++		if (IS_ERR(smoke[0].contexts[n])) {
++			ret = PTR_ERR(smoke[0].contexts[n]);
+ 			goto out_contexts;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+index 85f2c3564c966..fb061e666faa7 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+@@ -933,8 +933,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
+ 		DPU_DEBUG("REG_DMA is not defined");
+ 	}
+ 
+-	if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+-		dpu_kms_parse_data_bus_icc_path(dpu_kms);
++	dpu_kms_parse_data_bus_icc_path(dpu_kms);
+ 
+ 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+index cd4078807db1b..3416e9617ee9a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+@@ -31,40 +31,8 @@ struct dpu_mdss {
+ 	void __iomem *mmio;
+ 	struct dss_module_power mp;
+ 	struct dpu_irq_controller irq_controller;
+-	struct icc_path *path[2];
+-	u32 num_paths;
+ };
+ 
+-static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
+-						struct dpu_mdss *dpu_mdss)
+-{
+-	struct icc_path *path0 = of_icc_get(dev->dev, "mdp0-mem");
+-	struct icc_path *path1 = of_icc_get(dev->dev, "mdp1-mem");
+-
+-	if (IS_ERR_OR_NULL(path0))
+-		return PTR_ERR_OR_ZERO(path0);
+-
+-	dpu_mdss->path[0] = path0;
+-	dpu_mdss->num_paths = 1;
+-
+-	if (!IS_ERR_OR_NULL(path1)) {
+-		dpu_mdss->path[1] = path1;
+-		dpu_mdss->num_paths++;
+-	}
+-
+-	return 0;
+-}
+-
+-static void dpu_mdss_icc_request_bw(struct msm_mdss *mdss)
+-{
+-	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+-	int i;
+-	u64 avg_bw = dpu_mdss->num_paths ? MAX_BW / dpu_mdss->num_paths : 0;
+-
+-	for (i = 0; i < dpu_mdss->num_paths; i++)
+-		icc_set_bw(dpu_mdss->path[i], avg_bw, kBps_to_icc(MAX_BW));
+-}
+-
+ static void dpu_mdss_irq(struct irq_desc *desc)
+ {
+ 	struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
+@@ -178,8 +146,6 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
+ 	struct dss_module_power *mp = &dpu_mdss->mp;
+ 	int ret;
+ 
+-	dpu_mdss_icc_request_bw(mdss);
+-
+ 	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ 	if (ret) {
+ 		DPU_ERROR("clock enable failed, ret:%d\n", ret);
+@@ -213,15 +179,12 @@ static int dpu_mdss_disable(struct msm_mdss *mdss)
+ {
+ 	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ 	struct dss_module_power *mp = &dpu_mdss->mp;
+-	int ret, i;
++	int ret;
+ 
+ 	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ 	if (ret)
+ 		DPU_ERROR("clock disable failed, ret:%d\n", ret);
+ 
+-	for (i = 0; i < dpu_mdss->num_paths; i++)
+-		icc_set_bw(dpu_mdss->path[i], 0, 0);
+-
+ 	return ret;
+ }
+ 
+@@ -232,7 +195,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
+ 	struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ 	struct dss_module_power *mp = &dpu_mdss->mp;
+ 	int irq;
+-	int i;
+ 
+ 	pm_runtime_suspend(dev->dev);
+ 	pm_runtime_disable(dev->dev);
+@@ -242,9 +204,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
+ 	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ 	devm_kfree(&pdev->dev, mp->clk_config);
+ 
+-	for (i = 0; i < dpu_mdss->num_paths; i++)
+-		icc_put(dpu_mdss->path[i]);
+-
+ 	if (dpu_mdss->mmio)
+ 		devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ 	dpu_mdss->mmio = NULL;
+@@ -276,12 +235,6 @@ int dpu_mdss_init(struct drm_device *dev)
+ 
+ 	DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+ 
+-	if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) {
+-		ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	mp = &dpu_mdss->mp;
+ 	ret = msm_dss_parse_clock(pdev, mp);
+ 	if (ret) {
+@@ -307,8 +260,6 @@ int dpu_mdss_init(struct drm_device *dev)
+ 
+ 	pm_runtime_enable(dev->dev);
+ 
+-	dpu_mdss_icc_request_bw(priv->mdss);
+-
+ 	return ret;
+ 
+ irq_error:
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index 2ab38b7153477..ea9a4913932d6 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -88,6 +88,7 @@ static void amd_sfh_work(struct work_struct *work)
+ 	sensor_index = req_node->sensor_idx;
+ 	report_id = req_node->report_id;
+ 	node_type = req_node->report_type;
++	kfree(req_node);
+ 
+ 	if (node_type == HID_FEATURE_REPORT) {
+ 		report_size = get_feature_report(sensor_index, report_id,
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index d459e2dbe6474..f7710fb2f48d2 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -1262,6 +1262,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
+ 	int status;
+ 
+ 	long flags = (long) data[2];
++	*level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ 
+ 	if (flags & 0x80)
+ 		switch (flags & 0x07) {
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index abd86903875f0..fc4c074597539 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -597,7 +597,7 @@ static int magicmouse_probe(struct hid_device *hdev,
+ 	if (id->vendor == USB_VENDOR_ID_APPLE &&
+ 	    id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ 	    hdev->type != HID_TYPE_USBMOUSE)
+-		return 0;
++		return -ENODEV;
+ 
+ 	msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
+ 	if (msc == NULL) {
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 9d9f3e1bd5f41..55dcb8536286b 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -604,9 +604,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
+ 		if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+ 			continue;
+ 
+-		for (n = 0; n < field->report_count; n++) {
+-			if (field->usage[n].hid == HID_DG_CONTACTID)
+-				rdata->is_mt_collection = true;
++		if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
++			for (n = 0; n < field->report_count; n++) {
++				if (field->usage[n].hid == HID_DG_CONTACTID) {
++					rdata->is_mt_collection = true;
++					break;
++				}
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index 9993133989a58..46474612e73c6 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -45,6 +45,7 @@
+ #define I2C_HID_QUIRK_BOGUS_IRQ			BIT(4)
+ #define I2C_HID_QUIRK_RESET_ON_RESUME		BIT(5)
+ #define I2C_HID_QUIRK_BAD_INPUT_SIZE		BIT(6)
++#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET	BIT(7)
+ 
+ 
+ /* flags */
+@@ -178,6 +179,11 @@ static const struct i2c_hid_quirks {
+ 		 I2C_HID_QUIRK_RESET_ON_RESUME },
+ 	{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+ 		I2C_HID_QUIRK_BAD_INPUT_SIZE },
++	/*
++	 * Sending the wakeup after reset actually breaks the ELAN touchscreen controller
++	 */
++	{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
++		 I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
+ 	{ 0, 0 }
+ };
+ 
+@@ -461,7 +467,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ 	}
+ 
+ 	/* At least some SIS devices need this after reset */
+-	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
++	if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
++		ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ 
+ out_unlock:
+ 	mutex_unlock(&ihid->reset_lock);
+@@ -990,8 +997,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
+ 	hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+ 	hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+ 
+-	snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
+-		 client->name, hid->vendor, hid->product);
++	snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
++		 client->name, (u16)hid->vendor, (u16)hid->product);
+ 	strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+ 
+ 	ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
+index fddac7c72f645..07a9fe97d2e05 100644
+--- a/drivers/hid/usbhid/hid-pidff.c
++++ b/drivers/hid/usbhid/hid-pidff.c
+@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
+ 
+ 	if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
+ 	    pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
++		error = -EPERM;
+ 		hid_notice(hid,
+ 			   "device does not support device managed pool\n");
+ 		goto fail;
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index 73b9db9e3aab6..63b74e781c5d9 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = {
+ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
+ 			      int index)
+ {
+-	if (disallow_fan_support && index >= 8)
++	if (disallow_fan_support && index >= 20)
+ 		return 0;
+ 	if (disallow_fan_type_call &&
+-	    (index == 9 || index == 12 || index == 15))
++	    (index == 21 || index == 25 || index == 28))
+ 		return 0;
+ 	if (index >= 0 && index <= 1 &&
+ 	    !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
+diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
+index 2bee930d39002..789242ed72e5d 100644
+--- a/drivers/hwmon/pmbus/isl68137.c
++++ b/drivers/hwmon/pmbus/isl68137.c
+@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client)
+ 		info->read_word_data = raa_dmpvr2_read_word_data;
+ 		break;
+ 	case raa_dmpvr2_2rail_nontc:
+-		info->func[0] &= ~PMBUS_HAVE_TEMP;
+-		info->func[1] &= ~PMBUS_HAVE_TEMP;
++		info->func[0] &= ~PMBUS_HAVE_TEMP3;
++		info->func[1] &= ~PMBUS_HAVE_TEMP3;
+ 		fallthrough;
+ 	case raa_dmpvr2_2rail:
+ 		info->pages = 2;
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index 214b4c913a139..671f4a52275ec 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -650,6 +650,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static void geni_i2c_shutdown(struct platform_device *pdev)
++{
++	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
++
++	/* Make client i2c transfers start failing */
++	i2c_mark_adapter_suspended(&gi2c->adap);
++}
++
+ static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
+ {
+ 	int ret;
+@@ -690,6 +698,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
+ {
+ 	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+ 
++	i2c_mark_adapter_suspended(&gi2c->adap);
++
+ 	if (!gi2c->suspended) {
+ 		geni_i2c_runtime_suspend(dev);
+ 		pm_runtime_disable(dev);
+@@ -699,8 +709,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
+ 	return 0;
+ }
+ 
++static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
++{
++	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
++
++	i2c_mark_adapter_resumed(&gi2c->adap);
++	return 0;
++}
++
+ static const struct dev_pm_ops geni_i2c_pm_ops = {
+-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
++	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
+ 	SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
+ 									NULL)
+ };
+@@ -714,6 +732,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
+ static struct platform_driver geni_i2c_driver = {
+ 	.probe  = geni_i2c_probe,
+ 	.remove = geni_i2c_remove,
++	.shutdown = geni_i2c_shutdown,
+ 	.driver = {
+ 		.name = "geni_i2c",
+ 		.pm = &geni_i2c_pm_ops,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index 314f8d8067231..9058f09f921ee 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
+ 			  bool persistent, u8 *smt_idx);
+ int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
+ void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
+-int cxgb_open(struct net_device *dev);
+-int cxgb_close(struct net_device *dev);
+ void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
+ void cxgb4_quiesce_rx(struct sge_rspq *q);
+ int cxgb4_port_mirror_alloc(struct net_device *dev);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 421bd9b88028d..1f601de02e706 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
+ /*
+  * net_device operations
+  */
+-int cxgb_open(struct net_device *dev)
++static int cxgb_open(struct net_device *dev)
+ {
+ 	struct port_info *pi = netdev_priv(dev);
+ 	struct adapter *adapter = pi->adapter;
+@@ -2882,7 +2882,7 @@ out_unlock:
+ 	return err;
+ }
+ 
+-int cxgb_close(struct net_device *dev)
++static int cxgb_close(struct net_device *dev)
+ {
+ 	struct port_info *pi = netdev_priv(dev);
+ 	struct adapter *adapter = pi->adapter;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index 1b88bd1c2dbe4..dd9be229819a5 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
+ 	if (!ch_flower)
+ 		return -ENOENT;
+ 
++	rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
++			       adap->flower_ht_params);
++
+ 	ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
+ 				      &ch_flower->fs, ch_flower->filter_id);
+ 	if (ret)
+-		goto err;
++		netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
++			   ch_flower->filter_id, ret);
+ 
+-	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+-				     adap->flower_ht_params);
+-	if (ret) {
+-		netdev_err(dev, "Flow remove from rhashtable failed");
+-		goto err;
+-	}
+ 	kfree_rcu(ch_flower, rcu);
+-
+-err:
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+index 6c259de96f969..338b04f339b3d 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ 	 * down before configuring tc params.
+ 	 */
+ 	if (netif_running(dev)) {
+-		cxgb_close(dev);
++		netif_tx_stop_all_queues(dev);
++		netif_carrier_off(dev);
+ 		needs_bring_up = true;
+ 	}
+ 
+@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ 	}
+ 
+ out:
+-	if (needs_bring_up)
+-		cxgb_open(dev);
++	if (needs_bring_up) {
++		netif_tx_start_all_queues(dev);
++		netif_carrier_on(dev);
++	}
+ 
+ 	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ 	return ret;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 1e5f2edb70cf4..6a099cb34b122 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
+ 	if (!eosw_txq)
+ 		return -ENOMEM;
+ 
++	if (!(adap->flags & CXGB4_FW_OK)) {
++		/* Don't stall caller when access to FW is lost */
++		complete(&eosw_txq->completion);
++		return -EIO;
++	}
++
+ 	skb = alloc_skb(len, GFP_KERNEL);
+ 	if (!skb)
+ 		return -ENOMEM;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 70b515049540f..c358d90498813 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -2313,15 +2313,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+ 	case XDP_TX:
+ 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
++		if (result == I40E_XDP_CONSUMED)
++			goto out_failure;
+ 		break;
+ 	case XDP_REDIRECT:
+ 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+-		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
++		if (err)
++			goto out_failure;
++		result = I40E_XDP_REDIR;
+ 		break;
+ 	default:
+ 		bpf_warn_invalid_xdp_action(act);
+ 		fallthrough;
+ 	case XDP_ABORTED:
++out_failure:
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ 		fallthrough; /* handle aborts by dropping packet */
+ 	case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index 12ca84113587d..5b39c457bd77b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -160,21 +160,28 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+ 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ 	act = bpf_prog_run_xdp(xdp_prog, xdp);
+ 
++	if (likely(act == XDP_REDIRECT)) {
++		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
++		if (err)
++			goto out_failure;
++		rcu_read_unlock();
++		return I40E_XDP_REDIR;
++	}
++
+ 	switch (act) {
+ 	case XDP_PASS:
+ 		break;
+ 	case XDP_TX:
+ 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+-		break;
+-	case XDP_REDIRECT:
+-		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+-		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
++		if (result == I40E_XDP_CONSUMED)
++			goto out_failure;
+ 		break;
+ 	default:
+ 		bpf_warn_invalid_xdp_action(act);
+ 		fallthrough;
+ 	case XDP_ABORTED:
++out_failure:
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ 		fallthrough; /* handle aborts by dropping packet */
+ 	case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 17101c45cbcd8..f668296ca6779 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -325,6 +325,7 @@ struct ice_vsi {
+ 	struct ice_tc_cfg tc_cfg;
+ 	struct bpf_prog *xdp_prog;
+ 	struct ice_ring **xdp_rings;	 /* XDP ring array */
++	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
+ 	u16 num_xdp_txq;		 /* Used XDP queues */
+ 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ 
+@@ -534,15 +535,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
+  */
+ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
+ {
++	struct ice_vsi *vsi = ring->vsi;
+ 	u16 qid = ring->q_index;
+ 
+ 	if (ice_ring_is_xdp(ring))
+-		qid -= ring->vsi->num_xdp_txq;
++		qid -= vsi->num_xdp_txq;
+ 
+-	if (!ice_is_xdp_ena_vsi(ring->vsi))
++	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+ 		return NULL;
+ 
+-	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
++	return xsk_get_pool_from_qid(vsi->netdev, qid);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 32ba71a161652..f80fff97d8dce 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -1797,49 +1797,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
+ 		ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 						100000baseKR4_Full);
+ 	}
+-
+-	/* Autoneg PHY types */
+-	if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+-		ethtool_link_ksettings_add_link_mode(ks, supported,
+-						     Autoneg);
+-		ethtool_link_ksettings_add_link_mode(ks, advertising,
+-						     Autoneg);
+-	}
+-	if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
+-		ethtool_link_ksettings_add_link_mode(ks, supported,
+-						     Autoneg);
+-		ethtool_link_ksettings_add_link_mode(ks, advertising,
+-						     Autoneg);
+-	}
+-	if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
+-	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
+-		ethtool_link_ksettings_add_link_mode(ks, supported,
+-						     Autoneg);
+-		ethtool_link_ksettings_add_link_mode(ks, advertising,
+-						     Autoneg);
+-	}
+ }
+ 
+ #define TEST_SET_BITS_TIMEOUT	50
+@@ -1996,9 +1953,7 @@ ice_get_link_ksettings(struct net_device *netdev,
+ 		ks->base.port = PORT_TP;
+ 		break;
+ 	case ICE_MEDIA_BACKPLANE:
+-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ 		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
+-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ 		ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 						     Backplane);
+ 		ks->base.port = PORT_NONE;
+@@ -2073,6 +2028,12 @@ ice_get_link_ksettings(struct net_device *netdev,
+ 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+ 		ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ 
++	/* Set supported and advertised autoneg */
++	if (ice_is_phy_caps_an_enabled(caps)) {
++		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
++		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
++	}
++
+ done:
+ 	kfree(caps);
+ 	return err;
+diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+index 093a1818a3929..1998821896c0f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+@@ -31,6 +31,7 @@
+ #define PF_FW_ATQLEN_ATQOVFL_M			BIT(29)
+ #define PF_FW_ATQLEN_ATQCRIT_M			BIT(30)
+ #define VF_MBX_ARQLEN(_VF)			(0x0022BC00 + ((_VF) * 4))
++#define VF_MBX_ATQLEN(_VF)			(0x0022A800 + ((_VF) * 4))
+ #define PF_FW_ATQLEN_ATQENABLE_M		BIT(31)
+ #define PF_FW_ATQT				0x00080400
+ #define PF_MBX_ARQBAH				0x0022E400
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 195d122c9cb22..27e439853c3b0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
+ 	if (!vsi->q_vectors)
+ 		goto err_vectors;
+ 
++	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
++	if (!vsi->af_xdp_zc_qps)
++		goto err_zc_qps;
++
+ 	return 0;
+ 
++err_zc_qps:
++	devm_kfree(dev, vsi->q_vectors);
+ err_vectors:
+ 	devm_kfree(dev, vsi->rxq_map);
+ err_rxq_map:
+@@ -192,6 +198,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
+ 		break;
+ 	case ICE_VSI_VF:
+ 		vf = &pf->vf[vsi->vf_id];
++		if (vf->num_req_qs)
++			vf->num_vf_qs = vf->num_req_qs;
+ 		vsi->alloc_txq = vf->num_vf_qs;
+ 		vsi->alloc_rxq = vf->num_vf_qs;
+ 		/* pf->num_msix_per_vf includes (VF miscellaneous vector +
+@@ -286,6 +294,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
+ 
+ 	dev = ice_pf_to_dev(pf);
+ 
++	if (vsi->af_xdp_zc_qps) {
++		bitmap_free(vsi->af_xdp_zc_qps);
++		vsi->af_xdp_zc_qps = NULL;
++	}
+ 	/* free the ring and vector containers */
+ 	if (vsi->q_vectors) {
+ 		devm_kfree(dev, vsi->q_vectors);
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index b91dcfd12727d..113e53efffd71 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -523,7 +523,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
+ 	    struct bpf_prog *xdp_prog)
+ {
+ 	struct ice_ring *xdp_ring;
+-	int err;
++	int err, result;
+ 	u32 act;
+ 
+ 	act = bpf_prog_run_xdp(xdp_prog, xdp);
+@@ -532,14 +532,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
+ 		return ICE_XDP_PASS;
+ 	case XDP_TX:
+ 		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
+-		return ice_xmit_xdp_buff(xdp, xdp_ring);
++		result = ice_xmit_xdp_buff(xdp, xdp_ring);
++		if (result == ICE_XDP_CONSUMED)
++			goto out_failure;
++		return result;
+ 	case XDP_REDIRECT:
+ 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+-		return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
++		if (err)
++			goto out_failure;
++		return ICE_XDP_REDIR;
+ 	default:
+ 		bpf_warn_invalid_xdp_action(act);
+ 		fallthrough;
+ 	case XDP_ABORTED:
++out_failure:
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ 		fallthrough;
+ 	case XDP_DROP:
+@@ -2331,6 +2337,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+ 	struct ice_tx_offload_params offload = { 0 };
+ 	struct ice_vsi *vsi = tx_ring->vsi;
+ 	struct ice_tx_buf *first;
++	struct ethhdr *eth;
+ 	unsigned int count;
+ 	int tso, csum;
+ 
+@@ -2377,7 +2384,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+ 		goto out_drop;
+ 
+ 	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
+-	if (unlikely(skb->priority == TC_PRIO_CONTROL &&
++	eth = (struct ethhdr *)skb_mac_header(skb);
++	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
++		      eth->h_proto == htons(ETH_P_LLDP)) &&
+ 		     vsi->type == ICE_VSI_PF &&
+ 		     vsi->port_info->qos_cfg.is_sw_lldp))
+ 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index 1f38a8d0c5254..48dee9c5d534b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -435,13 +435,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
+ 	 */
+ 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ 
+-	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
+-	 * in the case of VFR. If this is done for PFR, it can mess up VF
+-	 * resets because the VF driver may already have started cleanup
+-	 * by the time we get here.
++	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
++	 * needs to clear them in the case of VFR/VFLR. If this is done for
++	 * PFR, it can mess up VF resets because the VF driver may already
++	 * have started cleanup by the time we get here.
+ 	 */
+-	if (!is_pfr)
++	if (!is_pfr) {
+ 		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
++		wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
++	}
+ 
+ 	/* In the case of a VFLR, the HW has already reset the VF and we
+ 	 * just need to clean up, so don't hit the VFRTRIG register.
+@@ -1375,7 +1377,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+ 	}
+ 
+ 	ice_vf_pre_vsi_rebuild(vf);
+-	ice_vf_rebuild_vsi_with_release(vf);
++
++	if (ice_vf_rebuild_vsi_with_release(vf)) {
++		dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
++		return false;
++	}
++
+ 	ice_vf_post_vsi_rebuild(vf);
+ 
+ 	return true;
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 9f94d9159acde..f1d4240e57df3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -273,6 +273,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
+ 	if (!pool)
+ 		return -EINVAL;
+ 
++	clear_bit(qid, vsi->af_xdp_zc_qps);
+ 	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
+ 
+ 	return 0;
+@@ -303,6 +304,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
+ 	if (err)
+ 		return err;
+ 
++	set_bit(qid, vsi->af_xdp_zc_qps);
++
+ 	return 0;
+ }
+ 
+@@ -473,21 +476,29 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
+ 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ 
+ 	act = bpf_prog_run_xdp(xdp_prog, xdp);
++
++	if (likely(act == XDP_REDIRECT)) {
++		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
++		if (err)
++			goto out_failure;
++		rcu_read_unlock();
++		return ICE_XDP_REDIR;
++	}
++
+ 	switch (act) {
+ 	case XDP_PASS:
+ 		break;
+ 	case XDP_TX:
+ 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
+ 		result = ice_xmit_xdp_buff(xdp, xdp_ring);
+-		break;
+-	case XDP_REDIRECT:
+-		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+-		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
++		if (result == ICE_XDP_CONSUMED)
++			goto out_failure;
+ 		break;
+ 	default:
+ 		bpf_warn_invalid_xdp_action(act);
+ 		fallthrough;
+ 	case XDP_ABORTED:
++out_failure:
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ 		fallthrough;
+ 	case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
+index 7bda8c5edea5d..2d3daf022651c 100644
+--- a/drivers/net/ethernet/intel/igb/igb.h
++++ b/drivers/net/ethernet/intel/igb/igb.h
+@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
+ void igb_ptp_tx_hang(struct igb_adapter *adapter);
+ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+-			struct sk_buff *skb);
++			ktime_t *timestamp);
+ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+ void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index a45cd2b416c89..caa8929289ae7 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -8281,7 +8281,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
+ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
+ 					 struct igb_rx_buffer *rx_buffer,
+ 					 struct xdp_buff *xdp,
+-					 union e1000_adv_rx_desc *rx_desc)
++					 ktime_t timestamp)
+ {
+ #if (PAGE_SIZE < 8192)
+ 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+@@ -8301,12 +8301,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
+-	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+-		if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
+-			xdp->data += IGB_TS_HDR_LEN;
+-			size -= IGB_TS_HDR_LEN;
+-		}
+-	}
++	if (timestamp)
++		skb_hwtstamps(skb)->hwtstamp = timestamp;
+ 
+ 	/* Determine available headroom for copy */
+ 	headlen = size;
+@@ -8337,7 +8333,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
+ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
+ 				     struct igb_rx_buffer *rx_buffer,
+ 				     struct xdp_buff *xdp,
+-				     union e1000_adv_rx_desc *rx_desc)
++				     ktime_t timestamp)
+ {
+ #if (PAGE_SIZE < 8192)
+ 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+@@ -8364,11 +8360,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
+ 	if (metasize)
+ 		skb_metadata_set(skb, metasize);
+ 
+-	/* pull timestamp out of packet data */
+-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+-		if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
+-			__skb_pull(skb, IGB_TS_HDR_LEN);
+-	}
++	if (timestamp)
++		skb_hwtstamps(skb)->hwtstamp = timestamp;
+ 
+ 	/* update buffer offset */
+ #if (PAGE_SIZE < 8192)
+@@ -8402,18 +8395,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
+ 		break;
+ 	case XDP_TX:
+ 		result = igb_xdp_xmit_back(adapter, xdp);
++		if (result == IGB_XDP_CONSUMED)
++			goto out_failure;
+ 		break;
+ 	case XDP_REDIRECT:
+ 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+-		if (!err)
+-			result = IGB_XDP_REDIR;
+-		else
+-			result = IGB_XDP_CONSUMED;
++		if (err)
++			goto out_failure;
++		result = IGB_XDP_REDIR;
+ 		break;
+ 	default:
+ 		bpf_warn_invalid_xdp_action(act);
+ 		fallthrough;
+ 	case XDP_ABORTED:
++out_failure:
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ 		fallthrough;
+ 	case XDP_DROP:
+@@ -8683,7 +8678,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ 	while (likely(total_packets < budget)) {
+ 		union e1000_adv_rx_desc *rx_desc;
+ 		struct igb_rx_buffer *rx_buffer;
++		ktime_t timestamp = 0;
++		int pkt_offset = 0;
+ 		unsigned int size;
++		void *pktbuf;
+ 
+ 		/* return some buffers to hardware, one at a time is too slow */
+ 		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+@@ -8703,14 +8701,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ 		dma_rmb();
+ 
+ 		rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
++		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
++
++		/* pull rx packet timestamp if available and valid */
++		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
++			int ts_hdr_len;
++
++			ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
++							 pktbuf, &timestamp);
++
++			pkt_offset += ts_hdr_len;
++			size -= ts_hdr_len;
++		}
+ 
+ 		/* retrieve a buffer from the ring */
+ 		if (!skb) {
+-			unsigned int offset = igb_rx_offset(rx_ring);
+-			unsigned char *hard_start;
++			unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
++			unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
+ 
+-			hard_start = page_address(rx_buffer->page) +
+-				     rx_buffer->page_offset - offset;
+ 			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ #if (PAGE_SIZE > 4096)
+ 			/* At larger PAGE_SIZE, frame_sz depend on len size */
+@@ -8733,10 +8741,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ 		} else if (skb)
+ 			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ 		else if (ring_uses_build_skb(rx_ring))
+-			skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
++			skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
++					    timestamp);
+ 		else
+ 			skb = igb_construct_skb(rx_ring, rx_buffer,
+-						&xdp, rx_desc);
++						&xdp, timestamp);
+ 
+ 		/* exit if we failed to retrieve a buffer */
+ 		if (!skb) {
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index 86a576201f5ff..58b25f26ea7f2 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+ 	dev_kfree_skb_any(skb);
+ }
+ 
+-#define IGB_RET_PTP_DISABLED 1
+-#define IGB_RET_PTP_INVALID 2
+-
+ /**
+  * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+  * @q_vector: Pointer to interrupt specific structure
+  * @va: Pointer to address containing Rx buffer
+- * @skb: Buffer containing timestamp and packet
++ * @timestamp: Pointer where timestamp will be stored
+  *
+  * This function is meant to retrieve a timestamp from the first buffer of an
+  * incoming frame.  The value is stored in little endian format starting on
+  * byte 8
+  *
+- * Returns: 0 if success, nonzero if failure
++ * Returns: The timestamp header length or 0 if not available
+  **/
+ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+-			struct sk_buff *skb)
++			ktime_t *timestamp)
+ {
+ 	struct igb_adapter *adapter = q_vector->adapter;
++	struct skb_shared_hwtstamps ts;
+ 	__le64 *regval = (__le64 *)va;
+ 	int adjust = 0;
+ 
+ 	if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
+-		return IGB_RET_PTP_DISABLED;
++		return 0;
+ 
+ 	/* The timestamp is recorded in little endian format.
+ 	 * DWORD: 0        1        2        3
+@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+ 
+ 	/* check reserved dwords are zero, be/le doesn't matter for zero */
+ 	if (regval[0])
+-		return IGB_RET_PTP_INVALID;
++		return 0;
+ 
+-	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
+-				   le64_to_cpu(regval[1]));
++	igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
+ 
+ 	/* adjust timestamp for the RX latency based on link speed */
+ 	if (adapter->hw.mac.type == e1000_i210) {
+@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+ 			break;
+ 		}
+ 	}
+-	skb_hwtstamps(skb)->hwtstamp =
+-		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+ 
+-	return 0;
++	*timestamp = ktime_sub_ns(ts.hwtstamp, adjust);
++
++	return IGB_TS_HDR_LEN;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index cffb95f8f6326..c194158a421c7 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -2213,23 +2213,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
+ 		break;
+ 	case XDP_TX:
+ 		xdpf = xdp_convert_buff_to_frame(xdp);
+-		if (unlikely(!xdpf)) {
+-			result = IXGBE_XDP_CONSUMED;
+-			break;
+-		}
++		if (unlikely(!xdpf))
++			goto out_failure;
+ 		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
++		if (result == IXGBE_XDP_CONSUMED)
++			goto out_failure;
+ 		break;
+ 	case XDP_REDIRECT:
+ 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+-		if (!err)
+-			result = IXGBE_XDP_REDIR;
+-		else
+-			result = IXGBE_XDP_CONSUMED;
++		if (err)
++			goto out_failure;
++		result = IXGBE_XDP_REDIR;
+ 		break;
+ 	default:
+ 		bpf_warn_invalid_xdp_action(act);
+ 		fallthrough;
+ 	case XDP_ABORTED:
++out_failure:
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ 		fallthrough; /* handle aborts by dropping packet */
+ 	case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index 3771857cf887c..f72d2978263b9 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -104,25 +104,30 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
+ 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ 	act = bpf_prog_run_xdp(xdp_prog, xdp);
+ 
++	if (likely(act == XDP_REDIRECT)) {
++		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
++		if (err)
++			goto out_failure;
++		rcu_read_unlock();
++		return IXGBE_XDP_REDIR;
++	}
++
+ 	switch (act) {
+ 	case XDP_PASS:
+ 		break;
+ 	case XDP_TX:
+ 		xdpf = xdp_convert_buff_to_frame(xdp);
+-		if (unlikely(!xdpf)) {
+-			result = IXGBE_XDP_CONSUMED;
+-			break;
+-		}
++		if (unlikely(!xdpf))
++			goto out_failure;
+ 		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+-		break;
+-	case XDP_REDIRECT:
+-		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+-		result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
++		if (result == IXGBE_XDP_CONSUMED)
++			goto out_failure;
+ 		break;
+ 	default:
+ 		bpf_warn_invalid_xdp_action(act);
+ 		fallthrough;
+ 	case XDP_ABORTED:
++out_failure:
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ 		fallthrough; /* handle aborts by dropping packet */
+ 	case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index 449d7d5b280dd..b38860c485986 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -1067,11 +1067,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+ 	case XDP_TX:
+ 		xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
+ 		result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
++		if (result == IXGBEVF_XDP_CONSUMED)
++			goto out_failure;
+ 		break;
+ 	default:
+ 		bpf_warn_invalid_xdp_action(act);
+ 		fallthrough;
+ 	case XDP_ABORTED:
++out_failure:
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ 		fallthrough; /* handle aborts by dropping packet */
+ 	case XDP_DROP:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 53802e18af900..04b49cb3adb32 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1632,12 +1632,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
+ {
+ 	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
++	unsigned long fec_bitmap;
+ 	u16 fec_policy = 0;
+ 	int mode;
+ 	int err;
+ 
+-	if (bitmap_weight((unsigned long *)&fecparam->fec,
+-			  ETHTOOL_FEC_LLRS_BIT + 1) > 1)
++	bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
++	if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+ 		return -EOPNOTSUPP;
+ 
+ 	for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
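
The mlx5e_set_fecparam hunk replaces a cast of the 32-bit fec field to unsigned long * with bitmap_from_arr32(). The cast reads sizeof(long) bytes where only four are valid, which on 64-bit kernels pulls in adjacent garbage and on big-endian machines inspects the wrong word entirely. A sketch of the safe conversion, assuming only the standard bitmap API:

	static int check_fec_request(const struct ethtool_fecparam *fecparam)
	{
		unsigned long fec_bitmap;

		/* Copy the 32 request bits into a real bitmap; correct on any
		 * word size and endianness, unlike casting &fec to long *. */
		bitmap_from_arr32(&fec_bitmap, &fecparam->fec,
				  sizeof(fecparam->fec) * BITS_PER_BYTE);
		if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
			return -EOPNOTSUPP;	/* at most one FEC mode at a time */
		return 0;
	}
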
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 78a1403c98026..b633f669ea57f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1964,11 +1964,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
+ 				    misc_parameters);
+ 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ 	struct flow_dissector *dissector = rule->match.dissector;
++	enum fs_flow_table_type fs_type;
+ 	u16 addr_type = 0;
+ 	u8 ip_proto = 0;
+ 	u8 *match_level;
+ 	int err;
+ 
++	fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
+ 	match_level = outer_match_level;
+ 
+ 	if (dissector->used_keys &
+@@ -2093,6 +2095,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
+ 		if (match.mask->vlan_id ||
+ 		    match.mask->vlan_priority ||
+ 		    match.mask->vlan_tpid) {
++			if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
++						     fs_type)) {
++				NL_SET_ERR_MSG_MOD(extack,
++						   "Matching on CVLAN is not supported");
++				return -EOPNOTSUPP;
++			}
++
+ 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
+ 				MLX5_SET(fte_match_set_misc, misc_c,
+ 					 outer_second_svlan_tag, 1);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index d4a2f8d1ee9f1..3719452a78035 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -349,7 +349,8 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
+ 			 struct mlx5_fs_chains *chains,
+ 			 int i)
+ {
+-	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
++	if (mlx5_chains_ignore_flow_level_supported(chains))
++		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ 	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ 	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index f9042e147c7f6..ee710ce007950 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -354,6 +354,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
+ 						      reset_abort_work);
+ 	struct mlx5_core_dev *dev = fw_reset->dev;
+ 
++	if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
++		return;
++
+ 	mlx5_sync_reset_clear_reset_requested(dev, true);
+ 	mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+index 381325b4a863e..b607ed5a74bb4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+@@ -111,7 +111,7 @@ bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
+ 	return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ }
+ 
+-static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
++bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+ {
+ 	return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
+index 6d5be31b05dd7..9f53a08235582 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
+@@ -27,6 +27,7 @@ struct mlx5_chains_attr {
+ 
+ bool
+ mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
++bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains);
+ bool
+ mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
+ u32
+@@ -72,6 +73,10 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
+ 
+ #else /* CONFIG_MLX5_CLS_ACT */
+ 
++static inline bool
++mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
++{ return false; }
++
+ static inline struct mlx5_flow_table *
+ mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ 		      u32 level) { return ERR_PTR(-EOPNOTSUPP); }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+index 1fbcd012bb855..7ccfd40586cee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ 	int ret;
+ 
+ 	ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+-	ft_attr.level = dmn->info.caps.max_ft_level - 2;
++	ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
++			      MLX5_FT_MAX_MULTIPATH_LEVEL);
+ 	ft_attr.reformat_en = reformat_req;
+ 	ft_attr.decap_en = reformat_req;
+ 
+diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile
+index fc52b2cb500b3..dbe1f8514efc3 100644
+--- a/drivers/net/wireguard/Makefile
++++ b/drivers/net/wireguard/Makefile
+@@ -1,5 +1,4 @@
+-ccflags-y := -O3
+-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
++ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
+ wireguard-y := main.o
+ wireguard-y += noise.o
+diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
+index 3725e9cd85f4f..b7197e80f2264 100644
+--- a/drivers/net/wireguard/allowedips.c
++++ b/drivers/net/wireguard/allowedips.c
+@@ -6,6 +6,8 @@
+ #include "allowedips.h"
+ #include "peer.h"
+ 
++static struct kmem_cache *node_cache;
++
+ static void swap_endian(u8 *dst, const u8 *src, u8 bits)
+ {
+ 	if (bits == 32) {
+@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
+ 	node->bitlen = bits;
+ 	memcpy(node->bits, src, bits / 8U);
+ }
+-#define CHOOSE_NODE(parent, key) \
+-	parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
++
++static inline u8 choose(struct allowedips_node *node, const u8 *key)
++{
++	return (key[node->bit_at_a] >> node->bit_at_b) & 1;
++}
+ 
+ static void push_rcu(struct allowedips_node **stack,
+ 		     struct allowedips_node __rcu *p, unsigned int *len)
+@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack,
+ 	}
+ }
+ 
++static void node_free_rcu(struct rcu_head *rcu)
++{
++	kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu));
++}
++
+ static void root_free_rcu(struct rcu_head *rcu)
+ {
+ 	struct allowedips_node *node, *stack[128] = {
+@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu)
+ 	while (len > 0 && (node = stack[--len])) {
+ 		push_rcu(stack, node->bit[0], &len);
+ 		push_rcu(stack, node->bit[1], &len);
+-		kfree(node);
++		kmem_cache_free(node_cache, node);
+ 	}
+ }
+ 
+@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root)
+ 	}
+ }
+ 
+-static void walk_remove_by_peer(struct allowedips_node __rcu **top,
+-				struct wg_peer *peer, struct mutex *lock)
+-{
+-#define REF(p) rcu_access_pointer(p)
+-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
+-#define PUSH(p) ({                                                             \
+-		WARN_ON(IS_ENABLED(DEBUG) && len >= 128);                      \
+-		stack[len++] = p;                                              \
+-	})
+-
+-	struct allowedips_node __rcu **stack[128], **nptr;
+-	struct allowedips_node *node, *prev;
+-	unsigned int len;
+-
+-	if (unlikely(!peer || !REF(*top)))
+-		return;
+-
+-	for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
+-		nptr = stack[len - 1];
+-		node = DEREF(nptr);
+-		if (!node) {
+-			--len;
+-			continue;
+-		}
+-		if (!prev || REF(prev->bit[0]) == node ||
+-		    REF(prev->bit[1]) == node) {
+-			if (REF(node->bit[0]))
+-				PUSH(&node->bit[0]);
+-			else if (REF(node->bit[1]))
+-				PUSH(&node->bit[1]);
+-		} else if (REF(node->bit[0]) == prev) {
+-			if (REF(node->bit[1]))
+-				PUSH(&node->bit[1]);
+-		} else {
+-			if (rcu_dereference_protected(node->peer,
+-				lockdep_is_held(lock)) == peer) {
+-				RCU_INIT_POINTER(node->peer, NULL);
+-				list_del_init(&node->peer_list);
+-				if (!node->bit[0] || !node->bit[1]) {
+-					rcu_assign_pointer(*nptr, DEREF(
+-					       &node->bit[!REF(node->bit[0])]));
+-					kfree_rcu(node, rcu);
+-					node = DEREF(nptr);
+-				}
+-			}
+-			--len;
+-		}
+-	}
+-
+-#undef REF
+-#undef DEREF
+-#undef PUSH
+-}
+-
+ static unsigned int fls128(u64 a, u64 b)
+ {
+ 	return a ? fls64(a) + 64U : fls64(b);
+@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
+ 			found = node;
+ 		if (node->cidr == bits)
+ 			break;
+-		node = rcu_dereference_bh(CHOOSE_NODE(node, key));
++		node = rcu_dereference_bh(node->bit[choose(node, key)]);
+ 	}
+ 	return found;
+ }
+@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
+ 			   u8 cidr, u8 bits, struct allowedips_node **rnode,
+ 			   struct mutex *lock)
+ {
+-	struct allowedips_node *node = rcu_dereference_protected(trie,
+-						lockdep_is_held(lock));
++	struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
+ 	struct allowedips_node *parent = NULL;
+ 	bool exact = false;
+ 
+@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
+ 			exact = true;
+ 			break;
+ 		}
+-		node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
+-						 lockdep_is_held(lock));
++		node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
+ 	}
+ 	*rnode = parent;
+ 	return exact;
+ }
+ 
++static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
++{
++	node->parent_bit_packed = (unsigned long)parent | bit;
++	rcu_assign_pointer(*parent, node);
++}
++
++static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
++{
++	u8 bit = choose(parent, node->bits);
++	connect_node(&parent->bit[bit], bit, node);
++}
++
+ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ 	       u8 cidr, struct wg_peer *peer, struct mutex *lock)
+ {
+@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ 		return -EINVAL;
+ 
+ 	if (!rcu_access_pointer(*trie)) {
+-		node = kzalloc(sizeof(*node), GFP_KERNEL);
++		node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
+ 		if (unlikely(!node))
+ 			return -ENOMEM;
+ 		RCU_INIT_POINTER(node->peer, peer);
+ 		list_add_tail(&node->peer_list, &peer->allowedips_list);
+ 		copy_and_assign_cidr(node, key, cidr, bits);
+-		rcu_assign_pointer(*trie, node);
++		connect_node(trie, 2, node);
+ 		return 0;
+ 	}
+ 	if (node_placement(*trie, key, cidr, bits, &node, lock)) {
+@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ 		return 0;
+ 	}
+ 
+-	newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
++	newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL);
+ 	if (unlikely(!newnode))
+ 		return -ENOMEM;
+ 	RCU_INIT_POINTER(newnode->peer, peer);
+@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ 	if (!node) {
+ 		down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
+ 	} else {
+-		down = rcu_dereference_protected(CHOOSE_NODE(node, key),
+-						 lockdep_is_held(lock));
++		const u8 bit = choose(node, key);
++		down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
+ 		if (!down) {
+-			rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
++			connect_node(&node->bit[bit], bit, newnode);
+ 			return 0;
+ 		}
+ 	}
+@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ 	parent = node;
+ 
+ 	if (newnode->cidr == cidr) {
+-		rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
++		choose_and_connect_node(newnode, down);
+ 		if (!parent)
+-			rcu_assign_pointer(*trie, newnode);
++			connect_node(trie, 2, newnode);
+ 		else
+-			rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
+-					   newnode);
+-	} else {
+-		node = kzalloc(sizeof(*node), GFP_KERNEL);
+-		if (unlikely(!node)) {
+-			list_del(&newnode->peer_list);
+-			kfree(newnode);
+-			return -ENOMEM;
+-		}
+-		INIT_LIST_HEAD(&node->peer_list);
+-		copy_and_assign_cidr(node, newnode->bits, cidr, bits);
++			choose_and_connect_node(parent, newnode);
++		return 0;
++	}
+ 
+-		rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
+-		rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
+-		if (!parent)
+-			rcu_assign_pointer(*trie, node);
+-		else
+-			rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
+-					   node);
++	node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
++	if (unlikely(!node)) {
++		list_del(&newnode->peer_list);
++		kmem_cache_free(node_cache, newnode);
++		return -ENOMEM;
+ 	}
++	INIT_LIST_HEAD(&node->peer_list);
++	copy_and_assign_cidr(node, newnode->bits, cidr, bits);
++
++	choose_and_connect_node(node, down);
++	choose_and_connect_node(node, newnode);
++	if (!parent)
++		connect_node(trie, 2, node);
++	else
++		choose_and_connect_node(parent, node);
+ 	return 0;
+ }
+ 
+@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
+ void wg_allowedips_remove_by_peer(struct allowedips *table,
+ 				  struct wg_peer *peer, struct mutex *lock)
+ {
++	struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
++	bool free_parent;
++
++	if (list_empty(&peer->allowedips_list))
++		return;
+ 	++table->seq;
+-	walk_remove_by_peer(&table->root4, peer, lock);
+-	walk_remove_by_peer(&table->root6, peer, lock);
++	list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
++		list_del_init(&node->peer_list);
++		RCU_INIT_POINTER(node->peer, NULL);
++		if (node->bit[0] && node->bit[1])
++			continue;
++		child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
++						  lockdep_is_held(lock));
++		if (child)
++			child->parent_bit_packed = node->parent_bit_packed;
++		parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
++		*parent_bit = child;
++		parent = (void *)parent_bit -
++			 offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
++		free_parent = !rcu_access_pointer(node->bit[0]) &&
++			      !rcu_access_pointer(node->bit[1]) &&
++			      (node->parent_bit_packed & 3) <= 1 &&
++			      !rcu_access_pointer(parent->peer);
++		if (free_parent)
++			child = rcu_dereference_protected(
++					parent->bit[!(node->parent_bit_packed & 1)],
++					lockdep_is_held(lock));
++		call_rcu(&node->rcu, node_free_rcu);
++		if (!free_parent)
++			continue;
++		if (child)
++			child->parent_bit_packed = parent->parent_bit_packed;
++		*(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
++		call_rcu(&parent->rcu, node_free_rcu);
++	}
+ }
+ 
+ int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
+@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ 	return NULL;
+ }
+ 
++int __init wg_allowedips_slab_init(void)
++{
++	node_cache = KMEM_CACHE(allowedips_node, 0);
++	return node_cache ? 0 : -ENOMEM;
++}
++
++void wg_allowedips_slab_uninit(void)
++{
++	rcu_barrier();
++	kmem_cache_destroy(node_cache);
++}
++
+ #include "selftest/allowedips.c"
+diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
+index e5c83cafcef4c..2346c797eb4d8 100644
+--- a/drivers/net/wireguard/allowedips.h
++++ b/drivers/net/wireguard/allowedips.h
+@@ -15,14 +15,11 @@ struct wg_peer;
+ struct allowedips_node {
+ 	struct wg_peer __rcu *peer;
+ 	struct allowedips_node __rcu *bit[2];
+-	/* While it may seem scandalous that we waste space for v4,
+-	 * we're alloc'ing to the nearest power of 2 anyway, so this
+-	 * doesn't actually make a difference.
+-	 */
+-	u8 bits[16] __aligned(__alignof(u64));
+ 	u8 cidr, bit_at_a, bit_at_b, bitlen;
++	u8 bits[16] __aligned(__alignof(u64));
+ 
+-	/* Keep rarely used list at bottom to be beyond cache line. */
++	/* Keep rarely used members at bottom to be beyond cache line. */
++	unsigned long parent_bit_packed;
+ 	union {
+ 		struct list_head peer_list;
+ 		struct rcu_head rcu;
+@@ -33,7 +30,7 @@ struct allowedips {
+ 	struct allowedips_node __rcu *root4;
+ 	struct allowedips_node __rcu *root6;
+ 	u64 seq;
+-};
++} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */
+ 
+ void wg_allowedips_init(struct allowedips *table);
+ void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
+@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ bool wg_allowedips_selftest(void);
+ #endif
+ 
++int wg_allowedips_slab_init(void);
++void wg_allowedips_slab_uninit(void);
++
+ #endif /* _WG_ALLOWEDIPS_H */
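
The new parent_bit_packed member is what makes O(1) removal possible: each node records the address of the parent slot that points at it, with a tag in the two low bits (0 or 1 for bit[0]/bit[1], 2 for a trie root), so unlinking never requires re-walking the trie. The __aligned(4) on struct allowedips is exactly the guarantee this packing needs. A standalone sketch of the scheme, assuming nothing beyond 4-byte-aligned nodes:

	#include <stdint.h>

	struct node {
		struct node *bit[2];
		uintptr_t parent_bit_packed;	/* slot address | tag */
	};

	static void connect_child(struct node **slot, unsigned int tag,
				  struct node *child)
	{
		child->parent_bit_packed = (uintptr_t)slot | tag; /* tag is 0, 1 or 2 */
		*slot = child;
	}

	static struct node **parent_slot(const struct node *n)
	{
		return (struct node **)(n->parent_bit_packed & ~(uintptr_t)3);
	}

	static unsigned int slot_tag(const struct node *n)
	{
		return n->parent_bit_packed & 3;	/* 2 means "root pointer" */
	}
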
+diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
+index 7a7d5f1a80fc7..75dbe77b0b4b4 100644
+--- a/drivers/net/wireguard/main.c
++++ b/drivers/net/wireguard/main.c
+@@ -21,13 +21,22 @@ static int __init mod_init(void)
+ {
+ 	int ret;
+ 
++	ret = wg_allowedips_slab_init();
++	if (ret < 0)
++		goto err_allowedips;
++
+ #ifdef DEBUG
++	ret = -ENOTRECOVERABLE;
+ 	if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
+ 	    !wg_ratelimiter_selftest())
+-		return -ENOTRECOVERABLE;
++		goto err_peer;
+ #endif
+ 	wg_noise_init();
+ 
++	ret = wg_peer_init();
++	if (ret < 0)
++		goto err_peer;
++
+ 	ret = wg_device_init();
+ 	if (ret < 0)
+ 		goto err_device;
+@@ -44,6 +53,10 @@ static int __init mod_init(void)
+ err_netlink:
+ 	wg_device_uninit();
+ err_device:
++	wg_peer_uninit();
++err_peer:
++	wg_allowedips_slab_uninit();
++err_allowedips:
+ 	return ret;
+ }
+ 
+@@ -51,6 +64,8 @@ static void __exit mod_exit(void)
+ {
+ 	wg_genetlink_uninit();
+ 	wg_device_uninit();
++	wg_peer_uninit();
++	wg_allowedips_slab_uninit();
+ }
+ 
+ module_init(mod_init);
+diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
+index cd5cb0292cb67..1acd00ab2fbcb 100644
+--- a/drivers/net/wireguard/peer.c
++++ b/drivers/net/wireguard/peer.c
+@@ -15,6 +15,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/list.h>
+ 
++static struct kmem_cache *peer_cache;
+ static atomic64_t peer_counter = ATOMIC64_INIT(0);
+ 
+ struct wg_peer *wg_peer_create(struct wg_device *wg,
+@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
+ 	if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
+ 		return ERR_PTR(ret);
+ 
+-	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
++	peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
+ 	if (unlikely(!peer))
+ 		return ERR_PTR(ret);
+-	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
++	if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
+ 		goto err;
+ 
+ 	peer->device = wg;
+@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
+ 	return peer;
+ 
+ err:
+-	kfree(peer);
++	kmem_cache_free(peer_cache, peer);
+ 	return ERR_PTR(ret);
+ }
+ 
+@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer)
+ 	/* Mark as dead, so that we don't allow jumping contexts after. */
+ 	WRITE_ONCE(peer->is_dead, true);
+ 
+-	/* The caller must now synchronize_rcu() for this to take effect. */
++	/* The caller must now synchronize_net() for this to take effect. */
+ }
+ 
+ static void peer_remove_after_dead(struct wg_peer *peer)
+@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer)
+ 	lockdep_assert_held(&peer->device->device_update_lock);
+ 
+ 	peer_make_dead(peer);
+-	synchronize_rcu();
++	synchronize_net();
+ 	peer_remove_after_dead(peer);
+ }
+ 
+@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg)
+ 		peer_make_dead(peer);
+ 		list_add_tail(&peer->peer_list, &dead_peers);
+ 	}
+-	synchronize_rcu();
++	synchronize_net();
+ 	list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
+ 		peer_remove_after_dead(peer);
+ }
+@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu)
+ 	/* The final zeroing takes care of clearing any remaining handshake key
+ 	 * material and other potentially sensitive information.
+ 	 */
+-	kfree_sensitive(peer);
++	memzero_explicit(peer, sizeof(*peer));
++	kmem_cache_free(peer_cache, peer);
+ }
+ 
+ static void kref_release(struct kref *refcount)
+@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
+ 		return;
+ 	kref_put(&peer->refcount, kref_release);
+ }
++
++int __init wg_peer_init(void)
++{
++	peer_cache = KMEM_CACHE(wg_peer, 0);
++	return peer_cache ? 0 : -ENOMEM;
++}
++
++void wg_peer_uninit(void)
++{
++	kmem_cache_destroy(peer_cache);
++}
+diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
+index 8d53b687a1d16..76e4d3128ad4e 100644
+--- a/drivers/net/wireguard/peer.h
++++ b/drivers/net/wireguard/peer.h
+@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
+ void wg_peer_remove(struct wg_peer *peer);
+ void wg_peer_remove_all(struct wg_device *wg);
+ 
++int wg_peer_init(void);
++void wg_peer_uninit(void);
++
+ #endif /* _WG_PEER_H */
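
Both allowedips nodes and peers move from kzalloc()/kfree() onto dedicated slab caches, and the peer teardown swaps kfree_sensitive() for an explicit memzero_explicit() before returning the object to its cache, since a cache free does not scrub. A sketch of the full lifecycle, assuming a kernel-module context and treating struct wg_peer as any fixed-size, frequently allocated object with an rcu head:

	static struct kmem_cache *peer_cache;

	int __init peer_cache_init(void)
	{
		peer_cache = KMEM_CACHE(wg_peer, 0);	/* sized and named after the struct */
		return peer_cache ? 0 : -ENOMEM;
	}

	struct wg_peer *peer_alloc(void)
	{
		return kmem_cache_zalloc(peer_cache, GFP_KERNEL);
	}

	static void peer_free_rcu(struct rcu_head *rcu)
	{
		struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);

		memzero_explicit(peer, sizeof(*peer));	/* clear leftover key material */
		kmem_cache_free(peer_cache, peer);
	}

	void peer_cache_uninit(void)
	{
		rcu_barrier();	/* drain pending call_rcu() frees before destroy */
		kmem_cache_destroy(peer_cache);
	}
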
+diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
+index 846db14cb046b..e173204ae7d78 100644
+--- a/drivers/net/wireguard/selftest/allowedips.c
++++ b/drivers/net/wireguard/selftest/allowedips.c
+@@ -19,32 +19,22 @@
+ 
+ #include <linux/siphash.h>
+ 
+-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
+-					      u8 cidr)
+-{
+-	swap_endian(dst, src, bits);
+-	memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
+-	if (cidr)
+-		dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
+-}
+-
+ static __init void print_node(struct allowedips_node *node, u8 bits)
+ {
+ 	char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
+-	char *fmt_declaration = KERN_DEBUG
+-		"\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
++	char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
++	u8 ip1[16], ip2[16], cidr1, cidr2;
+ 	char *style = "dotted";
+-	u8 ip1[16], ip2[16];
+ 	u32 color = 0;
+ 
++	if (node == NULL)
++		return;
+ 	if (bits == 32) {
+ 		fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
+-		fmt_declaration = KERN_DEBUG
+-			"\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
++		fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+ 	} else if (bits == 128) {
+ 		fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
+-		fmt_declaration = KERN_DEBUG
+-			"\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
++		fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+ 	}
+ 	if (node->peer) {
+ 		hsiphash_key_t key = { { 0 } };
+@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits)
+ 			hsiphash_1u32(0xabad1dea, &key) % 200;
+ 		style = "bold";
+ 	}
+-	swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
+-	printk(fmt_declaration, ip1, node->cidr, style, color);
++	wg_allowedips_read_node(node, ip1, &cidr1);
++	printk(fmt_declaration, ip1, cidr1, style, color);
+ 	if (node->bit[0]) {
+-		swap_endian_and_apply_cidr(ip2,
+-				rcu_dereference_raw(node->bit[0])->bits, bits,
+-				node->cidr);
+-		printk(fmt_connection, ip1, node->cidr, ip2,
+-		       rcu_dereference_raw(node->bit[0])->cidr);
+-		print_node(rcu_dereference_raw(node->bit[0]), bits);
++		wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
++		printk(fmt_connection, ip1, cidr1, ip2, cidr2);
+ 	}
+ 	if (node->bit[1]) {
+-		swap_endian_and_apply_cidr(ip2,
+-				rcu_dereference_raw(node->bit[1])->bits,
+-				bits, node->cidr);
+-		printk(fmt_connection, ip1, node->cidr, ip2,
+-		       rcu_dereference_raw(node->bit[1])->cidr);
+-		print_node(rcu_dereference_raw(node->bit[1]), bits);
++		wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
++		printk(fmt_connection, ip1, cidr1, ip2, cidr2);
+ 	}
++	if (node->bit[0])
++		print_node(rcu_dereference_raw(node->bit[0]), bits);
++	if (node->bit[1])
++		print_node(rcu_dereference_raw(node->bit[1]), bits);
+ }
+ 
+ static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
+@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
+ {
+ 	union nf_inet_addr mask;
+ 
+-	memset(&mask, 0x00, 128 / 8);
+-	memset(&mask, 0xff, cidr / 8);
++	memset(&mask, 0, sizeof(mask));
++	memset(&mask.all, 0xff, cidr / 8);
+ 	if (cidr % 32)
+ 		mask.all[cidr / 32] = (__force u32)htonl(
+ 			(0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
+@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node)
+ }
+ 
+ static __init inline bool
+-horrible_match_v4(const struct horrible_allowedips_node *node,
+-		  struct in_addr *ip)
++horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
+ {
+ 	return (ip->s_addr & node->mask.ip) == node->ip.ip;
+ }
+ 
+ static __init inline bool
+-horrible_match_v6(const struct horrible_allowedips_node *node,
+-		  struct in6_addr *ip)
++horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
+ {
+-	return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
+-		       node->ip.ip6[0] &&
+-	       (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
+-		       node->ip.ip6[1] &&
+-	       (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
+-		       node->ip.ip6[2] &&
++	return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
++	       (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
++	       (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
+ 	       (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
+ }
+ 
+ static __init void
+-horrible_insert_ordered(struct horrible_allowedips *table,
+-			struct horrible_allowedips_node *node)
++horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
+ {
+ 	struct horrible_allowedips_node *other = NULL, *where = NULL;
+ 	u8 my_cidr = horrible_mask_to_cidr(node->mask);
+ 
+ 	hlist_for_each_entry(other, &table->head, table) {
+-		if (!memcmp(&other->mask, &node->mask,
+-			    sizeof(union nf_inet_addr)) &&
+-		    !memcmp(&other->ip, &node->ip,
+-			    sizeof(union nf_inet_addr)) &&
+-		    other->ip_version == node->ip_version) {
++		if (other->ip_version == node->ip_version &&
++		    !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
++		    !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
+ 			other->value = node->value;
+ 			kfree(node);
+ 			return;
+ 		}
++	}
++	hlist_for_each_entry(other, &table->head, table) {
+ 		where = other;
+ 		if (horrible_mask_to_cidr(other->mask) <= my_cidr)
+ 			break;
+@@ -201,8 +181,7 @@ static __init int
+ horrible_allowedips_insert_v4(struct horrible_allowedips *table,
+ 			      struct in_addr *ip, u8 cidr, void *value)
+ {
+-	struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+-							GFP_KERNEL);
++	struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+ 
+ 	if (unlikely(!node))
+ 		return -ENOMEM;
+@@ -219,8 +198,7 @@ static __init int
+ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
+ 			      struct in6_addr *ip, u8 cidr, void *value)
+ {
+-	struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+-							GFP_KERNEL);
++	struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+ 
+ 	if (unlikely(!node))
+ 		return -ENOMEM;
+@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
+ }
+ 
+ static __init void *
+-horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
+-			      struct in_addr *ip)
++horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
+ {
+ 	struct horrible_allowedips_node *node;
+-	void *ret = NULL;
+ 
+ 	hlist_for_each_entry(node, &table->head, table) {
+-		if (node->ip_version != 4)
+-			continue;
+-		if (horrible_match_v4(node, ip)) {
+-			ret = node->value;
+-			break;
+-		}
++		if (node->ip_version == 4 && horrible_match_v4(node, ip))
++			return node->value;
+ 	}
+-	return ret;
++	return NULL;
+ }
+ 
+ static __init void *
+-horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
+-			      struct in6_addr *ip)
++horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
+ {
+ 	struct horrible_allowedips_node *node;
+-	void *ret = NULL;
+ 
+ 	hlist_for_each_entry(node, &table->head, table) {
+-		if (node->ip_version != 6)
++		if (node->ip_version == 6 && horrible_match_v6(node, ip))
++			return node->value;
++	}
++	return NULL;
++}
++
++
++static __init void
++horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
++{
++	struct horrible_allowedips_node *node;
++	struct hlist_node *h;
++
++	hlist_for_each_entry_safe(node, h, &table->head, table) {
++		if (node->value != value)
+ 			continue;
+-		if (horrible_match_v6(node, ip)) {
+-			ret = node->value;
+-			break;
+-		}
++		hlist_del(&node->table);
++		kfree(node);
+ 	}
+-	return ret;
++
+ }
+ 
+ static __init bool randomized_test(void)
+@@ -296,6 +278,7 @@ static __init bool randomized_test(void)
+ 			goto free;
+ 		}
+ 		kref_init(&peers[i]->refcount);
++		INIT_LIST_HEAD(&peers[i]->allowedips_list);
+ 	}
+ 
+ 	mutex_lock(&mutex);
+@@ -333,7 +316,7 @@ static __init bool randomized_test(void)
+ 			if (wg_allowedips_insert_v4(&t,
+ 						    (struct in_addr *)mutated,
+ 						    cidr, peer, &mutex) < 0) {
+-				pr_err("allowedips random malloc: FAIL\n");
++				pr_err("allowedips random self-test malloc: FAIL\n");
+ 				goto free_locked;
+ 			}
+ 			if (horrible_allowedips_insert_v4(&h,
+@@ -396,23 +379,33 @@ static __init bool randomized_test(void)
+ 		print_tree(t.root6, 128);
+ 	}
+ 
+-	for (i = 0; i < NUM_QUERIES; ++i) {
+-		prandom_bytes(ip, 4);
+-		if (lookup(t.root4, 32, ip) !=
+-		    horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+-			pr_err("allowedips random self-test: FAIL\n");
+-			goto free;
++	for (j = 0;; ++j) {
++		for (i = 0; i < NUM_QUERIES; ++i) {
++			prandom_bytes(ip, 4);
++			if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
++				horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
++				pr_err("allowedips random v4 self-test: FAIL\n");
++				goto free;
++			}
++			prandom_bytes(ip, 16);
++			if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
++				pr_err("allowedips random v6 self-test: FAIL\n");
++				goto free;
++			}
+ 		}
++		if (j >= NUM_PEERS)
++			break;
++		mutex_lock(&mutex);
++		wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
++		mutex_unlock(&mutex);
++		horrible_allowedips_remove_by_value(&h, peers[j]);
+ 	}
+ 
+-	for (i = 0; i < NUM_QUERIES; ++i) {
+-		prandom_bytes(ip, 16);
+-		if (lookup(t.root6, 128, ip) !=
+-		    horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+-			pr_err("allowedips random self-test: FAIL\n");
+-			goto free;
+-		}
++	if (t.root4 || t.root6) {
++		pr_err("allowedips random self-test removal: FAIL\n");
++		goto free;
+ 	}
++
+ 	ret = true;
+ 
+ free:
+diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
+index d9ad850daa793..8c496b7471082 100644
+--- a/drivers/net/wireguard/socket.c
++++ b/drivers/net/wireguard/socket.c
+@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
+ 	if (new4)
+ 		wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
+ 	mutex_unlock(&wg->socket_update_lock);
+-	synchronize_rcu();
++	synchronize_net();
+ 	sock_free(old4);
+ 	sock_free(old6);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+index 02d0aa0b815e9..d2489dc9dc139 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+@@ -87,7 +87,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
+ 	.reconfig_complete = mt76x02_reconfig_complete,
+ };
+ 
+-static int mt76x0e_register_device(struct mt76x02_dev *dev)
++static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume)
+ {
+ 	int err;
+ 
+@@ -100,9 +100,11 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
+ 	if (err < 0)
+ 		return err;
+ 
+-	err = mt76x02_dma_init(dev);
+-	if (err < 0)
+-		return err;
++	if (!resume) {
++		err = mt76x02_dma_init(dev);
++		if (err < 0)
++			return err;
++	}
+ 
+ 	err = mt76x0_init_hardware(dev);
+ 	if (err < 0)
+@@ -123,6 +125,17 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
+ 	mt76_clear(dev, 0x110, BIT(9));
+ 	mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+ 
++	return 0;
++}
++
++static int mt76x0e_register_device(struct mt76x02_dev *dev)
++{
++	int err;
++
++	err = mt76x0e_init_hardware(dev, false);
++	if (err < 0)
++		return err;
++
+ 	err = mt76x0_register_device(dev);
+ 	if (err < 0)
+ 		return err;
+@@ -167,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (ret)
+ 		return ret;
+ 
++	mt76_pci_disable_aspm(pdev);
++
+ 	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
+ 				 &drv_ops);
+ 	if (!mdev)
+@@ -220,6 +235,60 @@ mt76x0e_remove(struct pci_dev *pdev)
+ 	mt76_free_device(mdev);
+ }
+ 
++#ifdef CONFIG_PM
++static int mt76x0e_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++	struct mt76_dev *mdev = pci_get_drvdata(pdev);
++	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
++	int i;
++
++	mt76_worker_disable(&mdev->tx_worker);
++	for (i = 0; i < ARRAY_SIZE(mdev->phy.q_tx); i++)
++		mt76_queue_tx_cleanup(dev, mdev->phy.q_tx[i], true);
++	for (i = 0; i < ARRAY_SIZE(mdev->q_mcu); i++)
++		mt76_queue_tx_cleanup(dev, mdev->q_mcu[i], true);
++	napi_disable(&mdev->tx_napi);
++
++	mt76_for_each_q_rx(mdev, i)
++		napi_disable(&mdev->napi[i]);
++
++	mt76x02_dma_disable(dev);
++	mt76x02_mcu_cleanup(dev);
++	mt76x0_chip_onoff(dev, false, false);
++
++	pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
++	pci_save_state(pdev);
++
++	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
++}
++
++static int mt76x0e_resume(struct pci_dev *pdev)
++{
++	struct mt76_dev *mdev = pci_get_drvdata(pdev);
++	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
++	int err, i;
++
++	err = pci_set_power_state(pdev, PCI_D0);
++	if (err)
++		return err;
++
++	pci_restore_state(pdev);
++
++	mt76_worker_enable(&mdev->tx_worker);
++
++	mt76_for_each_q_rx(mdev, i) {
++		mt76_queue_rx_reset(dev, i);
++		napi_enable(&mdev->napi[i]);
++		napi_schedule(&mdev->napi[i]);
++	}
++
++	napi_enable(&mdev->tx_napi);
++	napi_schedule(&mdev->tx_napi);
++
++	return mt76x0e_init_hardware(dev, true);
++}
++#endif /* CONFIG_PM */
++
+ static const struct pci_device_id mt76x0e_device_table[] = {
+ 	{ PCI_DEVICE(0x14c3, 0x7610) },
+ 	{ PCI_DEVICE(0x14c3, 0x7630) },
+@@ -237,6 +306,10 @@ static struct pci_driver mt76x0e_driver = {
+ 	.id_table	= mt76x0e_device_table,
+ 	.probe		= mt76x0e_probe,
+ 	.remove		= mt76x0e_remove,
++#ifdef CONFIG_PM
++	.suspend	= mt76x0e_suspend,
++	.resume		= mt76x0e_resume,
++#endif /* CONFIG_PM */
+ };
+ 
+ module_pci_driver(mt76x0e_driver);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index 62afbad77596b..be88c9f5637a5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -391,29 +391,37 @@ static void
+ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
+ 			  u16 wlan_idx)
+ {
+-	struct mt7921_mcu_wlan_info_event *wtbl_info =
+-		(struct mt7921_mcu_wlan_info_event *)(skb->data);
+-	struct rate_info rate = {};
+-	u8 curr_idx = wtbl_info->rate_info.rate_idx;
+-	u16 curr = le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]);
+-	struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap;
++	struct mt7921_mcu_wlan_info_event *wtbl_info;
+ 	struct mt76_phy *mphy = &dev->mphy;
+ 	struct mt7921_sta_stats *stats;
++	struct rate_info rate = {};
+ 	struct mt7921_sta *msta;
+ 	struct mt76_wcid *wcid;
++	u8 idx;
+ 
+ 	if (wlan_idx >= MT76_N_WCIDS)
+ 		return;
++
++	wtbl_info = (struct mt7921_mcu_wlan_info_event *)skb->data;
++	idx = wtbl_info->rate_info.rate_idx;
++	if (idx >= ARRAY_SIZE(wtbl_info->rate_info.rate))
++		return;
++
++	rcu_read_lock();
++
+ 	wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
+ 	if (!wcid)
+-		return;
++		goto out;
+ 
+ 	msta = container_of(wcid, struct mt7921_sta, wcid);
+ 	stats = &msta->stats;
+ 
+ 	/* current rate */
+-	mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr);
++	mt7921_mcu_tx_rate_parse(mphy, &wtbl_info->peer_cap, &rate,
++				 le16_to_cpu(wtbl_info->rate_info.rate[idx]));
+ 	stats->tx_rate = rate;
++out:
++	rcu_read_unlock();
+ }
+ 
+ static void
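
The mt7921 hunk fixes two problems in one go: the firmware-supplied rate index is bounds-checked before it is used to index the event payload, and the wcid lookup is wrapped in rcu_read_lock() so the entry cannot be freed while its fields are still being read. The general shape, with the table and consumer names hypothetical:

	if (idx >= ARRAY_SIZE(evt->rate))	/* never trust device-supplied indices */
		return;

	rcu_read_lock();
	wcid = rcu_dereference(dev->wcid[idx]);
	if (wcid)
		consume_wcid(wcid);	/* every use stays inside the read section */
	rcu_read_unlock();
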
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 193b723fe3bd7..c58996c1e2309 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -684,6 +684,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
+ {
+ 	if (queue->task) {
+ 		kthread_stop(queue->task);
++		put_task_struct(queue->task);
+ 		queue->task = NULL;
+ 	}
+ 
+@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue,
+ 	if (IS_ERR(task))
+ 		goto kthread_err;
+ 	queue->task = task;
++	/*
++	 * Take a reference to the task in order to prevent it from being freed
++	 * if the thread function returns before kthread_stop is called.
++	 */
++	get_task_struct(task);
+ 
+ 	task = kthread_run(xenvif_dealloc_kthread, queue,
+ 			   "%s-dealloc", queue->name);
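
The xen-netback pairing of kthread_run() with get_task_struct() matters because a kthread whose function returns on its own drops the final reference to its task_struct; a later kthread_stop() would then operate on freed memory. Holding an extra reference makes the stop/put sequence safe no matter when the thread exits. The pattern in isolation (my_thread_fn is a hypothetical thread function):

	struct task_struct *task;

	task = kthread_run(my_thread_fn, queue, "%s-worker", queue->name);
	if (IS_ERR(task))
		return PTR_ERR(task);
	get_task_struct(task);	/* pin the task for its whole lifetime */
	queue->task = task;

	/* ... on teardown ... */
	kthread_stop(queue->task);	/* safe even if my_thread_fn already returned */
	put_task_struct(queue->task);
	queue->task = NULL;
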
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index be905d4fdb47f..ce8b3ce7582be 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1319,16 +1319,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
+ 		int count)
+ {
+ 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+-	struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
+ 	struct ib_sge *sge = &req->sge[1];
++	struct scatterlist *sgl;
+ 	u32 len = 0;
+ 	int i;
+ 
+-	for (i = 0; i < count; i++, sgl++, sge++) {
++	for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
+ 		sge->addr = sg_dma_address(sgl);
+ 		sge->length = sg_dma_len(sgl);
+ 		sge->lkey = queue->device->pd->local_dma_lkey;
+ 		len += sge->length;
++		sge++;
+ 	}
+ 
+ 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
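
The nvme-rdma hunk replaces manual sgl++ stepping with for_each_sg(). Scatterlists may be chained, so consecutive entries are not guaranteed to be adjacent in memory; for_each_sg() advances with sg_next() and follows the chain links, whereas raw pointer arithmetic walks off the end of a chunk. A sketch of the safe walk (setup_one_sge stands in for the per-entry work):

	struct scatterlist *sg;
	u32 total_len = 0;
	int i;

	for_each_sg(req->data_sgl.sg_table.sgl, sg, count, i) {
		setup_one_sge(sg_dma_address(sg), sg_dma_len(sg));
		total_len += sg_dma_len(sg);
	}
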
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 348057fdc568f..7d16cb4cd8acf 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -999,19 +999,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
+ 	return req->transfer_len - req->metadata_len;
+ }
+ 
+-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
++static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
++		struct nvmet_req *req)
+ {
+-	req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
++	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+ 			nvmet_data_transfer_len(req));
+ 	if (!req->sg)
+ 		goto out_err;
+ 
+ 	if (req->metadata_len) {
+-		req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
++		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
+ 				&req->metadata_sg_cnt, req->metadata_len);
+ 		if (!req->metadata_sg)
+ 			goto out_free_sg;
+ 	}
++
++	req->p2p_dev = p2p_dev;
++
+ 	return 0;
+ out_free_sg:
+ 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+@@ -1019,25 +1023,19 @@ out_err:
+ 	return -ENOMEM;
+ }
+ 
+-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
++static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
+ {
+-	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
+-		return false;
+-
+-	if (req->sq->ctrl && req->sq->qid && req->ns) {
+-		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
+-						 req->ns->nsid);
+-		if (req->p2p_dev)
+-			return true;
+-	}
+-
+-	req->p2p_dev = NULL;
+-	return false;
++	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
++	    !req->sq->ctrl || !req->sq->qid || !req->ns)
++		return NULL;
++	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
+ }
+ 
+ int nvmet_req_alloc_sgls(struct nvmet_req *req)
+ {
+-	if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
++	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
++
++	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
+ 		return 0;
+ 
+ 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
+@@ -1066,6 +1064,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
+ 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+ 		if (req->metadata_sg)
+ 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
++		req->p2p_dev = NULL;
+ 	} else {
+ 		sgl_free(req->sg);
+ 		if (req->metadata_sg)
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 920cf329268b5..f8a5a4eb5bcef 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -20591,10 +20591,8 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	abtswqe = &abtsiocb->wqe;
+ 	memset(abtswqe, 0, sizeof(*abtswqe));
+ 
+-	if (lpfc_is_link_up(phba))
++	if (!lpfc_is_link_up(phba))
+ 		bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
+-	else
+-		bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0);
+ 	bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
+ 	abtswqe->abort_cmd.rsrvd5 = 0;
+ 	abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
+diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
+index 7a77e375b503c..6b52f0c526baa 100644
+--- a/drivers/tee/optee/call.c
++++ b/drivers/tee/optee/call.c
+@@ -216,6 +216,7 @@ int optee_open_session(struct tee_context *ctx,
+ 	struct optee_msg_arg *msg_arg;
+ 	phys_addr_t msg_parg;
+ 	struct optee_session *sess = NULL;
++	uuid_t client_uuid;
+ 
+ 	/* +2 for the meta parameters added below */
+ 	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
+@@ -236,10 +237,11 @@ int optee_open_session(struct tee_context *ctx,
+ 	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
+ 	msg_arg->params[1].u.value.c = arg->clnt_login;
+ 
+-	rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
+-					  arg->clnt_login, arg->clnt_uuid);
++	rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
++					  arg->clnt_uuid);
+ 	if (rc)
+ 		goto out;
++	export_uuid(msg_arg->params[1].u.octets, &client_uuid);
+ 
+ 	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
+ 	if (rc)
+diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
+index 81ff593ac4ec2..e3d72d09c4848 100644
+--- a/drivers/tee/optee/optee_msg.h
++++ b/drivers/tee/optee/optee_msg.h
+@@ -9,7 +9,7 @@
+ #include <linux/types.h>
+ 
+ /*
+- * This file defines the OP-TEE message protocol used to communicate
++ * This file defines the OP-TEE message protocol (ABI) used to communicate
+  * with an instance of OP-TEE running in secure world.
+  *
+  * This file is divided into two sections.
+@@ -144,9 +144,10 @@ struct optee_msg_param_value {
+  * @tmem:	parameter by temporary memory reference
+  * @rmem:	parameter by registered memory reference
+  * @value:	parameter by opaque value
++ * @octets:	parameter by octet string
+  *
+  * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
+- * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
++ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
+  * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+  * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
+  * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
+@@ -157,6 +158,7 @@ struct optee_msg_param {
+ 		struct optee_msg_param_tmem tmem;
+ 		struct optee_msg_param_rmem rmem;
+ 		struct optee_msg_param_value value;
++		u8 octets[24];
+ 	} u;
+ };
+ 
+diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c
+index f8e882592ba5d..99abdc03c44ce 100644
+--- a/drivers/thermal/intel/therm_throt.c
++++ b/drivers/thermal/intel/therm_throt.c
+@@ -621,6 +621,17 @@ bool x86_thermal_enabled(void)
+ 	return atomic_read(&therm_throt_en);
+ }
+ 
++void __init therm_lvt_init(void)
++{
++	/*
++	 * This function is only called on boot CPU. Save the init thermal
++	 * LVT value on BSP and use that value to restore APs' thermal LVT
++	 * entry BIOS programmed later
++	 */
++	if (intel_thermal_supported(&boot_cpu_data))
++		lvtthmr_init = apic_read(APIC_LVTTHMR);
++}
++
+ void intel_init_thermal(struct cpuinfo_x86 *c)
+ {
+ 	unsigned int cpu = smp_processor_id();
+@@ -630,10 +641,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
+ 	if (!intel_thermal_supported(c))
+ 		return;
+ 
+-	/* On the BSP? */
+-	if (c == &boot_cpu_data)
+-		lvtthmr_init = apic_read(APIC_LVTTHMR);
+-
+ 	/*
+ 	 * First check if its enabled already, in which case there might
+ 	 * be some SMM goo which handles it, so we can't even put a handler
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 99dfa884cbefb..68c6535bbf7f0 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -214,14 +214,11 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
+ 	struct tty_port *tport = &port->state->port;
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-	unsigned long c, flags;
++	unsigned long c;
+ 	u32 sr;
+ 	char flag;
+ 
+-	if (threaded)
+-		spin_lock_irqsave(&port->lock, flags);
+-	else
+-		spin_lock(&port->lock);
++	spin_lock(&port->lock);
+ 
+ 	while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
+ 				      threaded)) {
+@@ -278,10 +275,7 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
+ 		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
+ 	}
+ 
+-	if (threaded)
+-		spin_unlock_irqrestore(&port->lock, flags);
+-	else
+-		spin_unlock(&port->lock);
++	spin_unlock(&port->lock);
+ 
+ 	tty_flip_buffer_push(tport);
+ }
+@@ -654,7 +648,8 @@ static int stm32_usart_startup(struct uart_port *port)
+ 
+ 	ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
+ 				   stm32_usart_threaded_interrupt,
+-				   IRQF_NO_SUSPEND, name, port);
++				   IRQF_ONESHOT | IRQF_NO_SUSPEND,
++				   name, port);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1136,6 +1131,13 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
+ 	struct dma_async_tx_descriptor *desc = NULL;
+ 	int ret;
+ 
++	/*
++	 * Using DMA and threaded handler for the console could lead to
++	 * deadlocks.
++	 */
++	if (uart_console(port))
++		return -ENODEV;
++
+ 	/* Request DMA RX channel */
+ 	stm32port->rx_ch = dma_request_slave_channel(dev, "rx");
+ 	if (!stm32port->rx_ch) {
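
The stm32-usart hunk adds IRQF_ONESHOT to the threaded IRQ request. With a threaded handler, ONESHOT keeps the interrupt line masked from the moment the hard handler returns IRQ_WAKE_THREAD until the thread function finishes, so a level-triggered source cannot storm while the thread is still draining it. Registration in outline (both handlers hypothetical):

	ret = request_threaded_irq(port->irq,
				   my_quick_handler,	/* hard-IRQ context, may return IRQ_WAKE_THREAD */
				   my_slow_handler,	/* runs in a dedicated kernel thread */
				   IRQF_ONESHOT | IRQF_NO_SUSPEND,
				   "my-uart", port);
	if (ret)
		return ret;
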
+diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
+index 510fd0572feb1..e3f429f1575e9 100644
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -707,7 +707,11 @@ static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
+ 	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+ 
+ 	hsotg->hibernated = 0;
++
++#if IS_ENABLED(CONFIG_USB_DWC2_HOST) ||	\
++	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ 	hsotg->bus_suspended = 0;
++#endif
+ 
+ 	if (gpwrdn & GPWRDN_IDSTS) {
+ 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
+index 4abddbebd4b23..c691127bc805a 100644
+--- a/drivers/vfio/pci/Kconfig
++++ b/drivers/vfio/pci/Kconfig
+@@ -2,6 +2,7 @@
+ config VFIO_PCI
+ 	tristate "VFIO support for PCI devices"
+ 	depends on VFIO && PCI && EVENTFD
++	depends on MMU
+ 	select VFIO_VIRQFD
+ 	select IRQ_BYPASS_MANAGER
+ 	help
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index a402adee8a215..47f21a6ca7fe9 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
+ 			if (len == 0xFF) {
+ 				len = vfio_ext_cap_len(vdev, ecap, epos);
+ 				if (len < 0)
+-					return ret;
++					return len;
+ 			}
+ 		}
+ 
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index fb4b385191f28..e83a7cd15c956 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -289,7 +289,7 @@ err_irq:
+ 	vfio_platform_regions_cleanup(vdev);
+ err_reg:
+ 	mutex_unlock(&driver_lock);
+-	module_put(THIS_MODULE);
++	module_put(vdev->parent_module);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 5b82050b871a7..27c3680074814 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1868,7 +1868,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+ 	trace_run_delayed_ref_head(fs_info, head, 0);
+ 	btrfs_delayed_ref_unlock(head);
+ 	btrfs_put_delayed_ref_head(head);
+-	return 0;
++	return ret;
+ }
+ 
+ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 47cd3a6dc6351..eed75bb0fedbf 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -787,7 +787,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ 	u64 end_byte = bytenr + len;
+ 	u64 csum_end;
+ 	struct extent_buffer *leaf;
+-	int ret;
++	int ret = 0;
+ 	const u32 csum_size = fs_info->csum_size;
+ 	u32 blocksize_bits = fs_info->sectorsize_bits;
+ 
+@@ -805,6 +805,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ 
+ 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ 		if (ret > 0) {
++			ret = 0;
+ 			if (path->slots[0] == 0)
+ 				break;
+ 			path->slots[0]--;
+@@ -861,7 +862,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ 			ret = btrfs_del_items(trans, root, path,
+ 					      path->slots[0], del_nr);
+ 			if (ret)
+-				goto out;
++				break;
+ 			if (key.offset == bytenr)
+ 				break;
+ 		} else if (key.offset < bytenr && csum_end > end_byte) {
+@@ -905,8 +906,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ 			ret = btrfs_split_item(trans, root, path, &key, offset);
+ 			if (ret && ret != -EAGAIN) {
+ 				btrfs_abort_transaction(trans, ret);
+-				goto out;
++				break;
+ 			}
++			ret = 0;
+ 
+ 			key.offset = end_byte - 1;
+ 		} else {
+@@ -916,12 +918,41 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ 		}
+ 		btrfs_release_path(path);
+ 	}
+-	ret = 0;
+-out:
+ 	btrfs_free_path(path);
+ 	return ret;
+ }
+ 
++static int find_next_csum_offset(struct btrfs_root *root,
++				 struct btrfs_path *path,
++				 u64 *next_offset)
++{
++	const u32 nritems = btrfs_header_nritems(path->nodes[0]);
++	struct btrfs_key found_key;
++	int slot = path->slots[0] + 1;
++	int ret;
++
++	if (nritems == 0 || slot >= nritems) {
++		ret = btrfs_next_leaf(root, path);
++		if (ret < 0) {
++			return ret;
++		} else if (ret > 0) {
++			*next_offset = (u64)-1;
++			return 0;
++		}
++		slot = path->slots[0];
++	}
++
++	btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
++
++	if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
++	    found_key.type != BTRFS_EXTENT_CSUM_KEY)
++		*next_offset = (u64)-1;
++	else
++		*next_offset = found_key.offset;
++
++	return 0;
++}
++
+ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
+ 			   struct btrfs_root *root,
+ 			   struct btrfs_ordered_sum *sums)
+@@ -937,7 +968,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
+ 	u64 total_bytes = 0;
+ 	u64 csum_offset;
+ 	u64 bytenr;
+-	u32 nritems;
+ 	u32 ins_size;
+ 	int index = 0;
+ 	int found_next;
+@@ -980,26 +1010,10 @@ again:
+ 			goto insert;
+ 		}
+ 	} else {
+-		int slot = path->slots[0] + 1;
+-		/* we didn't find a csum item, insert one */
+-		nritems = btrfs_header_nritems(path->nodes[0]);
+-		if (!nritems || (path->slots[0] >= nritems - 1)) {
+-			ret = btrfs_next_leaf(root, path);
+-			if (ret < 0) {
+-				goto out;
+-			} else if (ret > 0) {
+-				found_next = 1;
+-				goto insert;
+-			}
+-			slot = path->slots[0];
+-		}
+-		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+-		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+-		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
+-			found_next = 1;
+-			goto insert;
+-		}
+-		next_offset = found_key.offset;
++		/* We didn't find a csum item, insert one. */
++		ret = find_next_csum_offset(root, path, &next_offset);
++		if (ret < 0)
++			goto out;
+ 		found_next = 1;
+ 		goto insert;
+ 	}
+@@ -1055,8 +1069,48 @@ extend_csum:
+ 		tmp = sums->len - total_bytes;
+ 		tmp >>= fs_info->sectorsize_bits;
+ 		WARN_ON(tmp < 1);
++		extend_nr = max_t(int, 1, tmp);
++
++		/*
++		 * A log tree can already have checksum items with a subset of
++		 * the checksums we are trying to log. This can happen after
++		 * doing a sequence of partial writes into prealloc extents and
++		 * fsyncs in between, with a full fsync logging a larger subrange
++		 * of an extent for which a previous fast fsync logged a smaller
++		 * subrange. And this happens in particular due to merging file
++		 * extent items when we complete an ordered extent for a range
++		 * covered by a prealloc extent - this is done at
++		 * btrfs_mark_extent_written().
++		 *
++		 * So if we try to extend the previous checksum item, which has
++		 * a range that ends at the start of the range we want to insert,
++		 * make sure we don't extend beyond the start offset of the next
++		 * checksum item. If we are at the last item in the leaf, then
++		 * forget the optimization of extending and add a new checksum
++		 * item - it is not worth the complexity of releasing the path,
++		 * getting the first key for the next leaf, repeat the btree
++		 * search, etc, because log trees are temporary anyway and it
++		 * would only save a few bytes of leaf space.
++		 */
++		if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
++			if (path->slots[0] + 1 >=
++			    btrfs_header_nritems(path->nodes[0])) {
++				ret = find_next_csum_offset(root, path, &next_offset);
++				if (ret < 0)
++					goto out;
++				found_next = 1;
++				goto insert;
++			}
++
++			ret = find_next_csum_offset(root, path, &next_offset);
++			if (ret < 0)
++				goto out;
++
++			tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
++			if (tmp <= INT_MAX)
++				extend_nr = min_t(int, extend_nr, tmp);
++		}
+ 
+-		extend_nr = max_t(int, 1, (int)tmp);
+ 		diff = (csum_offset + extend_nr) * csum_size;
+ 		diff = min(diff,
+ 			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 81b93c9c659b7..3bb8ce4969f31 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3011,6 +3011,18 @@ out:
+ 	if (ret || truncated) {
+ 		u64 unwritten_start = start;
+ 
++		/*
++		 * If we failed to finish this ordered extent for any reason we
++		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
++		 * extent, and mark the inode with the error if it wasn't
++		 * already set.  Any error during writeback would have already
++		 * set the mapping error, so we need to set it if we're the ones
++		 * marking this ordered extent as failed.
++		 */
++		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
++					     &ordered_extent->flags))
++			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
++
+ 		if (truncated)
+ 			unwritten_start += logical_len;
+ 		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
+@@ -9076,6 +9088,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 	int ret2;
+ 	bool root_log_pinned = false;
+ 	bool dest_log_pinned = false;
++	bool need_abort = false;
+ 
+ 	/* we only allow rename subvolume link between subvolumes */
+ 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+@@ -9132,6 +9145,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 					     old_idx);
+ 		if (ret)
+ 			goto out_fail;
++		need_abort = true;
+ 	}
+ 
+ 	/* And now for the dest. */
+@@ -9147,8 +9161,11 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 					     new_ino,
+ 					     btrfs_ino(BTRFS_I(old_dir)),
+ 					     new_idx);
+-		if (ret)
++		if (ret) {
++			if (need_abort)
++				btrfs_abort_transaction(trans, ret);
+ 			goto out_fail;
++		}
+ 	}
+ 
+ 	/* Update inode version and ctime/mtime. */
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
+index 53ee17f5e382c..238e713635d79 100644
+--- a/fs/btrfs/reflink.c
++++ b/fs/btrfs/reflink.c
+@@ -207,10 +207,7 @@ static int clone_copy_inline_extent(struct inode *dst,
+ 			 * inline extent's data to the page.
+ 			 */
+ 			ASSERT(key.offset > 0);
+-			ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+-						  inline_data, size, datal,
+-						  comp_type);
+-			goto out;
++			goto copy_to_page;
+ 		}
+ 	} else if (i_size_read(dst) <= datal) {
+ 		struct btrfs_file_extent_item *ei;
+@@ -226,13 +223,10 @@ static int clone_copy_inline_extent(struct inode *dst,
+ 		    BTRFS_FILE_EXTENT_INLINE)
+ 			goto copy_inline_extent;
+ 
+-		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+-					  inline_data, size, datal, comp_type);
+-		goto out;
++		goto copy_to_page;
+ 	}
+ 
+ copy_inline_extent:
+-	ret = 0;
+ 	/*
+ 	 * We have no extent items, or we have an extent at offset 0 which may
+ 	 * or may not be inlined. All these cases are dealt the same way.
+@@ -244,11 +238,13 @@ copy_inline_extent:
+ 		 * clone. Deal with all these cases by copying the inline extent
+ 		 * data into the respective page at the destination inode.
+ 		 */
+-		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+-					  inline_data, size, datal, comp_type);
+-		goto out;
++		goto copy_to_page;
+ 	}
+ 
++	/*
++	 * Release path before starting a new transaction so we don't hold locks
++	 * that would confuse lockdep.
++	 */
+ 	btrfs_release_path(path);
+ 	/*
+ 	 * If we end up here it means we're copying the inline extent into a leaf
+@@ -285,11 +281,6 @@ copy_inline_extent:
+ 	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
+ out:
+ 	if (!ret && !trans) {
+-		/*
+-		 * Release path before starting a new transaction so we don't
+-		 * hold locks that would confuse lockdep.
+-		 */
+-		btrfs_release_path(path);
+ 		/*
+ 		 * No transaction here means we copied the inline extent into a
+ 		 * page of the destination inode.
+@@ -310,6 +301,21 @@ out:
+ 		*trans_out = trans;
+ 
+ 	return ret;
++
++copy_to_page:
++	/*
++	 * Release our path because we don't need it anymore and also because
++	 * copy_inline_to_page() needs to reserve data and metadata, which may
++	 * need to flush delalloc when we are low on available space and
++	 * therefore cause a deadlock if writeback of an inline extent needs to
++	 * write to the same leaf or an ordered extent completion needs to write
++	 * to the same leaf.
++	 */
++	btrfs_release_path(path);
++
++	ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
++				  inline_data, size, datal, comp_type);
++	goto out;
+ }
+ 
+ /**
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index d7f1599e69b1f..faae6ebd8a279 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1574,7 +1574,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 			if (ret)
+ 				goto out;
+ 
+-			btrfs_update_inode(trans, root, BTRFS_I(inode));
++			ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
++			if (ret)
++				goto out;
+ 		}
+ 
+ 		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
+@@ -1749,7 +1751,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
+ 
+ 	if (nlink != inode->i_nlink) {
+ 		set_nlink(inode, nlink);
+-		btrfs_update_inode(trans, root, BTRFS_I(inode));
++		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
++		if (ret)
++			goto out;
+ 	}
+ 	BTRFS_I(inode)->index_cnt = (u64)-1;
+ 
+@@ -1787,6 +1791,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+ 			break;
+ 
+ 		if (ret == 1) {
++			ret = 0;
+ 			if (path->slots[0] == 0)
+ 				break;
+ 			path->slots[0]--;
+@@ -1799,17 +1804,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+ 
+ 		ret = btrfs_del_item(trans, root, path);
+ 		if (ret)
+-			goto out;
++			break;
+ 
+ 		btrfs_release_path(path);
+ 		inode = read_one_inode(root, key.offset);
+-		if (!inode)
+-			return -EIO;
++		if (!inode) {
++			ret = -EIO;
++			break;
++		}
+ 
+ 		ret = fixup_inode_link_count(trans, root, inode);
+ 		iput(inode);
+ 		if (ret)
+-			goto out;
++			break;
+ 
+ 		/*
+ 		 * fixup on a directory may create new entries,
+@@ -1818,8 +1825,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+ 		 */
+ 		key.offset = (u64)-1;
+ 	}
+-	ret = 0;
+-out:
+ 	btrfs_release_path(path);
+ 	return ret;
+ }
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 77c84d6f1af6b..cbf37b2cf871e 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3206,7 +3206,10 @@ static int ext4_split_extent_at(handle_t *handle,
+ 		ext4_ext_mark_unwritten(ex2);
+ 
+ 	err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
+-	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
++	if (err != -ENOSPC && err != -EDQUOT)
++		goto out;
++
++	if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+ 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+ 			if (split_flag & EXT4_EXT_DATA_VALID1) {
+ 				err = ext4_ext_zeroout(inode, ex2);
+@@ -3232,25 +3235,22 @@ static int ext4_split_extent_at(handle_t *handle,
+ 					      ext4_ext_pblock(&orig_ex));
+ 		}
+ 
+-		if (err)
+-			goto fix_extent_len;
+-		/* update the extent length and mark as initialized */
+-		ex->ee_len = cpu_to_le16(ee_len);
+-		ext4_ext_try_to_merge(handle, inode, path, ex);
+-		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+-		if (err)
+-			goto fix_extent_len;
+-
+-		/* update extent status tree */
+-		err = ext4_zeroout_es(inode, &zero_ex);
+-
+-		goto out;
+-	} else if (err)
+-		goto fix_extent_len;
+-
+-out:
+-	ext4_ext_show_leaf(inode, path);
+-	return err;
++		if (!err) {
++			/* update the extent length and mark as initialized */
++			ex->ee_len = cpu_to_le16(ee_len);
++			ext4_ext_try_to_merge(handle, inode, path, ex);
++			err = ext4_ext_dirty(handle, inode, path + path->p_depth);
++			if (!err)
++				/* update extent status tree */
++				err = ext4_zeroout_es(inode, &zero_ex);
++		/* If we failed at this point, we don't know exactly what
++		 * state the extent tree is in, so don't try to fix the
++		 * length of the original extent as that may do even more
++		 * damage.
++			 */
++			goto out;
++		}
++	}
+ 
+ fix_extent_len:
+ 	ex->ee_len = orig_ex.ee_len;
+@@ -3260,6 +3260,9 @@ fix_extent_len:
+ 	 */
+ 	ext4_ext_dirty(handle, inode, path + path->p_depth);
+ 	return err;
++out:
++	ext4_ext_show_leaf(inode, path);
++	return err;
+ }
+ 
+ /*
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index eda14f630def4..c1c962b118012 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1288,28 +1288,29 @@ struct dentry_info_args {
+ };
+ 
+ static inline void tl_to_darg(struct dentry_info_args *darg,
+-				struct  ext4_fc_tl *tl)
++			      struct  ext4_fc_tl *tl, u8 *val)
+ {
+-	struct ext4_fc_dentry_info *fcd;
++	struct ext4_fc_dentry_info fcd;
+ 
+-	fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl);
++	memcpy(&fcd, val, sizeof(fcd));
+ 
+-	darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino);
+-	darg->ino = le32_to_cpu(fcd->fc_ino);
+-	darg->dname = fcd->fc_dname;
+-	darg->dname_len = ext4_fc_tag_len(tl) -
+-			sizeof(struct ext4_fc_dentry_info);
++	darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
++	darg->ino = le32_to_cpu(fcd.fc_ino);
++	darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
++	darg->dname_len = le16_to_cpu(tl->fc_len) -
++		sizeof(struct ext4_fc_dentry_info);
+ }
+ 
+ /* Unlink replay function */
+-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
++static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
++				 u8 *val)
+ {
+ 	struct inode *inode, *old_parent;
+ 	struct qstr entry;
+ 	struct dentry_info_args darg;
+ 	int ret = 0;
+ 
+-	tl_to_darg(&darg, tl);
++	tl_to_darg(&darg, tl, val);
+ 
+ 	trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
+ 			darg.parent_ino, darg.dname_len);
+@@ -1399,13 +1400,14 @@ out:
+ }
+ 
+ /* Link replay function */
+-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
++static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
++			       u8 *val)
+ {
+ 	struct inode *inode;
+ 	struct dentry_info_args darg;
+ 	int ret = 0;
+ 
+-	tl_to_darg(&darg, tl);
++	tl_to_darg(&darg, tl, val);
+ 	trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
+ 			darg.parent_ino, darg.dname_len);
+ 
+@@ -1450,9 +1452,10 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
+ /*
+  * Inode replay function
+  */
+-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
++static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
++				u8 *val)
+ {
+-	struct ext4_fc_inode *fc_inode;
++	struct ext4_fc_inode fc_inode;
+ 	struct ext4_inode *raw_inode;
+ 	struct ext4_inode *raw_fc_inode;
+ 	struct inode *inode = NULL;
+@@ -1460,9 +1463,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
+ 	int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag);
+ 	struct ext4_extent_header *eh;
+ 
+-	fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl);
++	memcpy(&fc_inode, val, sizeof(fc_inode));
+ 
+-	ino = le32_to_cpu(fc_inode->fc_ino);
++	ino = le32_to_cpu(fc_inode.fc_ino);
+ 	trace_ext4_fc_replay(sb, tag, ino, 0, 0);
+ 
+ 	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
+@@ -1474,12 +1477,13 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
+ 
+ 	ext4_fc_record_modified_inode(sb, ino);
+ 
+-	raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode;
++	raw_fc_inode = (struct ext4_inode *)
++		(val + offsetof(struct ext4_fc_inode, fc_raw_inode));
+ 	ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
+ 	if (ret)
+ 		goto out;
+ 
+-	inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode);
++	inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode);
+ 	raw_inode = ext4_raw_inode(&iloc);
+ 
+ 	memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
+@@ -1547,14 +1551,15 @@ out:
+  * inode for which we are trying to create a dentry here, should already have
+  * been replayed before we start here.
+  */
+-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
++static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
++				 u8 *val)
+ {
+ 	int ret = 0;
+ 	struct inode *inode = NULL;
+ 	struct inode *dir = NULL;
+ 	struct dentry_info_args darg;
+ 
+-	tl_to_darg(&darg, tl);
++	tl_to_darg(&darg, tl, val);
+ 
+ 	trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
+ 			darg.parent_ino, darg.dname_len);
+@@ -1633,9 +1638,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
+ 
+ /* Replay add range tag */
+ static int ext4_fc_replay_add_range(struct super_block *sb,
+-				struct ext4_fc_tl *tl)
++				    struct ext4_fc_tl *tl, u8 *val)
+ {
+-	struct ext4_fc_add_range *fc_add_ex;
++	struct ext4_fc_add_range fc_add_ex;
+ 	struct ext4_extent newex, *ex;
+ 	struct inode *inode;
+ 	ext4_lblk_t start, cur;
+@@ -1645,15 +1650,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
+ 	struct ext4_ext_path *path = NULL;
+ 	int ret;
+ 
+-	fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
+-	ex = (struct ext4_extent *)&fc_add_ex->fc_ex;
++	memcpy(&fc_add_ex, val, sizeof(fc_add_ex));
++	ex = (struct ext4_extent *)&fc_add_ex.fc_ex;
+ 
+ 	trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
+-		le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block),
++		le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block),
+ 		ext4_ext_get_actual_len(ex));
+ 
+-	inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
+-				EXT4_IGET_NORMAL);
++	inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
+ 	if (IS_ERR(inode)) {
+ 		jbd_debug(1, "Inode not found.");
+ 		return 0;
+@@ -1762,32 +1766,33 @@ next:
+ 
+ /* Replay DEL_RANGE tag */
+ static int
+-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
++ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
++			 u8 *val)
+ {
+ 	struct inode *inode;
+-	struct ext4_fc_del_range *lrange;
++	struct ext4_fc_del_range lrange;
+ 	struct ext4_map_blocks map;
+ 	ext4_lblk_t cur, remaining;
+ 	int ret;
+ 
+-	lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl);
+-	cur = le32_to_cpu(lrange->fc_lblk);
+-	remaining = le32_to_cpu(lrange->fc_len);
++	memcpy(&lrange, val, sizeof(lrange));
++	cur = le32_to_cpu(lrange.fc_lblk);
++	remaining = le32_to_cpu(lrange.fc_len);
+ 
+ 	trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
+-		le32_to_cpu(lrange->fc_ino), cur, remaining);
++		le32_to_cpu(lrange.fc_ino), cur, remaining);
+ 
+-	inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
++	inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
+ 	if (IS_ERR(inode)) {
+-		jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
++		jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino));
+ 		return 0;
+ 	}
+ 
+ 	ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+ 
+ 	jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
+-			inode->i_ino, le32_to_cpu(lrange->fc_lblk),
+-			le32_to_cpu(lrange->fc_len));
++			inode->i_ino, le32_to_cpu(lrange.fc_lblk),
++			le32_to_cpu(lrange.fc_len));
+ 	while (remaining > 0) {
+ 		map.m_lblk = cur;
+ 		map.m_len = remaining;
+@@ -1808,8 +1813,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
+ 	}
+ 
+ 	ret = ext4_punch_hole(inode,
+-		le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits,
+-		le32_to_cpu(lrange->fc_len) <<  sb->s_blocksize_bits);
++		le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits,
++		le32_to_cpu(lrange.fc_len) <<  sb->s_blocksize_bits);
+ 	if (ret)
+ 		jbd_debug(1, "ext4_punch_hole returned %d", ret);
+ 	ext4_ext_replay_shrink_inode(inode,
+@@ -1925,11 +1930,11 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_fc_replay_state *state;
+ 	int ret = JBD2_FC_REPLAY_CONTINUE;
+-	struct ext4_fc_add_range *ext;
+-	struct ext4_fc_tl *tl;
+-	struct ext4_fc_tail *tail;
+-	__u8 *start, *end;
+-	struct ext4_fc_head *head;
++	struct ext4_fc_add_range ext;
++	struct ext4_fc_tl tl;
++	struct ext4_fc_tail tail;
++	__u8 *start, *end, *cur, *val;
++	struct ext4_fc_head head;
+ 	struct ext4_extent *ex;
+ 
+ 	state = &sbi->s_fc_replay_state;
+@@ -1956,15 +1961,17 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ 	}
+ 
+ 	state->fc_replay_expected_off++;
+-	fc_for_each_tl(start, end, tl) {
++	for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
++		memcpy(&tl, cur, sizeof(tl));
++		val = cur + sizeof(tl);
+ 		jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
+-			  tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr);
+-		switch (le16_to_cpu(tl->fc_tag)) {
++			  tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr);
++		switch (le16_to_cpu(tl.fc_tag)) {
+ 		case EXT4_FC_TAG_ADD_RANGE:
+-			ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
+-			ex = (struct ext4_extent *)&ext->fc_ex;
++			memcpy(&ext, val, sizeof(ext));
++			ex = (struct ext4_extent *)&ext.fc_ex;
+ 			ret = ext4_fc_record_regions(sb,
+-				le32_to_cpu(ext->fc_ino),
++				le32_to_cpu(ext.fc_ino),
+ 				le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
+ 				ext4_ext_get_actual_len(ex));
+ 			if (ret < 0)
+@@ -1978,18 +1985,18 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ 		case EXT4_FC_TAG_INODE:
+ 		case EXT4_FC_TAG_PAD:
+ 			state->fc_cur_tag++;
+-			state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
+-					sizeof(*tl) + ext4_fc_tag_len(tl));
++			state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
++					sizeof(tl) + le16_to_cpu(tl.fc_len));
+ 			break;
+ 		case EXT4_FC_TAG_TAIL:
+ 			state->fc_cur_tag++;
+-			tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
+-			state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
+-						sizeof(*tl) +
++			memcpy(&tail, val, sizeof(tail));
++			state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
++						sizeof(tl) +
+ 						offsetof(struct ext4_fc_tail,
+ 						fc_crc));
+-			if (le32_to_cpu(tail->fc_tid) == expected_tid &&
+-				le32_to_cpu(tail->fc_crc) == state->fc_crc) {
++			if (le32_to_cpu(tail.fc_tid) == expected_tid &&
++				le32_to_cpu(tail.fc_crc) == state->fc_crc) {
+ 				state->fc_replay_num_tags = state->fc_cur_tag;
+ 				state->fc_regions_valid =
+ 					state->fc_regions_used;
+@@ -2000,19 +2007,19 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ 			state->fc_crc = 0;
+ 			break;
+ 		case EXT4_FC_TAG_HEAD:
+-			head = (struct ext4_fc_head *)ext4_fc_tag_val(tl);
+-			if (le32_to_cpu(head->fc_features) &
++			memcpy(&head, val, sizeof(head));
++			if (le32_to_cpu(head.fc_features) &
+ 				~EXT4_FC_SUPPORTED_FEATURES) {
+ 				ret = -EOPNOTSUPP;
+ 				break;
+ 			}
+-			if (le32_to_cpu(head->fc_tid) != expected_tid) {
++			if (le32_to_cpu(head.fc_tid) != expected_tid) {
+ 				ret = JBD2_FC_REPLAY_STOP;
+ 				break;
+ 			}
+ 			state->fc_cur_tag++;
+-			state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
+-					sizeof(*tl) + ext4_fc_tag_len(tl));
++			state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
++					    sizeof(tl) + le16_to_cpu(tl.fc_len));
+ 			break;
+ 		default:
+ 			ret = state->fc_replay_num_tags ?
+@@ -2036,11 +2043,11 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
+ {
+ 	struct super_block *sb = journal->j_private;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	struct ext4_fc_tl *tl;
+-	__u8 *start, *end;
++	struct ext4_fc_tl tl;
++	__u8 *start, *end, *cur, *val;
+ 	int ret = JBD2_FC_REPLAY_CONTINUE;
+ 	struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
+-	struct ext4_fc_tail *tail;
++	struct ext4_fc_tail tail;
+ 
+ 	if (pass == PASS_SCAN) {
+ 		state->fc_current_pass = PASS_SCAN;
+@@ -2067,49 +2074,52 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
+ 	start = (u8 *)bh->b_data;
+ 	end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
+ 
+-	fc_for_each_tl(start, end, tl) {
++	for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
++		memcpy(&tl, cur, sizeof(tl));
++		val = cur + sizeof(tl);
++
+ 		if (state->fc_replay_num_tags == 0) {
+ 			ret = JBD2_FC_REPLAY_STOP;
+ 			ext4_fc_set_bitmaps_and_counters(sb);
+ 			break;
+ 		}
+ 		jbd_debug(3, "Replay phase, tag:%s\n",
+-				tag2str(le16_to_cpu(tl->fc_tag)));
++				tag2str(le16_to_cpu(tl.fc_tag)));
+ 		state->fc_replay_num_tags--;
+-		switch (le16_to_cpu(tl->fc_tag)) {
++		switch (le16_to_cpu(tl.fc_tag)) {
+ 		case EXT4_FC_TAG_LINK:
+-			ret = ext4_fc_replay_link(sb, tl);
++			ret = ext4_fc_replay_link(sb, &tl, val);
+ 			break;
+ 		case EXT4_FC_TAG_UNLINK:
+-			ret = ext4_fc_replay_unlink(sb, tl);
++			ret = ext4_fc_replay_unlink(sb, &tl, val);
+ 			break;
+ 		case EXT4_FC_TAG_ADD_RANGE:
+-			ret = ext4_fc_replay_add_range(sb, tl);
++			ret = ext4_fc_replay_add_range(sb, &tl, val);
+ 			break;
+ 		case EXT4_FC_TAG_CREAT:
+-			ret = ext4_fc_replay_create(sb, tl);
++			ret = ext4_fc_replay_create(sb, &tl, val);
+ 			break;
+ 		case EXT4_FC_TAG_DEL_RANGE:
+-			ret = ext4_fc_replay_del_range(sb, tl);
++			ret = ext4_fc_replay_del_range(sb, &tl, val);
+ 			break;
+ 		case EXT4_FC_TAG_INODE:
+-			ret = ext4_fc_replay_inode(sb, tl);
++			ret = ext4_fc_replay_inode(sb, &tl, val);
+ 			break;
+ 		case EXT4_FC_TAG_PAD:
+ 			trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
+-				ext4_fc_tag_len(tl), 0);
++					     le16_to_cpu(tl.fc_len), 0);
+ 			break;
+ 		case EXT4_FC_TAG_TAIL:
+ 			trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0,
+-				ext4_fc_tag_len(tl), 0);
+-			tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
+-			WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid);
++					     le16_to_cpu(tl.fc_len), 0);
++			memcpy(&tail, val, sizeof(tail));
++			WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
+ 			break;
+ 		case EXT4_FC_TAG_HEAD:
+ 			break;
+ 		default:
+-			trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0,
+-				ext4_fc_tag_len(tl), 0);
++			trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0,
++					     le16_to_cpu(tl.fc_len), 0);
+ 			ret = -ECANCELED;
+ 			break;
+ 		}
+diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h
+index b77f70f55a622..937c381b4c85e 100644
+--- a/fs/ext4/fast_commit.h
++++ b/fs/ext4/fast_commit.h
+@@ -153,13 +153,6 @@ struct ext4_fc_replay_state {
+ #define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1)
+ #endif
+ 
+-#define fc_for_each_tl(__start, __end, __tl)				\
+-	for (tl = (struct ext4_fc_tl *)(__start);			\
+-	     (__u8 *)tl < (__u8 *)(__end);				\
+-		tl = (struct ext4_fc_tl *)((__u8 *)tl +			\
+-					sizeof(struct ext4_fc_tl) +	\
+-					+ le16_to_cpu(tl->fc_len)))
+-
+ static inline const char *tag2str(__u16 tag)
+ {
+ 	switch (tag) {
+@@ -186,16 +179,4 @@ static inline const char *tag2str(__u16 tag)
+ 	}
+ }
+ 
+-/* Get length of a particular tlv */
+-static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl)
+-{
+-	return le16_to_cpu(tl->fc_len);
+-}
+-
+-/* Get a pointer to "value" of a tlv */
+-static inline __u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl)
+-{
+-	return (__u8 *)tl + sizeof(*tl);
+-}
+-
+ #endif /* __FAST_COMMIT_H__ */
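The fast-commit rework above replaces casts into the journal buffer (the removed ext4_fc_tag_val()/fc_for_each_tl() helpers) with memcpy into on-stack copies, so a corrupt or unaligned fc_len can no longer cause an unaligned or out-of-bounds read. A self-contained sketch of that parse-by-copy pattern follows, assuming a made-up little-endian buffer and a tl struct modelled only loosely on ext4_fc_tl:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct tl { uint16_t tag; uint16_t len; }; /* loosely modelled on ext4_fc_tl */

int main(void)
{
	/* hypothetical journal block, little-endian host assumed for the
	 * literals: tag 1 with 4 bytes of value, then tag 2 with 2 bytes */
	uint8_t buf[] = { 1, 0, 4, 0, 0xaa, 0xbb, 0xcc, 0xdd,
			  2, 0, 2, 0, 0x11, 0x22 };
	uint8_t *cur = buf, *end = buf + sizeof(buf);

	while (cur + sizeof(struct tl) <= end) {
		struct tl tl;

		/* Copy the header out instead of casting cur: no alignment
		 * trap, and the value area is addressed separately. */
		memcpy(&tl, cur, sizeof(tl));
		if ((size_t)(end - cur) - sizeof(tl) < tl.len)
			break; /* bogus length: stop rather than overread */
		printf("tag %u, len %u\n", tl.tag, tl.len);
		cur += sizeof(tl) + tl.len;
	}
	return 0;
}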
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 71d321b3b9844..edbaed073ac5c 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -322,14 +322,16 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
+ 	if (is_directory) {
+ 		count = ext4_used_dirs_count(sb, gdp) - 1;
+ 		ext4_used_dirs_set(sb, gdp, count);
+-		percpu_counter_dec(&sbi->s_dirs_counter);
++		if (percpu_counter_initialized(&sbi->s_dirs_counter))
++			percpu_counter_dec(&sbi->s_dirs_counter);
+ 	}
+ 	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
+ 				   EXT4_INODES_PER_GROUP(sb) / 8);
+ 	ext4_group_desc_csum_set(sb, block_group, gdp);
+ 	ext4_unlock_group(sb, block_group);
+ 
+-	percpu_counter_inc(&sbi->s_freeinodes_counter);
++	if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
++		percpu_counter_inc(&sbi->s_freeinodes_counter);
+ 	if (sbi->s_log_groups_per_flex) {
+ 		struct flex_groups *fg;
+ 
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index a02fadf4fc84e..d24cb3dc79fff 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2715,7 +2715,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
+ 		 */
+ 		if (sbi->s_es->s_log_groups_per_flex >= 32) {
+ 			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
+-			goto err_freesgi;
++			goto err_freebuddy;
+ 		}
+ 		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
+ 			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 77c1cb2582623..0e3a847b5d279 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -4449,14 +4449,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	}
+ 
+ 	if (sb->s_blocksize != blocksize) {
++		/*
++		 * bh must be released before kill_bdev(), otherwise
++		 * neither it nor its page will be freed. kill_bdev()
++		 * is called by sb_set_blocksize().
++		 */
++		brelse(bh);
+ 		/* Validate the filesystem blocksize */
+ 		if (!sb_set_blocksize(sb, blocksize)) {
+ 			ext4_msg(sb, KERN_ERR, "bad block size %d",
+ 					blocksize);
++			bh = NULL;
+ 			goto failed_mount;
+ 		}
+ 
+-		brelse(bh);
+ 		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
+ 		offset = do_div(logical_sb_block, blocksize);
+ 		bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
+@@ -5176,8 +5182,9 @@ failed_mount:
+ 		kfree(get_qf_name(sb, sbi, i));
+ #endif
+ 	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
+-	ext4_blkdev_remove(sbi);
++	/* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
+ 	brelse(bh);
++	ext4_blkdev_remove(sbi);
+ out_fail:
+ 	sb->s_fs_info = NULL;
+ 	kfree(sbi->s_blockgroup_lock);
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 9567520d79f79..7c2ba81213da0 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1465,9 +1465,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
+ 	    glock_blocked_by_withdraw(gl) &&
+ 	    gh->gh_gl != sdp->sd_jinode_gl) {
+ 		sdp->sd_glock_dqs_held++;
++		spin_unlock(&gl->gl_lockref.lock);
+ 		might_sleep();
+ 		wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
+ 			    TASK_UNINTERRUPTIBLE);
++		spin_lock(&gl->gl_lockref.lock);
+ 	}
+ 	if (gh->gh_flags & GL_NOCACHE)
+ 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 144056b0cac92..359d1abb089c4 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -653,7 +653,7 @@ struct io_unlink {
+ struct io_completion {
+ 	struct file			*file;
+ 	struct list_head		list;
+-	int				cflags;
++	u32				cflags;
+ };
+ 
+ struct io_async_connect {
+@@ -1476,7 +1476,33 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+ 	return ret;
+ }
+ 
+-static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
++static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
++{
++	return refcount_inc_not_zero(&req->refs);
++}
++
++static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
++{
++	return refcount_sub_and_test(refs, &req->refs);
++}
++
++static inline bool req_ref_put_and_test(struct io_kiocb *req)
++{
++	return refcount_dec_and_test(&req->refs);
++}
++
++static inline void req_ref_put(struct io_kiocb *req)
++{
++	refcount_dec(&req->refs);
++}
++
++static inline void req_ref_get(struct io_kiocb *req)
++{
++	refcount_inc(&req->refs);
++}
++
++static void __io_cqring_fill_event(struct io_kiocb *req, long res,
++				   unsigned int cflags)
+ {
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 	struct io_uring_cqe *cqe;
+@@ -1511,7 +1537,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
+ 		io_clean_op(req);
+ 		req->result = res;
+ 		req->compl.cflags = cflags;
+-		refcount_inc(&req->refs);
++		req_ref_get(req);
+ 		list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
+ 	}
+ }
+@@ -1533,7 +1559,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
+ 	 * If we're the last reference to this request, add to our locked
+ 	 * free_list cache.
+ 	 */
+-	if (refcount_dec_and_test(&req->refs)) {
++	if (req_ref_put_and_test(req)) {
+ 		struct io_comp_state *cs = &ctx->submit_state.comp;
+ 
+ 		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
+@@ -2112,7 +2138,7 @@ static void io_submit_flush_completions(struct io_comp_state *cs,
+ 		req = cs->reqs[i];
+ 
+ 		/* submission and completion refs */
+-		if (refcount_sub_and_test(2, &req->refs))
++		if (req_ref_sub_and_test(req, 2))
+ 			io_req_free_batch(&rb, req, &ctx->submit_state);
+ 	}
+ 
+@@ -2128,7 +2154,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
+ {
+ 	struct io_kiocb *nxt = NULL;
+ 
+-	if (refcount_dec_and_test(&req->refs)) {
++	if (req_ref_put_and_test(req)) {
+ 		nxt = io_req_find_next(req);
+ 		__io_free_req(req);
+ 	}
+@@ -2137,7 +2163,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
+ 
+ static void io_put_req(struct io_kiocb *req)
+ {
+-	if (refcount_dec_and_test(&req->refs))
++	if (req_ref_put_and_test(req))
+ 		io_free_req(req);
+ }
+ 
+@@ -2160,14 +2186,14 @@ static void io_free_req_deferred(struct io_kiocb *req)
+ 
+ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
+ {
+-	if (refcount_sub_and_test(refs, &req->refs))
++	if (req_ref_sub_and_test(req, refs))
+ 		io_free_req_deferred(req);
+ }
+ 
+ static void io_double_put_req(struct io_kiocb *req)
+ {
+ 	/* drop both submit and complete references */
+-	if (refcount_sub_and_test(2, &req->refs))
++	if (req_ref_sub_and_test(req, 2))
+ 		io_free_req(req);
+ }
+ 
+@@ -2253,7 +2279,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
+ 		__io_cqring_fill_event(req, req->result, cflags);
+ 		(*nr_events)++;
+ 
+-		if (refcount_dec_and_test(&req->refs))
++		if (req_ref_put_and_test(req))
+ 			io_req_free_batch(&rb, req, &ctx->submit_state);
+ 	}
+ 
+@@ -2495,7 +2521,7 @@ static bool io_rw_reissue(struct io_kiocb *req)
+ 	lockdep_assert_held(&req->ctx->uring_lock);
+ 
+ 	if (io_resubmit_prep(req)) {
+-		refcount_inc(&req->refs);
++		req_ref_get(req);
+ 		io_queue_async_work(req);
+ 		return true;
+ 	}
+@@ -3208,7 +3234,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
+ 	list_del_init(&wait->entry);
+ 
+ 	/* submit ref gets dropped, acquire a new one */
+-	refcount_inc(&req->refs);
++	req_ref_get(req);
+ 	io_req_task_queue(req);
+ 	return 1;
+ }
+@@ -4953,7 +4979,7 @@ static void io_poll_remove_double(struct io_kiocb *req)
+ 		spin_lock(&head->lock);
+ 		list_del_init(&poll->wait.entry);
+ 		if (poll->wait.private)
+-			refcount_dec(&req->refs);
++			req_ref_put(req);
+ 		poll->head = NULL;
+ 		spin_unlock(&head->lock);
+ 	}
+@@ -5019,7 +5045,7 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
+ 			poll->wait.func(&poll->wait, mode, sync, key);
+ 		}
+ 	}
+-	refcount_dec(&req->refs);
++	req_ref_put(req);
+ 	return 1;
+ }
+ 
+@@ -5062,7 +5088,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
+ 			return;
+ 		}
+ 		io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
+-		refcount_inc(&req->refs);
++		req_ref_get(req);
+ 		poll->wait.private = req;
+ 		*poll_ptr = poll;
+ 	}
+@@ -6211,7 +6237,7 @@ static void io_wq_submit_work(struct io_wq_work *work)
+ 	/* avoid locking problems by failing it from a clean context */
+ 	if (ret) {
+ 		/* io-wq is going to take one down */
+-		refcount_inc(&req->refs);
++		req_ref_get(req);
+ 		io_req_task_queue_fail(req, ret);
+ 	}
+ }
+@@ -6263,15 +6289,17 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+ 	 * We don't expect the list to be empty, that will only happen if we
+ 	 * race with the completion of the linked work.
+ 	 */
+-	if (prev && refcount_inc_not_zero(&prev->refs))
++	if (prev) {
+ 		io_remove_next_linked(prev);
+-	else
+-		prev = NULL;
++		if (!req_ref_inc_not_zero(prev))
++			prev = NULL;
++	}
+ 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ 
+ 	if (prev) {
+ 		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
+ 		io_put_req_deferred(prev, 1);
++		io_put_req_deferred(req, 1);
+ 	} else {
+ 		io_req_complete_post(req, -ETIME, 0);
+ 		io_put_req_deferred(req, 1);
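The req_ref_*() helpers introduced above are thin wrappers so that every reference-count operation on a request flows through one audited choke point instead of open-coded refcount_*() calls. A rough userspace analogue of that pattern using C11 atomics, with an invented obj type standing in for io_kiocb:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int refs; };	/* stand-in for io_kiocb */

/* One audited choke point per operation, mirroring req_ref_get()/put(). */
static inline void obj_ref_get(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);
}

static inline bool obj_ref_sub_and_test(struct obj *o, int n)
{
	return atomic_fetch_sub(&o->refs, n) == n; /* true when we dropped to 0 */
}

static inline bool obj_ref_put_and_test(struct obj *o)
{
	return obj_ref_sub_and_test(o, 1);
}

int main(void)
{
	struct obj o = { .refs = 2 };	/* e.g. submission + completion refs */

	obj_ref_get(&o);			/* 2 -> 3 */
	if (obj_ref_put_and_test(&o))		/* 3 -> 2 */
		puts("freed too early?!");
	if (obj_ref_sub_and_test(&o, 2))	/* 2 -> 0 */
		puts("last reference dropped, free object");
	return 0;
}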
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 5edc1d0cf115f..0e45893c0cda0 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1857,6 +1857,45 @@ out:
+ 	return ret;
+ }
+ 
++/*
++ * zero out partial blocks of one cluster.
++ *
++ * start: file offset where zeroing starts; it is rounded up to a block boundary.
++ * len: trimmed to the end of the current cluster if "start + len"
++ *      extends past it.
++ */
++static int ocfs2_zeroout_partial_cluster(struct inode *inode,
++					u64 start, u64 len)
++{
++	int ret;
++	u64 start_block, end_block, nr_blocks;
++	u64 p_block, offset;
++	u32 cluster, p_cluster, nr_clusters;
++	struct super_block *sb = inode->i_sb;
++	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
++
++	if (start + len < end)
++		end = start + len;
++
++	start_block = ocfs2_blocks_for_bytes(sb, start);
++	end_block = ocfs2_blocks_for_bytes(sb, end);
++	nr_blocks = end_block - start_block;
++	if (!nr_blocks)
++		return 0;
++
++	cluster = ocfs2_bytes_to_clusters(sb, start);
++	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
++				&nr_clusters, NULL);
++	if (ret)
++		return ret;
++	if (!p_cluster)
++		return 0;
++
++	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
++	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
++	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
++}
++
+ /*
+  * Parts of this function taken from xfs_change_file_space()
+  */
+@@ -1867,7 +1906,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ {
+ 	int ret;
+ 	s64 llen;
+-	loff_t size;
++	loff_t size, orig_isize;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 	struct buffer_head *di_bh = NULL;
+ 	handle_t *handle;
+@@ -1898,6 +1937,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ 		goto out_inode_unlock;
+ 	}
+ 
++	orig_isize = i_size_read(inode);
+ 	switch (sr->l_whence) {
+ 	case 0: /*SEEK_SET*/
+ 		break;
+@@ -1905,7 +1945,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ 		sr->l_start += f_pos;
+ 		break;
+ 	case 2: /*SEEK_END*/
+-		sr->l_start += i_size_read(inode);
++		sr->l_start += orig_isize;
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+@@ -1959,6 +1999,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ 	default:
+ 		ret = -EINVAL;
+ 	}
++
++	/* zeroout eof blocks in the cluster. */
++	if (!ret && change_size && orig_isize < size) {
++		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
++					size - orig_isize);
++		if (!ret)
++			i_size_write(inode, size);
++	}
+ 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
+ 	if (ret) {
+ 		mlog_errno(ret);
+@@ -1975,9 +2023,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ 		goto out_inode_unlock;
+ 	}
+ 
+-	if (change_size && i_size_read(inode) < size)
+-		i_size_write(inode, size);
+-
+ 	inode->i_ctime = inode->i_mtime = current_time(inode);
+ 	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
+ 	if (ret < 0)
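ocfs2_zeroout_partial_cluster() above is mostly range arithmetic: round the start up, clamp the end to the current cluster, and convert both to block numbers. A standalone sketch of that arithmetic under assumed 4K blocks and 1M clusters — the helpers here only imitate the ocfs2 ones:

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SHIFT   12	/* hypothetical 4K blocks */
#define CLUSTER_SHIFT 20	/* hypothetical 1M clusters */

static uint64_t blocks_for_bytes(uint64_t b)	/* round up, like the kernel helper */
{
	return (b + (1ULL << BLOCK_SHIFT) - 1) >> BLOCK_SHIFT;
}

static uint64_t align_to_cluster(uint64_t b)	/* round up to a cluster boundary */
{
	return (b + (1ULL << CLUSTER_SHIFT) - 1) >> CLUSTER_SHIFT << CLUSTER_SHIFT;
}

int main(void)
{
	uint64_t start = 3 * (1ULL << CLUSTER_SHIFT) + 5000; /* hypothetical old i_size */
	uint64_t len = 4 * (1ULL << CLUSTER_SHIFT);          /* requested range */
	uint64_t end = align_to_cluster(start);              /* stay within this cluster */

	if (start + len < end)
		end = start + len;

	printf("zero blocks [%llu, %llu)\n",
	       (unsigned long long)blocks_for_bytes(start),
	       (unsigned long long)blocks_for_bytes(end));
	return 0;
}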
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 9c68b2da14c63..e5a4c68093fc2 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -1260,6 +1260,8 @@ enum mlx5_fc_bulk_alloc_bitmask {
+ 
+ #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+ 
++#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
++
+ enum {
+ 	MLX5_STEERING_FORMAT_CONNECTX_5   = 0,
+ 	MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index 5e772392a3795..136b1d996075c 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
+  * To differentiate it from the pte_mkyoung macro, this macro is used on
+  * platforms where software maintains the page access bit.
+  */
++#ifndef pte_sw_mkyoung
++static inline pte_t pte_sw_mkyoung(pte_t pte)
++{
++	return pte;
++}
++#define pte_sw_mkyoung	pte_sw_mkyoung
++#endif
++
+ #ifndef pte_savedwrite
+ #define pte_savedwrite pte_write
+ #endif
+diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
+index fafc1beea504a..9837fb011f2fb 100644
+--- a/include/linux/platform_data/ti-sysc.h
++++ b/include/linux/platform_data/ti-sysc.h
+@@ -50,6 +50,7 @@ struct sysc_regbits {
+ 	s8 emufree_shift;
+ };
+ 
++#define SYSC_QUIRK_REINIT_ON_RESUME	BIT(27)
+ #define SYSC_QUIRK_GPMC_DEBUG		BIT(26)
+ #define SYSC_MODULE_QUIRK_ENA_RESETDONE	BIT(25)
+ #define SYSC_MODULE_QUIRK_PRUSS		BIT(24)
+diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
+index 48ecca8530ffa..b655d8666f555 100644
+--- a/include/net/caif/caif_dev.h
++++ b/include/net/caif/caif_dev.h
+@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
+  * The link_support layer is used to add any Link Layer specific
+  * framing.
+  */
+-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ 			struct cflayer *link_support, int head_room,
+ 			struct cflayer **layer, int (**rcv_func)(
+ 				struct sk_buff *, struct net_device *,
+diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
+index 2aa5e91d84576..8819ff4db35a6 100644
+--- a/include/net/caif/cfcnfg.h
++++ b/include/net/caif/cfcnfg.h
+@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
+  * @fcs:	Specify if checksum is used in CAIF Framing Layer.
+  * @head_room:	Head space needed by link specific protocol.
+  */
+-void
++int
+ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+ 		     struct net_device *dev, struct cflayer *phy_layer,
+ 		     enum cfcnfg_phy_preference pref,
+diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
+index 14a55e03bb3ce..67cce8757175a 100644
+--- a/include/net/caif/cfserl.h
++++ b/include/net/caif/cfserl.h
+@@ -9,4 +9,5 @@
+ #include <net/caif/caif_layer.h>
+ 
+ struct cflayer *cfserl_create(int instance, bool use_stx);
++void cfserl_release(struct cflayer *layer);
+ #endif
+diff --git a/include/net/tls.h b/include/net/tls.h
+index 3eccb525e8f79..8341a8d1e8073 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -193,7 +193,11 @@ struct tls_offload_context_tx {
+ 	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
+ 
+ enum tls_context_flags {
+-	TLS_RX_SYNC_RUNNING = 0,
++	/* tls_device_down was called after the netdev went down, device state
++	 * was released, and kTLS works in software, even though rx_conf is
++	 * still TLS_HW (needed for transition).
++	 */
++	TLS_RX_DEV_DEGRADED = 0,
+ 	/* Unlike RX where resync is driven entirely by the core in TX only
+ 	 * the driver knows when things went out of sync, so we need the flag
+ 	 * to be atomic.
+@@ -266,6 +270,7 @@ struct tls_context {
+ 
+ 	/* cache cold stuff */
+ 	struct proto *sk_proto;
++	struct sock *sk;
+ 
+ 	void (*sk_destruct)(struct sock *sk);
+ 
+@@ -448,6 +453,9 @@ static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
+ struct sk_buff *
+ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
+ 		      struct sk_buff *skb);
++struct sk_buff *
++tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
++			 struct sk_buff *skb);
+ 
+ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
+ {
+diff --git a/init/main.c b/init/main.c
+index 53b278845b886..5bd1a25f1d6f5 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -1514,7 +1514,7 @@ static noinline void __init kernel_init_freeable(void)
+ 	 */
+ 	set_mems_allowed(node_states[N_MEMORY]);
+ 
+-	cad_pid = task_pid(current);
++	cad_pid = get_pid(task_pid(current));
+ 
+ 	smp_prepare_cpus(setup_max_cpus);
+ 
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 308427fe03a3a..6140e91e9c891 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -14,6 +14,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/proc_ns.h>
++#include <linux/security.h>
+ 
+ #include "../../lib/kstrtox.h"
+ 
+@@ -741,11 +742,13 @@ bpf_base_func_proto(enum bpf_func_id func_id)
+ 	case BPF_FUNC_probe_read_user:
+ 		return &bpf_probe_read_user_proto;
+ 	case BPF_FUNC_probe_read_kernel:
+-		return &bpf_probe_read_kernel_proto;
++		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++		       NULL : &bpf_probe_read_kernel_proto;
+ 	case BPF_FUNC_probe_read_user_str:
+ 		return &bpf_probe_read_user_str_proto;
+ 	case BPF_FUNC_probe_read_kernel_str:
+-		return &bpf_probe_read_kernel_str_proto;
++		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++		       NULL : &bpf_probe_read_kernel_str_proto;
+ 	case BPF_FUNC_snprintf_btf:
+ 		return &bpf_snprintf_btf_proto;
+ 	default:
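Rather than checking lockdown on every helper invocation (the per-call checks removed from bpf_trace.c below), the fix above denies the helper at proto-resolution time, so a locked-down system simply never hands the program a usable proto. A toy sketch of that gate-at-lookup pattern, with an invented resolve() and a stubbed lockdown check:

#include <stdio.h>
#include <stddef.h>

struct proto { const char *name; };

static const struct proto read_kernel_proto = { "probe_read_kernel" };

/* Stand-in for security_locked_down(): returns <0 when locked down. */
static int locked_down(void) { return -1; /* pretend lockdown is active */ }

static const struct proto *resolve(int func_id)
{
	switch (func_id) {
	case 1:
		/* Deny at resolution time, so no per-invocation check is
		 * needed and the program is rejected up front. */
		return locked_down() < 0 ? NULL : &read_kernel_proto;
	default:
		return NULL;
	}
}

int main(void)
{
	const struct proto *p = resolve(1);

	printf("%s\n", p ? p->name : "helper unavailable under lockdown");
	return 0;
}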
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index b0c45d923f0f9..9bb3d2823f442 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -215,16 +215,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
+ static __always_inline int
+ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
+ {
+-	int ret = security_locked_down(LOCKDOWN_BPF_READ);
++	int ret;
+ 
+-	if (unlikely(ret < 0))
+-		goto fail;
+ 	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
+ 	if (unlikely(ret < 0))
+-		goto fail;
+-	return ret;
+-fail:
+-	memset(dst, 0, size);
++		memset(dst, 0, size);
+ 	return ret;
+ }
+ 
+@@ -246,10 +241,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = {
+ static __always_inline int
+ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
+ {
+-	int ret = security_locked_down(LOCKDOWN_BPF_READ);
+-
+-	if (unlikely(ret < 0))
+-		goto fail;
++	int ret;
+ 
+ 	/*
+ 	 * The strncpy_from_kernel_nofault() call will likely not fill the
+@@ -262,11 +254,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
+ 	 */
+ 	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
+ 	if (unlikely(ret < 0))
+-		goto fail;
+-
+-	return ret;
+-fail:
+-	memset(dst, 0, size);
++		memset(dst, 0, size);
+ 	return ret;
+ }
+ 
+@@ -1322,16 +1310,20 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+ 	case BPF_FUNC_probe_read_user:
+ 		return &bpf_probe_read_user_proto;
+ 	case BPF_FUNC_probe_read_kernel:
+-		return &bpf_probe_read_kernel_proto;
++		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++		       NULL : &bpf_probe_read_kernel_proto;
+ 	case BPF_FUNC_probe_read_user_str:
+ 		return &bpf_probe_read_user_str_proto;
+ 	case BPF_FUNC_probe_read_kernel_str:
+-		return &bpf_probe_read_kernel_str_proto;
++		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++		       NULL : &bpf_probe_read_kernel_str_proto;
+ #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ 	case BPF_FUNC_probe_read:
+-		return &bpf_probe_read_compat_proto;
++		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++		       NULL : &bpf_probe_read_compat_proto;
+ 	case BPF_FUNC_probe_read_str:
+-		return &bpf_probe_read_compat_str_proto;
++		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++		       NULL : &bpf_probe_read_compat_str_proto;
+ #endif
+ #ifdef CONFIG_CGROUPS
+ 	case BPF_FUNC_get_current_cgroup_id:
+diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
+index a9bd6ce1ba02b..726fd2030f645 100644
+--- a/mm/debug_vm_pgtable.c
++++ b/mm/debug_vm_pgtable.c
+@@ -192,7 +192,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
+ 
+ 	pr_debug("Validating PMD advanced\n");
+ 	/* Align the address wrt HPAGE_PMD_SIZE */
+-	vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
++	vaddr &= HPAGE_PMD_MASK;
+ 
+ 	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
+ 
+@@ -330,7 +330,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
+ 
+ 	pr_debug("Validating PUD advanced\n");
+ 	/* Align the address wrt HPAGE_PUD_SIZE */
+-	vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
++	vaddr &= HPAGE_PUD_MASK;
+ 
+ 	set_pud_at(mm, vaddr, pudp, pud);
+ 	pudp_set_wrprotect(mm, vaddr, pudp);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 96b722af092e7..ce63ec0187c55 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4705,10 +4705,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+ 	struct page *page;
+ 
+ 	if (!*pagep) {
+-		ret = -ENOMEM;
++		/* If a page already exists, then it's UFFDIO_COPY for
++		 * a non-missing case. Return -EEXIST.
++		 */
++		if (vm_shared &&
++		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
++			ret = -EEXIST;
++			goto out;
++		}
++
+ 		page = alloc_huge_page(dst_vma, dst_addr, 0);
+-		if (IS_ERR(page))
++		if (IS_ERR(page)) {
++			ret = -ENOMEM;
+ 			goto out;
++		}
+ 
+ 		ret = copy_huge_page_from_user(page,
+ 						(const void __user *) src_addr,
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index f0be2c5038b5d..6f29981e317fe 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -20,6 +20,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/random.h>
+ #include <linux/rcupdate.h>
++#include <linux/sched/sysctl.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+@@ -620,7 +621,16 @@ static void toggle_allocation_gate(struct work_struct *work)
+ 	/* Enable static key, and await allocation to happen. */
+ 	static_branch_enable(&kfence_allocation_key);
+ 
+-	wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), HZ);
++	if (sysctl_hung_task_timeout_secs) {
++		/*
++		 * During low activity with no allocations we might wait a
++		 * while; let's avoid the hung task warning.
++		 */
++		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
++					sysctl_hung_task_timeout_secs * HZ / 2);
++	} else {
++		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
++	}
+ 
+ 	/* Disable static key and reset timer. */
+ 	static_branch_disable(&kfence_allocation_key);
+diff --git a/mm/memory.c b/mm/memory.c
+index 550405fc3b5e6..14a6c66b37483 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2896,6 +2896,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+ 		}
+ 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
+ 		entry = mk_pte(new_page, vma->vm_page_prot);
++		entry = pte_sw_mkyoung(entry);
+ 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ 
+ 		/*
+@@ -3561,6 +3562,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
+ 	__SetPageUptodate(page);
+ 
+ 	entry = mk_pte(page, vma->vm_page_prot);
++	entry = pte_sw_mkyoung(entry);
+ 	if (vma->vm_flags & VM_WRITE)
+ 		entry = pte_mkwrite(pte_mkdirty(entry));
+ 
+@@ -3745,6 +3747,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+ 
+ 	if (prefault && arch_wants_old_prefaulted_pte())
+ 		entry = pte_mkold(entry);
++	else
++		entry = pte_sw_mkyoung(entry);
+ 
+ 	if (write)
+ 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 4bb3cdfc47f87..d9dbf45f7590e 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -8951,6 +8951,8 @@ bool take_page_off_buddy(struct page *page)
+ 			del_page_from_free_list(page_head, zone, page_order);
+ 			break_down_buddy_pages(zone, page_head, page, 0,
+ 						page_order, migratetype);
++			if (!is_migrate_isolate(migratetype))
++				__mod_zone_freepage_state(zone, -1, migratetype);
+ 			ret = true;
+ 			break;
+ 		}
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index b0d9c36acc033..4fad2ca661ed9 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1608,8 +1608,13 @@ setup_failed:
+ 	} else {
+ 		/* Init failed, cleanup */
+ 		flush_work(&hdev->tx_work);
+-		flush_work(&hdev->cmd_work);
++
++		/* Since hci_rx_work() can queue new cmd_work, it should be
++		 * flushed first to avoid an unexpected call of
++		 * hci_cmd_work().
++		 */
+ 		flush_work(&hdev->rx_work);
++		flush_work(&hdev->cmd_work);
+ 
+ 		skb_queue_purge(&hdev->cmd_q);
+ 		skb_queue_purge(&hdev->rx_q);
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 251b9128f530a..eed0dd066e12c 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -762,7 +762,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
+ 		/* Detach sockets from device */
+ 		read_lock(&hci_sk_list.lock);
+ 		sk_for_each(sk, &hci_sk_list.head) {
+-			bh_lock_sock_nested(sk);
++			lock_sock(sk);
+ 			if (hci_pi(sk)->hdev == hdev) {
+ 				hci_pi(sk)->hdev = NULL;
+ 				sk->sk_err = EPIPE;
+@@ -771,7 +771,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
+ 
+ 				hci_dev_put(hdev);
+ 			}
+-			bh_unlock_sock(sk);
++			release_sock(sk);
+ 		}
+ 		read_unlock(&hci_sk_list.lock);
+ 	}
+diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
+index c10e5a55758d2..440139706130a 100644
+--- a/net/caif/caif_dev.c
++++ b/net/caif/caif_dev.c
+@@ -308,7 +308,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
+ 	caifd_put(caifd);
+ }
+ 
+-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ 		     struct cflayer *link_support, int head_room,
+ 		     struct cflayer **layer,
+ 		     int (**rcv_func)(struct sk_buff *, struct net_device *,
+@@ -319,11 +319,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ 	enum cfcnfg_phy_preference pref;
+ 	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
+ 	struct caif_device_entry_list *caifdevs;
++	int res;
+ 
+ 	caifdevs = caif_device_list(dev_net(dev));
+ 	caifd = caif_device_alloc(dev);
+ 	if (!caifd)
+-		return;
++		return -ENOMEM;
+ 	*layer = &caifd->layer;
+ 	spin_lock_init(&caifd->flow_lock);
+ 
+@@ -344,7 +345,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ 	strlcpy(caifd->layer.name, dev->name,
+ 		sizeof(caifd->layer.name));
+ 	caifd->layer.transmit = transmit;
+-	cfcnfg_add_phy_layer(cfg,
++	res = cfcnfg_add_phy_layer(cfg,
+ 				dev,
+ 				&caifd->layer,
+ 				pref,
+@@ -354,6 +355,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ 	mutex_unlock(&caifdevs->lock);
+ 	if (rcv_func)
+ 		*rcv_func = receive;
++	return res;
+ }
+ EXPORT_SYMBOL(caif_enroll_dev);
+ 
+@@ -368,6 +370,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
+ 	struct cflayer *layer, *link_support;
+ 	int head_room = 0;
+ 	struct caif_device_entry_list *caifdevs;
++	int res;
+ 
+ 	cfg = get_cfcnfg(dev_net(dev));
+ 	caifdevs = caif_device_list(dev_net(dev));
+@@ -393,8 +396,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
+ 				break;
+ 			}
+ 		}
+-		caif_enroll_dev(dev, caifdev, link_support, head_room,
++		res = caif_enroll_dev(dev, caifdev, link_support, head_room,
+ 				&layer, NULL);
++		if (res)
++			cfserl_release(link_support);
+ 		caifdev->flowctrl = dev_flowctrl;
+ 		break;
+ 
+diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
+index a0116b9503d9d..b02e1292f7f19 100644
+--- a/net/caif/caif_usb.c
++++ b/net/caif/caif_usb.c
+@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+ 	return (struct cflayer *) this;
+ }
+ 
++static void cfusbl_release(struct cflayer *layer)
++{
++	kfree(layer);
++}
++
+ static struct packet_type caif_usb_type __read_mostly = {
+ 	.type = cpu_to_be16(ETH_P_802_EX1),
+ };
+@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+ 	struct cflayer *layer, *link_support;
+ 	struct usbnet *usbnet;
+ 	struct usb_device *usbdev;
++	int res;
+ 
+ 	/* Check whether we have a NCM device, and find its VID/PID. */
+ 	if (!(dev->dev.parent && dev->dev.parent->driver &&
+@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+ 	if (dev->num_tx_queues > 1)
+ 		pr_warn("USB device uses more than one tx queue\n");
+ 
+-	caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
++	res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+ 			&layer, &caif_usb_type.func);
++	if (res)
++		goto err;
++
+ 	if (!pack_added)
+ 		dev_add_pack(&caif_usb_type);
+ 	pack_added = true;
+@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+ 	strlcpy(layer->name, dev->name, sizeof(layer->name));
+ 
+ 	return 0;
++err:
++	cfusbl_release(link_support);
++	return res;
+ }
+ 
+ static struct notifier_block caif_device_notifier = {
+diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
+index 399239a14420f..cac30e676ac94 100644
+--- a/net/caif/cfcnfg.c
++++ b/net/caif/cfcnfg.c
+@@ -450,7 +450,7 @@ unlock:
+ 	rcu_read_unlock();
+ }
+ 
+-void
++int
+ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+ 		     struct net_device *dev, struct cflayer *phy_layer,
+ 		     enum cfcnfg_phy_preference pref,
+@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+ {
+ 	struct cflayer *frml;
+ 	struct cfcnfg_phyinfo *phyinfo = NULL;
+-	int i;
++	int i, res = 0;
+ 	u8 phyid;
+ 
+ 	mutex_lock(&cnfg->lock);
+@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+ 			goto got_phyid;
+ 	}
+ 	pr_warn("Too many CAIF Link Layers (max 6)\n");
++	res = -EEXIST;
+ 	goto out;
+ 
+ got_phyid:
+ 	phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
+-	if (!phyinfo)
++	if (!phyinfo) {
++		res = -ENOMEM;
+ 		goto out_err;
++	}
+ 
+ 	phy_layer->id = phyid;
+ 	phyinfo->pref = pref;
+@@ -492,8 +495,10 @@ got_phyid:
+ 
+ 	frml = cffrml_create(phyid, fcs);
+ 
+-	if (!frml)
++	if (!frml) {
++		res = -ENOMEM;
+ 		goto out_err;
++	}
+ 	phyinfo->frm_layer = frml;
+ 	layer_set_up(frml, cnfg->mux);
+ 
+@@ -511,11 +516,12 @@ got_phyid:
+ 	list_add_rcu(&phyinfo->node, &cnfg->phys);
+ out:
+ 	mutex_unlock(&cnfg->lock);
+-	return;
++	return res;
+ 
+ out_err:
+ 	kfree(phyinfo);
+ 	mutex_unlock(&cnfg->lock);
++	return res;
+ }
+ EXPORT_SYMBOL(cfcnfg_add_phy_layer);
+ 
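The thread running through the CAIF changes is error propagation: cfcnfg_add_phy_layer() and caif_enroll_dev() now return int, and every caller frees link_support when enrollment fails (via cfserl_release()/cfusbl_release() above and in cfserl.c below). A tiny sketch of that convert-void-to-int pattern, using invented names (register_layer, struct layer) rather than the CAIF API:

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

struct layer { int id; };

/* Was "void register_layer(...)": callers leaked on silent failure. */
static int register_layer(struct layer **out)
{
	*out = malloc(sizeof(**out));
	if (!*out)
		return -ENOMEM;	/* now reported instead of swallowed */
	(*out)->id = 1;
	return 0;
}

int main(void)
{
	struct layer *support = malloc(sizeof(*support)); /* like link_support */
	struct layer *enrolled;
	int res = register_layer(&enrolled);

	if (res) {
		free(support);	/* mirrors cfserl_release() on failure */
		return 1;
	}
	printf("enrolled layer %d\n", enrolled->id);
	free(enrolled);
	free(support);
	return 0;
}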
+diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
+index e11725a4bb0ed..40cd57ad0a0f4 100644
+--- a/net/caif/cfserl.c
++++ b/net/caif/cfserl.c
+@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+ static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ 			   int phyid);
+ 
++void cfserl_release(struct cflayer *layer)
++{
++	kfree(layer);
++}
++
+ struct cflayer *cfserl_create(int instance, bool use_stx)
+ {
+ 	struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 737b61c2976e1..4c363fa7d4d11 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -705,7 +705,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
+ 	case DEVLINK_PORT_FLAVOUR_PHYSICAL:
+ 	case DEVLINK_PORT_FLAVOUR_CPU:
+ 	case DEVLINK_PORT_FLAVOUR_DSA:
+-	case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+ 		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
+ 				attrs->phys.port_number))
+ 			return -EMSGSIZE;
+@@ -8629,7 +8628,6 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
+ 
+ 	switch (attrs->flavour) {
+ 	case DEVLINK_PORT_FLAVOUR_PHYSICAL:
+-	case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+ 		if (!attrs->split)
+ 			n = snprintf(name, len, "p%u", attrs->phys.port_number);
+ 		else
+@@ -8670,6 +8668,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
+ 		n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
+ 			     attrs->pci_sf.sf);
+ 		break;
++	case DEVLINK_PORT_FLAVOUR_VIRTUAL:
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	if (n >= len)
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 98f20efbfadf2..bf774575ad716 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -238,6 +238,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ 
+ 			write_lock(&n->lock);
+ 			if ((n->nud_state == NUD_FAILED) ||
++			    (n->nud_state == NUD_NOARP) ||
+ 			    (tbl->is_multicast &&
+ 			     tbl->is_multicast(n->primary_key)) ||
+ 			    time_after(tref, n->updated))
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 9c7b143e7a964..a266760cd65ea 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -815,10 +815,18 @@ void sock_set_rcvbuf(struct sock *sk, int val)
+ }
+ EXPORT_SYMBOL(sock_set_rcvbuf);
+ 
++static void __sock_set_mark(struct sock *sk, u32 val)
++{
++	if (val != sk->sk_mark) {
++		sk->sk_mark = val;
++		sk_dst_reset(sk);
++	}
++}
++
+ void sock_set_mark(struct sock *sk, u32 val)
+ {
+ 	lock_sock(sk);
+-	sk->sk_mark = val;
++	__sock_set_mark(sk, val);
+ 	release_sock(sk);
+ }
+ EXPORT_SYMBOL(sock_set_mark);
+@@ -1126,10 +1134,10 @@ set_sndbuf:
+ 	case SO_MARK:
+ 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+ 			ret = -EPERM;
+-		} else if (val != sk->sk_mark) {
+-			sk->sk_mark = val;
+-			sk_dst_reset(sk);
++			break;
+ 		}
++
++		__sock_set_mark(sk, val);
+ 		break;
+ 
+ 	case SO_RXQ_OVFL:
+diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
+index 008c1ec6e20c1..122ad5833fb1c 100644
+--- a/net/dsa/tag_8021q.c
++++ b/net/dsa/tag_8021q.c
+@@ -64,7 +64,7 @@
+ #define DSA_8021Q_SUBVLAN_HI_SHIFT	9
+ #define DSA_8021Q_SUBVLAN_HI_MASK	GENMASK(9, 9)
+ #define DSA_8021Q_SUBVLAN_LO_SHIFT	4
+-#define DSA_8021Q_SUBVLAN_LO_MASK	GENMASK(4, 3)
++#define DSA_8021Q_SUBVLAN_LO_MASK	GENMASK(5, 4)
+ #define DSA_8021Q_SUBVLAN_HI(x)		(((x) & GENMASK(2, 2)) >> 2)
+ #define DSA_8021Q_SUBVLAN_LO(x)		((x) & GENMASK(1, 0))
+ #define DSA_8021Q_SUBVLAN(x)		\
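The tag_8021q fix lines up DSA_8021Q_SUBVLAN_LO_MASK with DSA_8021Q_SUBVLAN_LO_SHIFT: a two-bit field shifted left by 4 lives in bits 5:4, so the old GENMASK(4, 3) silently dropped the field's high bit. A small sketch that round-trips the field through the corrected mask — GENMASK is defined locally here, so this is not the kernel header:

#include <stdio.h>

#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

#define SUBVLAN_LO_SHIFT 4
#define SUBVLAN_LO_MASK  GENMASK(5, 4)	/* 2-bit field at bits 5:4 */

int main(void)
{
	for (unsigned int v = 0; v < 4; v++) {
		unsigned int packed = (v << SUBVLAN_LO_SHIFT) & SUBVLAN_LO_MASK;
		unsigned int back = (packed & SUBVLAN_LO_MASK) >> SUBVLAN_LO_SHIFT;

		/* With the old GENMASK(4, 3), bit 1 of v was lost here. */
		printf("v=%u -> packed=0x%x -> back=%u\n", v, packed, back);
	}
	return 0;
}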
+diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
+index 0c1b0770c59ea..c23c152860b73 100644
+--- a/net/ieee802154/nl-mac.c
++++ b/net/ieee802154/nl-mac.c
+@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
+ 	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
+ 	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+ 			be32_to_cpu(params.frame_counter)) ||
+-	    ieee802154_llsec_fill_key_id(msg, &params.out_key))
++	    ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
++		rc = -ENOBUFS;
+ 		goto out_free;
++	}
+ 
+ 	dev_put(dev);
+ 
+diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
+index 2cdc7e63fe172..88215b5c93aa4 100644
+--- a/net/ieee802154/nl-phy.c
++++ b/net/ieee802154/nl-phy.c
+@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+-	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
++	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
++		rc = -EMSGSIZE;
+ 		goto nla_put_failure;
++	}
+ 	dev_put(dev);
+ 
+ 	wpan_phy_put(phy);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 373d48073106f..36e80b3598b01 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3676,11 +3676,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ 	if (nh) {
+ 		if (rt->fib6_src.plen) {
+ 			NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
+-			goto out;
++			goto out_free;
+ 		}
+ 		if (!nexthop_get(nh)) {
+ 			NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+-			goto out;
++			goto out_free;
+ 		}
+ 		rt->nh = nh;
+ 		fib6_nh = nexthop_fib6_nh(rt->nh);
+@@ -3717,6 +3717,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ out:
+ 	fib6_info_release(rt);
+ 	return ERR_PTR(err);
++out_free:
++	ip_fib_metrics_put(rt->fib6_metrics);
++	kfree(rt);
++	return ERR_PTR(err);
+ }
+ 
+ int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
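+
The dedicated out_free label is needed because at those two failure points the fib6_info is only partially initialized, so the generic fib6_info_release() in the common out path would touch half-built state; only the metrics and the object itself may be freed. The same staged-unwind idiom in a purely illustrative form (the widget names are hypothetical):

#include <stdlib.h>

struct widget { int *metrics; };

static int widget_register(struct widget *w) { (void)w; return 0; }	/* stub */

/* Free only what has been constructed so far; never run the full
 * release path on a half-initialized object. */
static struct widget *widget_create(void)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	w->metrics = calloc(4, sizeof(*w->metrics));
	if (!w->metrics)
		goto out_free;		/* only w itself exists */
	if (widget_register(w) < 0)
		goto out_free_metrics;	/* metrics exist too */
	return w;

out_free_metrics:
	free(w->metrics);
out_free:
	free(w);
	return NULL;
}

int main(void)
{
	struct widget *w = widget_create();

	if (w)
		free(w->metrics);
	free(w);
	return 0;
}
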
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 228dd40828c4b..225b988215171 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -937,6 +937,10 @@ static void __mptcp_update_wmem(struct sock *sk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
++#ifdef CONFIG_LOCKDEP
++	WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
++#endif
++
+ 	if (!msk->wmem_reserved)
+ 		return;
+ 
+@@ -1075,10 +1079,20 @@ out:
+ 
+ static void __mptcp_clean_una_wakeup(struct sock *sk)
+ {
++#ifdef CONFIG_LOCKDEP
++	WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
++#endif
+ 	__mptcp_clean_una(sk);
+ 	mptcp_write_space(sk);
+ }
+ 
++static void mptcp_clean_una_wakeup(struct sock *sk)
++{
++	mptcp_data_lock(sk);
++	__mptcp_clean_una_wakeup(sk);
++	mptcp_data_unlock(sk);
++}
++
+ static void mptcp_enter_memory_pressure(struct sock *sk)
+ {
+ 	struct mptcp_subflow_context *subflow;
+@@ -2288,7 +2302,7 @@ static void __mptcp_retrans(struct sock *sk)
+ 	struct sock *ssk;
+ 	int ret;
+ 
+-	__mptcp_clean_una_wakeup(sk);
++	mptcp_clean_una_wakeup(sk);
+ 	dfrag = mptcp_rtx_head(sk);
+ 	if (!dfrag) {
+ 		if (mptcp_data_fin_enabled(msk)) {
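+
The lockdep assertions plus the new mptcp_clean_una_wakeup() wrapper follow the usual kernel split: a __-prefixed variant that asserts its caller already holds the lock, and a plain variant that acquires it. A rough userspace analogue with pthreads (illustrative only; trylock returning EBUSY is a crude stand-in for lockdep_is_held(), and the assert vanishes under NDEBUG):

#include <assert.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* __-variant: caller must already hold data_lock. */
static void __bump_counter(void)
{
	/* POSIX: trylock on a held (default) mutex returns EBUSY. */
	assert(pthread_mutex_trylock(&data_lock) == EBUSY);
	counter++;
}

/* Locked wrapper for callers that do not hold the lock yet. */
static void bump_counter(void)
{
	pthread_mutex_lock(&data_lock);
	__bump_counter();
	pthread_mutex_unlock(&data_lock);
}

int main(void)
{
	bump_counter();
	return counter != 1;
}
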
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 1936db3574d2e..8425cd393bf3e 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -608,21 +608,20 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 
+ 	/* if the sk is MP_CAPABLE, we try to fetch the client key */
+ 	if (subflow_req->mp_capable) {
+-		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
+-			/* here we can receive and accept an in-window,
+-			 * out-of-order pkt, which will not carry the MP_CAPABLE
+-			 * opt even on mptcp enabled paths
+-			 */
+-			goto create_msk;
+-		}
+-
++		/* we can receive and accept an in-window, out-of-order pkt,
++		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
++		 * paths: always try to extract the peer key, and fallback
++		 * for packets missing it.
++		 * Even OoO DSS packets coming legitimately after dropped or
++		 * reordered MPC will cause fallback, but we don't have other
++		 * options.
++		 */
+ 		mptcp_get_options(skb, &mp_opt);
+ 		if (!mp_opt.mp_capable) {
+ 			fallback = true;
+ 			goto create_child;
+ 		}
+ 
+-create_msk:
+ 		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
+ 		if (!new_msk)
+ 			fallback = true;
+@@ -985,22 +984,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 		u64 old_ack;
+ 
+ 		status = get_mapping_status(ssk, msk);
+-		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
+-		if (status == MAPPING_INVALID) {
+-			ssk->sk_err = EBADMSG;
+-			goto fatal;
+-		}
+-		if (status == MAPPING_DUMMY) {
+-			__mptcp_do_fallback(msk);
+-			skb = skb_peek(&ssk->sk_receive_queue);
+-			subflow->map_valid = 1;
+-			subflow->map_seq = READ_ONCE(msk->ack_seq);
+-			subflow->map_data_len = skb->len;
+-			subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
+-						   subflow->ssn_offset;
+-			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+-			return true;
+-		}
++		if (unlikely(status == MAPPING_INVALID))
++			goto fallback;
++
++		if (unlikely(status == MAPPING_DUMMY))
++			goto fallback;
+ 
+ 		if (status != MAPPING_OK)
+ 			goto no_data;
+@@ -1013,10 +1001,8 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 		 * MP_CAPABLE-based mapping
+ 		 */
+ 		if (unlikely(!READ_ONCE(msk->can_ack))) {
+-			if (!subflow->mpc_map) {
+-				ssk->sk_err = EBADMSG;
+-				goto fatal;
+-			}
++			if (!subflow->mpc_map)
++				goto fallback;
+ 			WRITE_ONCE(msk->remote_key, subflow->remote_key);
+ 			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
+ 			WRITE_ONCE(msk->can_ack, true);
+@@ -1044,15 +1030,29 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ no_data:
+ 	subflow_sched_work_if_closed(msk, ssk);
+ 	return false;
+-fatal:
+-	/* fatal protocol error, close the socket */
+-	/* This barrier is coupled with smp_rmb() in tcp_poll() */
+-	smp_wmb();
+-	ssk->sk_error_report(ssk);
+-	tcp_set_state(ssk, TCP_CLOSE);
+-	tcp_send_active_reset(ssk, GFP_ATOMIC);
+-	subflow->data_avail = 0;
+-	return false;
++
++fallback:
++	/* RFC 8684 section 3.7. */
++	if (subflow->mp_join || subflow->fully_established) {
++		/* fatal protocol error, close the socket.
++		 * subflow_error_report() will introduce the appropriate barriers
++		 */
++		ssk->sk_err = EBADMSG;
++		ssk->sk_error_report(ssk);
++		tcp_set_state(ssk, TCP_CLOSE);
++		tcp_send_active_reset(ssk, GFP_ATOMIC);
++		subflow->data_avail = 0;
++		return false;
++	}
++
++	__mptcp_do_fallback(msk);
++	skb = skb_peek(&ssk->sk_receive_queue);
++	subflow->map_valid = 1;
++	subflow->map_seq = READ_ONCE(msk->ack_seq);
++	subflow->map_data_len = skb->len;
++	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
++	subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
++	return true;
+ }
+ 
+ bool mptcp_subflow_data_available(struct sock *sk)
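+
Per RFC 8684 section 3.7, only a first, not yet fully established MP_CAPABLE subflow may silently fall back to plain TCP; MP_JOIN subflows and established connections must be reset instead, which is the branch taken in the fallback label above. A hypothetical condensation of that policy (the helper and struct here are ours, not the kernel's):

#include <stdbool.h>

/* Toy stand-in for struct mptcp_subflow_context; only the two
 * flags consulted by the fallback decision are modeled. */
struct subflow_state {
	bool mp_join;		/* MP_JOIN subflow: never falls back */
	bool fully_established;	/* handshake done: never falls back */
};

/* Hypothetical predicate mirroring the branch above. */
static bool subflow_can_fallback(const struct subflow_state *sf)
{
	return !sf->mp_join && !sf->fully_established;
}

int main(void)
{
	struct subflow_state sf = { .mp_join = false, .fully_established = false };

	return subflow_can_fallback(&sf) ? 0 : 1;
}
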
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index d45dbcba8b49c..c25097092a060 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1367,7 +1367,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
+ 	ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
+ 	svc->port = u->port;
+ 	svc->fwmark = u->fwmark;
+-	svc->flags = u->flags;
++	svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
+ 	svc->timeout = u->timeout * HZ;
+ 	svc->netmask = u->netmask;
+ 	svc->ipvs = ipvs;
+diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
+index 47e9319d2cf31..71892822bbf5d 100644
+--- a/net/netfilter/nf_conntrack_proto.c
++++ b/net/netfilter/nf_conntrack_proto.c
+@@ -660,7 +660,7 @@ int nf_conntrack_proto_init(void)
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ cleanup_sockopt:
+-	nf_unregister_sockopt(&so_getorigdst6);
++	nf_unregister_sockopt(&so_getorigdst);
+ #endif
+ 	return ret;
+ }
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 878ed49d0c569..31016c144c48b 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3288,8 +3288,10 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+ 			if (n == NFT_RULE_MAXEXPRS)
+ 				goto err1;
+ 			err = nf_tables_expr_parse(&ctx, tmp, &info[n]);
+-			if (err < 0)
++			if (err < 0) {
++				NL_SET_BAD_ATTR(extack, tmp);
+ 				goto err1;
++			}
+ 			size += info[n].ops->size;
+ 			n++;
+ 		}
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 0f94fce1d3ed3..04a12a264cf74 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -380,10 +380,14 @@ static int
+ nfnl_cthelper_update(const struct nlattr * const tb[],
+ 		     struct nf_conntrack_helper *helper)
+ {
++	u32 size;
+ 	int ret;
+ 
+-	if (tb[NFCTH_PRIV_DATA_LEN])
+-		return -EBUSY;
++	if (tb[NFCTH_PRIV_DATA_LEN]) {
++		size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
++		if (size != helper->data_len)
++			return -EBUSY;
++	}
+ 
+ 	if (tb[NFCTH_POLICY]) {
+ 		ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 882fe8648653d..6d2b382f5e075 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -1216,7 +1216,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
+ 	struct nf_conn *ct;
+ 
+ 	ct = nf_ct_get(pkt->skb, &ctinfo);
+-	if (!ct || ctinfo == IP_CT_UNTRACKED) {
++	if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
+ 		regs->verdict.code = NFT_BREAK;
+ 		return;
+ 	}
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 53dbe733f9981..6cfd30fc07985 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ 	if (!llcp_sock->service_name) {
+ 		nfc_llcp_local_put(llcp_sock->local);
+ 		llcp_sock->local = NULL;
++		llcp_sock->dev = NULL;
+ 		ret = -ENOMEM;
+ 		goto put_dev;
+ 	}
+@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ 		llcp_sock->local = NULL;
+ 		kfree(llcp_sock->service_name);
+ 		llcp_sock->service_name = NULL;
++		llcp_sock->dev = NULL;
+ 		ret = -EADDRINUSE;
+ 		goto put_dev;
+ 	}
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 48fdf7293deaa..ba7f57cb41c30 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -984,7 +984,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ 	 */
+ 	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
+ 	if (!cached) {
+-		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
++		if (tcf_ct_flow_table_lookup(p, skb, family)) {
+ 			skip_add = true;
+ 			goto do_nat;
+ 		}
+@@ -1024,10 +1024,11 @@ do_nat:
+ 		 * even if the connection is already confirmed.
+ 		 */
+ 		nf_conntrack_confirm(skb);
+-	} else if (!skip_add) {
+-		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
+ 	}
+ 
++	if (!skip_add)
++		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
++
+ out_push:
+ 	skb_push_rcsum(skb, nh_ofs);
+ 
+@@ -1204,9 +1205,6 @@ static int tcf_ct_fill_params(struct net *net,
+ 				   sizeof(p->zone));
+ 	}
+ 
+-	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
+-		return 0;
+-
+ 	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
+ 	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
+ 	if (!tmpl) {
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 081c11d5717c4..8827987ba9034 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -1488,7 +1488,8 @@ static void htb_parent_to_leaf_offload(struct Qdisc *sch,
+ 	struct Qdisc *old_q;
+ 
+ 	/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
+-	qdisc_refcount_inc(new_q);
++	if (new_q)
++		qdisc_refcount_inc(new_q);
+ 	old_q = htb_graft_helper(dev_queue, new_q);
+ 	WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
+ }
+@@ -1675,10 +1676,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
+ 					  cl->parent->common.classid,
+ 					  NULL);
+ 		if (q->offload) {
+-			if (new_q) {
++			if (new_q)
+ 				htb_set_lockdep_class_child(new_q);
+-				htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+-			}
++			htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+ 		}
+ 	}
+ 
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index a4389ef08a980..0c8882052ba08 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -243,7 +243,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
+  */
+ static int tipc_enable_bearer(struct net *net, const char *name,
+ 			      u32 disc_domain, u32 prio,
+-			      struct nlattr *attr[])
++			      struct nlattr *attr[],
++			      struct netlink_ext_ack *extack)
+ {
+ 	struct tipc_net *tn = tipc_net(net);
+ 	struct tipc_bearer_names b_names;
+@@ -254,20 +255,24 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+ 	int bearer_id = 0;
+ 	int res = -EINVAL;
+ 	char *errstr = "";
++	u32 i;
+ 
+ 	if (!bearer_name_validate(name, &b_names)) {
+ 		errstr = "illegal name";
++		NL_SET_ERR_MSG(extack, "Illegal name");
+ 		goto rejected;
+ 	}
+ 
+ 	if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
+ 		errstr = "illegal priority";
++		NL_SET_ERR_MSG(extack, "Illegal priority");
+ 		goto rejected;
+ 	}
+ 
+ 	m = tipc_media_find(b_names.media_name);
+ 	if (!m) {
+ 		errstr = "media not registered";
++		NL_SET_ERR_MSG(extack, "Media not registered");
+ 		goto rejected;
+ 	}
+ 
+@@ -275,33 +280,43 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+ 		prio = m->priority;
+ 
+ 	/* Check new bearer vs existing ones and find free bearer id if any */
+-	while (bearer_id < MAX_BEARERS) {
+-		b = rtnl_dereference(tn->bearer_list[bearer_id]);
+-		if (!b)
+-			break;
++	bearer_id = MAX_BEARERS;
++	i = MAX_BEARERS;
++	while (i-- != 0) {
++		b = rtnl_dereference(tn->bearer_list[i]);
++		if (!b) {
++			bearer_id = i;
++			continue;
++		}
+ 		if (!strcmp(name, b->name)) {
+ 			errstr = "already enabled";
++			NL_SET_ERR_MSG(extack, "Already enabled");
+ 			goto rejected;
+ 		}
+-		bearer_id++;
+-		if (b->priority != prio)
+-			continue;
+-		if (++with_this_prio <= 2)
+-			continue;
+-		pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
+-			name, prio);
+-		if (prio == TIPC_MIN_LINK_PRI) {
+-			errstr = "cannot adjust to lower";
+-			goto rejected;
++
++		if (b->priority == prio &&
++		    (++with_this_prio > 2)) {
++			pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
++				name, prio);
++
++			if (prio == TIPC_MIN_LINK_PRI) {
++				errstr = "cannot adjust to lower";
++				NL_SET_ERR_MSG(extack, "Cannot adjust to lower");
++				goto rejected;
++			}
++
++			pr_warn("Bearer <%s>: trying with adjusted priority\n",
++				name);
++			prio--;
++			bearer_id = MAX_BEARERS;
++			i = MAX_BEARERS;
++			with_this_prio = 1;
+ 		}
+-		pr_warn("Bearer <%s>: trying with adjusted priority\n", name);
+-		prio--;
+-		bearer_id = 0;
+-		with_this_prio = 1;
+ 	}
+ 
+ 	if (bearer_id >= MAX_BEARERS) {
+ 		errstr = "max 3 bearers permitted";
++		NL_SET_ERR_MSG(extack, "Max 3 bearers permitted");
+ 		goto rejected;
+ 	}
+ 
+@@ -315,6 +330,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+ 	if (res) {
+ 		kfree(b);
+ 		errstr = "failed to enable media";
++		NL_SET_ERR_MSG(extack, "Failed to enable media");
+ 		goto rejected;
+ 	}
+ 
+@@ -331,6 +347,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+ 	if (res) {
+ 		bearer_disable(net, b);
+ 		errstr = "failed to create discoverer";
++		NL_SET_ERR_MSG(extack, "Failed to create discoverer");
+ 		goto rejected;
+ 	}
+ 
+@@ -909,6 +926,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
+ 	bearer = tipc_bearer_find(net, name);
+ 	if (!bearer) {
+ 		err = -EINVAL;
++		NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ 		goto err_out;
+ 	}
+ 
+@@ -948,8 +966,10 @@ int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+ 	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+ 
+ 	bearer = tipc_bearer_find(net, name);
+-	if (!bearer)
++	if (!bearer) {
++		NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ 		return -EINVAL;
++	}
+ 
+ 	bearer_disable(net, bearer);
+ 
+@@ -1007,7 +1027,8 @@ int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+ 			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ 	}
+ 
+-	return tipc_enable_bearer(net, bearer, domain, prio, attrs);
++	return tipc_enable_bearer(net, bearer, domain, prio, attrs,
++				  info->extack);
+ }
+ 
+ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+@@ -1046,6 +1067,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
+ 	b = tipc_bearer_find(net, name);
+ 	if (!b) {
+ 		rtnl_unlock();
++		NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1086,8 +1108,10 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+ 	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+ 
+ 	b = tipc_bearer_find(net, name);
+-	if (!b)
++	if (!b) {
++		NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ 		return -EINVAL;
++	}
+ 
+ 	if (attrs[TIPC_NLA_BEARER_PROP]) {
+ 		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+@@ -1106,12 +1130,18 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+ 		if (props[TIPC_NLA_PROP_WIN])
+ 			b->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ 		if (props[TIPC_NLA_PROP_MTU]) {
+-			if (b->media->type_id != TIPC_MEDIA_TYPE_UDP)
++			if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
++				NL_SET_ERR_MSG(info->extack,
++					       "MTU property is unsupported");
+ 				return -EINVAL;
++			}
+ #ifdef CONFIG_TIPC_MEDIA_UDP
+ 			if (tipc_udp_mtu_bad(nla_get_u32
+-					     (props[TIPC_NLA_PROP_MTU])))
++					     (props[TIPC_NLA_PROP_MTU]))) {
++				NL_SET_ERR_MSG(info->extack,
++					       "MTU value is out-of-range");
+ 				return -EINVAL;
++			}
+ 			b->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
+ 			tipc_node_apply_property(net, b, TIPC_NLA_PROP_MTU);
+ #endif
+@@ -1239,6 +1269,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
+ 	rtnl_lock();
+ 	media = tipc_media_find(name);
+ 	if (!media) {
++		NL_SET_ERR_MSG(info->extack, "Media not found");
+ 		err = -EINVAL;
+ 		goto err_out;
+ 	}
+@@ -1275,9 +1306,10 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+ 	name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
+ 
+ 	m = tipc_media_find(name);
+-	if (!m)
++	if (!m) {
++		NL_SET_ERR_MSG(info->extack, "Media not found");
+ 		return -EINVAL;
+-
++	}
+ 	if (attrs[TIPC_NLA_MEDIA_PROP]) {
+ 		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+ 
+@@ -1293,12 +1325,18 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+ 		if (props[TIPC_NLA_PROP_WIN])
+ 			m->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ 		if (props[TIPC_NLA_PROP_MTU]) {
+-			if (m->type_id != TIPC_MEDIA_TYPE_UDP)
++			if (m->type_id != TIPC_MEDIA_TYPE_UDP) {
++				NL_SET_ERR_MSG(info->extack,
++					       "MTU property is unsupported");
+ 				return -EINVAL;
++			}
+ #ifdef CONFIG_TIPC_MEDIA_UDP
+ 			if (tipc_udp_mtu_bad(nla_get_u32
+-					     (props[TIPC_NLA_PROP_MTU])))
++					     (props[TIPC_NLA_PROP_MTU]))) {
++				NL_SET_ERR_MSG(info->extack,
++					       "MTU value is out-of-range");
+ 				return -EINVAL;
++			}
+ 			m->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
+ #endif
+ 		}
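+
The rewritten loop in tipc_enable_bearer() walks the whole table from the top instead of stopping at the first hole, so duplicate names and the two-bearers-per-priority limit are checked against every occupied slot while the lowest free index is remembered. The same shape reduced to a hypothetical standalone helper:

#include <string.h>

#define MAX_SLOTS 3

/* Scan the whole table from the top so every occupied entry is
 * checked; the lowest free index wins because the loop keeps
 * overwriting 'slot' as i counts down. Returns -1 on duplicate
 * or when the table is full. */
static int find_free_slot(const char *names[MAX_SLOTS], const char *name)
{
	int slot = MAX_SLOTS;	/* MAX_SLOTS means "no free slot" */
	int i = MAX_SLOTS;

	while (i-- != 0) {
		if (!names[i]) {
			slot = i;
			continue;
		}
		if (!strcmp(names[i], name))
			return -1;	/* duplicate: reject */
	}
	return slot < MAX_SLOTS ? slot : -1;
}

int main(void)
{
	const char *names[MAX_SLOTS] = { "eth:a", NULL, NULL };

	return find_free_slot(names, "eth:b") == 1 ? 0 : 1;
}
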
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index d9cd229aa111b..9b1ea17f3b1dc 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -50,6 +50,7 @@ static void tls_device_gc_task(struct work_struct *work);
+ static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
+ static LIST_HEAD(tls_device_gc_list);
+ static LIST_HEAD(tls_device_list);
++static LIST_HEAD(tls_device_down_list);
+ static DEFINE_SPINLOCK(tls_device_lock);
+ 
+ static void tls_device_free_ctx(struct tls_context *ctx)
+@@ -680,15 +681,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
+ 	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+ 	struct net_device *netdev;
+ 
+-	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
+-		return;
+-
+ 	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
++	rcu_read_lock();
+ 	netdev = READ_ONCE(tls_ctx->netdev);
+ 	if (netdev)
+ 		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
+ 						   TLS_OFFLOAD_CTX_DIR_RX);
+-	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
++	rcu_read_unlock();
+ 	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
+ }
+ 
+@@ -761,6 +760,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
+ 
+ 	if (tls_ctx->rx_conf != TLS_HW)
+ 		return;
++	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
++		return;
+ 
+ 	prot = &tls_ctx->prot_info;
+ 	rx_ctx = tls_offload_ctx_rx(tls_ctx);
+@@ -963,6 +964,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
+ 
+ 	ctx->sw.decrypted |= is_decrypted;
+ 
++	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
++		if (likely(is_encrypted || is_decrypted))
++			return 0;
++
++		/* After tls_device_down disables the offload, the next SKB will
++		 * likely have initial fragments decrypted, and final ones not
++		 * decrypted. We need to reencrypt that single SKB.
++		 */
++		return tls_device_reencrypt(sk, skb);
++	}
++
+ 	/* Return immediately if the record is either entirely plaintext or
+ 	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
+ 	 * record.
+@@ -1292,6 +1304,26 @@ static int tls_device_down(struct net_device *netdev)
+ 	spin_unlock_irqrestore(&tls_device_lock, flags);
+ 
+ 	list_for_each_entry_safe(ctx, tmp, &list, list)	{
++		/* Stop offloaded TX and switch to the fallback.
++		 * tls_is_sk_tx_device_offloaded will return false.
++		 */
++		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
++
++		/* Stop the RX and TX resync.
++		 * tls_dev_resync must not be called after tls_dev_del.
++		 */
++		WRITE_ONCE(ctx->netdev, NULL);
++
++		/* Start skipping the RX resync logic completely. */
++		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
++
++		/* Sync with inflight packets. After this point:
++		 * TX: no non-encrypted packets will be passed to the driver.
++		 * RX: resync requests from the driver will be ignored.
++		 */
++		synchronize_net();
++
++		/* Release the offload context on the driver side. */
+ 		if (ctx->tx_conf == TLS_HW)
+ 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+ 							TLS_OFFLOAD_CTX_DIR_TX);
+@@ -1299,15 +1331,21 @@ static int tls_device_down(struct net_device *netdev)
+ 		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
+ 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+ 							TLS_OFFLOAD_CTX_DIR_RX);
+-		WRITE_ONCE(ctx->netdev, NULL);
+-		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
+-		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
+-			usleep_range(10, 200);
++
+ 		dev_put(netdev);
+-		list_del_init(&ctx->list);
+ 
+-		if (refcount_dec_and_test(&ctx->refcount))
+-			tls_device_free_ctx(ctx);
++		/* Move the context to a separate list for two reasons:
++		 * 1. When the context is deallocated, list_del is called.
++		 * 2. It's no longer an offloaded context, so we don't want to
++		 *    run offload-specific code on this context.
++		 */
++		spin_lock_irqsave(&tls_device_lock, flags);
++		list_move_tail(&ctx->list, &tls_device_down_list);
++		spin_unlock_irqrestore(&tls_device_lock, flags);
++
++		/* Device contexts for RX and TX will be freed on sk_destruct
++		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
++		 */
+ 	}
+ 
+ 	up_write(&device_offload_lock);
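+
The tls_device_down() sequence above is a publish-then-synchronize idiom: the resync paths only read ctx->netdev under rcu_read_lock(), so storing NULL and then calling synchronize_net() guarantees no in-flight reader still holds the device before the driver context is released. A generic sketch of the idiom with userspace RCU (assumes liburcu is available; all names here are hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* userspace RCU; assumes liburcu */

struct dev { const char *name; };

static struct dev *global_dev;	/* readers use rcu_dereference() */

static void reader(void)
{
	struct dev *d;

	rcu_read_lock();
	d = rcu_dereference(global_dev);
	if (d)
		printf("using %s\n", d->name);	/* safe inside read section */
	rcu_read_unlock();
}

static void teardown(void)
{
	struct dev *d = global_dev;

	rcu_assign_pointer(global_dev, NULL);	/* 1. unpublish */
	synchronize_rcu();			/* 2. wait out all readers */
	free(d);				/* 3. now nobody can see it */
}

int main(void)
{
	rcu_register_thread();
	global_dev = calloc(1, sizeof(*global_dev));
	global_dev->name = "example";
	reader();
	teardown();
	rcu_unregister_thread();
	return 0;
}
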
+diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
+index cacf040872c74..e40bedd112b68 100644
+--- a/net/tls/tls_device_fallback.c
++++ b/net/tls/tls_device_fallback.c
+@@ -431,6 +431,13 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
+ }
+ EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
+ 
++struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
++					 struct net_device *dev,
++					 struct sk_buff *skb)
++{
++	return tls_sw_fallback(sk, skb);
++}
++
+ struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
+ {
+ 	return tls_sw_fallback(skb->sk, skb);
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 47b7c5334c346..fde56ff491637 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -636,6 +636,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
+ 	mutex_init(&ctx->tx_lock);
+ 	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+ 	ctx->sk_proto = READ_ONCE(sk->sk_prot);
++	ctx->sk = sk;
+ 	return ctx;
+ }
+ 
+diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
+index 21dbf63d6e415..9ec93d90e8a5a 100644
+--- a/samples/vfio-mdev/mdpy-fb.c
++++ b/samples/vfio-mdev/mdpy-fb.c
+@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
+ 	if (format != DRM_FORMAT_XRGB8888) {
+ 		pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
+ 			format, DRM_FORMAT_XRGB8888);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_release_regions;
+ 	}
+ 	if (width < 100	 || width > 10000) {
+ 		pci_err(pdev, "width (%d) out of range\n", width);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_release_regions;
+ 	}
+ 	if (height < 100 || height > 10000) {
+ 		pci_err(pdev, "height (%d) out of range\n", height);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_release_regions;
+ 	}
+ 	pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
+ 		 width, height);
+ 
+ 	info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
+-	if (!info)
++	if (!info) {
++		ret = -ENOMEM;
+ 		goto err_release_regions;
++	}
+ 	pci_set_drvdata(pdev, info);
+ 	par = info->par;
+ 
+diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
+index 735e11e9041b9..19468831fcc73 100644
+--- a/scripts/Makefile.modfinal
++++ b/scripts/Makefile.modfinal
+@@ -59,7 +59,7 @@ quiet_cmd_ld_ko_o = LD [M]  $@
+ quiet_cmd_btf_ko = BTF [M] $@
+       cmd_btf_ko = 							\
+ 	if [ -f vmlinux ]; then						\
+-		LLVM_OBJCOPY=$(OBJCOPY) $(PAHOLE) -J --btf_base vmlinux $@; \
++		LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \
+ 	else								\
+ 		printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
+ 	fi;
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 3b261b0f74f0a..0a16928e495b9 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -228,7 +228,7 @@ gen_btf()
+ 	vmlinux_link ${1}
+ 
+ 	info "BTF" ${2}
+-	LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1}
++	LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${1}
+ 
+ 	# Create ${2} which contains just .BTF section but no symbols. Add
+ 	# SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 6898b1ac0d7f4..92b7008fcdb86 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
+ 		return;
+ 	if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
+ 		return;
++	event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
+ 	list_for_each_entry(ts, &ti->slave_active_head, active_list)
+ 		if (ts->ccallback)
+-			ts->ccallback(ts, event + 100, &tstamp, resolution);
++			ts->ccallback(ts, event, &tstamp, resolution);
+ }
+ 
+ /* start/continue a master timer */
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 2026f1ccaf5a7..a220f7ac81263 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2973,6 +2973,7 @@ static int hda_codec_runtime_resume(struct device *dev)
+ #ifdef CONFIG_PM_SLEEP
+ static int hda_codec_pm_prepare(struct device *dev)
+ {
++	dev->power.power_state = PMSG_SUSPEND;
+ 	return pm_runtime_suspended(dev);
+ }
+ 
+@@ -2980,6 +2981,10 @@ static void hda_codec_pm_complete(struct device *dev)
+ {
+ 	struct hda_codec *codec = dev_to_hda_codec(dev);
+ 
++	/* If no other pm-functions are called between prepare() and complete() */
++	if (dev->power.power_state.event == PM_EVENT_SUSPEND)
++		dev->power.power_state = PMSG_RESUME;
++
+ 	if (pm_runtime_suspended(dev) && (codec->jackpoll_interval ||
+ 	    hda_codec_need_resume(codec) || codec->forced_resume))
+ 		pm_request_resume(dev);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d8424d226714f..cc13a68197f3c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -8289,6 +8289,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++	SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
+index 7b2d471a6419d..4343356f3cf9a 100644
+--- a/tools/perf/util/dwarf-aux.c
++++ b/tools/perf/util/dwarf-aux.c
+@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
+ 	if ((tag == DW_TAG_formal_parameter ||
+ 	     tag == DW_TAG_variable) &&
+ 	    die_compare_name(die_mem, fvp->name) &&
+-	/* Does the DIE have location information or external instance? */
++	/*
++	 * Does the DIE have location information or const value
++	 * or external instance?
++	 */
+ 	    (dwarf_attr(die_mem, DW_AT_external, &attr) ||
+-	     dwarf_attr(die_mem, DW_AT_location, &attr)))
++	     dwarf_attr(die_mem, DW_AT_location, &attr) ||
++	     dwarf_attr(die_mem, DW_AT_const_value, &attr)))
+ 		return DIE_FIND_CB_END;
+ 	if (dwarf_haspc(die_mem, fvp->addr))
+ 		return DIE_FIND_CB_CONTINUE;
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index 1b118c9c86a69..bba61b95a37a8 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
+ 	    immediate_value_is_supported()) {
+ 		Dwarf_Sword snum;
+ 
++		if (!tvar)
++			return 0;
++
+ 		dwarf_formsdata(&attr, &snum);
+ 		ret = asprintf(&tvar->value, "\\%ld", (long)snum);
+ 
+diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
+index 7ed7cd95e58fe..ebc4ee0fe179f 100755
+--- a/tools/testing/selftests/wireguard/netns.sh
++++ b/tools/testing/selftests/wireguard/netns.sh
+@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_prefixlength 0
+ ip1 -4 route add default dev wg0 table 51820
+ ip1 -4 rule add not fwmark 51820 table 51820
+ ip1 -4 rule add table main suppress_prefixlength 0
++n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
+ # Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
+ n1 ping -W 1 -c 100 -f 192.168.99.7
+ n1 ping -W 1 -c 100 -f abab::1111
+diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
+index 4eecb432a66c1..74db83a0aedd8 100644
+--- a/tools/testing/selftests/wireguard/qemu/kernel.config
++++ b/tools/testing/selftests/wireguard/qemu/kernel.config
+@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y
+ CONFIG_NETFILTER_XT_NAT=y
+ CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+ CONFIG_NETFILTER_XT_MARK=y
+-CONFIG_NF_CONNTRACK_IPV4=y
+ CONFIG_NF_NAT_IPV4=y
+ CONFIG_IP_NF_IPTABLES=y
+ CONFIG_IP_NF_FILTER=y


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-08 22:15 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-08 22:15 UTC (permalink / raw
  To: gentoo-commits

commit:     406943272aef657d11be05055417c5beca95400d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jun  8 22:14:00 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jun  8 22:15:22 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=40694327

Updates from gyakovlev

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 72 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 64 insertions(+), 8 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 9a7a02d..4eee26b 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -6,9 +6,9 @@
  source "Documentation/Kconfig"
 +
 +source "distro/Kconfig"
---- /dev/null	2021-06-06 14:01:09.950742356 -0400
-+++ b/distro/Kconfig	2021-06-06 17:48:05.912077568 -0400
-@@ -0,0 +1,267 @@
+--- /dev/null	2021-06-08 16:56:49.698138501 -0400
++++ b/distro/Kconfig	2021-06-08 17:11:33.377999003 -0400
+@@ -0,0 +1,263 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -181,8 +181,7 @@
 +	Note 2: Please see the URL above for numeric settings, e.g. CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 
 +	for X86_64
 +
-+	depends on GENTOO_LINUX && !HARDENED_USERCOPY_FALLBACK && !HARDENED_USERCOPY_PAGESPAN && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !DEVKMEM && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !SECURITY_SELINUX_DISABLE && !X86_X32 && !MODIFY_LDT_SYSCALL
-+
++	depends on GENTOO_LINUX && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !DEVKMEM && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !X86_X32 && !MODIFY_LDT_SYSCALL
 +
 +	select BUG
 +	select STRICT_KERNEL_RWX
@@ -191,7 +190,6 @@
 +	select STACKPROTECTOR_STRONG
 +	select STRICT_DEVMEM
 +	select IO_STRICT_DEVMEM
-+
 +	select SYN_COOKIES
 +	select DEBUG_CREDENTIALS
 +	select DEBUG_NOTIFIERS
@@ -201,9 +199,7 @@
 +	select SCHED_STACK_END_CHECK
 +	select SECCOMP
 +	select SECCOMP_FILTER
-+	select SECURITY
 +	select SECURITY_YAMA
-+	select HARDENED_USERCOPY
 +	select SLAB_FREELIST_RANDOM
 +	select SLAB_FREELIST_HARDENED
 +	select SHUFFLE_PAGE_ALLOCATOR
@@ -276,3 +272,63 @@
 +endmenu
 +
 +endmenu
+diff --git a/security/Kconfig b/security/Kconfig
+index 7561f6f99..01f0bf73f 100644
+--- a/security/Kconfig
++++ b/security/Kconfig
+@@ -166,6 +166,7 @@ config HARDENED_USERCOPY
+ config HARDENED_USERCOPY_FALLBACK
+ 	bool "Allow usercopy whitelist violations to fallback to object size"
+ 	depends on HARDENED_USERCOPY
++	depends on !GENTOO_KERNEL_SELF_PROTECTION
+ 	default y
+ 	help
+ 	  This is a temporary option that allows missing usercopy whitelists
+@@ -181,6 +182,7 @@ config HARDENED_USERCOPY_PAGESPAN
+ 	bool "Refuse to copy allocations that span multiple pages"
+ 	depends on HARDENED_USERCOPY
+ 	depends on EXPERT
++	depends on !GENTOO_KERNEL_SELF_PROTECTION
+ 	help
+ 	  When a multi-page allocation is done without __GFP_COMP,
+ 	  hardened usercopy will reject attempts to copy it. There are,
+diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
+index 9e921fc72..f29bc13fa 100644
+--- a/security/selinux/Kconfig
++++ b/security/selinux/Kconfig
+@@ -26,6 +26,7 @@ config SECURITY_SELINUX_BOOTPARAM
+ config SECURITY_SELINUX_DISABLE
+ 	bool "NSA SELinux runtime disable"
+ 	depends on SECURITY_SELINUX
++	depends on !GENTOO_KERNEL_SELF_PROTECTION
+ 	select SECURITY_WRITABLE_HOOKS
+ 	default n
+ 	help
+-- 
+2.31.1
+
+From bd3ff0b16792c18c0614c2b95e148943209f460a Mon Sep 17 00:00:00 2001
+From: Georgy Yakovlev <gyakovlev@gentoo.org>
+Date: Tue, 8 Jun 2021 13:59:57 -0700
+Subject: [PATCH 2/2] set DEFAULT_MMAP_MIN_ADDR by default
+
+---
+ mm/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 24c045b24..e13fc740c 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -321,6 +321,8 @@ config KSM
+ config DEFAULT_MMAP_MIN_ADDR
+ 	int "Low address space to protect from user allocation"
+ 	depends on MMU
++	default 65536 if ( X86_64 || X86_32 || PPC64 || IA64 ) && GENTOO_KERNEL_SELF_PROTECTION
++	default 32768 if ( ARM64 || ARM ) && GENTOO_KERNEL_SELF_PROTECTION
+ 	default 4096
+ 	help
+ 	  This is the portion of low virtual memory which should be protected
+-- 
+2.31.1
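+
The effect of a raised DEFAULT_MMAP_MIN_ADDR is easy to observe from userspace: an unprivileged fixed mapping below the threshold is refused, typically with EPERM (a quick hedged demo; the exact errno can vary with the LSM in use):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Try to map a low page; with mmap_min_addr above 0x1000 this
	 * is refused for unprivileged processes. */
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		perror("mmap below mmap_min_addr");
	else
		puts("mapped low page (mmap_min_addr <= 0x1000?)");
	return 0;
}
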


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-08 16:48 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-08 16:48 UTC (permalink / raw
  To: gentoo-commits

commit:     ff2d1f872602ae635ca691c4d1297d99b44a549a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jun  8 16:46:36 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jun  8 16:48:12 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ff2d1f87

Remove !IA32_EMULATION in KSPP to avoid disabling multilib. Thanks gyakovlev

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index daf29c3..9a7a02d 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -181,7 +181,7 @@
 +	Note 2: Please see the URL above for numeric settings, e.g. CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 
 +	for X86_64
 +
-+	depends on GENTOO_LINUX && !HARDENED_USERCOPY_FALLBACK && !HARDENED_USERCOPY_PAGESPAN && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !DEVKMEM && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !SECURITY_SELINUX_DISABLE && !IA32_EMULATION && !X86_X32 && !MODIFY_LDT_SYSCALL
++	depends on GENTOO_LINUX && !HARDENED_USERCOPY_FALLBACK && !HARDENED_USERCOPY_PAGESPAN && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !DEVKMEM && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !SECURITY_SELINUX_DISABLE && !X86_X32 && !MODIFY_LDT_SYSCALL
 +
 +
 +	select BUG


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-08 16:26 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-06-08 16:26 UTC (permalink / raw
  To: gentoo-commits

commit:     ecc47a8c9fbbcb96f42d1339c5807f959e81b6a2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jun  8 15:34:15 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jun  8 16:26:41 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ecc47a8c

CONFIG opt to enable a subset of Kernel Self Protection Project settings

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 121 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 115 insertions(+), 6 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index e754a3e..daf29c3 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -1,14 +1,14 @@
---- a/Kconfig	2020-04-15 11:05:30.202413863 -0400
-+++ b/Kconfig	2020-04-15 10:37:45.683952949 -0400
-@@ -32,3 +32,5 @@ source "lib/Kconfig"
+--- a/Kconfig	2021-06-04 19:03:33.646823432 -0400
++++ b/Kconfig	2021-06-04 19:03:40.508892817 -0400
+@@ -30,3 +30,5 @@ source "lib/Kconfig"
  source "lib/Kconfig.debug"
  
  source "Documentation/Kconfig"
 +
 +source "distro/Kconfig"
---- /dev/null	2020-09-24 03:06:47.590000000 -0400
-+++ b/distro/Kconfig	2020-09-24 11:31:29.403150624 -0400
-@@ -0,0 +1,158 @@
+--- /dev/null	2021-06-06 14:01:09.950742356 -0400
++++ b/distro/Kconfig	2021-06-06 17:48:05.912077568 -0400
+@@ -0,0 +1,267 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -166,4 +166,113 @@
 +
 +endmenu
 +
++menu "Enable Kernel Self Protection Project Recommendations"
++	visible if GENTOO_LINUX
++
++config GENTOO_KERNEL_SELF_PROTECTION
++	bool "Architecture Independant Kernel Self Protection Project Recommendations"
++
++	help
++	Recommended Kernel settings based on the suggestions from the Kernel Self Protection Project
++	See: https://kernsec.org/wiki/index.php/Kernel_Self_Protection_Project/Recommended_Settings
++	Note, there may be additional settings for which the CONFIG_ setting is invisible in menuconfig due 
++	to unmet dependencies. Search for GENTOO_KERNEL_SELF_PROTECTION_{X86_64, ARM64, X86_32, ARM} for 
++	dependency information on your specific architecture.
++	Note 2: Please see the URL above for numeric settings, e.g. CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 
++	for X86_64
++
++	depends on GENTOO_LINUX && !HARDENED_USERCOPY_FALLBACK && !HARDENED_USERCOPY_PAGESPAN && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !DEVKMEM && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !SECURITY_SELINUX_DISABLE && !IA32_EMULATION && !X86_X32 && !MODIFY_LDT_SYSCALL
++
++
++	select BUG
++	select STRICT_KERNEL_RWX
++	select DEBUG_WX
++	select STACKPROTECTOR
++	select STACKPROTECTOR_STRONG
++	select STRICT_DEVMEM
++	select IO_STRICT_DEVMEM
++
++	select SYN_COOKIES
++	select DEBUG_CREDENTIALS
++	select DEBUG_NOTIFIERS
++	select DEBUG_LIST
++	select DEBUG_SG
++	select BUG_ON_DATA_CORRUPTION
++	select SCHED_STACK_END_CHECK
++	select SECCOMP
++	select SECCOMP_FILTER
++	select SECURITY
++	select SECURITY_YAMA
++	select HARDENED_USERCOPY
++	select SLAB_FREELIST_RANDOM
++	select SLAB_FREELIST_HARDENED
++	select SHUFFLE_PAGE_ALLOCATOR
++	select SLUB_DEBUG
++	select PAGE_POISONING
++	select PAGE_POISONING_NO_SANITY
++	select PAGE_POISONING_ZERO
++	select INIT_ON_ALLOC_DEFAULT_ON
++	select INIT_ON_FREE_DEFAULT_ON
++	select VMAP_STACK
++	select REFCOUNT_FULL
++	select FORTIFY_SOURCE
++	select SECURITY_DMESG_RESTRICT
++	select PANIC_ON_OOPS
++	select GCC_PLUGINS
++	select GCC_PLUGIN_LATENT_ENTROPY
++	select GCC_PLUGIN_STRUCTLEAK
++	select GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
++	select GCC_PLUGIN_STACKLEAK
++	select GCC_PLUGIN_RANDSTRUCT
++	select GCC_PLUGIN_RANDSTRUCT_PERFORMANCE
++
++menu "Architecture Specific Self Protection Project Recommendations"
++
++config GENTOO_KERNEL_SELF_PROTECTION_X86_64
++	bool "X86_64 KSPP Settings"
++
++	depends on !X86_MSR && X86_64
++	default n
++	
++	select RANDOMIZE_BASE
++	select RANDOMIZE_MEMORY
++	select LEGACY_VSYSCALL_NONE
++	select PAGE_TABLE_ISOLATION
++
++
++config GENTOO_KERNEL_SELF_PROTECTION_ARM64
++	bool "ARM64 KSPP Settings"
++
++	depends on ARM64
++	default n
++
++	select RANDOMIZE_BASE
++	select ARM64_SW_TTBR0_PAN
++	select UNMAP_KERNEL_AT_EL0
++
++config GENTOO_KERNEL_SELF_PROTECTION_X86_32
++	bool "X86_32 KSPP Settings"
++
++	depends on !X86_MSR && !MODIFY_LDT_SYSCALL && !M486 && X86_32
++	default n
++
++	select HIGHMEM64G
++	select X86_PAE
++	select RANDOMIZE_BASE
++	select PAGE_TABLE_ISOLATION
++
++config GENTOO_KERNEL_SELF_PROTECTION_ARM
++	bool "ARM KSPP Settings"
++
++	depends on !OABI_COMPAT && ARM
++	default n
++
++	select VMSPLIT_3G
++	select STRICT_MEMORY_RWX
++	select CPU_SW_DOMAIN_PAN
++
++endmenu
++
++endmenu
++
 +endmenu


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-06-03 10:22 Alice Ferrazzi
  0 siblings, 0 replies; 33+ messages in thread
From: Alice Ferrazzi @ 2021-06-03 10:22 UTC (permalink / raw
  To: gentoo-commits

commit:     6b02df12d6c442e8b157b340954a4ca810844d95
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Jun  3 10:21:39 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Jun  3 10:22:05 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6b02df12

Linux patch 5.12.9

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |     4 +
 1008_linux-5.12.9.patch | 10516 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10520 insertions(+)

diff --git a/0000_README b/0000_README
index 90784e9..f16429c 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-5.12.8.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.8
 
+Patch:  1008_linux-5.12.9.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-5.12.9.patch b/1008_linux-5.12.9.patch
new file mode 100644
index 0000000..f5223c7
--- /dev/null
+++ b/1008_linux-5.12.9.patch
@@ -0,0 +1,10516 @@
+diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst
+index bd9165241b6c8..6efb41cc80725 100644
+--- a/Documentation/userspace-api/seccomp_filter.rst
++++ b/Documentation/userspace-api/seccomp_filter.rst
+@@ -250,14 +250,14 @@ Users can read via ``ioctl(SECCOMP_IOCTL_NOTIF_RECV)``  (or ``poll()``) on a
+ seccomp notification fd to receive a ``struct seccomp_notif``, which contains
+ five members: the input length of the structure, a unique-per-filter ``id``,
+ the ``pid`` of the task which triggered this request (which may be 0 if the
+-task is in a pid ns not visible from the listener's pid namespace), a ``flags``
+-member which for now only has ``SECCOMP_NOTIF_FLAG_SIGNALED``, representing
+-whether or not the notification is a result of a non-fatal signal, and the
+-``data`` passed to seccomp. Userspace can then make a decision based on this
+-information about what to do, and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a
+-response, indicating what should be returned to userspace. The ``id`` member of
+-``struct seccomp_notif_resp`` should be the same ``id`` as in ``struct
+-seccomp_notif``.
++task is in a pid ns not visible from the listener's pid namespace). The
++notification also contains the ``data`` passed to seccomp, and a filters flag.
++The structure should be zeroed out prior to calling the ioctl.
++
++Userspace can then make a decision based on this information about what to do,
++and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a response, indicating what should be
++returned to userspace. The ``id`` member of ``struct seccomp_notif_resp`` should
++be the same ``id`` as in ``struct seccomp_notif``.
+ 
+ It is worth noting that ``struct seccomp_data`` contains the values of register
+ arguments to the syscall, but does not contain pointers to memory. The task's
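+
A minimal supervisor sketch of the flow the documentation describes — zero the request, RECV, echo the id back, SEND (hedged: error handling is elided, and notify_fd is assumed to come from seccomp(2) with SECCOMP_FILTER_FLAG_NEW_LISTENER; the filter setup itself is omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

/* Receive one notification and let the syscall proceed.
 * SECCOMP_USER_NOTIF_FLAG_CONTINUE needs Linux 5.5+ and requires
 * val and error to be zero, which memset() guarantees here. */
int handle_one(int notify_fd)
{
	struct seccomp_notif req;
	struct seccomp_notif_resp resp;

	memset(&req, 0, sizeof(req));	/* must be zeroed, per the text */
	if (ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
		return -1;

	memset(&resp, 0, sizeof(resp));
	resp.id = req.id;		/* echo the per-filter id back */
	resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE;
	return ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
}
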
+diff --git a/Makefile b/Makefile
+index a20afcb7d2bf4..d53577db10858 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
+index a7ab84f781f73..a8578d650bb67 100644
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -192,6 +192,8 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
+ 
+ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+ 
++extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
++
+ extern u64 __vgic_v3_get_gic_config(void);
+ extern u64 __vgic_v3_read_vmcr(void);
+ extern void __vgic_v3_write_vmcr(u32 vmcr);
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index f612c090f2e41..01b9857757f2a 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -463,4 +463,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
+ 	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
+ }
+ 
++static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
++{
++	return test_bit(feature, vcpu->arch.features);
++}
++
+ #endif /* __ARM64_KVM_EMULATE_H__ */
+diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
+index 73629094f9030..0812a496725f6 100644
+--- a/arch/arm64/kvm/hyp/exception.c
++++ b/arch/arm64/kvm/hyp/exception.c
+@@ -296,7 +296,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+ 	*vcpu_pc(vcpu) = vect_offset;
+ }
+ 
+-void kvm_inject_exception(struct kvm_vcpu *vcpu)
++static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+ {
+ 	if (vcpu_el1_is_32bit(vcpu)) {
+ 		switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
+@@ -329,3 +329,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu)
+ 		}
+ 	}
+ }
++
++/*
++ * Adjust the guest PC on entry, depending on flags provided by EL1
++ * for the purpose of emulation (MMIO, sysreg) or exception injection.
++ */
++void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
++{
++	if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
++		kvm_inject_exception(vcpu);
++		vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
++				      KVM_ARM64_EXCEPT_MASK);
++	} else 	if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
++		kvm_skip_instr(vcpu);
++		vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
++	}
++}
+diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+index 61716359035d6..4fdfeabefeb43 100644
+--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
++++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+@@ -13,8 +13,6 @@
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_host.h>
+ 
+-void kvm_inject_exception(struct kvm_vcpu *vcpu);
+-
+ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
+ {
+ 	if (vcpu_mode_is_32bit(vcpu)) {
+@@ -43,22 +41,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
+ 	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
+ }
+ 
+-/*
+- * Adjust the guest PC on entry, depending on flags provided by EL1
+- * for the purpose of emulation (MMIO, sysreg) or exception injection.
+- */
+-static inline void __adjust_pc(struct kvm_vcpu *vcpu)
+-{
+-	if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+-		kvm_inject_exception(vcpu);
+-		vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
+-				      KVM_ARM64_EXCEPT_MASK);
+-	} else 	if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+-		kvm_skip_instr(vcpu);
+-		vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+-	}
+-}
+-
+ /*
+  * Skip an instruction while host sysregs are live.
+  * Assumes host is always 64-bit.
+diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
+index 68ab6b4d51414..a343ef11736f9 100644
+--- a/arch/arm64/kvm/hyp/nvhe/switch.c
++++ b/arch/arm64/kvm/hyp/nvhe/switch.c
+@@ -4,7 +4,6 @@
+  * Author: Marc Zyngier <marc.zyngier@arm.com>
+  */
+ 
+-#include <hyp/adjust_pc.h>
+ #include <hyp/switch.h>
+ #include <hyp/sysreg-sr.h>
+ 
+@@ -201,7 +200,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+ 	 */
+ 	__debug_save_host_buffers_nvhe(vcpu);
+ 
+-	__adjust_pc(vcpu);
++	__kvm_adjust_pc(vcpu);
+ 
+ 	/*
+ 	 * We must restore the 32-bit state before the sysregs, thanks
+diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
+index af8e940d0f035..ae65f6b5984e8 100644
+--- a/arch/arm64/kvm/hyp/vhe/switch.c
++++ b/arch/arm64/kvm/hyp/vhe/switch.c
+@@ -4,7 +4,6 @@
+  * Author: Marc Zyngier <marc.zyngier@arm.com>
+  */
+ 
+-#include <hyp/adjust_pc.h>
+ #include <hyp/switch.h>
+ 
+ #include <linux/arm-smccc.h>
+@@ -134,7 +133,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+ 	__load_guest_stage2(vcpu->arch.hw_mmu);
+ 	__activate_traps(vcpu);
+ 
+-	__adjust_pc(vcpu);
++	__kvm_adjust_pc(vcpu);
+ 
+ 	sysreg_restore_guest_state_vhe(guest_ctxt);
+ 	__debug_switch_to_guest(vcpu);
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index 4b5acd84b8c87..dd207565cd998 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -170,6 +170,25 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
+ 	return 0;
+ }
+ 
++static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
++{
++	struct kvm_vcpu *tmp;
++	bool is32bit;
++	int i;
++
++	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
++	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
++		return false;
++
++	/* Check that the vcpus are either all 32bit or all 64bit */
++	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
++		if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
++			return false;
++	}
++
++	return true;
++}
++
+ /**
+  * kvm_reset_vcpu - sets core registers and sys_regs to reset value
+  * @vcpu: The VCPU pointer
+@@ -221,13 +240,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ 		}
+ 	}
+ 
++	if (!vcpu_allowed_register_width(vcpu)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	switch (vcpu->arch.target) {
+ 	default:
+ 		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+-			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+-				ret = -EINVAL;
+-				goto out;
+-			}
+ 			pstate = VCPU_RESET_PSTATE_SVC;
+ 		} else {
+ 			pstate = VCPU_RESET_PSTATE_EL1;
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 4f2f1e3145deb..cdd073d8ea324 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -399,14 +399,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
+ 		     struct sys_reg_params *p,
+ 		     const struct sys_reg_desc *rd)
+ {
+-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+ 
+ 	if (p->is_write)
+ 		reg_to_dbg(vcpu, p, rd, dbg_reg);
+ 	else
+ 		dbg_to_reg(vcpu, p, rd, dbg_reg);
+ 
+-	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+ 
+ 	return true;
+ }
+@@ -414,7 +414,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
+ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ 		const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+ 
+ 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ 		return -EFAULT;
+@@ -424,7 +424,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ 	const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+ 
+ 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ 		return -EFAULT;
+@@ -434,21 +434,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_bvr(struct kvm_vcpu *vcpu,
+ 		      const struct sys_reg_desc *rd)
+ {
+-	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
++	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
+ }
+ 
+ static bool trap_bcr(struct kvm_vcpu *vcpu,
+ 		     struct sys_reg_params *p,
+ 		     const struct sys_reg_desc *rd)
+ {
+-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+ 
+ 	if (p->is_write)
+ 		reg_to_dbg(vcpu, p, rd, dbg_reg);
+ 	else
+ 		dbg_to_reg(vcpu, p, rd, dbg_reg);
+ 
+-	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+ 
+ 	return true;
+ }
+@@ -456,7 +456,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
+ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ 		const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+ 
+ 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ 		return -EFAULT;
+@@ -467,7 +467,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ 	const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+ 
+ 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ 		return -EFAULT;
+@@ -477,22 +477,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_bcr(struct kvm_vcpu *vcpu,
+ 		      const struct sys_reg_desc *rd)
+ {
+-	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
++	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
+ }
+ 
+ static bool trap_wvr(struct kvm_vcpu *vcpu,
+ 		     struct sys_reg_params *p,
+ 		     const struct sys_reg_desc *rd)
+ {
+-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+ 
+ 	if (p->is_write)
+ 		reg_to_dbg(vcpu, p, rd, dbg_reg);
+ 	else
+ 		dbg_to_reg(vcpu, p, rd, dbg_reg);
+ 
+-	trace_trap_reg(__func__, rd->reg, p->is_write,
+-		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
++	trace_trap_reg(__func__, rd->CRm, p->is_write,
++		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
+ 
+ 	return true;
+ }
+@@ -500,7 +500,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
+ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ 		const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+ 
+ 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ 		return -EFAULT;
+@@ -510,7 +510,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ 	const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+ 
+ 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ 		return -EFAULT;
+@@ -520,21 +520,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_wvr(struct kvm_vcpu *vcpu,
+ 		      const struct sys_reg_desc *rd)
+ {
+-	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
++	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
+ }
+ 
+ static bool trap_wcr(struct kvm_vcpu *vcpu,
+ 		     struct sys_reg_params *p,
+ 		     const struct sys_reg_desc *rd)
+ {
+-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+ 
+ 	if (p->is_write)
+ 		reg_to_dbg(vcpu, p, rd, dbg_reg);
+ 	else
+ 		dbg_to_reg(vcpu, p, rd, dbg_reg);
+ 
+-	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+ 
+ 	return true;
+ }
+@@ -542,7 +542,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
+ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ 		const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+ 
+ 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ 		return -EFAULT;
+@@ -552,7 +552,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ 	const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+ 
+ 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ 		return -EFAULT;
+@@ -562,7 +562,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_wcr(struct kvm_vcpu *vcpu,
+ 		      const struct sys_reg_desc *rd)
+ {
+-	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
++	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
+ }
+ 
+ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 5d9550fdb9cf8..bd4739baa368d 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -492,7 +492,8 @@ static void __init map_mem(pgd_t *pgdp)
+ 	int flags = 0;
+ 	u64 i;
+ 
+-	if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
++	if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
++	    IS_ENABLED(CONFIG_KFENCE))
+ 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+ 
+ 	/*
+diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
+index b184baa4e56a6..f175bce2987fa 100644
+--- a/arch/mips/alchemy/board-xxs1500.c
++++ b/arch/mips/alchemy/board-xxs1500.c
+@@ -18,6 +18,7 @@
+ #include <asm/reboot.h>
+ #include <asm/setup.h>
+ #include <asm/mach-au1x00/au1000.h>
++#include <asm/mach-au1x00/gpio-au1000.h>
+ #include <prom.h>
+ 
+ const char *get_system_type(void)
+diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
+index 8286c3521476f..3c3d07cd80660 100644
+--- a/arch/mips/ralink/of.c
++++ b/arch/mips/ralink/of.c
+@@ -8,6 +8,7 @@
+ 
+ #include <linux/io.h>
+ #include <linux/clk.h>
++#include <linux/export.h>
+ #include <linux/init.h>
+ #include <linux/sizes.h>
+ #include <linux/of_fdt.h>
+@@ -25,6 +26,7 @@
+ 
+ __iomem void *rt_sysc_membase;
+ __iomem void *rt_memc_membase;
++EXPORT_SYMBOL_GPL(rt_sysc_membase);
+ 
+ __iomem void *plat_of_remap_node(const char *node)
+ {
+diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h
+new file mode 100644
+index 0000000000000..7538294721bed
+--- /dev/null
++++ b/arch/openrisc/include/asm/barrier.h
+@@ -0,0 +1,9 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __ASM_BARRIER_H
++#define __ASM_BARRIER_H
++
++#define mb() asm volatile ("l.msync" ::: "memory")
++
++#include <asm-generic/barrier.h>
++
++#endif /* __ASM_BARRIER_H */
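The new OpenRISC barrier.h above only has to supply mb(): asm-generic/barrier.h derives the barriers an architecture does not define from whatever it does provide. Roughly, as a hedged sketch of that fallback logic (not a quote of the generic header):

/* Any barrier the arch leaves undefined falls back to mb(). */
#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif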
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 2b3e0cb90d789..bde85fc53357f 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -27,10 +27,10 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 		fp = frame_pointer(regs);
+ 		sp = user_stack_pointer(regs);
+ 		pc = instruction_pointer(regs);
+-	} else if (task == NULL || task == current) {
+-		fp = (unsigned long)__builtin_frame_address(0);
+-		sp = sp_in_global;
+-		pc = (unsigned long)walk_stackframe;
++	} else if (task == current) {
++		fp = (unsigned long)__builtin_frame_address(1);
++		sp = (unsigned long)__builtin_frame_address(0);
++		pc = (unsigned long)__builtin_return_address(0);
+ 	} else {
+ 		/* task blocked in __switch_to */
+ 		fp = task->thread.s[0];
+@@ -106,15 +106,15 @@ static bool print_trace_address(void *arg, unsigned long pc)
+ 	return true;
+ }
+ 
+-void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
++noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+ 		    const char *loglvl)
+ {
+-	pr_cont("%sCall Trace:\n", loglvl);
+ 	walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
+ }
+ 
+ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+ {
++	pr_cont("%sCall Trace:\n", loglvl);
+ 	dump_backtrace(NULL, task, loglvl);
+ }
+ 
+@@ -139,7 +139,7 @@ unsigned long get_wchan(struct task_struct *task)
+ 
+ #ifdef CONFIG_STACKTRACE
+ 
+-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
++noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ 		     struct task_struct *task, struct pt_regs *regs)
+ {
+ 	walk_stackframe(task, regs, consume_entry, cookie);
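The walk_stackframe() change above seeds the unwinder from compiler builtins instead of globals. A minimal standalone illustration of those builtins, as plain userspace C with hypothetical names; note that __builtin_frame_address(1), as used in the patch, additionally assumes frame pointers are kept:

#include <stdio.h>

static void __attribute__((noinline)) show_caller(void)
{
	/* Frame pointer of this function's own frame. */
	void *fp = __builtin_frame_address(0);
	/* Address execution resumes at in the caller. */
	void *pc = __builtin_return_address(0);

	printf("fp=%p pc=%p\n", fp, pc);
}

int main(void)
{
	show_caller();
	return 0;
}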
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index f98370a399361..f00830e5202fe 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1172,6 +1172,7 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
+ {
+ 	struct kvm_hv *hv = to_kvm_hv(kvm);
+ 	u64 gfn;
++	int idx;
+ 
+ 	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+ 	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
+@@ -1190,9 +1191,16 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
+ 	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+ 
+ 	hv->tsc_ref.tsc_sequence = 0;
++
++	/*
++	 * Take the srcu lock as memslots will be accessed to check the gfn
++	 * cache generation against the memslots generation.
++	 */
++	idx = srcu_read_lock(&kvm->srcu);
+ 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+ 			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+ 		hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
++	srcu_read_unlock(&kvm->srcu, idx);
+ 
+ out_unlock:
+ 	mutex_unlock(&hv->hv_lock);
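The hunk above brackets kvm_write_guest() with the SRCU read-side primitives. As a reminder of the pattern, a minimal sketch; my_srcu and the reader function are hypothetical, not from the patch:

#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);

static void reader(void)
{
	int idx;

	/* Readers may sleep under SRCU, unlike plain RCU. */
	idx = srcu_read_lock(&my_srcu);
	/* ... access SRCU-protected state (here: the memslots) ... */
	srcu_read_unlock(&my_srcu, idx);
}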
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 86678f8b35020..822c23c3752c5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3015,6 +3015,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
+ 				       st->preempted & KVM_VCPU_FLUSH_TLB);
+ 		if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ 			kvm_vcpu_flush_tlb_guest(vcpu);
++	} else {
++		st->preempted = 0;
+ 	}
+ 
+ 	vcpu->arch.st.preempted = 0;
+@@ -7113,6 +7115,11 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
+ 	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
+ 	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
+ 
++	ctxt->interruptibility = 0;
++	ctxt->have_exception = false;
++	ctxt->exception.vector = -1;
++	ctxt->perm_ok = false;
++
+ 	init_decode_cache(ctxt);
+ 	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
+ }
+@@ -7448,11 +7455,6 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+ 	    kvm_vcpu_check_breakpoint(vcpu, &r))
+ 		return r;
+ 
+-	ctxt->interruptibility = 0;
+-	ctxt->have_exception = false;
+-	ctxt->exception.vector = -1;
+-	ctxt->perm_ok = false;
+-
+ 	ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
+ 
+ 	r = x86_decode_insn(ctxt, insn, insn_len);
+diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
+index 39359ce0eb2c0..645e82a66bb07 100644
+--- a/drivers/acpi/acpi_apd.c
++++ b/drivers/acpi/acpi_apd.c
+@@ -226,6 +226,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
+ 	{ "AMDI0010", APD_ADDR(wt_i2c_desc) },
+ 	{ "AMD0020", APD_ADDR(cz_uart_desc) },
+ 	{ "AMDI0020", APD_ADDR(cz_uart_desc) },
++	{ "AMDI0022", APD_ADDR(cz_uart_desc) },
+ 	{ "AMD0030", },
+ 	{ "AMD0040", APD_ADDR(fch_misc_desc)},
+ 	{ "HYGO0010", APD_ADDR(wt_i2c_desc) },
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index f29839382f816..dd77e566f2aa0 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -192,6 +192,11 @@ int device_links_read_lock_held(void)
+ {
+ 	return srcu_read_lock_held(&device_links_srcu);
+ }
++
++static void device_link_synchronize_removal(void)
++{
++	synchronize_srcu(&device_links_srcu);
++}
+ #else /* !CONFIG_SRCU */
+ static DECLARE_RWSEM(device_links_lock);
+ 
+@@ -222,6 +227,10 @@ int device_links_read_lock_held(void)
+ 	return lockdep_is_held(&device_links_lock);
+ }
+ #endif
++
++static inline void device_link_synchronize_removal(void)
++{
++}
+ #endif /* !CONFIG_SRCU */
+ 
+ static bool device_is_ancestor(struct device *dev, struct device *target)
+@@ -443,8 +452,13 @@ static struct attribute *devlink_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(devlink);
+ 
+-static void device_link_free(struct device_link *link)
++static void device_link_release_fn(struct work_struct *work)
+ {
++	struct device_link *link = container_of(work, struct device_link, rm_work);
++
++	/* Ensure that all references to the link object have been dropped. */
++	device_link_synchronize_removal();
++
+ 	while (refcount_dec_not_one(&link->rpm_active))
+ 		pm_runtime_put(link->supplier);
+ 
+@@ -453,24 +467,19 @@ static void device_link_free(struct device_link *link)
+ 	kfree(link);
+ }
+ 
+-#ifdef CONFIG_SRCU
+-static void __device_link_free_srcu(struct rcu_head *rhead)
+-{
+-	device_link_free(container_of(rhead, struct device_link, rcu_head));
+-}
+-
+ static void devlink_dev_release(struct device *dev)
+ {
+ 	struct device_link *link = to_devlink(dev);
+ 
+-	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
+-}
+-#else
+-static void devlink_dev_release(struct device *dev)
+-{
+-	device_link_free(to_devlink(dev));
++	INIT_WORK(&link->rm_work, device_link_release_fn);
++	/*
++	 * This work may take a while to complete, both because of the SRCU
++	 * synchronization in device_link_release_fn() and because the consumer
++	 * or supplier devices may be deleted while it runs, so put it on the
++	 * "long" workqueue.
++	 */
++	queue_work(system_long_wq, &link->rm_work);
+ }
+-#endif
+ 
+ static struct class devlink_class = {
+ 	.name = "devlink",
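The devlink release rework above is an instance of a general pattern: when a release callback must sleep (here, in synchronize_srcu()), defer it to process context via a work item. A minimal sketch with hypothetical names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct myobj {
	struct work_struct rm_work;
	/* ... payload ... */
};

static void myobj_release_fn(struct work_struct *work)
{
	struct myobj *obj = container_of(work, struct myobj, rm_work);

	/* Sleeping cleanup (e.g. synchronize_srcu()) is safe here. */
	kfree(obj);
}

static void myobj_release(struct myobj *obj)
{
	INIT_WORK(&obj->rm_work, myobj_release_fn);
	/* Potentially slow work goes on the "long" system workqueue. */
	queue_work(system_long_wq, &obj->rm_work);
}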
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index ed3b7dab678db..8b55085650ad0 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -984,6 +984,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
+ 		hdp->hd_phys_address = fixmem32->address;
+ 		hdp->hd_address = ioremap(fixmem32->address,
+ 						HPET_RANGE_SIZE);
++		if (!hdp->hd_address)
++			return AE_ERROR;
+ 
+ 		if (hpet_is_known(hdp)) {
+ 			iounmap(hdp->hd_address);
+diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
+index facc8e6bc5801..d385daf2c71c3 100644
+--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
+@@ -442,7 +442,6 @@ static int nitrox_probe(struct pci_dev *pdev,
+ 	err = pci_request_mem_regions(pdev, nitrox_driver_name);
+ 	if (err) {
+ 		pci_disable_device(pdev);
+-		dev_err(&pdev->dev, "Failed to request mem regions!\n");
+ 		return err;
+ 	}
+ 	pci_set_master(pdev);
+diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
+index 806ca02c52d71..62026607f3f8b 100644
+--- a/drivers/dma/qcom/hidma_mgmt.c
++++ b/drivers/dma/qcom/hidma_mgmt.c
+@@ -418,8 +418,23 @@ static int __init hidma_mgmt_init(void)
+ 		hidma_mgmt_of_populate_channels(child);
+ 	}
+ #endif
+-	return platform_driver_register(&hidma_mgmt_driver);
++	/*
++	 * The return value is deliberately not checked here, on the assumption
++	 * that platform_driver_register does not fail. The reason is that the
++	 * (potential) hidma_mgmt_of_populate_channels calls above are not
++	 * cleaned up if it does fail, and that cleanup is quite complicated.
++	 * In particular, the various calls to of_address_to_resource,
++	 * of_irq_to_resource, platform_device_register_full, of_dma_configure
++	 * and of_msi_configure, which in turn call further functions, would
++	 * all have to be unwound; this is not a trivial exercise.
++	 *
++	 * Currently this module is not intended to be unloaded, and no
++	 * module_exit function is defined to do the needed cleanup. For this
++	 * reason, success has to be assumed here.
++	 */
++	platform_driver_register(&hidma_mgmt_driver);
+ 
++	return 0;
+ }
+ module_init(hidma_mgmt_init);
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
+index a4d3239d25944..4ab3fcd9b9ba6 100644
+--- a/drivers/gpio/gpio-cadence.c
++++ b/drivers/gpio/gpio-cadence.c
+@@ -278,6 +278,7 @@ static const struct of_device_id cdns_of_ids[] = {
+ 	{ .compatible = "cdns,gpio-r1p02" },
+ 	{ /* sentinel */ },
+ };
++MODULE_DEVICE_TABLE(of, cdns_of_ids);
+ 
+ static struct platform_driver cdns_gpio_driver = {
+ 	.driver = {
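Several one-line fixes in this patch (here, and in bcm-voter.c and virtio-iommu.c below) add a missing MODULE_DEVICE_TABLE(). The pattern, sketched with placeholder names: without it the module exports no modalias aliases, so it is never autoloaded when a matching device appears.

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id demo_of_ids[] = {
	{ .compatible = "vendor,demo-device" },
	{ /* sentinel */ },
};
/* Emits modalias info so userspace can autoload the module. */
MODULE_DEVICE_TABLE(of, demo_of_ids);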
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+index fad3b91f74f54..d39cff4a1fe38 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+@@ -156,16 +156,16 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+ 				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ 		break;
+ 	case 1:
+-		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
++		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ 				mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ 		break;
+ 	case 2:
+-		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
+-				mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
++		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
++				mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ 		break;
+ 	case 3:
+-		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
+-				mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
++		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
++				mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ 		break;
+ 	}
+ 
+@@ -450,7 +450,7 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
+ 			engine_id, queue_id);
+ 	uint32_t i = 0, reg;
+ #undef HQD_N_REGS
+-#define HQD_N_REGS (19+6+7+10)
++#define HQD_N_REGS (19+6+7+12)
+ 
+ 	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ 	if (*dump == NULL)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 5eee251e3335b..85d90e857693a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4356,7 +4356,6 @@ out:
+ 			r = amdgpu_ib_ring_tests(tmp_adev);
+ 			if (r) {
+ 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+-				r = amdgpu_device_ip_suspend(tmp_adev);
+ 				need_full_reset = true;
+ 				r = -EAGAIN;
+ 				goto end;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index 24010cacf7d0e..813b96e233ba8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -290,10 +290,13 @@ out:
+ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
+ {
+ 	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
++	int i;
+ 
+ 	drm_fb_helper_unregister_fbi(&rfbdev->helper);
+ 
+ 	if (rfb->base.obj[0]) {
++		for (i = 0; i < rfb->base.format->num_planes; i++)
++			drm_gem_object_put(rfb->base.obj[0]);
+ 		amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
+ 		rfb->base.obj[0] = NULL;
+ 		drm_framebuffer_unregister_private(&rfb->base);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 6b14626c148ee..13ac2f1dcc2c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1287,6 +1287,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+ 	if (gtt && gtt->userptr) {
+ 		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
+ 		kfree(ttm->sg);
++		ttm->sg = NULL;
+ 		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+ 		return;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+index 3b22953aa62e1..e1d3877ee3013 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+@@ -172,6 +172,8 @@ static int jpeg_v2_0_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ 	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ 	      RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
+ 		jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+index c6724a0e0c436..dc947c8ffe213 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+@@ -198,8 +198,6 @@ static int jpeg_v2_5_hw_fini(void *handle)
+ 		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ 		      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
+ 			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+-
+-		ring->sched.ready = false;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+index e8fbb2a0de340..1d354245678d5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+@@ -166,8 +166,6 @@ static int jpeg_v3_0_hw_fini(void *handle)
+ 	      RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
+ 		jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ 
+-	ring->sched.ready = false;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+index 690a5090475a5..32c6aa03d2673 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -470,11 +470,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
+ 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+ 		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ 	}
+-
+-	sdma0->sched.ready = false;
+-	sdma1->sched.ready = false;
+-	sdma2->sched.ready = false;
+-	sdma3->sched.ready = false;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 6117931fa8d79..dd5eb61a5c906 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -231,9 +231,13 @@ static int vcn_v1_0_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ 	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+-		RREG32_SOC15(VCN, 0, mmUVD_STATUS))
++		(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
++		 RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
+ 		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+index d63198c945bf0..0a84f0b5a363c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+@@ -262,6 +262,8 @@ static int vcn_v2_0_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ 	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ 	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ 	      RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+index b6e0f4ba6272c..f09d37b90da92 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+@@ -321,6 +321,8 @@ static int vcn_v2_5_hw_fini(void *handle)
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 	int i;
+ 
++	cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ 		if (adev->vcn.harvest_config & (1 << i))
+ 			continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index 9b844e9fb16ff..ebbc04ff5da06 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -368,7 +368,7 @@ static int vcn_v3_0_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 	struct amdgpu_ring *ring;
+-	int i, j;
++	int i;
+ 
+ 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ 		if (adev->vcn.harvest_config & (1 << i))
+@@ -383,12 +383,6 @@ static int vcn_v3_0_hw_fini(void *handle)
+ 				vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ 			}
+ 		}
+-		ring->sched.ready = false;
+-
+-		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+-			ring = &adev->vcn.inst[i].ring_enc[j];
+-			ring->sched.ready = false;
+-		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 440bf0a0e12ae..204928479c958 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1067,6 +1067,24 @@ static bool dc_link_detect_helper(struct dc_link *link,
+ 			    dc_is_dvi_signal(link->connector_signal)) {
+ 				if (prev_sink)
+ 					dc_sink_release(prev_sink);
++				link_disconnect_sink(link);
++
++				return false;
++			}
++			/*
++			 * Abort detection for DP connectors if we have
++			 * no EDID and the connector is an active converter,
++			 * as there is no display downstream.
++			 */
++			if (dc_is_dp_sst_signal(link->connector_signal) &&
++				(link->dpcd_caps.dongle_type ==
++						DISPLAY_DONGLE_DP_VGA_CONVERTER ||
++				link->dpcd_caps.dongle_type ==
++						DISPLAY_DONGLE_DP_DVI_CONVERTER)) {
++				if (prev_sink)
++					dc_sink_release(prev_sink);
++				link_disconnect_sink(link);
+ 
+ 				return false;
+ 			}
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index fbff3df72e6c6..85cd69bbdec19 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -2366,6 +2366,8 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
+ 
+ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
+ {
++	struct smu_table_context *table_context = &smu->smu_table;
++	PPTable_t *smc_pptable = table_context->driver_pptable;
+ 	struct amdgpu_device *adev = smu->adev;
+ 	uint32_t param = 0;
+ 
+@@ -2373,6 +2375,13 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
+ 	if (adev->asic_type == CHIP_NAVI12)
+ 		return 0;
+ 
++	/*
++	 * Skip the MGpuFanBoost setting for those ASICs
++	 * which do not support it
++	 */
++	if (!smc_pptable->MGpuFanBoostLimitRpm)
++		return 0;
++
+ 	/* Workaround for WS SKU */
+ 	if (adev->pdev->device == 0x7312 &&
+ 	    adev->pdev->revision == 0)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 61438940c26e8..e4761bc161129 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -3015,6 +3015,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
+ 
+ static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
+ {
++	struct smu_table_context *table_context = &smu->smu_table;
++	PPTable_t *smc_pptable = table_context->driver_pptable;
++
++	/*
++	 * Skip the MGpuFanBoost setting for those ASICs
++	 * which do not support it
++	 */
++	if (!smc_pptable->MGpuFanBoostLimitRpm)
++		return 0;
++
+ 	return smu_cmn_send_smc_msg_with_param(smu,
+ 					       SMU_MSG_SetMGpuFanBoostLimitRpm,
+ 					       0,
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+index 2ed309534e97a..4a74502977845 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+@@ -127,49 +127,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
+ 	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
+ }
+ 
+-/**
+- * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
+- * @intel_dp: Intel DP struct
+- *
+- * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+- * link training mode if any is detected and read the PHY capabilities for all
+- * detected LTTPRs. In case of an LTTPR detection error or if the number of
+- * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
+- * transparent mode link training mode.
+- *
+- * Returns:
+- *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
+- *       DPRX capabilities are read out.
+- *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
+- *       detection failure and the transparent LT mode was set. The DPRX
+- *       capabilities are read out.
+- *   <0  Reading out the DPRX capabilities failed.
+- */
+-int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
++static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
+ {
+ 	int lttpr_count;
+-	bool ret;
+ 	int i;
+ 
+-	ret = intel_dp_read_lttpr_common_caps(intel_dp);
+-
+-	/* The DPTX shall read the DPRX caps after LTTPR detection. */
+-	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+-		intel_dp_reset_lttpr_common_caps(intel_dp);
+-		return -EIO;
+-	}
+-
+-	if (!ret)
+-		return 0;
+-
+-	/*
+-	 * The 0xF0000-0xF02FF range is only valid if the DPCD revision is
+-	 * at least 1.4.
+-	 */
+-	if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
+-		intel_dp_reset_lttpr_common_caps(intel_dp);
++	if (!intel_dp_read_lttpr_common_caps(intel_dp))
+ 		return 0;
+-	}
+ 
+ 	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
+ 	/*
+@@ -210,6 +174,37 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+ 
+ 	return lttpr_count;
+ }
++
++/**
++ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
++ * @intel_dp: Intel DP struct
++ *
++ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
++ * link training mode if any is detected and read the PHY capabilities for all
++ * detected LTTPRs. In case of an LTTPR detection error or if the number of
++ * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
++ * transparent mode link training mode.
++ *
++ * Returns:
++ *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
++ *       DPRX capabilities are read out.
++ *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
++ *       detection failure and the transparent LT mode was set. The DPRX
++ *       capabilities are read out.
++ *   <0  Reading out the DPRX capabilities failed.
++ */
++int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
++{
++	int lttpr_count = intel_dp_init_lttpr(intel_dp);
++
++	/* The DPTX shall read the DPRX caps after LTTPR detection. */
++	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
++		intel_dp_reset_lttpr_common_caps(intel_dp);
++		return -EIO;
++	}
++
++	return lttpr_count;
++}
+ EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
+ 
+ static u8 dp_voltage_max(u8 preemph)
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index 453d8b4c5763d..07fcd12dca160 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -485,11 +485,12 @@ static int meson_probe_remote(struct platform_device *pdev,
+ static void meson_drv_shutdown(struct platform_device *pdev)
+ {
+ 	struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
+-	struct drm_device *drm = priv->drm;
+ 
+-	DRM_DEBUG_DRIVER("\n");
+-	drm_kms_helper_poll_fini(drm);
+-	drm_atomic_helper_shutdown(drm);
++	if (!priv)
++		return;
++
++	drm_kms_helper_poll_fini(priv->drm);
++	drm_atomic_helper_shutdown(priv->drm);
+ }
+ 
+ static int meson_drv_probe(struct platform_device *pdev)
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 99d446763530e..f9e1c2ceaac05 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -395,11 +395,9 @@ static int i801_check_post(struct i801_priv *priv, int status)
+ 		dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
+ 		/* try to stop the current command */
+ 		dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
+-		outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
+-		       SMBHSTCNT(priv));
++		outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv));
+ 		usleep_range(1000, 2000);
+-		outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
+-		       SMBHSTCNT(priv));
++		outb_p(0, SMBHSTCNT(priv));
+ 
+ 		/* Check if it worked */
+ 		status = inb_p(SMBHSTSTS(priv));
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index bf25acba2ed53..dcde71ae63419 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -478,6 +478,11 @@ static void mtk_i2c_clock_disable(struct mtk_i2c *i2c)
+ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
+ {
+ 	u16 control_reg;
++	u16 intr_stat_reg;
++
++	mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
++	intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
++	mtk_i2c_writew(i2c, intr_stat_reg, OFFSET_INTR_STAT);
+ 
+ 	if (i2c->dev_comp->apdma_sync) {
+ 		writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 62a903fbe9121..7fff57889ff27 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -483,7 +483,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
+ 					 * forces us to send a new START
+ 					 * when we change direction
+ 					 */
++					dev_dbg(i2c->dev,
++						"missing START before write->read\n");
+ 					s3c24xx_i2c_stop(i2c, -EINVAL);
++					break;
+ 				}
+ 
+ 				goto retry_write;
+diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
+index 3ae6ca21a02c6..2d2e630fd4387 100644
+--- a/drivers/i2c/busses/i2c-sh_mobile.c
++++ b/drivers/i2c/busses/i2c-sh_mobile.c
+@@ -807,7 +807,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
+ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
+ 	{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
+ 	{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
+-	{ .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
++	{ .compatible = "renesas,iic-r8a774c0", .data = &v2_freq_calc_dt_config },
+ 	{ .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
+ 	{ .compatible = "renesas,iic-r8a7791", .data = &v2_freq_calc_dt_config },
+ 	{ .compatible = "renesas,iic-r8a7792", .data = &v2_freq_calc_dt_config },
+diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
+index 766c733336045..9c2401c5848ec 100644
+--- a/drivers/iio/adc/ad7124.c
++++ b/drivers/iio/adc/ad7124.c
+@@ -616,6 +616,13 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
+ 		if (ret)
+ 			goto err;
+ 
++		if (channel >= indio_dev->num_channels) {
++			dev_err(indio_dev->dev.parent,
++				"Channel index >= number of channels\n");
++			ret = -EINVAL;
++			goto err;
++		}
++
+ 		ret = of_property_read_u32_array(child, "diff-channels",
+ 						 ain, 2);
+ 		if (ret)
+@@ -707,6 +714,11 @@ static int ad7124_setup(struct ad7124_state *st)
+ 	return ret;
+ }
+ 
++static void ad7124_reg_disable(void *r)
++{
++	regulator_disable(r);
++}
++
+ static int ad7124_probe(struct spi_device *spi)
+ {
+ 	const struct ad7124_chip_info *info;
+@@ -752,17 +764,20 @@ static int ad7124_probe(struct spi_device *spi)
+ 		ret = regulator_enable(st->vref[i]);
+ 		if (ret)
+ 			return ret;
++
++		ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable,
++					       st->vref[i]);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	st->mclk = devm_clk_get(&spi->dev, "mclk");
+-	if (IS_ERR(st->mclk)) {
+-		ret = PTR_ERR(st->mclk);
+-		goto error_regulator_disable;
+-	}
++	if (IS_ERR(st->mclk))
++		return PTR_ERR(st->mclk);
+ 
+ 	ret = clk_prepare_enable(st->mclk);
+ 	if (ret < 0)
+-		goto error_regulator_disable;
++		return ret;
+ 
+ 	ret = ad7124_soft_reset(st);
+ 	if (ret < 0)
+@@ -792,11 +807,6 @@ error_remove_trigger:
+ 	ad_sd_cleanup_buffer_and_trigger(indio_dev);
+ error_clk_disable_unprepare:
+ 	clk_disable_unprepare(st->mclk);
+-error_regulator_disable:
+-	for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
+-		if (!IS_ERR_OR_NULL(st->vref[i]))
+-			regulator_disable(st->vref[i]);
+-	}
+ 
+ 	return ret;
+ }
+@@ -805,17 +815,11 @@ static int ad7124_remove(struct spi_device *spi)
+ {
+ 	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ 	struct ad7124_state *st = iio_priv(indio_dev);
+-	int i;
+ 
+ 	iio_device_unregister(indio_dev);
+ 	ad_sd_cleanup_buffer_and_trigger(indio_dev);
+ 	clk_disable_unprepare(st->mclk);
+ 
+-	for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
+-		if (!IS_ERR_OR_NULL(st->vref[i]))
+-			regulator_disable(st->vref[i]);
+-	}
+-
+ 	return 0;
+ }
+ 
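The ad7124 conversion above uses devm_add_action_or_reset() to retire the manual regulator_disable() unwinding. The general pattern, as a sketch with hypothetical names: register the undo action immediately after the acquire succeeds, and the device core then runs it on probe failure and on remove.

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void demo_reg_disable(void *r)
{
	regulator_disable(r);
}

static int demo_enable_vref(struct device *dev, struct regulator *vref)
{
	int ret;

	ret = regulator_enable(vref);
	if (ret)
		return ret;

	/* Runs demo_reg_disable() on error paths and on remove; if
	 * registering the action fails, it is executed immediately. */
	return devm_add_action_or_reset(dev, demo_reg_disable, vref);
}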
+diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
+index 2ed580521d815..1141cc13a1249 100644
+--- a/drivers/iio/adc/ad7192.c
++++ b/drivers/iio/adc/ad7192.c
+@@ -912,7 +912,7 @@ static int ad7192_probe(struct spi_device *spi)
+ {
+ 	struct ad7192_state *st;
+ 	struct iio_dev *indio_dev;
+-	int ret, voltage_uv = 0;
++	int ret;
+ 
+ 	if (!spi->irq) {
+ 		dev_err(&spi->dev, "no IRQ?\n");
+@@ -949,15 +949,12 @@ static int ad7192_probe(struct spi_device *spi)
+ 		goto error_disable_avdd;
+ 	}
+ 
+-	voltage_uv = regulator_get_voltage(st->avdd);
+-
+-	if (voltage_uv > 0) {
+-		st->int_vref_mv = voltage_uv / 1000;
+-	} else {
+-		ret = voltage_uv;
++	ret = regulator_get_voltage(st->avdd);
++	if (ret < 0) {
+ 		dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
+ 		goto error_disable_avdd;
+ 	}
++	st->int_vref_mv = ret / 1000;
+ 
+ 	spi_set_drvdata(spi, indio_dev);
+ 	st->chip_info = of_device_get_match_data(&spi->dev);
+@@ -1014,7 +1011,9 @@ static int ad7192_probe(struct spi_device *spi)
+ 	return 0;
+ 
+ error_disable_clk:
+-	clk_disable_unprepare(st->mclk);
++	if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
++	    st->clock_sel == AD7192_CLK_EXT_MCLK2)
++		clk_disable_unprepare(st->mclk);
+ error_remove_trigger:
+ 	ad_sd_cleanup_buffer_and_trigger(indio_dev);
+ error_disable_dvdd:
+@@ -1031,7 +1030,9 @@ static int ad7192_remove(struct spi_device *spi)
+ 	struct ad7192_state *st = iio_priv(indio_dev);
+ 
+ 	iio_device_unregister(indio_dev);
+-	clk_disable_unprepare(st->mclk);
++	if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
++	    st->clock_sel == AD7192_CLK_EXT_MCLK2)
++		clk_disable_unprepare(st->mclk);
+ 	ad_sd_cleanup_buffer_and_trigger(indio_dev);
+ 
+ 	regulator_disable(st->dvdd);
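The ad7192 hunk above also fixes the return-value handling of regulator_get_voltage(), which returns either a voltage in microvolts or a negative errno in the same int. A sketch of the corrected idiom, with a hypothetical helper name:

#include <linux/regulator/consumer.h>

static int demo_read_vref_mv(struct regulator *avdd)
{
	int uv = regulator_get_voltage(avdd);

	/* Negative values are errors, not voltages. */
	if (uv < 0)
		return uv;

	return uv / 1000;	/* microvolts to millivolts */
}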
+diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
+index 5c0cbee032308..293d18f0b7fe0 100644
+--- a/drivers/iio/adc/ad7768-1.c
++++ b/drivers/iio/adc/ad7768-1.c
+@@ -167,6 +167,10 @@ struct ad7768_state {
+ 	 * transfer buffers to live in their own cache lines.
+ 	 */
+ 	union {
++		struct {
++			__be32 chan;
++			s64 timestamp;
++		} scan;
+ 		__be32 d32;
+ 		u8 d8[2];
+ 	} data ____cacheline_aligned;
+@@ -469,11 +473,11 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
+ 
+ 	mutex_lock(&st->lock);
+ 
+-	ret = spi_read(st->spi, &st->data.d32, 3);
++	ret = spi_read(st->spi, &st->data.scan.chan, 3);
+ 	if (ret < 0)
+ 		goto err_unlock;
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, &st->data.d32,
++	iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
+ 					   iio_get_time_ns(indio_dev));
+ 
+ 	iio_trigger_notify_done(indio_dev->trig);
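The ad7768-1 fix above illustrates the buffer-layout contract of iio_push_to_buffers_with_timestamp(): the pushed buffer must reserve room for an s64 timestamp at an 8-byte-aligned offset after the channel data. Sketched below; the struct name is hypothetical, the field sizes match the driver:

#include <linux/types.h>

struct demo_scan {
	__be32 chan;			/* 24-bit sample in 32 bits */
	s64 timestamp __aligned(8);	/* filled in by the IIO core */
};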
+diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
+index 5e980a06258e6..440ef4c7be074 100644
+--- a/drivers/iio/adc/ad7793.c
++++ b/drivers/iio/adc/ad7793.c
+@@ -279,6 +279,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
+ 	id &= AD7793_ID_MASK;
+ 
+ 	if (id != st->chip_info->id) {
++		ret = -ENODEV;
+ 		dev_err(&st->sd.spi->dev, "device ID query failed\n");
+ 		goto out;
+ 	}
+diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
+index a2cc966580540..8c1e866f72e85 100644
+--- a/drivers/iio/adc/ad7923.c
++++ b/drivers/iio/adc/ad7923.c
+@@ -59,8 +59,10 @@ struct ad7923_state {
+ 	/*
+ 	 * DMA (thus cache coherency maintenance) requires the
+ 	 * transfer buffers to live in their own cache lines.
++	 * Ensure rx_buf can be used directly in iio_push_to_buffers_with_timestamp().
++	 * Length = 8 channels + 4 extra for the 8-byte timestamp.
+ 	 */
+-	__be16				rx_buf[4] ____cacheline_aligned;
++	__be16				rx_buf[12] ____cacheline_aligned;
+ 	__be16				tx_buf[4];
+ };
+ 
+diff --git a/drivers/iio/dac/ad5770r.c b/drivers/iio/dac/ad5770r.c
+index 84dcf149261f9..42decba1463cc 100644
+--- a/drivers/iio/dac/ad5770r.c
++++ b/drivers/iio/dac/ad5770r.c
+@@ -524,23 +524,29 @@ static int ad5770r_channel_config(struct ad5770r_state *st)
+ 	device_for_each_child_node(&st->spi->dev, child) {
+ 		ret = fwnode_property_read_u32(child, "num", &num);
+ 		if (ret)
+-			return ret;
+-		if (num >= AD5770R_MAX_CHANNELS)
+-			return -EINVAL;
++			goto err_child_out;
++		if (num >= AD5770R_MAX_CHANNELS) {
++			ret = -EINVAL;
++			goto err_child_out;
++		}
+ 
+ 		ret = fwnode_property_read_u32_array(child,
+ 						     "adi,range-microamp",
+ 						     tmp, 2);
+ 		if (ret)
+-			return ret;
++			goto err_child_out;
+ 
+ 		min = tmp[0] / 1000;
+ 		max = tmp[1] / 1000;
+ 		ret = ad5770r_store_output_range(st, min, max, num);
+ 		if (ret)
+-			return ret;
++			goto err_child_out;
+ 	}
+ 
++	return 0;
++
++err_child_out:
++	fwnode_handle_put(child);
+ 	return ret;
+ }
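The ad5770r fix above is about fwnode reference counting: device_for_each_child_node() holds a reference on each child it hands out, so any early exit from the loop must drop the current child's reference. A minimal sketch of the rule, with hypothetical function and property names:

#include <linux/property.h>

static int demo_parse_children(struct device *dev)
{
	struct fwnode_handle *child;
	u32 num;
	int ret;

	device_for_each_child_node(dev, child) {
		ret = fwnode_property_read_u32(child, "num", &num);
		if (ret) {
			/* Early exit: drop the reference the iterator holds. */
			fwnode_handle_put(child);
			return ret;
		}
	}

	return 0;
}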
+ 
+diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
+index 129eead8febc0..b7523357d8eba 100644
+--- a/drivers/iio/gyro/fxas21002c_core.c
++++ b/drivers/iio/gyro/fxas21002c_core.c
+@@ -399,6 +399,7 @@ static int fxas21002c_temp_get(struct fxas21002c_data *data, int *val)
+ 	ret = regmap_field_read(data->regmap_fields[F_TEMP], &temp);
+ 	if (ret < 0) {
+ 		dev_err(dev, "failed to read temp: %d\n", ret);
++		fxas21002c_pm_put(data);
+ 		goto data_unlock;
+ 	}
+ 
+@@ -432,6 +433,7 @@ static int fxas21002c_axis_get(struct fxas21002c_data *data,
+ 			       &axis_be, sizeof(axis_be));
+ 	if (ret < 0) {
+ 		dev_err(dev, "failed to read axis: %d: %d\n", index, ret);
++		fxas21002c_pm_put(data);
+ 		goto data_unlock;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index ea8f068a6da3e..e0dec6f9d8d8f 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -764,10 +764,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+ 		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
+ 			   MLX5_IB_UMR_OCTOWORD;
+ 		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+-		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
++		if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
+ 		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
+ 		    mlx5_ib_can_load_pas_with_umr(dev, 0))
+-			ent->limit = dev->mdev->profile->mr_cache[i].limit;
++			ent->limit = dev->mdev->profile.mr_cache[i].limit;
+ 		else
+ 			ent->limit = 0;
+ 		spin_lock_irq(&ent->lock);
+diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
+index 1cc565bce2f4d..1da6cea8ecbc4 100644
+--- a/drivers/interconnect/qcom/bcm-voter.c
++++ b/drivers/interconnect/qcom/bcm-voter.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+  */
+ 
+ #include <asm/div64.h>
+@@ -205,6 +205,7 @@ struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
+ 	}
+ 	mutex_unlock(&bcm_voter_lock);
+ 
++	of_node_put(node);
+ 	return voter;
+ }
+ EXPORT_SYMBOL_GPL(of_bcm_voter_get);
+@@ -362,6 +363,7 @@ static const struct of_device_id bcm_voter_of_match[] = {
+ 	{ .compatible = "qcom,bcm-voter" },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, bcm_voter_of_match);
+ 
+ static struct platform_driver qcom_icc_bcm_voter_driver = {
+ 	.probe = qcom_icc_bcm_voter_probe,
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index a69a8b573e40d..7de7a260706b3 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -1747,6 +1747,8 @@ static void amd_iommu_probe_finalize(struct device *dev)
+ 	domain = iommu_get_domain_for_dev(dev);
+ 	if (domain->type == IOMMU_DOMAIN_DMA)
+ 		iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
++	else
++		set_dma_ops(dev, NULL);
+ }
+ 
+ static void amd_iommu_release_device(struct device *dev)
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index d5c51b5c20aff..c2bfccb19e24d 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1144,7 +1144,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
+ 
+ 		err = iommu_device_register(&iommu->iommu);
+ 		if (err)
+-			goto err_unmap;
++			goto err_sysfs;
+ 	}
+ 
+ 	drhd->iommu = iommu;
+@@ -1152,6 +1152,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
+ 
+ 	return 0;
+ 
++err_sysfs:
++	iommu_device_sysfs_remove(&iommu->iommu);
+ err_unmap:
+ 	unmap_iommu(iommu);
+ error_free_seq_id:
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 7e551da6c1fbd..56930e0b8f590 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -2525,9 +2525,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
+ 				    struct device *dev,
+ 				    u32 pasid)
+ {
+-	int flags = PASID_FLAG_SUPERVISOR_MODE;
+ 	struct dma_pte *pgd = domain->pgd;
+ 	int agaw, level;
++	int flags = 0;
+ 
+ 	/*
+ 	 * Skip top levels of page tables for iommu which has
+@@ -2543,7 +2543,10 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
+ 	if (level != 4 && level != 5)
+ 		return -EINVAL;
+ 
+-	flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
++	if (pasid != PASID_RID2PASID)
++		flags |= PASID_FLAG_SUPERVISOR_MODE;
++	if (level == 5)
++		flags |= PASID_FLAG_FL5LP;
+ 
+ 	if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
+ 		flags |= PASID_FLAG_PAGE_SNOOP;
+@@ -4626,6 +4629,8 @@ static int auxiliary_link_device(struct dmar_domain *domain,
+ 
+ 	if (!sinfo) {
+ 		sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
++		if (!sinfo)
++			return -ENOMEM;
+ 		sinfo->domain = domain;
+ 		sinfo->pdev = dev;
+ 		list_add(&sinfo->link_phys, &info->subdevices);
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index 5093d317ff1a2..77fbe9908abdc 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -663,7 +663,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
+ 	 * Since it is a second level only translation setup, we should
+ 	 * set SRE bit as well (addresses are expected to be GPAs).
+ 	 */
+-	pasid_set_sre(pte);
++	if (pasid != PASID_RID2PASID)
++		pasid_set_sre(pte);
+ 	pasid_set_present(pte);
+ 	pasid_flush_caches(iommu, pte, pasid, did);
+ 
+diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
+index 2bfdd57348443..81dea4caf561f 100644
+--- a/drivers/iommu/virtio-iommu.c
++++ b/drivers/iommu/virtio-iommu.c
+@@ -1138,6 +1138,7 @@ static struct virtio_device_id id_table[] = {
+ 	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
+ 	{ 0 },
+ };
++MODULE_DEVICE_TABLE(virtio, id_table);
+ 
+ static struct virtio_driver virtio_iommu_drv = {
+ 	.driver.name		= KBUILD_MODNAME,
+diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
+index 70061991915a5..cd5642cef01fd 100644
+--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
+@@ -46,7 +46,7 @@ static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel);
+ static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel);
+ static int  hfcsusb_setup_bch(struct bchannel *bch, int protocol);
+ static void deactivate_bchannel(struct bchannel *bch);
+-static void hfcsusb_ph_info(struct hfcsusb *hw);
++static int  hfcsusb_ph_info(struct hfcsusb *hw);
+ 
+ /* start next background transfer for control channel */
+ static void
+@@ -241,7 +241,7 @@ hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
+  * send full D/B channel status information
+  * as MPH_INFORMATION_IND
+  */
+-static void
++static int
+ hfcsusb_ph_info(struct hfcsusb *hw)
+ {
+ 	struct ph_info *phi;
+@@ -250,7 +250,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
+ 
+ 	phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
+ 	if (!phi)
+-		return;
++		return -ENOMEM;
+ 
+ 	phi->dch.ch.protocol = hw->protocol;
+ 	phi->dch.ch.Flags = dch->Flags;
+@@ -263,6 +263,8 @@ hfcsusb_ph_info(struct hfcsusb *hw)
+ 	_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
+ 		    struct_size(phi, bch, dch->dev.nrbchan), phi, GFP_ATOMIC);
+ 	kfree(phi);
++
++	return 0;
+ }
+ 
+ /*
+@@ -347,8 +349,7 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+ 			ret = l1_event(dch->l1, hh->prim);
+ 		break;
+ 	case MPH_INFORMATION_REQ:
+-		hfcsusb_ph_info(hw);
+-		ret = 0;
++		ret = hfcsusb_ph_info(hw);
+ 		break;
+ 	}
+ 
+@@ -403,8 +404,7 @@ hfc_l1callback(struct dchannel *dch, u_int cmd)
+ 			       hw->name, __func__, cmd);
+ 		return -1;
+ 	}
+-	hfcsusb_ph_info(hw);
+-	return 0;
++	return hfcsusb_ph_info(hw);
+ }
+ 
+ static int
+@@ -746,8 +746,7 @@ hfcsusb_setup_bch(struct bchannel *bch, int protocol)
+ 			handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
+ 				   LED_B2_OFF);
+ 	}
+-	hfcsusb_ph_info(hw);
+-	return 0;
++	return hfcsusb_ph_info(hw);
+ }
+ 
+ static void
+diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+index a16c7a2a7f3d0..88d592bafdb02 100644
+--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
++++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+@@ -630,17 +630,19 @@ static void
+ release_io(struct inf_hw *hw)
+ {
+ 	if (hw->cfg.mode) {
+-		if (hw->cfg.p) {
++		if (hw->cfg.mode == AM_MEMIO) {
+ 			release_mem_region(hw->cfg.start, hw->cfg.size);
+-			iounmap(hw->cfg.p);
++			if (hw->cfg.p)
++				iounmap(hw->cfg.p);
+ 		} else
+ 			release_region(hw->cfg.start, hw->cfg.size);
+ 		hw->cfg.mode = AM_NONE;
+ 	}
+ 	if (hw->addr.mode) {
+-		if (hw->addr.p) {
++		if (hw->addr.mode == AM_MEMIO) {
+ 			release_mem_region(hw->addr.start, hw->addr.size);
+-			iounmap(hw->addr.p);
++			if (hw->addr.p)
++				iounmap(hw->addr.p);
+ 		} else
+ 			release_region(hw->addr.start, hw->addr.size);
+ 		hw->addr.mode = AM_NONE;
+@@ -670,9 +672,12 @@ setup_io(struct inf_hw *hw)
+ 				(ulong)hw->cfg.start, (ulong)hw->cfg.size);
+ 			return err;
+ 		}
+-		if (hw->ci->cfg_mode == AM_MEMIO)
+-			hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
+ 		hw->cfg.mode = hw->ci->cfg_mode;
++		if (hw->ci->cfg_mode == AM_MEMIO) {
++			hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
++			if (!hw->cfg.p)
++				return -ENOMEM;
++		}
+ 		if (debug & DEBUG_HW)
+ 			pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n",
+ 				  hw->name, (ulong)hw->cfg.start,
+@@ -697,12 +702,12 @@ setup_io(struct inf_hw *hw)
+ 				(ulong)hw->addr.start, (ulong)hw->addr.size);
+ 			return err;
+ 		}
++		hw->addr.mode = hw->ci->addr_mode;
+ 		if (hw->ci->addr_mode == AM_MEMIO) {
+ 			hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
+-			if (unlikely(!hw->addr.p))
++			if (!hw->addr.p)
+ 				return -ENOMEM;
+ 		}
+-		hw->addr.mode = hw->ci->addr_mode;
+ 		if (debug & DEBUG_HW)
+ 			pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n",
+ 				  hw->name, (ulong)hw->addr.start,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 962f7df0691ef..41735a25d50aa 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -854,7 +854,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
+ static uint32_t __minimum_chunk_size(struct origin *o)
+ {
+ 	struct dm_snapshot *snap;
+-	unsigned chunk_size = 0;
++	unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
+ 
+ 	if (o)
+ 		list_for_each_entry(snap, &o->snapshots, list)
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 5d57a5bd171fa..c959e0234871b 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5310,8 +5310,6 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
+ 	unsigned int chunk_sectors;
+ 	unsigned int bio_sectors = bio_sectors(bio);
+ 
+-	WARN_ON_ONCE(bio->bi_bdev->bd_partno);
+-
+ 	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
+ 	return  chunk_sectors >=
+ 		((sector & (chunk_sectors - 1)) + bio_sectors);
+diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
+index 655db8272268d..9767159aeb9b2 100644
+--- a/drivers/media/dvb-frontends/sp8870.c
++++ b/drivers/media/dvb-frontends/sp8870.c
+@@ -281,7 +281,7 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
+ 
+ 	// read status reg in order to clear pending irqs
+ 	err = sp8870_readreg(state, 0x200);
+-	if (err)
++	if (err < 0)
+ 		return err;
+ 
+ 	// system controller start
+diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
+index a4f7431486f31..d93d384286c16 100644
+--- a/drivers/media/usb/gspca/cpia1.c
++++ b/drivers/media/usb/gspca/cpia1.c
+@@ -1424,7 +1424,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
+ {
+ 	struct sd *sd = (struct sd *) gspca_dev;
+ 	struct cam *cam;
+-	int ret;
+ 
+ 	sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
+ 	reset_camera_params(gspca_dev);
+@@ -1436,10 +1435,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
+ 	cam->cam_mode = mode;
+ 	cam->nmodes = ARRAY_SIZE(mode);
+ 
+-	ret = goto_low_power(gspca_dev);
+-	if (ret)
+-		gspca_err(gspca_dev, "Cannot go to low power mode: %d\n",
+-			  ret);
++	goto_low_power(gspca_dev);
+ 	/* Check the firmware version. */
+ 	sd->params.version.firmwareVersion = 0;
+ 	get_version_information(gspca_dev);
+diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
+index bfa3b381d8a26..bf1af6ed9131e 100644
+--- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
++++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
+@@ -195,7 +195,7 @@ static const struct v4l2_ctrl_config mt9m111_greenbal_cfg = {
+ int mt9m111_probe(struct sd *sd)
+ {
+ 	u8 data[2] = {0x00, 0x00};
+-	int i, rc = 0;
++	int i, err;
+ 	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
+ 
+ 	if (force_sensor) {
+@@ -213,18 +213,18 @@ int mt9m111_probe(struct sd *sd)
+ 	/* Do the preinit */
+ 	for (i = 0; i < ARRAY_SIZE(preinit_mt9m111); i++) {
+ 		if (preinit_mt9m111[i][0] == BRIDGE) {
+-			rc |= m5602_write_bridge(sd,
+-				preinit_mt9m111[i][1],
+-				preinit_mt9m111[i][2]);
++			err = m5602_write_bridge(sd,
++					preinit_mt9m111[i][1],
++					preinit_mt9m111[i][2]);
+ 		} else {
+ 			data[0] = preinit_mt9m111[i][2];
+ 			data[1] = preinit_mt9m111[i][3];
+-			rc |= m5602_write_sensor(sd,
+-				preinit_mt9m111[i][1], data, 2);
++			err = m5602_write_sensor(sd,
++					preinit_mt9m111[i][1], data, 2);
+ 		}
++		if (err < 0)
++			return err;
+ 	}
+-	if (rc < 0)
+-		return rc;
+ 
+ 	if (m5602_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2))
+ 		return -ENODEV;
+diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.c b/drivers/media/usb/gspca/m5602/m5602_po1030.c
+index d680b777f097f..8fd99ceee4b67 100644
+--- a/drivers/media/usb/gspca/m5602/m5602_po1030.c
++++ b/drivers/media/usb/gspca/m5602/m5602_po1030.c
+@@ -154,8 +154,8 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = {
+ 
+ int po1030_probe(struct sd *sd)
+ {
+-	int rc = 0;
+ 	u8 dev_id_h = 0, i;
++	int err;
+ 	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
+ 
+ 	if (force_sensor) {
+@@ -174,14 +174,14 @@ int po1030_probe(struct sd *sd)
+ 	for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) {
+ 		u8 data = preinit_po1030[i][2];
+ 		if (preinit_po1030[i][0] == SENSOR)
+-			rc |= m5602_write_sensor(sd,
+-				preinit_po1030[i][1], &data, 1);
++			err = m5602_write_sensor(sd, preinit_po1030[i][1],
++						 &data, 1);
+ 		else
+-			rc |= m5602_write_bridge(sd, preinit_po1030[i][1],
+-						data);
++			err = m5602_write_bridge(sd, preinit_po1030[i][1],
++						 data);
++		if (err < 0)
++			return err;
+ 	}
+-	if (rc < 0)
+-		return rc;
+ 
+ 	if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1))
+ 		return -ENODEV;
+diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
+index 2e081a58da6c5..49489153cd162 100644
+--- a/drivers/misc/kgdbts.c
++++ b/drivers/misc/kgdbts.c
+@@ -100,8 +100,9 @@
+ 		printk(KERN_INFO a);	\
+ } while (0)
+ #define v2printk(a...) do {		\
+-	if (verbose > 1)		\
++	if (verbose > 1) {		\
+ 		printk(KERN_INFO a);	\
++	}				\
+ 	touch_nmi_watchdog();		\
+ } while (0)
+ #define eprintk(a...) do {		\
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
+index c394c0b08519a..7ac788fae1b86 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.h
++++ b/drivers/misc/lis3lv02d/lis3lv02d.h
+@@ -271,6 +271,7 @@ struct lis3lv02d {
+ 	int			regs_size;
+ 	u8                      *reg_cache;
+ 	bool			regs_stored;
++	bool			init_required;
+ 	u8                      odr_mask;  /* ODR bit mask */
+ 	u8			whoami;    /* indicates measurement precision */
+ 	s16 (*read_data) (struct lis3lv02d *lis3, int reg);
+diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
+index a98f6b895af71..aab3ebfa9fc4d 100644
+--- a/drivers/misc/mei/interrupt.c
++++ b/drivers/misc/mei/interrupt.c
+@@ -277,6 +277,9 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
+ 		return ret;
+ 	}
+ 
++	pm_runtime_mark_last_busy(dev->dev);
++	pm_request_autosuspend(dev->dev);
++
+ 	list_move_tail(&cb->list, &cl->rd_pending);
+ 
+ 	return 0;
+diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
+index 6edf78c16fc8b..df40927e56788 100644
+--- a/drivers/mtd/nand/raw/cs553x_nand.c
++++ b/drivers/mtd/nand/raw/cs553x_nand.c
+@@ -18,6 +18,7 @@
+ #include <linux/module.h>
+ #include <linux/delay.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/nand-ecc-sw-hamming.h>
+ #include <linux/mtd/rawnand.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/iopoll.h>
+@@ -240,6 +241,15 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
+ 	return 0;
+ }
+ 
++static int cs553x_ecc_correct(struct nand_chip *chip,
++			      unsigned char *buf,
++			      unsigned char *read_ecc,
++			      unsigned char *calc_ecc)
++{
++	return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
++				      chip->ecc.size, false);
++}
++
+ static struct cs553x_nand_controller *controllers[4];
+ 
+ static int cs553x_attach_chip(struct nand_chip *chip)
+@@ -251,7 +261,7 @@ static int cs553x_attach_chip(struct nand_chip *chip)
+ 	chip->ecc.bytes = 3;
+ 	chip->ecc.hwctl  = cs_enable_hwecc;
+ 	chip->ecc.calculate = cs_calculate_ecc;
+-	chip->ecc.correct  = rawnand_sw_hamming_correct;
++	chip->ecc.correct  = cs553x_ecc_correct;
+ 	chip->ecc.strength = 1;
+ 
+ 	return 0;
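The cs553x change above, and the identical fsmc, lpc32xx_slc, ndfc and sharpsl changes that follow, all swap rawnand_sw_hamming_correct() for a local wrapper around ecc_sw_hamming_correct() with an explicit sm_order of false; the generic helper appears to take its byte ordering from a software-ECC context that these host-ECC setups never initialize. The shared shape of the wrapper:

#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>

static int demo_ecc_correct(struct nand_chip *chip, unsigned char *buf,
			    unsigned char *read_ecc, unsigned char *calc_ecc)
{
	/* Explicitly request non-Smart-Media byte ordering. */
	return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
				      chip->ecc.size, false);
}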
+diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
+index a24e2f57fa68a..99a4dde9825af 100644
+--- a/drivers/mtd/nand/raw/fsmc_nand.c
++++ b/drivers/mtd/nand/raw/fsmc_nand.c
+@@ -25,6 +25,7 @@
+ #include <linux/sched.h>
+ #include <linux/types.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/nand-ecc-sw-hamming.h>
+ #include <linux/mtd/rawnand.h>
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+@@ -432,6 +433,15 @@ static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
+ 	return 0;
+ }
+ 
++static int fsmc_correct_ecc1(struct nand_chip *chip,
++			     unsigned char *buf,
++			     unsigned char *read_ecc,
++			     unsigned char *calc_ecc)
++{
++	return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
++				      chip->ecc.size, false);
++}
++
+ /* Count the number of 0's in buff up to a max of max_bits */
+ static int count_written_bits(u8 *buff, int size, int max_bits)
+ {
+@@ -917,7 +927,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
+ 	case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ 		dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
+ 		nand->ecc.calculate = fsmc_read_hwecc_ecc1;
+-		nand->ecc.correct = rawnand_sw_hamming_correct;
++		nand->ecc.correct = fsmc_correct_ecc1;
+ 		nand->ecc.hwctl = fsmc_enable_hwecc;
+ 		nand->ecc.bytes = 3;
+ 		nand->ecc.strength = 1;
+diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
+index 6b7269cfb7d83..d7dfc6fd85ca7 100644
+--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
++++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
+@@ -27,6 +27,7 @@
+ #include <linux/of.h>
+ #include <linux/of_gpio.h>
+ #include <linux/mtd/lpc32xx_slc.h>
++#include <linux/mtd/nand-ecc-sw-hamming.h>
+ 
+ #define LPC32XX_MODNAME		"lpc32xx-nand"
+ 
+@@ -344,6 +345,18 @@ static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
+ 	return 0;
+ }
+ 
++/*
++ * Corrects the data
++ */
++static int lpc32xx_nand_ecc_correct(struct nand_chip *chip,
++				    unsigned char *buf,
++				    unsigned char *read_ecc,
++				    unsigned char *calc_ecc)
++{
++	return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
++				      chip->ecc.size, false);
++}
++
+ /*
+  * Read a single byte from NAND device
+  */
+@@ -802,7 +815,7 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
+ 	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
+ 	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
+ 	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
+-	chip->ecc.correct = rawnand_sw_hamming_correct;
++	chip->ecc.correct = lpc32xx_nand_ecc_correct;
+ 	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
+ 
+ 	/*
+diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
+index 338d6b1a189eb..98d5a94c3a242 100644
+--- a/drivers/mtd/nand/raw/ndfc.c
++++ b/drivers/mtd/nand/raw/ndfc.c
+@@ -22,6 +22,7 @@
+ #include <linux/mtd/ndfc.h>
+ #include <linux/slab.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/nand-ecc-sw-hamming.h>
+ #include <linux/of_address.h>
+ #include <linux/of_platform.h>
+ #include <asm/io.h>
+@@ -100,6 +101,15 @@ static int ndfc_calculate_ecc(struct nand_chip *chip,
+ 	return 0;
+ }
+ 
++static int ndfc_correct_ecc(struct nand_chip *chip,
++			    unsigned char *buf,
++			    unsigned char *read_ecc,
++			    unsigned char *calc_ecc)
++{
++	return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
++				      chip->ecc.size, false);
++}
++
+ /*
+  * Speedups for buffer read/write/verify
+  *
+@@ -145,7 +155,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
+ 	chip->controller = &ndfc->ndfc_control;
+ 	chip->legacy.read_buf = ndfc_read_buf;
+ 	chip->legacy.write_buf = ndfc_write_buf;
+-	chip->ecc.correct = rawnand_sw_hamming_correct;
++	chip->ecc.correct = ndfc_correct_ecc;
+ 	chip->ecc.hwctl = ndfc_enable_hwecc;
+ 	chip->ecc.calculate = ndfc_calculate_ecc;
+ 	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
+index 5612ee628425b..2f1fe464e6637 100644
+--- a/drivers/mtd/nand/raw/sharpsl.c
++++ b/drivers/mtd/nand/raw/sharpsl.c
+@@ -11,6 +11,7 @@
+ #include <linux/module.h>
+ #include <linux/delay.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/nand-ecc-sw-hamming.h>
+ #include <linux/mtd/rawnand.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/mtd/sharpsl.h>
+@@ -96,6 +97,15 @@ static int sharpsl_nand_calculate_ecc(struct nand_chip *chip,
+ 	return readb(sharpsl->io + ECCCNTR) != 0;
+ }
+ 
++static int sharpsl_nand_correct_ecc(struct nand_chip *chip,
++				    unsigned char *buf,
++				    unsigned char *read_ecc,
++				    unsigned char *calc_ecc)
++{
++	return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
++				      chip->ecc.size, false);
++}
++
+ static int sharpsl_attach_chip(struct nand_chip *chip)
+ {
+ 	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+@@ -106,7 +116,7 @@ static int sharpsl_attach_chip(struct nand_chip *chip)
+ 	chip->ecc.strength = 1;
+ 	chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
+ 	chip->ecc.calculate = sharpsl_nand_calculate_ecc;
+-	chip->ecc.correct = rawnand_sw_hamming_correct;
++	chip->ecc.correct = sharpsl_nand_correct_ecc;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
+index de8e919d0ebe6..6d93dd31969b2 100644
+--- a/drivers/mtd/nand/raw/tmio_nand.c
++++ b/drivers/mtd/nand/raw/tmio_nand.c
+@@ -34,6 +34,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/ioport.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/nand-ecc-sw-hamming.h>
+ #include <linux/mtd/rawnand.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/slab.h>
+@@ -292,11 +293,12 @@ static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf,
+ 	int r0, r1;
+ 
+ 	/* assume ecc.size = 512 and ecc.bytes = 6 */
+-	r0 = rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc);
++	r0 = ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
++				    chip->ecc.size, false);
+ 	if (r0 < 0)
+ 		return r0;
+-	r1 = rawnand_sw_hamming_correct(chip, buf + 256, read_ecc + 3,
+-					calc_ecc + 3);
++	r1 = ecc_sw_hamming_correct(buf + 256, read_ecc + 3, calc_ecc + 3,
++				    chip->ecc.size, false);
+ 	if (r1 < 0)
+ 		return r1;
+ 	return r0 + r1;
+diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
+index 1a9449e53bf9d..b8894ac27073c 100644
+--- a/drivers/mtd/nand/raw/txx9ndfmc.c
++++ b/drivers/mtd/nand/raw/txx9ndfmc.c
+@@ -13,6 +13,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/delay.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/nand-ecc-sw-hamming.h>
+ #include <linux/mtd/rawnand.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/io.h>
+@@ -193,8 +194,8 @@ static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf,
+ 	int stat;
+ 
+ 	for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
+-		stat = rawnand_sw_hamming_correct(chip, buf, read_ecc,
+-						  calc_ecc);
++		stat = ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
++					      chip->ecc.size, false);
+ 		if (stat < 0)
+ 			return stat;
+ 		corrected += stat;
+diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
+index 8215cd77301f5..9f30748da4ab9 100644
+--- a/drivers/net/caif/caif_serial.c
++++ b/drivers/net/caif/caif_serial.c
+@@ -269,9 +269,6 @@ static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct ser_device *ser;
+ 
+-	if (WARN_ON(!dev))
+-		return -EINVAL;
+-
+ 	ser = netdev_priv(dev);
+ 
+ 	/* Send flow off once, on high water mark */
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 9c86cacc4a727..23c008e9653b5 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -775,11 +775,9 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+ 	bcm_sf2_sw_mac_link_set(ds, port, interface, true);
+ 
+ 	if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
+-		u32 reg_rgmii_ctrl;
++		u32 reg_rgmii_ctrl = 0;
+ 		u32 reg, offset;
+ 
+-		reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+-
+ 		if (priv->type == BCM4908_DEVICE_ID ||
+ 		    priv->type == BCM7445_DEVICE_ID)
+ 			offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+@@ -790,6 +788,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+ 		    interface == PHY_INTERFACE_MODE_RGMII_TXID ||
+ 		    interface == PHY_INTERFACE_MODE_MII ||
+ 		    interface == PHY_INTERFACE_MODE_REVMII) {
++			reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ 			reg = reg_readl(priv, reg_rgmii_ctrl);
+ 			reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
+ 
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 9871d7cff93a8..2e3a482e29201 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -1214,14 +1214,6 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
+ {
+ 	struct mt7530_priv *priv = ds->priv;
+ 
+-	/* The real fabric path would be decided on the membership in the
+-	 * entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS
+-	 * means potential VLAN can be consisting of certain subset of all
+-	 * ports.
+-	 */
+-	mt7530_rmw(priv, MT7530_PCR_P(port),
+-		   PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
+-
+ 	/* Trapped into security mode allows packet forwarding through VLAN
+ 	 * table lookup. CPU port is set to fallback mode to let untagged
+ 	 * frames pass through.
+diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+index b777d3f375736..12cd04b568030 100644
+--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
++++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+@@ -167,9 +167,10 @@ enum sja1105_hostcmd {
+ 	SJA1105_HOSTCMD_INVALIDATE = 4,
+ };
+ 
++/* Command and entry overlap */
+ static void
+-sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+-			      enum packing_op op)
++sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
++				enum packing_op op)
+ {
+ 	const int size = SJA1105_SIZE_DYN_CMD;
+ 
+@@ -179,6 +180,20 @@ sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ 	sja1105_packing(buf, &cmd->index,    9,  0, size, op);
+ }
+ 
++/* Command and entry are separate */
++static void
++sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
++				  enum packing_op op)
++{
++	u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
++	const int size = SJA1105_SIZE_DYN_CMD;
++
++	sja1105_packing(p, &cmd->valid,   31, 31, size, op);
++	sja1105_packing(p, &cmd->errors,  30, 30, size, op);
++	sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
++	sja1105_packing(p, &cmd->index,    9,  0, size, op);
++}
++
+ static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ 						enum packing_op op)
+ {
+@@ -641,7 +656,7 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
+ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
+ 	[BLK_IDX_VL_LOOKUP] = {
+ 		.entry_packing = sja1105et_vl_lookup_entry_packing,
+-		.cmd_packing = sja1105_vl_lookup_cmd_packing,
++		.cmd_packing = sja1105et_vl_lookup_cmd_packing,
+ 		.access = OP_WRITE,
+ 		.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ 		.packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
+@@ -725,7 +740,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
+ const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
+ 	[BLK_IDX_VL_LOOKUP] = {
+ 		.entry_packing = sja1105_vl_lookup_entry_packing,
+-		.cmd_packing = sja1105_vl_lookup_cmd_packing,
++		.cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
+ 		.access = (OP_READ | OP_WRITE),
+ 		.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ 		.packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index 51ea104c63bbb..926544440f02a 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -26,6 +26,7 @@
+ #include "sja1105_tas.h"
+ 
+ #define SJA1105_UNKNOWN_MULTICAST	0x010000000000ull
++#define SJA1105_DEFAULT_VLAN		(VLAN_N_VID - 1)
+ 
+ static const struct dsa_switch_ops sja1105_switch_ops;
+ 
+@@ -207,6 +208,7 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
+ 		default:
+ 			dev_err(dev, "Unsupported PHY mode %s!\n",
+ 				phy_modes(ports[i].phy_mode));
++			return -EINVAL;
+ 		}
+ 
+ 		/* Even though the SerDes port is able to drive SGMII autoneg
+@@ -321,6 +323,13 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
+ 	return 0;
+ }
+ 
++/* Set up a default VLAN for untagged traffic injected from the CPU
++ * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
++ * All DT-defined ports are members of this VLAN, and there are no
++ * restrictions on forwarding (since the CPU selects the destination).
++ * Frames from this VLAN will always be transmitted as untagged, and
++ * neither the bridge nor the 8021q module can create this VLAN ID.
++ */
+ static int sja1105_init_static_vlan(struct sja1105_private *priv)
+ {
+ 	struct sja1105_table *table;
+@@ -330,17 +339,13 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
+ 		.vmemb_port = 0,
+ 		.vlan_bc = 0,
+ 		.tag_port = 0,
+-		.vlanid = 1,
++		.vlanid = SJA1105_DEFAULT_VLAN,
+ 	};
+ 	struct dsa_switch *ds = priv->ds;
+ 	int port;
+ 
+ 	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+ 
+-	/* The static VLAN table will only contain the initial pvid of 1.
+-	 * All other VLANs are to be configured through dynamic entries,
+-	 * and kept in the static configuration table as backing memory.
+-	 */
+ 	if (table->entry_count) {
+ 		kfree(table->entries);
+ 		table->entry_count = 0;
+@@ -353,9 +358,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
+ 
+ 	table->entry_count = 1;
+ 
+-	/* VLAN 1: all DT-defined ports are members; no restrictions on
+-	 * forwarding; always transmit as untagged.
+-	 */
+ 	for (port = 0; port < ds->num_ports; port++) {
+ 		struct sja1105_bridge_vlan *v;
+ 
+@@ -366,15 +368,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
+ 		pvid.vlan_bc |= BIT(port);
+ 		pvid.tag_port &= ~BIT(port);
+ 
+-		/* Let traffic that don't need dsa_8021q (e.g. STP, PTP) be
+-		 * transmitted as untagged.
+-		 */
+ 		v = kzalloc(sizeof(*v), GFP_KERNEL);
+ 		if (!v)
+ 			return -ENOMEM;
+ 
+ 		v->port = port;
+-		v->vid = 1;
++		v->vid = SJA1105_DEFAULT_VLAN;
+ 		v->untagged = true;
+ 		if (dsa_is_cpu_port(ds, port))
+ 			v->pvid = true;
+@@ -2817,11 +2816,22 @@ static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
+ 	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
+ 	struct sja1105_bridge_vlan *v;
+ 
+-	list_for_each_entry(v, vlan_list, list)
+-		if (v->port == port && v->vid == vid &&
+-		    v->untagged == untagged && v->pvid == pvid)
++	list_for_each_entry(v, vlan_list, list) {
++		if (v->port == port && v->vid == vid) {
+ 			/* Already added */
+-			return 0;
++			if (v->untagged == untagged && v->pvid == pvid)
++				/* Nothing changed */
++				return 0;
++
++			/* It's the same VLAN, but some of the flags changed
++			 * and the user did not bother to delete it first.
++			 * Update it and trigger sja1105_build_vlan_table.
++			 */
++			v->untagged = untagged;
++			v->pvid = pvid;
++			return 1;
++		}
++	}
+ 
+ 	v = kzalloc(sizeof(*v), GFP_KERNEL);
+ 	if (!v) {
+@@ -2976,13 +2986,13 @@ static int sja1105_setup(struct dsa_switch *ds)
+ 	rc = sja1105_static_config_load(priv, ports);
+ 	if (rc < 0) {
+ 		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
+-		return rc;
++		goto out_ptp_clock_unregister;
+ 	}
+ 	/* Configure the CGU (PHY link modes and speeds) */
+ 	rc = sja1105_clocking_setup(priv);
+ 	if (rc < 0) {
+ 		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
+-		return rc;
++		goto out_static_config_free;
+ 	}
+ 	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
+ 	 * The only thing we can do to disable it is lie about what the 802.1Q
+@@ -3003,7 +3013,7 @@ static int sja1105_setup(struct dsa_switch *ds)
+ 
+ 	rc = sja1105_devlink_setup(ds);
+ 	if (rc < 0)
+-		return rc;
++		goto out_static_config_free;
+ 
+ 	/* The DSA/switchdev model brings up switch ports in standalone mode by
+ 	 * default, and that means vlan_filtering is 0 since they're not under
+@@ -3012,6 +3022,17 @@ static int sja1105_setup(struct dsa_switch *ds)
+ 	rtnl_lock();
+ 	rc = sja1105_setup_8021q_tagging(ds, true);
+ 	rtnl_unlock();
++	if (rc)
++		goto out_devlink_teardown;
++
++	return 0;
++
++out_devlink_teardown:
++	sja1105_devlink_teardown(ds);
++out_ptp_clock_unregister:
++	sja1105_ptp_clock_unregister(ds);
++out_static_config_free:
++	sja1105_static_config_free(&priv->static_config);
+ 
+ 	return rc;
+ }
+@@ -3662,8 +3683,10 @@ static int sja1105_probe(struct spi_device *spi)
+ 		priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
+ 					 sizeof(struct sja1105_cbs_entry),
+ 					 GFP_KERNEL);
+-		if (!priv->cbs)
+-			return -ENOMEM;
++		if (!priv->cbs) {
++			rc = -ENOMEM;
++			goto out_unregister_switch;
++		}
+ 	}
+ 
+ 	/* Connections between dsa_port and sja1105_port */
+@@ -3688,7 +3711,7 @@ static int sja1105_probe(struct spi_device *spi)
+ 			dev_err(ds->dev,
+ 				"failed to create deferred xmit thread: %d\n",
+ 				rc);
+-			goto out;
++			goto out_destroy_workers;
+ 		}
+ 		skb_queue_head_init(&sp->xmit_queue);
+ 		sp->xmit_tpid = ETH_P_SJA1105;
+@@ -3698,7 +3721,8 @@ static int sja1105_probe(struct spi_device *spi)
+ 	}
+ 
+ 	return 0;
+-out:
++
++out_destroy_workers:
+ 	while (port-- > 0) {
+ 		struct sja1105_port *sp = &priv->ports[port];
+ 
+@@ -3707,6 +3731,10 @@ out:
+ 
+ 		kthread_destroy_worker(sp->xmit_worker);
+ 	}
++
++out_unregister_switch:
++	dsa_unregister_switch(ds);
++
+ 	return rc;
+ }
+ 
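
The sja1105_setup() and sja1105_probe() hunks above replace early returns with label-based unwinding, so that everything set up before a failure (PTP clock, static config, devlink, the deferred-xmit workers, the registered switch) is torn down in reverse order. A standalone reduction of that idiom with hypothetical resource names:

#include <stdio.h>

/* Each failure jumps to a label that releases everything acquired
 * before it, newest first, so no path can leak partial state. */
static int acquire(const char *what, int ok)
{
	printf("%s %s\n", ok ? "acquired" : "failed to acquire", what);
	return ok ? 0 : -1;
}

static void release(const char *what)
{
	printf("released %s\n", what);
}

static int setup(void)
{
	int rc;

	rc = acquire("ptp clock", 1);
	if (rc)
		return rc;

	rc = acquire("static config", 1);
	if (rc)
		goto out_clock;

	rc = acquire("devlink", 0);	/* simulate a late failure */
	if (rc)
		goto out_config;

	return 0;

out_config:
	release("static config");
out_clock:
	release("ptp clock");
	return rc;
}

int main(void)
{
	return setup() ? 1 : 0;
}
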
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index 3e8a179f39db4..633b103896535 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -8247,9 +8247,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
+ 		BNX2_WR(bp, PCI_COMMAND, reg);
+ 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
+ 		!(bp->flags & BNX2_FLAG_PCIX)) {
+-
+ 		dev_err(&pdev->dev,
+ 			"5706 A1 can only be used in a PCIX bus, aborting\n");
++		rc = -EPERM;
+ 		goto err_out_unmap;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index cf4249d59383c..027997c711aba 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -282,7 +282,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
+ {
+ 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
+ 		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
+-		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
++		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
++		idx == NETXTREME_E_P5_VF_HV);
+ }
+ 
+ #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
+@@ -6919,17 +6920,10 @@ ctx_err:
+ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
+ 				  __le64 *pg_dir)
+ {
+-	u8 pg_size = 0;
+-
+ 	if (!rmem->nr_pages)
+ 		return;
+ 
+-	if (BNXT_PAGE_SHIFT == 13)
+-		pg_size = 1 << 4;
+-	else if (BNXT_PAGE_SIZE == 16)
+-		pg_size = 2 << 4;
+-
+-	*pg_attr = pg_size;
++	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
+ 	if (rmem->depth >= 1) {
+ 		if (rmem->depth == 2)
+ 			*pg_attr |= 2;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 1259e68cba2a7..f6bf69b02de94 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1456,6 +1456,16 @@ struct bnxt_ctx_pg_info {
+ 
+ #define BNXT_BACKING_STORE_CFG_LEGACY_LEN	256
+ 
++#define BNXT_SET_CTX_PAGE_ATTR(attr)					\
++do {									\
++	if (BNXT_PAGE_SIZE == 0x2000)					\
++		attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K;	\
++	else if (BNXT_PAGE_SIZE == 0x10000)				\
++		attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K;	\
++	else								\
++		attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K;	\
++} while (0)
++
+ struct bnxt_ctx_mem_info {
+ 	u32	qp_max_entries;
+ 	u16	qp_min_qp1_entries;
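
The open-coded logic removed from bnxt_hwrm_set_pg_attr() tested BNXT_PAGE_SIZE == 16, a byte count where a 64K comparison was evidently intended, so kernels built with 64K pages fell through to the 4K attribute. The new BNXT_SET_CTX_PAGE_ATTR() macro compares full byte sizes (0x2000 and 0x10000). A hypothetical standalone check of the corrected mapping:

#include <assert.h>

/* Hypothetical stand-ins for the firmware page-size attributes. */
#define PG_SIZE_PG_4K	0
#define PG_SIZE_PG_8K	(1 << 4)
#define PG_SIZE_PG_64K	(2 << 4)

static unsigned int page_attr(unsigned long page_size)
{
	if (page_size == 0x2000)	/* 8K pages */
		return PG_SIZE_PG_8K;
	if (page_size == 0x10000)	/* 64K pages */
		return PG_SIZE_PG_64K;
	return PG_SIZE_PG_4K;		/* default */
}

int main(void)
{
	assert(page_attr(0x10000) == PG_SIZE_PG_64K); /* no longer 4K */
	assert(page_attr(0x2000) == PG_SIZE_PG_8K);
	assert(page_attr(0x1000) == PG_SIZE_PG_4K);
	return 0;
}
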
+diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
+index 7c5af4beedc6d..591229b96257e 100644
+--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
++++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
+@@ -1153,7 +1153,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
+  * @lio: per-network private data
+  * @start_stop: whether to start or stop
+  */
+-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
++static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+ {
+ 	struct octeon_soft_command *sc;
+ 	union octnet_cmd *ncmd;
+@@ -1161,15 +1161,15 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+ 	int retval;
+ 
+ 	if (oct->props[lio->ifidx].rx_on == start_stop)
+-		return;
++		return 0;
+ 
+ 	sc = (struct octeon_soft_command *)
+ 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ 					  16, 0);
+ 	if (!sc) {
+ 		netif_info(lio, rx_err, lio->netdev,
+-			   "Failed to allocate octeon_soft_command\n");
+-		return;
++			   "Failed to allocate octeon_soft_command struct\n");
++		return -ENOMEM;
+ 	}
+ 
+ 	ncmd = (union octnet_cmd *)sc->virtdptr;
+@@ -1192,18 +1192,19 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+ 	if (retval == IQ_SEND_FAILED) {
+ 		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+ 		octeon_free_soft_command(oct, sc);
+-		return;
+ 	} else {
+ 		/* Sleep on a wait queue till the cond flag indicates that the
+ 		 * response arrived or timed-out.
+ 		 */
+ 		retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ 		if (retval)
+-			return;
++			return retval;
+ 
+ 		oct->props[lio->ifidx].rx_on = start_stop;
+ 		WRITE_ONCE(sc->caller_is_done, true);
+ 	}
++
++	return retval;
+ }
+ 
+ /**
+@@ -1778,6 +1779,7 @@ static int liquidio_open(struct net_device *netdev)
+ 	struct octeon_device_priv *oct_priv =
+ 		(struct octeon_device_priv *)oct->priv;
+ 	struct napi_struct *napi, *n;
++	int ret = 0;
+ 
+ 	if (oct->props[lio->ifidx].napi_enabled == 0) {
+ 		tasklet_disable(&oct_priv->droq_tasklet);
+@@ -1813,7 +1815,9 @@ static int liquidio_open(struct net_device *netdev)
+ 	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+ 
+ 	/* tell Octeon to start forwarding packets to host */
+-	send_rx_ctrl_cmd(lio, 1);
++	ret = send_rx_ctrl_cmd(lio, 1);
++	if (ret)
++		return ret;
+ 
+ 	/* start periodical statistics fetch */
+ 	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
+@@ -1824,7 +1828,7 @@ static int liquidio_open(struct net_device *netdev)
+ 	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
+ 		 netdev->name);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /**
+@@ -1838,6 +1842,7 @@ static int liquidio_stop(struct net_device *netdev)
+ 	struct octeon_device_priv *oct_priv =
+ 		(struct octeon_device_priv *)oct->priv;
+ 	struct napi_struct *napi, *n;
++	int ret = 0;
+ 
+ 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+ 
+@@ -1854,7 +1859,9 @@ static int liquidio_stop(struct net_device *netdev)
+ 	lio->link_changes++;
+ 
+ 	/* Tell Octeon that nic interface is down. */
+-	send_rx_ctrl_cmd(lio, 0);
++	ret = send_rx_ctrl_cmd(lio, 0);
++	if (ret)
++		return ret;
+ 
+ 	if (OCTEON_CN23XX_PF(oct)) {
+ 		if (!oct->msix_on)
+@@ -1889,7 +1896,7 @@ static int liquidio_stop(struct net_device *netdev)
+ 
+ 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+index 516f166ceff8c..ffddb3126a323 100644
+--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
++++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+@@ -595,7 +595,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
+  * @lio: per-network private data
+  * @start_stop: whether to start or stop
+  */
+-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
++static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+ {
+ 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ 	struct octeon_soft_command *sc;
+@@ -603,11 +603,16 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+ 	int retval;
+ 
+ 	if (oct->props[lio->ifidx].rx_on == start_stop)
+-		return;
++		return 0;
+ 
+ 	sc = (struct octeon_soft_command *)
+ 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ 					  16, 0);
++	if (!sc) {
++		netif_info(lio, rx_err, lio->netdev,
++			   "Failed to allocate octeon_soft_command struct\n");
++		return -ENOMEM;
++	}
+ 
+ 	ncmd = (union octnet_cmd *)sc->virtdptr;
+ 
+@@ -635,11 +640,13 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+ 		 */
+ 		retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ 		if (retval)
+-			return;
++			return retval;
+ 
+ 		oct->props[lio->ifidx].rx_on = start_stop;
+ 		WRITE_ONCE(sc->caller_is_done, true);
+ 	}
++
++	return retval;
+ }
+ 
+ /**
+@@ -906,6 +913,7 @@ static int liquidio_open(struct net_device *netdev)
+ 	struct octeon_device_priv *oct_priv =
+ 		(struct octeon_device_priv *)oct->priv;
+ 	struct napi_struct *napi, *n;
++	int ret = 0;
+ 
+ 	if (!oct->props[lio->ifidx].napi_enabled) {
+ 		tasklet_disable(&oct_priv->droq_tasklet);
+@@ -932,11 +940,13 @@ static int liquidio_open(struct net_device *netdev)
+ 					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
+ 
+ 	/* tell Octeon to start forwarding packets to host */
+-	send_rx_ctrl_cmd(lio, 1);
++	ret = send_rx_ctrl_cmd(lio, 1);
++	if (ret)
++		return ret;
+ 
+ 	dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /**
+@@ -950,9 +960,12 @@ static int liquidio_stop(struct net_device *netdev)
+ 	struct octeon_device_priv *oct_priv =
+ 		(struct octeon_device_priv *)oct->priv;
+ 	struct napi_struct *napi, *n;
++	int ret = 0;
+ 
+ 	/* tell Octeon to stop forwarding packets to host */
+-	send_rx_ctrl_cmd(lio, 0);
++	ret = send_rx_ctrl_cmd(lio, 0);
++	if (ret)
++		return ret;
+ 
+ 	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+ 	/* Inform that netif carrier is down */
+@@ -986,7 +999,7 @@ static int liquidio_stop(struct net_device *netdev)
+ 
+ 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index bde8494215c41..e664e05b9f026 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -1042,7 +1042,7 @@ void clear_all_filters(struct adapter *adapter)
+ 				cxgb4_del_filter(dev, f->tid, &f->fs);
+ 		}
+ 
+-		sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
++		sb = adapter->tids.stid_base;
+ 		for (i = 0; i < sb; i++) {
+ 			f = (struct filter_entry *)adapter->tids.tid_tab[i];
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 6264bc66a4fc9..421bd9b88028d 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -6480,9 +6480,9 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
+ 
+ 	adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+ 							  direction);
+-	cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
+ 
+ out_unlock:
++	cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
+ 	mutex_unlock(&uld_mutex);
+ }
+ 
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+index a3f5b80888e5a..82b0aa3d0d887 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+@@ -60,6 +60,7 @@ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
+ }
+ 
+ static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
++static void clear_conn_resources(struct chcr_ktls_info *tx_info);
+ /*
+  * chcr_ktls_save_keys: calculate and save crypto keys.
+  * @tx_info - driver specific tls info.
+@@ -365,10 +366,14 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
+ 				chcr_get_ktls_tx_context(tls_ctx);
+ 	struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+ 	struct ch_ktls_port_stats_debug *port_stats;
++	struct chcr_ktls_uld_ctx *u_ctx;
+ 
+ 	if (!tx_info)
+ 		return;
+ 
++	u_ctx = tx_info->adap->uld[CXGB4_ULD_KTLS].handle;
++	if (u_ctx && u_ctx->detach)
++		return;
+ 	/* clear l2t entry */
+ 	if (tx_info->l2te)
+ 		cxgb4_l2t_release(tx_info->l2te);
+@@ -385,6 +390,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
+ 	if (tx_info->tid != -1) {
+ 		cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ 				 tx_info->tid, tx_info->ip_family);
++
++		xa_erase(&u_ctx->tid_list, tx_info->tid);
+ 	}
+ 
+ 	port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+@@ -412,6 +419,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+ 	struct ch_ktls_port_stats_debug *port_stats;
+ 	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
++	struct chcr_ktls_uld_ctx *u_ctx;
+ 	struct chcr_ktls_info *tx_info;
+ 	struct dst_entry *dst;
+ 	struct adapter *adap;
+@@ -426,6 +434,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ 	adap = pi->adapter;
+ 	port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
+ 	atomic64_inc(&port_stats->ktls_tx_connection_open);
++	u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
+ 
+ 	if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
+ 		pr_err("not expecting for RX direction\n");
+@@ -435,6 +444,9 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ 	if (tx_ctx->chcr_info)
+ 		goto out;
+ 
++	if (u_ctx && u_ctx->detach)
++		goto out;
++
+ 	tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
+ 	if (!tx_info)
+ 		goto out;
+@@ -570,6 +582,8 @@ free_tid:
+ 	cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ 			 tx_info->tid, tx_info->ip_family);
+ 
++	xa_erase(&u_ctx->tid_list, tx_info->tid);
++
+ put_module:
+ 	/* release module refcount */
+ 	module_put(THIS_MODULE);
+@@ -634,8 +648,12 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
+ {
+ 	const struct cpl_act_open_rpl *p = (void *)input;
+ 	struct chcr_ktls_info *tx_info = NULL;
++	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
++	struct chcr_ktls_uld_ctx *u_ctx;
+ 	unsigned int atid, tid, status;
++	struct tls_context *tls_ctx;
+ 	struct tid_info *t;
++	int ret = 0;
+ 
+ 	tid = GET_TID(p);
+ 	status = AOPEN_STATUS_G(ntohl(p->atid_status));
+@@ -667,14 +685,29 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
+ 	if (!status) {
+ 		tx_info->tid = tid;
+ 		cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
++		/* Adding tid */
++		tls_ctx = tls_get_ctx(tx_info->sk);
++		tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
++		u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
++		if (u_ctx) {
++			ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
++					   GFP_NOWAIT);
++			if (ret < 0) {
++				pr_err("%s: Failed to allocate tid XA entry = %d\n",
++				       __func__, tx_info->tid);
++				tx_info->open_state = CH_KTLS_OPEN_FAILURE;
++				goto out;
++			}
++		}
+ 		tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
+ 	} else {
+ 		tx_info->open_state = CH_KTLS_OPEN_FAILURE;
+ 	}
++out:
+ 	spin_unlock(&tx_info->lock);
+ 
+ 	complete(&tx_info->completion);
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+@@ -2092,6 +2125,8 @@ static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
+ 		goto out;
+ 	}
+ 	u_ctx->lldi = *lldi;
++	u_ctx->detach = false;
++	xa_init_flags(&u_ctx->tid_list, XA_FLAGS_LOCK_BH);
+ out:
+ 	return u_ctx;
+ }
+@@ -2125,6 +2160,45 @@ static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
+ 	return 0;
+ }
+ 
++static void clear_conn_resources(struct chcr_ktls_info *tx_info)
++{
++	/* clear l2t entry */
++	if (tx_info->l2te)
++		cxgb4_l2t_release(tx_info->l2te);
++
++#if IS_ENABLED(CONFIG_IPV6)
++	/* clear clip entry */
++	if (tx_info->ip_family == AF_INET6)
++		cxgb4_clip_release(tx_info->netdev, (const u32 *)
++				   &tx_info->sk->sk_v6_rcv_saddr,
++				   1);
++#endif
++
++	/* clear tid */
++	if (tx_info->tid != -1)
++		cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
++				 tx_info->tid, tx_info->ip_family);
++}
++
++static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
++{
++	struct ch_ktls_port_stats_debug *port_stats;
++	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
++	struct chcr_ktls_info *tx_info;
++	unsigned long index;
++
++	xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
++		tx_info = tx_ctx->chcr_info;
++		clear_conn_resources(tx_info);
++		port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
++		atomic64_inc(&port_stats->ktls_tx_connection_close);
++		kvfree(tx_info);
++		tx_ctx->chcr_info = NULL;
++		/* release module refcount */
++		module_put(THIS_MODULE);
++	}
++}
++
+ static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
+ {
+ 	struct chcr_ktls_uld_ctx *u_ctx = handle;
+@@ -2141,7 +2215,10 @@ static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
+ 	case CXGB4_STATE_DETACH:
+ 		pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
+ 		mutex_lock(&dev_mutex);
++		u_ctx->detach = true;
+ 		list_del(&u_ctx->entry);
++		ch_ktls_reset_all_conn(u_ctx);
++		xa_destroy(&u_ctx->tid_list);
+ 		mutex_unlock(&dev_mutex);
+ 		break;
+ 	default:
+@@ -2180,6 +2257,7 @@ static void __exit chcr_ktls_exit(void)
+ 		adap = pci_get_drvdata(u_ctx->lldi.pdev);
+ 		memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
+ 		list_del(&u_ctx->entry);
++		xa_destroy(&u_ctx->tid_list);
+ 		kfree(u_ctx);
+ 	}
+ 	mutex_unlock(&dev_mutex);
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+index 18b3b1f024156..10572dc55365a 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
++++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+@@ -75,6 +75,8 @@ struct chcr_ktls_ofld_ctx_tx {
+ struct chcr_ktls_uld_ctx {
+ 	struct list_head entry;
+ 	struct cxgb4_lld_info lldi;
++	struct xarray tid_list;
++	bool detach;
+ };
+ 
+ static inline struct chcr_ktls_ofld_ctx_tx *
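
The ch_ktls hunks share one theme: every offloaded TID is now recorded in an xarray on the ULD context, so a CXGB4_STATE_DETACH event can walk the surviving connections, release their resources, and (via the detach flag) refuse adds that race with teardown. A hedged sketch of that bookkeeping with hypothetical types; the xarray calls themselves are the real kernel API:

#include <linux/xarray.h>

struct conn {
	int tid;
	/* ... per-connection offload state ... */
};

/* Hypothetical TID registry, mirroring tid_list above. */
static DEFINE_XARRAY_FLAGS(tid_registry, XA_FLAGS_LOCK_BH);

static int conn_track(struct conn *c)
{
	/* GFP_NOWAIT: the real insert runs in a completion handler */
	return xa_insert_bh(&tid_registry, c->tid, c, GFP_NOWAIT);
}

static void conn_untrack(struct conn *c)
{
	xa_erase(&tid_registry, c->tid);
}

static void conn_reset_all(void)
{
	struct conn *c;
	unsigned long tid;

	xa_for_each(&tid_registry, tid, c)	/* every live connection */
		conn_untrack(c);
	xa_destroy(&tid_registry);
}
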
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+index 188d871f6b8cd..c320cc8ca68d6 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+@@ -1564,8 +1564,10 @@ found_ok_skb:
+ 			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
+ 					sizeof(thdr->type), &thdr->type);
+ 
+-			if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
+-				return -EIO;
++			if (cerr && thdr->type != TLS_RECORD_TYPE_DATA) {
++				copied = -EIO;
++				break;
++			}
+ 			/*  don't send tls header, skip copy */
+ 			goto skip_copy;
+ 		}
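
The chtls_io hunk is a lock-discipline fix: the receive loop runs with the socket lock held, so returning -EIO from the middle of it skipped the common exit that releases the lock. Recording the error in copied and breaking out reaches that exit instead. A reduced standalone shape of the pattern, with a pthread mutex standing in for the socket lock:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Never return while the lock is held: record the error and break to
 * the common exit, which always unlocks. */
static int recv_loop(int niter, int bad_iter)
{
	int copied = 0;
	int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < niter; i++) {
		if (i == bad_iter) {
			copied = -EIO;	/* was: return -EIO, leaking the lock */
			break;
		}
		copied++;
	}
	pthread_mutex_unlock(&lock);
	return copied;
}
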
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 70aea9c274fed..89393fbc726f6 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3282,7 +3282,9 @@ static int fec_enet_init(struct net_device *ndev)
+ 		return ret;
+ 	}
+ 
+-	fec_enet_alloc_queue(ndev);
++	ret = fec_enet_alloc_queue(ndev);
++	if (ret)
++		return ret;
+ 
+ 	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
+ 
+@@ -3290,7 +3292,8 @@ static int fec_enet_init(struct net_device *ndev)
+ 	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
+ 				       GFP_KERNEL);
+ 	if (!cbd_base) {
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto free_queue_mem;
+ 	}
+ 
+ 	/* Get the Ethernet address */
+@@ -3368,6 +3371,10 @@ static int fec_enet_init(struct net_device *ndev)
+ 		fec_enet_update_ethtool_stats(ndev);
+ 
+ 	return 0;
++
++free_queue_mem:
++	fec_enet_free_queue(ndev);
++	return ret;
+ }
+ 
+ #ifdef CONFIG_OF
+diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+index a7b7a4aace791..b0c0504950d81 100644
+--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
++++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+@@ -548,8 +548,8 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
+ 
+     base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
+     if (!base) {
+-	    pcmcia_release_window(link, link->resource[2]);
+-	    return -ENOMEM;
++	pcmcia_release_window(link, link->resource[2]);
++	return -1;
+     }
+ 
+     pcmcia_map_mem_page(link, link->resource[2], 0);
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 7302498c6df36..bbc423e931223 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -180,7 +180,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
+ 	/* Double check we have no extra work.
+ 	 * Ensure unmask synchronizes with checking for work.
+ 	 */
+-	dma_rmb();
++	mb();
+ 	if (block->tx)
+ 		reschedule |= gve_tx_poll(block, -1);
+ 	if (block->rx)
+@@ -220,6 +220,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
+ 		int vecs_left = new_num_ntfy_blks % 2;
+ 
+ 		priv->num_ntfy_blks = new_num_ntfy_blks;
++		priv->mgmt_msix_idx = priv->num_ntfy_blks;
+ 		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
+ 						vecs_per_type);
+ 		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
+@@ -300,20 +301,22 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
+ {
+ 	int i;
+ 
+-	/* Free the irqs */
+-	for (i = 0; i < priv->num_ntfy_blks; i++) {
+-		struct gve_notify_block *block = &priv->ntfy_blocks[i];
+-		int msix_idx = i;
++	if (priv->msix_vectors) {
++		/* Free the irqs */
++		for (i = 0; i < priv->num_ntfy_blks; i++) {
++			struct gve_notify_block *block = &priv->ntfy_blocks[i];
++			int msix_idx = i;
+ 
+-		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+-				      NULL);
+-		free_irq(priv->msix_vectors[msix_idx].vector, block);
++			irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
++					      NULL);
++			free_irq(priv->msix_vectors[msix_idx].vector, block);
++		}
++		free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
+ 	}
+ 	dma_free_coherent(&priv->pdev->dev,
+ 			  priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
+ 			  priv->ntfy_blocks, priv->ntfy_block_bus);
+ 	priv->ntfy_blocks = NULL;
+-	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
+ 	pci_disable_msix(priv->pdev);
+ 	kvfree(priv->msix_vectors);
+ 	priv->msix_vectors = NULL;
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index 6938f3a939d64..3e04a3973d680 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -212,10 +212,11 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
+ 	tx->dev = &priv->pdev->dev;
+ 	if (!tx->raw_addressing) {
+ 		tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
+-
++		if (!tx->tx_fifo.qpl)
++			goto abort_with_desc;
+ 		/* map Tx FIFO */
+ 		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
+-			goto abort_with_desc;
++			goto abort_with_qpl;
+ 	}
+ 
+ 	tx->q_resources =
+@@ -236,6 +237,9 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
+ abort_with_fifo:
+ 	if (!tx->raw_addressing)
+ 		gve_tx_fifo_release(priv, &tx->tx_fifo);
++abort_with_qpl:
++	if (!tx->raw_addressing)
++		gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
+ abort_with_desc:
+ 	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
+ 	tx->desc = NULL;
+@@ -589,7 +593,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
+ 	struct gve_tx_ring *tx;
+ 	int nsegs;
+ 
+-	WARN(skb_get_queue_mapping(skb) > priv->tx_cfg.num_queues,
++	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
+ 	     "skb queue index out of range");
+ 	tx = &priv->tx[skb_get_queue_mapping(skb)];
+ 	if (unlikely(gve_maybe_stop_tx(tx, skb))) {
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 0f70158c2551d..49ea1e4a08a99 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -265,22 +265,17 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
+ 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ 	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
+ 	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
++	struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
++	struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
+ 
+-	/* initialize the configuration for interrupt coalescing.
+-	 * 1. GL (Interrupt Gap Limiter)
+-	 * 2. RL (Interrupt Rate Limiter)
+-	 * 3. QL (Interrupt Quantity Limiter)
+-	 *
+-	 * Default: enable interrupt coalescing self-adaptive and GL
+-	 */
+-	tx_coal->adapt_enable = 1;
+-	rx_coal->adapt_enable = 1;
++	tx_coal->adapt_enable = ptx_coal->adapt_enable;
++	rx_coal->adapt_enable = prx_coal->adapt_enable;
+ 
+-	tx_coal->int_gl = HNS3_INT_GL_50K;
+-	rx_coal->int_gl = HNS3_INT_GL_50K;
++	tx_coal->int_gl = ptx_coal->int_gl;
++	rx_coal->int_gl = prx_coal->int_gl;
+ 
+-	rx_coal->flow_level = HNS3_FLOW_LOW;
+-	tx_coal->flow_level = HNS3_FLOW_LOW;
++	rx_coal->flow_level = prx_coal->flow_level;
++	tx_coal->flow_level = ptx_coal->flow_level;
+ 
+ 	/* device version above V3(include V3), GL can configure 1us
+ 	 * unit, so uses 1us unit.
+@@ -295,8 +290,8 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
+ 		rx_coal->ql_enable = 1;
+ 		tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
+ 		rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
+-		tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+-		rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
++		tx_coal->int_ql = ptx_coal->int_ql;
++		rx_coal->int_ql = prx_coal->int_ql;
+ 	}
+ }
+ 
+@@ -845,8 +840,6 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
+ 	      l4.udp->dest == htons(4790))))
+ 		return false;
+ 
+-	skb_checksum_help(skb);
+-
+ 	return true;
+ }
+ 
+@@ -924,8 +917,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ 			/* the stack computes the IP header already,
+ 			 * driver calculate l4 checksum when not TSO.
+ 			 */
+-			skb_checksum_help(skb);
+-			return 0;
++			return skb_checksum_help(skb);
+ 		}
+ 
+ 		hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
+@@ -970,7 +962,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ 		break;
+ 	case IPPROTO_UDP:
+ 		if (hns3_tunnel_csum_bug(skb))
+-			break;
++			return skb_checksum_help(skb);
+ 
+ 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+@@ -995,8 +987,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ 		/* the stack computes the IP header already,
+ 		 * driver calculate l4 checksum when not TSO.
+ 		 */
+-		skb_checksum_help(skb);
+-		return 0;
++		return skb_checksum_help(skb);
+ 	}
+ 
+ 	return 0;
+@@ -3805,6 +3796,34 @@ map_ring_fail:
+ 	return ret;
+ }
+ 
++static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
++{
++	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
++	struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
++	struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
++
++	/* initialize the configuration for interrupt coalescing.
++	 * 1. GL (Interrupt Gap Limiter)
++	 * 2. RL (Interrupt Rate Limiter)
++	 * 3. QL (Interrupt Quantity Limiter)
++	 *
++	 * Default: enable self-adaptive interrupt coalescing and GL
++	 */
++	tx_coal->adapt_enable = 1;
++	rx_coal->adapt_enable = 1;
++
++	tx_coal->int_gl = HNS3_INT_GL_50K;
++	rx_coal->int_gl = HNS3_INT_GL_50K;
++
++	rx_coal->flow_level = HNS3_FLOW_LOW;
++	tx_coal->flow_level = HNS3_FLOW_LOW;
++
++	if (ae_dev->dev_specs.int_ql_max) {
++		tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
++		rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
++	}
++}
++
+ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
+ {
+ 	struct hnae3_handle *h = priv->ae_handle;
+@@ -4265,6 +4284,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
+ 		goto out_get_ring_cfg;
+ 	}
+ 
++	hns3_nic_init_coal_cfg(priv);
++
+ 	ret = hns3_nic_alloc_vector_data(priv);
+ 	if (ret) {
+ 		ret = -ENOMEM;
+@@ -4287,12 +4308,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
+ 	if (ret)
+ 		goto out_init_phy;
+ 
+-	ret = register_netdev(netdev);
+-	if (ret) {
+-		dev_err(priv->dev, "probe register netdev fail!\n");
+-		goto out_reg_netdev_fail;
+-	}
+-
+ 	/* the device can work without cpu rmap, only aRFS needs it */
+ 	ret = hns3_set_rx_cpu_rmap(netdev);
+ 	if (ret)
+@@ -4325,17 +4340,23 @@ static int hns3_client_init(struct hnae3_handle *handle)
+ 	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ 		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+ 
++	ret = register_netdev(netdev);
++	if (ret) {
++		dev_err(priv->dev, "probe register netdev fail!\n");
++		goto out_reg_netdev_fail;
++	}
++
+ 	if (netif_msg_drv(handle))
+ 		hns3_info_show(priv);
+ 
+ 	return ret;
+ 
++out_reg_netdev_fail:
++	hns3_dbg_uninit(handle);
+ out_client_start:
+ 	hns3_free_rx_cpu_rmap(netdev);
+ 	hns3_nic_uninit_irq(priv);
+ out_init_irq_fail:
+-	unregister_netdev(netdev);
+-out_reg_netdev_fail:
+ 	hns3_uninit_phy(netdev);
+ out_init_phy:
+ 	hns3_uninit_all_ring(priv);
+@@ -4543,31 +4564,6 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
+ 	return 0;
+ }
+ 
+-static void hns3_store_coal(struct hns3_nic_priv *priv)
+-{
+-	/* ethtool only support setting and querying one coal
+-	 * configuration for now, so save the vector 0' coal
+-	 * configuration here in order to restore it.
+-	 */
+-	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
+-	       sizeof(struct hns3_enet_coalesce));
+-	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
+-	       sizeof(struct hns3_enet_coalesce));
+-}
+-
+-static void hns3_restore_coal(struct hns3_nic_priv *priv)
+-{
+-	u16 vector_num = priv->vector_num;
+-	int i;
+-
+-	for (i = 0; i < vector_num; i++) {
+-		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
+-		       sizeof(struct hns3_enet_coalesce));
+-		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
+-		       sizeof(struct hns3_enet_coalesce));
+-	}
+-}
+-
+ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
+ {
+ 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+@@ -4626,8 +4622,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
+ 	if (ret)
+ 		goto err_put_ring;
+ 
+-	hns3_restore_coal(priv);
+-
+ 	ret = hns3_nic_init_vector_data(priv);
+ 	if (ret)
+ 		goto err_dealloc_vector;
+@@ -4693,8 +4687,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
+ 
+ 	hns3_nic_uninit_vector_data(priv);
+ 
+-	hns3_store_coal(priv);
+-
+ 	hns3_nic_dealloc_vector_data(priv);
+ 
+ 	hns3_uninit_all_ring(priv);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index d20f2e2460178..5b051df3429f8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -1119,50 +1119,32 @@ static void hns3_get_channels(struct net_device *netdev,
+ 		h->ae_algo->ops->get_channels(h, ch);
+ }
+ 
+-static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
+-				       struct ethtool_coalesce *cmd)
++static int hns3_get_coalesce(struct net_device *netdev,
++			     struct ethtool_coalesce *cmd)
+ {
+-	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ 	struct hns3_nic_priv *priv = netdev_priv(netdev);
++	struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
++	struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+ 	struct hnae3_handle *h = priv->ae_handle;
+-	u16 queue_num = h->kinfo.num_tqps;
+ 
+ 	if (hns3_nic_resetting(netdev))
+ 		return -EBUSY;
+ 
+-	if (queue >= queue_num) {
+-		netdev_err(netdev,
+-			   "Invalid queue value %u! Queue max id=%u\n",
+-			   queue, queue_num - 1);
+-		return -EINVAL;
+-	}
+-
+-	tx_vector = priv->ring[queue].tqp_vector;
+-	rx_vector = priv->ring[queue_num + queue].tqp_vector;
++	cmd->use_adaptive_tx_coalesce = tx_coal->adapt_enable;
++	cmd->use_adaptive_rx_coalesce = rx_coal->adapt_enable;
+ 
+-	cmd->use_adaptive_tx_coalesce =
+-			tx_vector->tx_group.coal.adapt_enable;
+-	cmd->use_adaptive_rx_coalesce =
+-			rx_vector->rx_group.coal.adapt_enable;
+-
+-	cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
+-	cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
++	cmd->tx_coalesce_usecs = tx_coal->int_gl;
++	cmd->rx_coalesce_usecs = rx_coal->int_gl;
+ 
+ 	cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ 	cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ 
+-	cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
+-	cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
++	cmd->tx_max_coalesced_frames = tx_coal->int_ql;
++	cmd->rx_max_coalesced_frames = rx_coal->int_ql;
+ 
+ 	return 0;
+ }
+ 
+-static int hns3_get_coalesce(struct net_device *netdev,
+-			     struct ethtool_coalesce *cmd)
+-{
+-	return hns3_get_coalesce_per_queue(netdev, 0, cmd);
+-}
+-
+ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
+ 				       struct ethtool_coalesce *cmd)
+ {
+@@ -1277,19 +1259,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
+ 		return ret;
+ 	}
+ 
+-	ret = hns3_check_ql_coalesce_param(netdev, cmd);
+-	if (ret)
+-		return ret;
+-
+-	if (cmd->use_adaptive_tx_coalesce == 1 ||
+-	    cmd->use_adaptive_rx_coalesce == 1) {
+-		netdev_info(netdev,
+-			    "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
+-			    cmd->use_adaptive_tx_coalesce,
+-			    cmd->use_adaptive_rx_coalesce);
+-	}
+-
+-	return 0;
++	return hns3_check_ql_coalesce_param(netdev, cmd);
+ }
+ 
+ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
+@@ -1335,6 +1305,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
+ 			     struct ethtool_coalesce *cmd)
+ {
+ 	struct hnae3_handle *h = hns3_get_handle(netdev);
++	struct hns3_nic_priv *priv = netdev_priv(netdev);
++	struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
++	struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+ 	u16 queue_num = h->kinfo.num_tqps;
+ 	int ret;
+ 	int i;
+@@ -1349,6 +1322,15 @@ static int hns3_set_coalesce(struct net_device *netdev,
+ 	h->kinfo.int_rl_setting =
+ 		hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ 
++	tx_coal->adapt_enable = cmd->use_adaptive_tx_coalesce;
++	rx_coal->adapt_enable = cmd->use_adaptive_rx_coalesce;
++
++	tx_coal->int_gl = cmd->tx_coalesce_usecs;
++	rx_coal->int_gl = cmd->rx_coalesce_usecs;
++
++	tx_coal->int_ql = cmd->tx_max_coalesced_frames;
++	rx_coal->int_ql = cmd->rx_max_coalesced_frames;
++
+ 	for (i = 0; i < queue_num; i++)
+ 		hns3_set_coalesce_per_queue(netdev, cmd, i);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+index c3bb16b1f0600..9265a00de18ed 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -694,7 +694,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
+ 	unsigned int flag;
+ 	int ret = 0;
+ 
+-	memset(&resp_msg, 0, sizeof(resp_msg));
+ 	/* handle all the mailbox requests in the queue */
+ 	while (!hclge_cmd_crq_empty(&hdev->hw)) {
+ 		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+@@ -722,6 +721,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
+ 
+ 		trace_hclge_pf_mbx_get(hdev, req);
+ 
++		/* clear the resp_msg before processing every mailbox message */
++		memset(&resp_msg, 0, sizeof(resp_msg));
++
+ 		switch (req->msg.code) {
+ 		case HCLGE_MBX_MAP_RING_TO_VECTOR:
+ 			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+index 988db46bff0ee..214a38de3f415 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+@@ -467,12 +467,16 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
+ 	return err;
+ }
+ 
+-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
++static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
+ {
+ 	struct ixgbe_hw *hw = &adapter->hw;
+-	int max_frame = msgbuf[1];
+ 	u32 max_frs;
+ 
++	if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
++		e_err(drv, "VF max_frame %d out of range\n", max_frame);
++		return -EINVAL;
++	}
++
+ 	/*
+ 	 * For 82599EB we have to keep all PFs and VFs operating with
+ 	 * the same max_frame value in order to avoid sending an oversize
+@@ -533,12 +537,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+ 		}
+ 	}
+ 
+-	/* MTU < 68 is an error and causes problems on some kernels */
+-	if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+-		e_err(drv, "VF max_frame %d out of range\n", max_frame);
+-		return -EINVAL;
+-	}
+-
+ 	/* pull current max frame size from hardware */
+ 	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+ 	max_frs &= IXGBE_MHADD_MFS_MASK;
+@@ -1249,7 +1247,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+ 		retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
+ 		break;
+ 	case IXGBE_VF_SET_LPE:
+-		retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
++		retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
+ 		break;
+ 	case IXGBE_VF_SET_MACVLAN:
+ 		retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
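
The ixgbe change narrows ixgbe_set_vf_lpe()'s contract -- it now receives just the VF-supplied frame size rather than the whole mailbox buffer -- and hoists the range check (now including ETH_MIN_MTU) to the top, so an out-of-range value from a guest is rejected before the 82599 workaround logic or any register write consumes it. A hypothetical reduction of validate-before-use:

#include <errno.h>

#define MIN_FRAME	68	/* ETH_MIN_MTU */
#define MAX_JUMBO	9728	/* hypothetical device limit */

/* Bound the untrusted, guest-supplied size before acting on it. */
static int set_vf_frame_size(unsigned int max_frame)
{
	if (max_frame < MIN_FRAME || max_frame > MAX_JUMBO)
		return -EINVAL;

	/* ... only now touch hardware state such as MAXFRS ... */
	return 0;
}
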
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 51ed8a54d3801..135ba5b6ae980 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
+ 
+ static int xrx200_alloc_skb(struct xrx200_chan *ch)
+ {
++	dma_addr_t mapping;
+ 	int ret = 0;
+ 
+ 	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
+@@ -163,16 +164,17 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
+ 		goto skip;
+ 	}
+ 
+-	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
+-			ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
+-			DMA_FROM_DEVICE);
+-	if (unlikely(dma_mapping_error(ch->priv->dev,
+-				       ch->dma.desc_base[ch->dma.desc].addr))) {
++	mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
++				 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
++	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+ 		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ 		ret = -ENOMEM;
+ 		goto skip;
+ 	}
+ 
++	ch->dma.desc_base[ch->dma.desc].addr = mapping;
++	/* Make sure the address is written before we give it to HW */
++	wmb();
+ skip:
+ 	ch->dma.desc_base[ch->dma.desc].ctl =
+ 		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+@@ -196,6 +198,8 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
+ 	ch->dma.desc %= LTQ_DESC_NUM;
+ 
+ 	if (ret) {
++		ch->skb[ch->dma.desc] = skb;
++		net_dev->stats.rx_dropped++;
+ 		netdev_err(net_dev, "failed to allocate new rx buffer\n");
+ 		return ret;
+ 	}
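
The xrx200 hunk combines two defensive patterns: dma_map_single() lands in a local that is checked for mapping errors before the descriptor ever sees it, and a wmb() orders the address write ahead of the ctl write that flips LTQ_DMA_OWN, so the engine can never fetch a half-written descriptor. A userspace analogue of the publish step, using a C11 release store where the kernel uses the barrier:

#include <stdatomic.h>
#include <stdint.h>

#define DESC_OWN 0x80000000u	/* hypothetical "device owns this" bit */

struct desc {
	uint64_t addr;
	_Atomic uint32_t ctl;
};

/* Fill every field first, then flip ownership with release ordering,
 * so a consumer that sees DESC_OWN also sees the final addr. */
static void publish(struct desc *d, uint64_t mapping, uint32_t len)
{
	d->addr = mapping;
	atomic_store_explicit(&d->ctl, DESC_OWN | len,
			      memory_order_release);
}
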
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+index 8edba5ea90f03..4a61c90003b5e 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+@@ -993,6 +993,14 @@ enum mvpp22_ptp_packet_format {
+ 
+ #define MVPP2_DESC_DMA_MASK	DMA_BIT_MASK(40)
+ 
++/* Buffer header info bits */
++#define MVPP2_B_HDR_INFO_MC_ID_MASK	0xfff
++#define MVPP2_B_HDR_INFO_MC_ID(info)	((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
++#define MVPP2_B_HDR_INFO_LAST_OFFS	12
++#define MVPP2_B_HDR_INFO_LAST_MASK	BIT(12)
++#define MVPP2_B_HDR_INFO_IS_LAST(info) \
++	   (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
++
+ struct mvpp2_tai;
+ 
+ /* Definitions */
+@@ -1002,6 +1010,20 @@ struct mvpp2_rss_table {
+ 	u32 indir[MVPP22_RSS_TABLE_ENTRIES];
+ };
+ 
++struct mvpp2_buff_hdr {
++	__le32 next_phys_addr;
++	__le32 next_dma_addr;
++	__le16 byte_count;
++	__le16 info;
++	__le16 reserved1;	/* bm_qset (for future use, BM) */
++	u8 next_phys_addr_high;
++	u8 next_dma_addr_high;
++	__le16 reserved2;
++	__le16 reserved3;
++	__le16 reserved4;
++	__le16 reserved5;
++};
++
+ /* Shared Packet Processor resources */
+ struct mvpp2 {
+ 	/* Shared registers' base addresses */
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 1767c60056c5a..6c81e4f175ac6 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -3840,6 +3840,35 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
+ 	return ret;
+ }
+ 
++static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
++				    int pool, u32 rx_status)
++{
++	phys_addr_t phys_addr, phys_addr_next;
++	dma_addr_t dma_addr, dma_addr_next;
++	struct mvpp2_buff_hdr *buff_hdr;
++
++	phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
++	dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
++
++	do {
++		buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
++
++		phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
++		dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
++
++		if (port->priv->hw_version >= MVPP22) {
++			phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
++			dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
++		}
++
++		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
++
++		phys_addr = phys_addr_next;
++		dma_addr = dma_addr_next;
++
++	} while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
++}
++
+ /* Main rx processing */
+ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
+ 		    int rx_todo, struct mvpp2_rx_queue *rxq)
+@@ -3886,14 +3915,6 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
+ 			MVPP2_RXD_BM_POOL_ID_OFFS;
+ 		bm_pool = &port->priv->bm_pools[pool];
+ 
+-		/* In case of an error, release the requested buffer pointer
+-		 * to the Buffer Manager. This request process is controlled
+-		 * by the hardware, and the information about the buffer is
+-		 * comprised by the RX descriptor.
+-		 */
+-		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+-			goto err_drop_frame;
+-
+ 		if (port->priv->percpu_pools) {
+ 			pp = port->priv->page_pool[pool];
+ 			dma_dir = page_pool_get_dma_dir(pp);
+@@ -3905,6 +3926,18 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
+ 					rx_bytes + MVPP2_MH_SIZE,
+ 					dma_dir);
+ 
++		/* Buffer header not supported */
++		if (rx_status & MVPP2_RXD_BUF_HDR)
++			goto err_drop_frame;
++
++		/* In case of an error, release the requested buffer pointer
++		 * to the Buffer Manager. This request process is controlled
++		 * by the hardware, and the information about the buffer is
++		 * contained in the RX descriptor.
++		 */
++		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
++			goto err_drop_frame;
++
+ 		/* Prefetch header */
+ 		prefetch(data);
+ 
+@@ -3986,7 +4019,10 @@ err_drop_frame:
+ 		dev->stats.rx_errors++;
+ 		mvpp2_rx_error(port, rx_desc);
+ 		/* Return the buffer to the pool */
+-		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
++		if (rx_status & MVPP2_RXD_BUF_HDR)
++			mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
++		else
++			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+ 	}
+ 
+ 	rcu_read_unlock();
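
mvpp2_buff_hdr_pool_put() above walks a chain the hardware builds out of per-buffer headers: each header names the next buffer's DMA/phys address, and a bit in info marks the final link, whose buffer is still returned to the pool before the loop stops. A standalone sketch of the same walk over an in-memory list, with phys_to_virt() and mvpp2_bm_pool_put() replaced by plain pointers and a counter (struct fields trimmed to what the loop touches):

#include <stdint.h>
#include <stdio.h>

#define B_HDR_INFO_LAST_MASK	(1u << 12)
#define B_HDR_INFO_IS_LAST(i)	(((i) & B_HDR_INFO_LAST_MASK) >> 12)

struct buff_hdr {
	struct buff_hdr *next;	/* stands in for next_phys/dma_addr */
	uint16_t info;
};

static int pool_put_chain(struct buff_hdr *hdr)
{
	struct buff_hdr *next;
	uint16_t info;
	int put = 0;

	do {
		info = hdr->info;	/* read before the buffer is recycled */
		next = hdr->next;
		put++;			/* stands in for mvpp2_bm_pool_put() */
		hdr = next;
	} while (!B_HDR_INFO_IS_LAST(info));

	return put;
}

int main(void)
{
	struct buff_hdr c = { NULL, B_HDR_INFO_LAST_MASK };
	struct buff_hdr b = { &c, 0 };
	struct buff_hdr a = { &b, 0 };

	printf("buffers returned: %d\n", pool_put_chain(&a));	/* 3 */
	return 0;
}
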
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+index f4962a97a0757..9d9a2e438acfc 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+@@ -786,6 +786,10 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
+ 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ 		return -EOPNOTSUPP;
+ 
++	if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
++	    *rss_context >= MAX_RSS_GROUPS)
++		return -EINVAL;
++
+ 	rss = &pfvf->hw.rss_info;
+ 
+ 	if (!rss->enable) {
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index bcd5e7ae8482f..10e30efa775d4 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -679,32 +679,53 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
+ void mtk_stats_update_mac(struct mtk_mac *mac)
+ {
+ 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
+-	unsigned int base = MTK_GDM1_TX_GBCNT;
+-	u64 stats;
+-
+-	base += hw_stats->reg_offset;
++	struct mtk_eth *eth = mac->hw;
+ 
+ 	u64_stats_update_begin(&hw_stats->syncp);
+ 
+-	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
+-	stats =  mtk_r32(mac->hw, base + 0x04);
+-	if (stats)
+-		hw_stats->rx_bytes += (stats << 32);
+-	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
+-	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
+-	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
+-	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
+-	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
+-	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
+-	hw_stats->rx_flow_control_packets +=
+-					mtk_r32(mac->hw, base + 0x24);
+-	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
+-	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
+-	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
+-	stats =  mtk_r32(mac->hw, base + 0x34);
+-	if (stats)
+-		hw_stats->tx_bytes += (stats << 32);
+-	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
++	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
++		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
++		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
++		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
++		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
++		hw_stats->rx_checksum_errors +=
++			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
++	} else {
++		unsigned int offs = hw_stats->reg_offset;
++		u64 stats;
++
++		hw_stats->rx_bytes += mtk_r32(mac->hw,
++					      MTK_GDM1_RX_GBCNT_L + offs);
++		stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
++		if (stats)
++			hw_stats->rx_bytes += (stats << 32);
++		hw_stats->rx_packets +=
++			mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
++		hw_stats->rx_overflow +=
++			mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
++		hw_stats->rx_fcs_errors +=
++			mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
++		hw_stats->rx_short_errors +=
++			mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
++		hw_stats->rx_long_errors +=
++			mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
++		hw_stats->rx_checksum_errors +=
++			mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
++		hw_stats->rx_flow_control_packets +=
++			mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
++		hw_stats->tx_skip +=
++			mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
++		hw_stats->tx_collisions +=
++			mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
++		hw_stats->tx_bytes +=
++			mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
++		stats =  mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
++		if (stats)
++			hw_stats->tx_bytes += (stats << 32);
++		hw_stats->tx_packets +=
++			mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
++	}
++
+ 	u64_stats_update_end(&hw_stats->syncp);
+ }
+ 
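
For the non-MT7628 path above, each 64-bit byte counter is split across a low and a high 32-bit register, and the high word is folded in only when it is non-zero. A compileable sketch of that composition; mtk_r32() is stubbed with fixed values, and the register offsets are the ones the patch defines:

#include <stdint.h>
#include <stdio.h>

#define GDM1_TX_GBCNT_L	0x2430
#define GDM1_TX_GBCNT_H	0x2434

/* Stand-in for mtk_r32(); the real registers are clear-on-read. */
static uint32_t r32(unsigned int reg)
{
	switch (reg) {
	case GDM1_TX_GBCNT_L: return 0x89abcdefu;
	case GDM1_TX_GBCNT_H: return 0x1u;
	default:	      return 0;
	}
}

int main(void)
{
	uint64_t tx_bytes = 0;
	uint64_t hi;

	tx_bytes += r32(GDM1_TX_GBCNT_L);
	hi = r32(GDM1_TX_GBCNT_H);
	if (hi)		/* fold in the overflow word only when it latched */
		tx_bytes += hi << 32;

	printf("tx_bytes = %#llx\n", (unsigned long long)tx_bytes);	/* 0x189abcdef */
	return 0;
}
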
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index c47272100615d..e02e21cba8289 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -267,8 +267,21 @@
+ /* QDMA FQ Free Page Buffer Length Register */
+ #define MTK_QDMA_FQ_BLEN	0x1B2C
+ 
+-/* GMA1 Received Good Byte Count Register */
+-#define MTK_GDM1_TX_GBCNT	0x2400

++/* GMA1 counter / statistics register */
++#define MTK_GDM1_RX_GBCNT_L	0x2400
++#define MTK_GDM1_RX_GBCNT_H	0x2404
++#define MTK_GDM1_RX_GPCNT	0x2408
++#define MTK_GDM1_RX_OERCNT	0x2410
++#define MTK_GDM1_RX_FERCNT	0x2414
++#define MTK_GDM1_RX_SERCNT	0x2418
++#define MTK_GDM1_RX_LENCNT	0x241c
++#define MTK_GDM1_RX_CERCNT	0x2420
++#define MTK_GDM1_RX_FCCNT	0x2424
++#define MTK_GDM1_TX_SKIPCNT	0x2428
++#define MTK_GDM1_TX_COLCNT	0x242c
++#define MTK_GDM1_TX_GBCNT_L	0x2430
++#define MTK_GDM1_TX_GBCNT_H	0x2434
++#define MTK_GDM1_TX_GPCNT	0x2438
+ #define MTK_STAT_OFFSET		0x40
+ 
+ /* QDMA descriptor txd4 */
+@@ -484,6 +497,13 @@
+ #define MT7628_SDM_MAC_ADRL	(MT7628_SDM_OFFSET + 0x0c)
+ #define MT7628_SDM_MAC_ADRH	(MT7628_SDM_OFFSET + 0x10)
+ 
++/* Counter / stat register */
++#define MT7628_SDM_TPCNT	(MT7628_SDM_OFFSET + 0x100)
++#define MT7628_SDM_TBCNT	(MT7628_SDM_OFFSET + 0x104)
++#define MT7628_SDM_RPCNT	(MT7628_SDM_OFFSET + 0x108)
++#define MT7628_SDM_RBCNT	(MT7628_SDM_OFFSET + 0x10c)
++#define MT7628_SDM_CS_ERR	(MT7628_SDM_OFFSET + 0x110)
++
+ struct mtk_rx_dma {
+ 	unsigned int rxd1;
+ 	unsigned int rxd2;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index 1434df66fcf2e..3616b77caa0ad 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -2027,8 +2027,6 @@ static int mlx4_en_set_tunable(struct net_device *dev,
+ 	return ret;
+ }
+ 
+-#define MLX4_EEPROM_PAGE_LEN 256
+-
+ static int mlx4_en_get_module_info(struct net_device *dev,
+ 				   struct ethtool_modinfo *modinfo)
+ {
+@@ -2063,7 +2061,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
+ 		break;
+ 	case MLX4_MODULE_ID_SFP:
+ 		modinfo->type = ETH_MODULE_SFF_8472;
+-		modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
++		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
+index ba6ac31a339dc..256a06b3c096b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/port.c
++++ b/drivers/net/ethernet/mellanox/mlx4/port.c
+@@ -1973,6 +1973,7 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
+ #define I2C_ADDR_LOW  0x50
+ #define I2C_ADDR_HIGH 0x51
+ #define I2C_PAGE_SIZE 256
++#define I2C_HIGH_PAGE_SIZE 128
+ 
+ /* Module Info Data */
+ struct mlx4_cable_info {
+@@ -2026,6 +2027,88 @@ static inline const char *cable_info_mad_err_str(u16 mad_status)
+ 	return "Unknown Error";
+ }
+ 
++static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
++{
++	struct mlx4_cmd_mailbox *inbox, *outbox;
++	struct mlx4_mad_ifc *inmad, *outmad;
++	struct mlx4_cable_info *cable_info;
++	int ret;
++
++	inbox = mlx4_alloc_cmd_mailbox(dev);
++	if (IS_ERR(inbox))
++		return PTR_ERR(inbox);
++
++	outbox = mlx4_alloc_cmd_mailbox(dev);
++	if (IS_ERR(outbox)) {
++		mlx4_free_cmd_mailbox(dev, inbox);
++		return PTR_ERR(outbox);
++	}
++
++	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
++	outmad = (struct mlx4_mad_ifc *)(outbox->buf);
++
++	inmad->method = 0x1; /* Get */
++	inmad->class_version = 0x1;
++	inmad->mgmt_class = 0x1;
++	inmad->base_version = 0x1;
++	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
++
++	cable_info = (struct mlx4_cable_info *)inmad->data;
++	cable_info->dev_mem_address = 0;
++	cable_info->page_num = 0;
++	cable_info->i2c_addr = I2C_ADDR_LOW;
++	cable_info->size = cpu_to_be16(1);
++
++	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
++			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
++			   MLX4_CMD_NATIVE);
++	if (ret)
++		goto out;
++
++	if (be16_to_cpu(outmad->status)) {
++		/* Mad returned with bad status */
++		ret = be16_to_cpu(outmad->status);
++		mlx4_warn(dev,
++			  "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
++			  0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
++			  cable_info_mad_err_str(ret));
++		ret = -ret;
++		goto out;
++	}
++	cable_info = (struct mlx4_cable_info *)outmad->data;
++	*module_id = cable_info->data[0];
++out:
++	mlx4_free_cmd_mailbox(dev, inbox);
++	mlx4_free_cmd_mailbox(dev, outbox);
++	return ret;
++}
++
++static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
++{
++	*i2c_addr = I2C_ADDR_LOW;
++	*page_num = 0;
++
++	if (*offset < I2C_PAGE_SIZE)
++		return;
++
++	*i2c_addr = I2C_ADDR_HIGH;
++	*offset -= I2C_PAGE_SIZE;
++}
++
++static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
++{
++	/* Offsets 0-255 belong to page 0.
++	 * Offsets 256-639 belong to pages 01, 02, 03.
++	 * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
++	 */
++	if (*offset < I2C_PAGE_SIZE)
++		*page_num = 0;
++	else
++		*page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
++	*i2c_addr = I2C_ADDR_LOW;
++	*offset -= *page_num * I2C_HIGH_PAGE_SIZE;
++}
++
+ /**
+  * mlx4_get_module_info - Read cable module eeprom data
+  * @dev: mlx4_dev.
+@@ -2045,12 +2128,30 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
+ 	struct mlx4_cmd_mailbox *inbox, *outbox;
+ 	struct mlx4_mad_ifc *inmad, *outmad;
+ 	struct mlx4_cable_info *cable_info;
+-	u16 i2c_addr;
++	u8 module_id, i2c_addr, page_num;
+ 	int ret;
+ 
+ 	if (size > MODULE_INFO_MAX_READ)
+ 		size = MODULE_INFO_MAX_READ;
+ 
++	ret = mlx4_get_module_id(dev, port, &module_id);
++	if (ret)
++		return ret;
++
++	switch (module_id) {
++	case MLX4_MODULE_ID_SFP:
++		mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
++		break;
++	case MLX4_MODULE_ID_QSFP:
++	case MLX4_MODULE_ID_QSFP_PLUS:
++	case MLX4_MODULE_ID_QSFP28:
++		mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
++		break;
++	default:
++		mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
++		return -EINVAL;
++	}
++
+ 	inbox = mlx4_alloc_cmd_mailbox(dev);
+ 	if (IS_ERR(inbox))
+ 		return PTR_ERR(inbox);
+@@ -2076,11 +2177,9 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
+ 		 */
+ 		size -= offset + size - I2C_PAGE_SIZE;
+ 
+-	i2c_addr = I2C_ADDR_LOW;
+-
+ 	cable_info = (struct mlx4_cable_info *)inmad->data;
+ 	cable_info->dev_mem_address = cpu_to_be16(offset);
+-	cable_info->page_num = 0;
++	cable_info->page_num = page_num;
+ 	cable_info->i2c_addr = i2c_addr;
+ 	cable_info->size = cpu_to_be16(size);
+ 
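
The two helpers above flatten ethtool's linear EEPROM offset into the (i2c address, page, in-page offset) tuple the MAD expects: SFP modules expose a second 256-byte space at i2c address 0x51, while QSFP modules keep a single i2c address and bank 128-byte upper pages behind page 0. A standalone version of the arithmetic, checkable against the worked example in the comment (offset 400 lands on page 2):

#include <stdint.h>
#include <stdio.h>

#define I2C_ADDR_LOW		0x50
#define I2C_ADDR_HIGH		0x51
#define I2C_PAGE_SIZE		256
#define I2C_HIGH_PAGE_SIZE	128

/* SFP: offsets 0-255 live at i2c address 0x50, 256-511 at 0x51. */
static void sfp_params(uint8_t *i2c_addr, uint8_t *page, uint16_t *offset)
{
	*i2c_addr = I2C_ADDR_LOW;
	*page = 0;
	if (*offset >= I2C_PAGE_SIZE) {
		*i2c_addr = I2C_ADDR_HIGH;
		*offset -= I2C_PAGE_SIZE;
	}
}

/* QSFP: offsets 0-255 are page 0; each further 128 bytes is one upper page. */
static void qsfp_params(uint8_t *i2c_addr, uint8_t *page, uint16_t *offset)
{
	if (*offset < I2C_PAGE_SIZE)
		*page = 0;
	else
		*page = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
	*i2c_addr = I2C_ADDR_LOW;
	*offset -= *page * I2C_HIGH_PAGE_SIZE;
}

int main(void)
{
	uint8_t addr, page;
	uint16_t off = 400;

	qsfp_params(&addr, &page, &off);
	/* Matches the comment above: offset 400 -> page 2, in-page offset 144 */
	printf("qsfp: addr %#x, page %u, offset %u\n", addr, page, off);

	off = 300;
	sfp_params(&addr, &page, &off);
	printf("sfp:  addr %#x, page %u, offset %u\n", addr, page, off);
	return 0;
}
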
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+index 95f2b26a3ee31..9c076aa20306a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+@@ -223,6 +223,8 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
+ 	rpriv = priv->ppriv;
+ 	fwd_vport_num = rpriv->rep->vport;
+ 	lag_dev = netdev_master_upper_dev_get(netdev);
++	if (!lag_dev)
++		return;
+ 
+ 	netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
+ 		   lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index 065126370acd2..96ba027dbef3d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -645,7 +645,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
+ 	}
+ 
+ 	if (chain) {
+-		tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
++		tc_skb_ext = tc_skb_ext_alloc(skb);
+ 		if (!tc_skb_ext) {
+ 			WARN_ON(1);
+ 			return false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+index 9f16ad2c0710b..1560fcbf4ac7c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+@@ -1504,7 +1504,7 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
+ 
+ 	fen_info = container_of(info, struct fib_entry_notifier_info, info);
+ 	fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
+-	if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
++	if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ 	    fen_info->dst_len != 32)
+ 		return NULL;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+index 16ce7756ac43f..bbe9b1dbb7aca 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+@@ -35,6 +35,7 @@
+ #include <linux/ipv6.h>
+ #include <linux/tcp.h>
+ #include <linux/mlx5/fs.h>
++#include <linux/mlx5/mpfs.h>
+ #include "en.h"
+ #include "lib/mpfs.h"
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 5db63b9f3b70d..99dc9f2beed5b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3027,7 +3027,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
+ 	int err;
+ 
+ 	old_num_txqs = netdev->real_num_tx_queues;
+-	old_ntc = netdev->num_tc;
++	old_ntc = netdev->num_tc ? : 1;
+ 
+ 	nch = priv->channels.params.num_channels;
+ 	ntc = priv->channels.params.num_tc;
+@@ -5604,6 +5604,11 @@ static void mlx5e_update_features(struct net_device *netdev)
+ 	rtnl_unlock();
+ }
+ 
++static void mlx5e_reset_channels(struct net_device *netdev)
++{
++	netdev_reset_tc(netdev);
++}
++
+ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
+ {
+ 	const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
+@@ -5658,6 +5663,7 @@ err_cleanup_tx:
+ 	profile->cleanup_tx(priv);
+ 
+ out:
++	mlx5e_reset_channels(priv->netdev);
+ 	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ 	cancel_work_sync(&priv->update_stats_work);
+ 	return err;
+@@ -5675,6 +5681,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
+ 
+ 	profile->cleanup_rx(priv);
+ 	profile->cleanup_tx(priv);
++	mlx5e_reset_channels(priv->netdev);
+ 	cancel_work_sync(&priv->update_stats_work);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index d675107d9ecab..78a1403c98026 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1276,10 +1276,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ 		      struct netlink_ext_ack *extack)
+ {
+ 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+-	struct net_device *out_dev, *encap_dev = NULL;
+ 	struct mlx5e_tc_flow_parse_attr *parse_attr;
+ 	struct mlx5_flow_attr *attr = flow->attr;
+ 	bool vf_tun = false, encap_valid = true;
++	struct net_device *encap_dev = NULL;
+ 	struct mlx5_esw_flow_attr *esw_attr;
+ 	struct mlx5_fc *counter = NULL;
+ 	struct mlx5e_rep_priv *rpriv;
+@@ -1325,16 +1325,22 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ 	esw_attr = attr->esw_attr;
+ 
+ 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
++		struct net_device *out_dev;
+ 		int mirred_ifindex;
+ 
+ 		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ 			continue;
+ 
+ 		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
+-		out_dev = __dev_get_by_index(dev_net(priv->netdev),
+-					     mirred_ifindex);
++		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
++		if (!out_dev) {
++			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
++			err = -ENODEV;
++			goto err_out;
++		}
+ 		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
+ 					 extack, &encap_dev, &encap_valid);
++		dev_put(out_dev);
+ 		if (err)
+ 			goto err_out;
+ 
+@@ -1347,6 +1353,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ 		esw_attr->dests[out_index].mdev = out_priv->mdev;
+ 	}
+ 
++	if (vf_tun && esw_attr->out_count > 1) {
++		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
++		err = -EOPNOTSUPP;
++		goto err_out;
++	}
++
+ 	err = mlx5_eswitch_add_vlan_action(esw, attr);
+ 	if (err)
+ 		goto err_out;
+@@ -3424,8 +3436,12 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
+ 	if (err)
+ 		return err;
+ 
+-	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
+-					dev_get_iflink(vlan_dev));
++	rcu_read_lock();
++	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
++	rcu_read_unlock();
++	if (!*out_dev)
++		return -ENODEV;
++
+ 	if (is_vlan_dev(*out_dev))
+ 		err = add_vlan_push_action(priv, attr, out_dev, action);
+ 
+@@ -4908,7 +4924,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
+ 	}
+ 
+ 	if (chain) {
+-		tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
++		tc_skb_ext = tc_skb_ext_alloc(skb);
+ 		if (WARN_ON(!tc_skb_ext))
+ 			return false;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index aba17835465b4..2c6d95900e3c9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -35,6 +35,7 @@
+ #include <linux/mlx5/mlx5_ifc.h>
+ #include <linux/mlx5/vport.h>
+ #include <linux/mlx5/fs.h>
++#include <linux/mlx5/mpfs.h>
+ #include "esw/acl/lgcy.h"
+ #include "mlx5_core.h"
+ #include "lib/eq.h"
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+index ec679560a95d0..1cbb330b9f42b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+@@ -76,10 +76,11 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
+ 	/* As this is the terminating action, the termination table is the
+ 	 * same prio as the slow path
+ 	 */
+-	ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
++	ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED |
+ 			MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+-	ft_attr.prio = FDB_SLOW_PATH;
++	ft_attr.prio = FDB_TC_OFFLOAD;
+ 	ft_attr.max_fte = 1;
++	ft_attr.level = 1;
+ 	ft_attr.autogroup.max_num_groups = 1;
+ 	tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+ 	if (IS_ERR(tt->termtbl)) {
+@@ -171,19 +172,6 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
+ 	}
+ }
+ 
+-static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
+-{
+-	switch (rt->reformat_type) {
+-	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+-	case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+-	case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+-	case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+-		return true;
+-	default:
+-		return false;
+-	}
+-}
+-
+ static void
+ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
+ 				  struct mlx5_flow_act *dst)
+@@ -201,14 +189,6 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
+ 			memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+ 		}
+ 	}
+-
+-	if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+-	    mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
+-		src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+-		dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+-		dst->pkt_reformat = src->pkt_reformat;
+-		src->pkt_reformat = NULL;
+-	}
+ }
+ 
+ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
+@@ -237,6 +217,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ 	int i;
+ 
+ 	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
++	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
+ 	    attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
+ 	    !mlx5_eswitch_offload_is_uplink_port(esw, spec))
+ 		return false;
+@@ -278,6 +259,14 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
+ 		if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ 			continue;
+ 
++		if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
++			term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
++			term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
++		} else {
++			term_tbl_act.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
++			term_tbl_act.pkt_reformat = NULL;
++		}
++
+ 		/* get the terminating table for the action list */
+ 		tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
+ 						     &dest[i], attr);
+@@ -299,6 +288,9 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
+ 		goto revert_changes;
+ 
+ 	/* create the FTE */
++	flow_act->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
++	flow_act->pkt_reformat = NULL;
++	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ 	rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
+ 	if (IS_ERR(rule))
+ 		goto revert_changes;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+index 88e58ac902def..15c3a9058e728 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+@@ -307,6 +307,11 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
+ 	struct lag_mp *mp = &ldev->lag_mp;
+ 	int err;
+ 
++	/* always clear mfi, as it might become stale when a route delete event
++	 * has been missed
++	 */
++	mp->mfi = NULL;
++
+ 	if (mp->fib_nb.notifier_call)
+ 		return 0;
+ 
+@@ -335,4 +340,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
+ 	unregister_fib_notifier(&init_net, &mp->fib_nb);
+ 	destroy_workqueue(mp->wq);
+ 	mp->fib_nb.notifier_call = NULL;
++	mp->mfi = NULL;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+index fd8449ff9e176..839a01da110f3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+@@ -33,6 +33,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/mlx5/driver.h>
+ #include <linux/mlx5/mlx5_ifc.h>
++#include <linux/mlx5/mpfs.h>
+ #include <linux/mlx5/eswitch.h>
+ #include "mlx5_core.h"
+ #include "lib/mpfs.h"
+@@ -175,6 +176,7 @@ out:
+ 	mutex_unlock(&mpfs->lock);
+ 	return err;
+ }
++EXPORT_SYMBOL(mlx5_mpfs_add_mac);
+ 
+ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
+ {
+@@ -206,3 +208,4 @@ unlock:
+ 	mutex_unlock(&mpfs->lock);
+ 	return err;
+ }
++EXPORT_SYMBOL(mlx5_mpfs_del_mac);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
+index 4a7b2c3203a7e..4a293542a7aa1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
+@@ -84,12 +84,9 @@ struct l2addr_node {
+ #ifdef CONFIG_MLX5_MPFS
+ int  mlx5_mpfs_init(struct mlx5_core_dev *dev);
+ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
+-int  mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
+-int  mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
+ #else /* #ifndef CONFIG_MLX5_MPFS */
+ static inline int  mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
+ static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
+-static inline int  mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+-static inline int  mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+ #endif
++
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index c568896cfb231..efb93d63e54cb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -503,7 +503,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
+ 
+ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
+ {
+-	struct mlx5_profile *prof = dev->profile;
++	struct mlx5_profile *prof = &dev->profile;
+ 	void *set_hca_cap;
+ 	int err;
+ 
+@@ -524,11 +524,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
+ 		 to_fw_pkey_sz(dev, 128));
+ 
+ 	/* Check log_max_qp from HCA caps to set in current profile */
+-	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
++	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
+ 		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+-			       profile[prof_sel].log_max_qp,
++			       prof->log_max_qp,
+ 			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
+-		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
++		prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ 	}
+ 	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
+ 		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+@@ -1335,8 +1335,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
+ 	struct mlx5_priv *priv = &dev->priv;
+ 	int err;
+ 
+-	dev->profile = &profile[profile_idx];
+-
++	memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
+ 	INIT_LIST_HEAD(&priv->ctx_list);
+ 	spin_lock_init(&priv->ctx_lock);
+ 	mutex_init(&dev->intf_state_mutex);
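
The main.c change above is worth spelling out: dev->profile used to be a pointer into the global profile[] array, so clamping log_max_qp against one HCA's capabilities mutated the template every other device initializes from. Copying the profile into the device makes the clamp local. A small sketch of the aliasing difference, with the structures reduced to the one field involved (names are stand-ins, not the driver's):

#include <stdio.h>
#include <string.h>

struct profile { int log_max_qp; };

static const struct profile profiles[] = { { .log_max_qp = 18 } };

struct dev {
	struct profile profile;	/* embedded copy, as in the patch */
};

static void dev_init(struct dev *d, int idx)
{
	memcpy(&d->profile, &profiles[idx], sizeof(d->profile));
}

static void clamp_to_hca_cap(struct dev *d, int hca_max)
{
	if (hca_max < d->profile.log_max_qp)
		d->profile.log_max_qp = hca_max;	/* touches only this dev */
}

int main(void)
{
	struct dev a, b;

	dev_init(&a, 0);
	dev_init(&b, 0);
	clamp_to_hca_cap(&a, 16);
	printf("a=%d b=%d\n", a.profile.log_max_qp, b.profile.log_max_qp);
	return 0;	/* prints a=16 b=18; a shared pointer would give 16 16 */
}
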
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+index c2ba41bb7a701..96c4509e58381 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+@@ -128,10 +128,10 @@ static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
+ 	switch (hw_state) {
+ 	case MLX5_VHCA_STATE_ACTIVE:
+ 	case MLX5_VHCA_STATE_IN_USE:
+-	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ 		return DEVLINK_PORT_FN_STATE_ACTIVE;
+ 	case MLX5_VHCA_STATE_INVALID:
+ 	case MLX5_VHCA_STATE_ALLOCATED:
++	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ 	default:
+ 		return DEVLINK_PORT_FN_STATE_INACTIVE;
+ 	}
+@@ -184,14 +184,17 @@ sf_err:
+ 	return err;
+ }
+ 
+-static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
++static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
++			    struct netlink_ext_ack *extack)
+ {
+ 	int err;
+ 
+ 	if (mlx5_sf_is_active(sf))
+ 		return 0;
+-	if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
+-		return -EINVAL;
++	if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
++		NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
++		return -EBUSY;
++	}
+ 
+ 	err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
+ 	if (err)
+@@ -218,7 +221,8 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+ 
+ static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
+ 			     struct mlx5_sf *sf,
+-			     enum devlink_port_fn_state state)
++			     enum devlink_port_fn_state state,
++			     struct netlink_ext_ack *extack)
+ {
+ 	int err = 0;
+ 
+@@ -226,7 +230,7 @@ static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *ta
+ 	if (state == mlx5_sf_to_devlink_state(sf->hw_state))
+ 		goto out;
+ 	if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
+-		err = mlx5_sf_activate(dev, sf);
++		err = mlx5_sf_activate(dev, sf, extack);
+ 	else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
+ 		err = mlx5_sf_deactivate(dev, sf);
+ 	else
+@@ -257,7 +261,7 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_po
+ 		goto out;
+ 	}
+ 
+-	err = mlx5_sf_state_set(dev, table, sf, state);
++	err = mlx5_sf_state_set(dev, table, sf, state, extack);
+ out:
+ 	mlx5_sf_table_put(table);
+ 	return err;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 369d7cde39933..492415790b13f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1076,7 +1076,6 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
+  */
+ static int stmmac_init_phy(struct net_device *dev)
+ {
+-	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ 	struct stmmac_priv *priv = netdev_priv(dev);
+ 	struct device_node *node;
+ 	int ret;
+@@ -1102,8 +1101,12 @@ static int stmmac_init_phy(struct net_device *dev)
+ 		ret = phylink_connect_phy(priv->phylink, phydev);
+ 	}
+ 
+-	phylink_ethtool_get_wol(priv->phylink, &wol);
+-	device_set_wakeup_capable(priv->device, !!wol.supported);
++	if (!priv->plat->pmt) {
++		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
++
++		phylink_ethtool_get_wol(priv->phylink, &wol);
++		device_set_wakeup_capable(priv->device, !!wol.supported);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
+index d7a144b4a09f0..dc50e948195de 100644
+--- a/drivers/net/ethernet/ti/netcp_core.c
++++ b/drivers/net/ethernet/ti/netcp_core.c
+@@ -1350,8 +1350,8 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
+ 	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
+ 					     KNAV_QUEUE_SHARED);
+ 	if (IS_ERR(tx_pipe->dma_queue)) {
+-		dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
+-			name, ret);
++		dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
++			name, tx_pipe->dma_queue);
+ 		ret = PTR_ERR(tx_pipe->dma_queue);
+ 		goto err;
+ 	}
+diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
+index 8020776313716..2734a5164b927 100644
+--- a/drivers/net/ipa/ipa.h
++++ b/drivers/net/ipa/ipa.h
+@@ -56,6 +56,7 @@ enum ipa_flag {
+  * @mem_virt:		Virtual address of IPA-local memory space
+  * @mem_offset:		Offset from @mem_virt used for access to IPA memory
+  * @mem_size:		Total size (bytes) of memory at @mem_virt
++ * @mem_count:		Number of entries in the mem array
+  * @mem:		Array of IPA-local memory region descriptors
+  * @imem_iova:		I/O virtual address of IPA region in IMEM
+  * @imem_size:		Size of IMEM region
+@@ -102,6 +103,7 @@ struct ipa {
+ 	void *mem_virt;
+ 	u32 mem_offset;
+ 	u32 mem_size;
++	u32 mem_count;
+ 	const struct ipa_mem *mem;
+ 
+ 	unsigned long imem_iova;
+diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
+index f25029b9ec857..c1294d7ebdad4 100644
+--- a/drivers/net/ipa/ipa_mem.c
++++ b/drivers/net/ipa/ipa_mem.c
+@@ -181,7 +181,7 @@ int ipa_mem_config(struct ipa *ipa)
+ 	 * for the region, write "canary" values in the space prior to
+ 	 * the region's base address.
+ 	 */
+-	for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
++	for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
+ 		const struct ipa_mem *mem = &ipa->mem[mem_id];
+ 		u16 canary_count;
+ 		__le32 *canary;
+@@ -488,6 +488,7 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
+ 	ipa->mem_size = resource_size(res);
+ 
+ 	/* The ipa->mem[] array is indexed by enum ipa_mem_id values */
++	ipa->mem_count = mem_data->local_count;
+ 	ipa->mem = mem_data->local;
+ 
+ 	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
+diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
+index d1e1009d51afe..6faf39314ac93 100644
+--- a/drivers/net/mdio/mdio-octeon.c
++++ b/drivers/net/mdio/mdio-octeon.c
+@@ -71,7 +71,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ fail_register:
+-	mdiobus_free(bus->mii_bus);
+ 	smi_en.u64 = 0;
+ 	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
+ 	return err;
+@@ -85,7 +84,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
+ 	bus = platform_get_drvdata(pdev);
+ 
+ 	mdiobus_unregister(bus->mii_bus);
+-	mdiobus_free(bus->mii_bus);
+ 	smi_en.u64 = 0;
+ 	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
+ 	return 0;
+diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
+index 3d7eda99d34e2..dd7430c998a2a 100644
+--- a/drivers/net/mdio/mdio-thunder.c
++++ b/drivers/net/mdio/mdio-thunder.c
+@@ -126,7 +126,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
+ 			continue;
+ 
+ 		mdiobus_unregister(bus->mii_bus);
+-		mdiobus_free(bus->mii_bus);
+ 		oct_mdio_writeq(0, bus->register_base + SMI_EN);
+ 	}
+ 	pci_release_regions(pdev);
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 3ef4b2841402c..5c779cc0ea112 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -1689,7 +1689,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
+ 	spin_unlock_irqrestore(&serial->serial_lock, flags);
+ 
+ 	return usb_control_msg(serial->parent->usb,
+-			       usb_rcvctrlpipe(serial->parent->usb, 0), 0x22,
++			       usb_sndctrlpipe(serial->parent->usb, 0), 0x22,
+ 			       0x21, val, if_num, NULL, 0,
+ 			       USB_CTRL_SET_TIMEOUT);
+ }
+@@ -2436,7 +2436,7 @@ static int hso_rfkill_set_block(void *data, bool blocked)
+ 	if (hso_dev->usb_gone)
+ 		rv = 0;
+ 	else
+-		rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0),
++		rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0),
+ 				       enabled ? 0x82 : 0x81, 0x40, 0, 0, NULL, 0,
+ 				       USB_CTRL_SET_TIMEOUT);
+ 	mutex_unlock(&hso_dev->mutex);
+@@ -2618,32 +2618,31 @@ static struct hso_device *hso_create_bulk_serial_device(
+ 		num_urbs = 2;
+ 		serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
+ 					   GFP_KERNEL);
++		if (!serial->tiocmget)
++			goto exit;
+ 		serial->tiocmget->serial_state_notification
+ 			= kzalloc(sizeof(struct hso_serial_state_notification),
+ 					   GFP_KERNEL);
+-		/* it isn't going to break our heart if serial->tiocmget
+-		 *  allocation fails don't bother checking this.
+-		 */
+-		if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
+-			tiocmget = serial->tiocmget;
+-			tiocmget->endp = hso_get_ep(interface,
+-						    USB_ENDPOINT_XFER_INT,
+-						    USB_DIR_IN);
+-			if (!tiocmget->endp) {
+-				dev_err(&interface->dev, "Failed to find INT IN ep\n");
+-				goto exit;
+-			}
+-
+-			tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+-			if (tiocmget->urb) {
+-				mutex_init(&tiocmget->mutex);
+-				init_waitqueue_head(&tiocmget->waitq);
+-			} else
+-				hso_free_tiomget(serial);
++		if (!serial->tiocmget->serial_state_notification)
++			goto exit;
++		tiocmget = serial->tiocmget;
++		tiocmget->endp = hso_get_ep(interface,
++					    USB_ENDPOINT_XFER_INT,
++					    USB_DIR_IN);
++		if (!tiocmget->endp) {
++			dev_err(&interface->dev, "Failed to find INT IN ep\n");
++			goto exit;
+ 		}
+-	}
+-	else
++
++		tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
++		if (!tiocmget->urb)
++			goto exit;
++
++		mutex_init(&tiocmget->mutex);
++		init_waitqueue_head(&tiocmget->waitq);
++	} else {
+ 		num_urbs = 1;
++	}
+ 
+ 	if (hso_serial_common_create(serial, num_urbs, BULK_URB_RX_SIZE,
+ 				     BULK_URB_TX_SIZE))
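
Both hso hunks that switch usb_rcvctrlpipe() to usb_sndctrlpipe() fix the same mismatch: requests with bmRequestType 0x21 and 0x40 have the direction bit (0x80) clear, i.e. they are host-to-device, so they must go out on a send pipe. A tiny sketch of that direction rule; USB_DIR_IN is the standard value, the helper is illustrative:

#include <stdint.h>
#include <stdio.h>

#define USB_DIR_IN 0x80	/* bit 7 of bmRequestType: device-to-host */

/* A control transfer's pipe direction must match bit 7 of bmRequestType;
 * the hso fix above aligns the two for its 0x21 and 0x40 OUT requests.
 */
static const char *expected_pipe(uint8_t bmRequestType)
{
	return (bmRequestType & USB_DIR_IN) ? "rcvctrlpipe" : "sndctrlpipe";
}

int main(void)
{
	printf("0x21 -> %s\n", expected_pipe(0x21));	/* sndctrlpipe */
	printf("0x40 -> %s\n", expected_pipe(0x40));	/* sndctrlpipe */
	printf("0xa1 -> %s\n", expected_pipe(0xa1));	/* rcvctrlpipe */
	return 0;
}
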
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 4353b370249f1..76ed79bb1e3f1 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	ret = smsc75xx_wait_ready(dev, 0);
+ 	if (ret < 0) {
+ 		netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	smsc75xx_init_mac_address(dev);
+@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	ret = smsc75xx_reset(dev);
+ 	if (ret < 0) {
+ 		netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	dev->net->netdev_ops = &smsc75xx_netdev_ops;
+@@ -1502,6 +1502,10 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ 	dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
+ 	return 0;
++
++err:
++	kfree(pdata);
++	return ret;
+ }
+ 
+ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
+diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
+index 956157946106c..dbc8aef82a65f 100644
+--- a/drivers/net/wireless/ath/ath10k/htt.h
++++ b/drivers/net/wireless/ath/ath10k/htt.h
+@@ -845,6 +845,7 @@ enum htt_security_types {
+ 
+ #define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
+ #define ATH10K_TXRX_NUM_EXT_TIDS 19
++#define ATH10K_TXRX_NON_QOS_TID 16
+ 
+ enum htt_security_flags {
+ #define HTT_SECURITY_TYPE_MASK 0x7F
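
ATH10K_TXRX_NON_QOS_TID gives non-QoS frames their own slot (16) directly after the sixteen QoS TIDs, so the per-TID fragment state used by the PN check added further down can track them too; ieee80211_get_tid() already masks a QoS TID to 0-15. A sketch of that TID selection (the helper name is illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

#define NON_QOS_TID	16	/* ATH10K_TXRX_NON_QOS_TID */
#define NUM_EXT_TIDS	19	/* ATH10K_TXRX_NUM_EXT_TIDS */

/* QoS data frames index per-TID fragment state by their own TID (0-15);
 * every other frame shares the dedicated non-QoS slot.
 */
static uint8_t frag_state_tid(int is_qos_data, uint8_t qos_tid)
{
	return is_qos_data ? (uint8_t)(qos_tid & 0xf) : NON_QOS_TID;
}

int main(void)
{
	printf("qos tid 5 -> %u of %u\n", frag_state_tid(1, 5), NUM_EXT_TIDS);
	printf("non-qos   -> %u of %u\n", frag_state_tid(0, 0), NUM_EXT_TIDS);
	return 0;
}
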
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 1a08156d5011d..7ffb5d5b2a70e 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -1746,16 +1746,97 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
+ 	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
+ }
+ 
++static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
++				  u16 offset,
++				  enum htt_rx_mpdu_encrypt_type enctype)
++{
++	struct ieee80211_hdr *hdr;
++	u64 pn = 0;
++	u8 *ehdr;
++
++	hdr = (struct ieee80211_hdr *)(skb->data + offset);
++	ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
++
++	if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
++		pn = ehdr[0];
++		pn |= (u64)ehdr[1] << 8;
++		pn |= (u64)ehdr[4] << 16;
++		pn |= (u64)ehdr[5] << 24;
++		pn |= (u64)ehdr[6] << 32;
++		pn |= (u64)ehdr[7] << 40;
++	}
++	return pn;
++}
++
++static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
++						 struct sk_buff *skb,
++						 u16 offset)
++{
++	struct ieee80211_hdr *hdr;
++
++	hdr = (struct ieee80211_hdr *)(skb->data + offset);
++	return !is_multicast_ether_addr(hdr->addr1);
++}
++
++static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
++					  struct sk_buff *skb,
++					  u16 peer_id,
++					  u16 offset,
++					  enum htt_rx_mpdu_encrypt_type enctype)
++{
++	struct ath10k_peer *peer;
++	union htt_rx_pn_t *last_pn, new_pn = {0};
++	struct ieee80211_hdr *hdr;
++	bool more_frags;
++	u8 tid, frag_number;
++	u32 seq;
++
++	peer = ath10k_peer_find_by_id(ar, peer_id);
++	if (!peer) {
++		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
++		return false;
++	}
++
++	hdr = (struct ieee80211_hdr *)(skb->data + offset);
++	if (ieee80211_is_data_qos(hdr->frame_control))
++		tid = ieee80211_get_tid(hdr);
++	else
++		tid = ATH10K_TXRX_NON_QOS_TID;
++
++	last_pn = &peer->frag_tids_last_pn[tid];
++	new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
++	more_frags = ieee80211_has_morefrags(hdr->frame_control);
++	frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
++	seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
++
++	if (frag_number == 0) {
++		last_pn->pn48 = new_pn.pn48;
++		peer->frag_tids_seq[tid] = seq;
++	} else {
++		if (seq != peer->frag_tids_seq[tid])
++			return false;
++
++		if (new_pn.pn48 != last_pn->pn48 + 1)
++			return false;
++
++		last_pn->pn48 = new_pn.pn48;
++	}
++
++	return true;
++}
++
+ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
+ 				 struct sk_buff_head *amsdu,
+ 				 struct ieee80211_rx_status *status,
+ 				 bool fill_crypt_header,
+ 				 u8 *rx_hdr,
+-				 enum ath10k_pkt_rx_err *err)
++				 enum ath10k_pkt_rx_err *err,
++				 u16 peer_id,
++				 bool frag)
+ {
+ 	struct sk_buff *first;
+ 	struct sk_buff *last;
+-	struct sk_buff *msdu;
++	struct sk_buff *msdu, *temp;
+ 	struct htt_rx_desc *rxd;
+ 	struct ieee80211_hdr *hdr;
+ 	enum htt_rx_mpdu_encrypt_type enctype;
+@@ -1768,6 +1849,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
+ 	bool is_decrypted;
+ 	bool is_mgmt;
+ 	u32 attention;
++	bool frag_pn_check = true, multicast_check = true;
+ 
+ 	if (skb_queue_empty(amsdu))
+ 		return;
+@@ -1866,7 +1948,37 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
+ 	}
+ 
+ 	skb_queue_walk(amsdu, msdu) {
++		if (frag && !fill_crypt_header && is_decrypted &&
++		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
++			frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
++								      msdu,
++								      peer_id,
++								      0,
++								      enctype);
++
++		if (frag)
++			multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
++									       msdu,
++									       0);
++
++		if (!frag_pn_check || !multicast_check) {
++			/* Discard the fragment with invalid PN or multicast DA
++			 */
++			temp = msdu->prev;
++			__skb_unlink(msdu, amsdu);
++			dev_kfree_skb_any(msdu);
++			msdu = temp;
++			frag_pn_check = true;
++			multicast_check = true;
++			continue;
++		}
++
+ 		ath10k_htt_rx_h_csum_offload(msdu);
++
++		if (frag && !fill_crypt_header &&
++		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
++			status->flag &= ~RX_FLAG_MMIC_STRIPPED;
++
+ 		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
+ 					is_decrypted);
+ 
+@@ -1884,6 +1996,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
+ 
+ 		hdr = (void *)msdu->data;
+ 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
++
++		if (frag && !fill_crypt_header &&
++		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
++			status->flag &= ~RX_FLAG_IV_STRIPPED &
++					~RX_FLAG_MMIC_STRIPPED;
+ 	}
+ }
+ 
+@@ -1991,14 +2108,62 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
+ 	ath10k_unchain_msdu(amsdu, unchain_cnt);
+ }
+ 
++static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
++					 struct sk_buff_head *amsdu)
++{
++	u8 *subframe_hdr;
++	struct sk_buff *first;
++	bool is_first, is_last;
++	struct htt_rx_desc *rxd;
++	struct ieee80211_hdr *hdr;
++	size_t hdr_len, crypto_len;
++	enum htt_rx_mpdu_encrypt_type enctype;
++	int bytes_aligned = ar->hw_params.decap_align_bytes;
++
++	first = skb_peek(amsdu);
++
++	rxd = (void *)first->data - sizeof(*rxd);
++	hdr = (void *)rxd->rx_hdr_status;
++
++	is_first = !!(rxd->msdu_end.common.info0 &
++		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
++	is_last = !!(rxd->msdu_end.common.info0 &
++		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
++
++	/* Return in case of non-aggregated msdu */
++	if (is_first && is_last)
++		return true;
++
++	/* First msdu flag is not set for the first msdu of the list */
++	if (!is_first)
++		return false;
++
++	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
++		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
++
++	hdr_len = ieee80211_hdrlen(hdr->frame_control);
++	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
++
++	subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
++		       crypto_len;
++
++	/* Validate that the amsdu has a proper first subframe.
++	 * A single msdu can be received as an amsdu when the unauthenticated
++	 * amsdu flag of a QoS header gets flipped in non-SPP AMSDUs; in such
++	 * cases the first subframe has an llc/snap header in place of a
++	 * valid da. Return false if the da matches the rfc1042 pattern.
++	 */
++	if (ether_addr_equal(subframe_hdr, rfc1042_header))
++		return false;
++
++	return true;
++}
++
+ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
+ 					struct sk_buff_head *amsdu,
+ 					struct ieee80211_rx_status *rx_status)
+ {
+-	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
+-	 * invalid/dangerous frames.
+-	 */
+-
+ 	if (!rx_status->freq) {
+ 		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
+ 		return false;
+@@ -2009,6 +2174,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
+ 		return false;
+ 	}
+ 
++	if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
++		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
++		return false;
++	}
++
+ 	return true;
+ }
+ 
+@@ -2071,7 +2241,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
+ 		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
+ 
+ 	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
+-	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
++	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
++			     false);
+ 	msdus_to_queue = skb_queue_len(&amsdu);
+ 	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
+ 
+@@ -2204,6 +2375,11 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
+ 	fw_desc = &rx->fw_desc;
+ 	rx_desc_len = fw_desc->len;
+ 
++	if (fw_desc->u.bits.discard) {
++		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
++		goto err;
++	}
++
+ 	/* I have not yet seen any case where num_mpdu_ranges > 1.
+ 	 * qcacld does not seem to handle that case either, so we introduce the
+ 	 * same limitation here as well.
+@@ -2509,6 +2685,13 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
+ 	rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
+ 	rx_desc_info = __le32_to_cpu(rx_desc->info);
+ 
++	hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
++
++	if (is_multicast_ether_addr(hdr->addr1)) {
++		/* Discard the fragment with multicast DA */
++		goto err;
++	}
++
+ 	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
+ 		spin_unlock_bh(&ar->data_lock);
+ 		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
+@@ -2516,8 +2699,6 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
+ 						    HTT_RX_NON_TKIP_MIC);
+ 	}
+ 
+-	hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
+-
+ 	if (ieee80211_has_retry(hdr->frame_control))
+ 		goto err;
+ 
+@@ -3027,7 +3208,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+ 			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
+ 			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
+ 			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
+-					     NULL);
++					     NULL, peer_id, frag);
+ 			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
+ 			break;
+ 		case -EAGAIN:
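
The fragment handling added above leans on ath10k_htt_rx_h_get_pn(): for CCMP the 48-bit packet number is scattered across ehdr bytes 0, 1 and 4-7 (bytes 2-3 are reserved/key-ID), and a non-initial fragment is accepted only when its PN is exactly the previous fragment's PN plus one. A standalone sketch of the byte assembly and that acceptance rule (the frame bytes are made up):

#include <stdint.h>
#include <stdio.h>

/* CCMP header layout: PN0 PN1 rsvd keyid PN2 PN3 PN4 PN5 */
static uint64_t ccmp_pn48(const uint8_t *ehdr)
{
	uint64_t pn = 0;

	pn |= ehdr[0];
	pn |= (uint64_t)ehdr[1] << 8;
	pn |= (uint64_t)ehdr[4] << 16;
	pn |= (uint64_t)ehdr[5] << 24;
	pn |= (uint64_t)ehdr[6] << 32;
	pn |= (uint64_t)ehdr[7] << 40;
	return pn;
}

int main(void)
{
	const uint8_t first[8] = { 0x10, 0x00, 0x00, 0x20, 0, 0, 0, 0 };
	const uint8_t next[8]  = { 0x11, 0x00, 0x00, 0x20, 0, 0, 0, 0 };
	uint64_t last = ccmp_pn48(first);

	/* Fragment rule from ath10k_htt_rx_h_frag_pn_check(): a follow-up
	 * fragment is dropped unless its PN is exactly last + 1 (and its
	 * sequence number matches the first fragment's).
	 */
	printf("fragment %s\n",
	       ccmp_pn48(next) == last + 1 ? "accepted" : "dropped");
	return 0;
}
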
+diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
+index f2b6bf8f0d60d..705b6295e4663 100644
+--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
++++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
+@@ -1282,7 +1282,19 @@ struct fw_rx_desc_base {
+ #define FW_RX_DESC_UDP              (1 << 6)
+ 
+ struct fw_rx_desc_hl {
+-	u8 info0;
++	union {
++		struct {
++		u8 discard:1,
++		   forward:1,
++		   any_err:1,
++		   dup_err:1,
++		   reserved:1,
++		   inspect:1,
++		   extension:2;
++		} bits;
++		u8 info0;
++	} u;
++
+ 	u8 version;
+ 	u8 len;
+ 	u8 flags;
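
The fw_rx_desc_hl change overlays named flag bits on the old info0 byte so the HL RX path can test fw_desc->u.bits.discard directly. C leaves bitfield ordering to the ABI, which is fine within one kernel build but worth remembering when mimicking the pattern elsewhere; on the usual LSB-first little-endian layout, bit 0 of info0 is discard. A compileable userspace stand-in for the struct:

#include <stdint.h>
#include <stdio.h>

struct fw_rx_desc_hl_u {
	union {
		struct {
			uint8_t discard:1,
				forward:1,
				any_err:1,
				dup_err:1,
				reserved:1,
				inspect:1,
				extension:2;
		} bits;
		uint8_t info0;	/* same byte, viewed whole */
	} u;
};

int main(void)
{
	struct fw_rx_desc_hl_u d = { .u.info0 = 0x01 };

	/* On an LSB-first little-endian ABI, bit 0 of info0 is "discard". */
	printf("discard=%u forward=%u\n", d.u.bits.discard, d.u.bits.forward);
	return 0;
}
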
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 850ad38b888f4..e3b620dd4d928 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -854,6 +854,24 @@ static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_d
+ 	__skb_queue_purge(&rx_tid->rx_frags);
+ }
+ 
++void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
++{
++	struct dp_rx_tid *rx_tid;
++	int i;
++
++	lockdep_assert_held(&ar->ab->base_lock);
++
++	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
++		rx_tid = &peer->rx_tid[i];
++
++		spin_unlock_bh(&ar->ab->base_lock);
++		del_timer_sync(&rx_tid->frag_timer);
++		spin_lock_bh(&ar->ab->base_lock);
++
++		ath11k_dp_rx_frags_cleanup(rx_tid, true);
++	}
++}
++
+ void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
+ {
+ 	struct dp_rx_tid *rx_tid;
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
+index bf399312b5ff5..623da3bf9dc81 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
+@@ -49,6 +49,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
+ 				       const u8 *peer_addr,
+ 				       enum set_key_cmd key_cmd,
+ 				       struct ieee80211_key_conf *key);
++void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer);
+ void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
+ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ 			       struct ath11k_peer *peer, u8 tid);
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index faa2e678e63ef..7ad0383affcba 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -2711,6 +2711,12 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	 */
+ 	spin_lock_bh(&ab->base_lock);
+ 	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
++
++	/* flush the fragments cache during key (re)install to
++	 * ensure all frags in the new frag list belong to the same key.
++	 */
++	if (peer && cmd == SET_KEY)
++		ath11k_peer_frags_flush(ar, peer);
+ 	spin_unlock_bh(&ab->base_lock);
+ 
+ 	if (!peer) {
+diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
+index 7506cea46f589..433a047f3747b 100644
+--- a/drivers/net/wireless/ath/ath6kl/debug.c
++++ b/drivers/net/wireless/ath/ath6kl/debug.c
+@@ -1027,14 +1027,17 @@ static ssize_t ath6kl_lrssi_roam_write(struct file *file,
+ {
+ 	struct ath6kl *ar = file->private_data;
+ 	unsigned long lrssi_roam_threshold;
++	int ret;
+ 
+ 	if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold))
+ 		return -EINVAL;
+ 
+ 	ar->lrssi_roam_threshold = lrssi_roam_threshold;
+ 
+-	ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
++	ret = ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+ 
++	if (ret)
++		return ret;
+ 	return count;
+ }
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+index ce8c102df7b3e..633d0ab190314 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+@@ -1217,13 +1217,9 @@ static struct sdio_driver brcmf_sdmmc_driver = {
+ 	},
+ };
+ 
+-void brcmf_sdio_register(void)
++int brcmf_sdio_register(void)
+ {
+-	int ret;
+-
+-	ret = sdio_register_driver(&brcmf_sdmmc_driver);
+-	if (ret)
+-		brcmf_err("sdio_register_driver failed: %d\n", ret);
++	return sdio_register_driver(&brcmf_sdmmc_driver);
+ }
+ 
+ void brcmf_sdio_exit(void)
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+index 08f9d47f2e5ca..3f5da3bb6aa59 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+@@ -275,11 +275,26 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
+ 
+ #ifdef CONFIG_BRCMFMAC_SDIO
+ void brcmf_sdio_exit(void);
+-void brcmf_sdio_register(void);
++int brcmf_sdio_register(void);
++#else
++static inline void brcmf_sdio_exit(void) { }
++static inline int brcmf_sdio_register(void) { return 0; }
+ #endif
++
+ #ifdef CONFIG_BRCMFMAC_USB
+ void brcmf_usb_exit(void);
+-void brcmf_usb_register(void);
++int brcmf_usb_register(void);
++#else
++static inline void brcmf_usb_exit(void) { }
++static inline int brcmf_usb_register(void) { return 0; }
++#endif
++
++#ifdef CONFIG_BRCMFMAC_PCIE
++void brcmf_pcie_exit(void);
++int brcmf_pcie_register(void);
++#else
++static inline void brcmf_pcie_exit(void) { }
++static inline int brcmf_pcie_register(void) { return 0; }
+ #endif
+ 
+ #endif /* BRCMFMAC_BUS_H */
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index ea78fe527c5dc..7e528833aafa0 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -1518,40 +1518,34 @@ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
+ 	}
+ }
+ 
+-static void brcmf_driver_register(struct work_struct *work)
+-{
+-#ifdef CONFIG_BRCMFMAC_SDIO
+-	brcmf_sdio_register();
+-#endif
+-#ifdef CONFIG_BRCMFMAC_USB
+-	brcmf_usb_register();
+-#endif
+-#ifdef CONFIG_BRCMFMAC_PCIE
+-	brcmf_pcie_register();
+-#endif
+-}
+-static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
+-
+ int __init brcmf_core_init(void)
+ {
+-	if (!schedule_work(&brcmf_driver_work))
+-		return -EBUSY;
++	int err;
+ 
++	err = brcmf_sdio_register();
++	if (err)
++		return err;
++
++	err = brcmf_usb_register();
++	if (err)
++		goto error_usb_register;
++
++	err = brcmf_pcie_register();
++	if (err)
++		goto error_pcie_register;
+ 	return 0;
++
++error_pcie_register:
++	brcmf_usb_exit();
++error_usb_register:
++	brcmf_sdio_exit();
++	return err;
+ }
+ 
+ void __exit brcmf_core_exit(void)
+ {
+-	cancel_work_sync(&brcmf_driver_work);
+-
+-#ifdef CONFIG_BRCMFMAC_SDIO
+ 	brcmf_sdio_exit();
+-#endif
+-#ifdef CONFIG_BRCMFMAC_USB
+ 	brcmf_usb_exit();
+-#endif
+-#ifdef CONFIG_BRCMFMAC_PCIE
+ 	brcmf_pcie_exit();
+-#endif
+ }
+ 
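
The core.c rewrite above drops the old fire-and-forget workqueue, which could neither order the three bus registrations nor surface their errors, in favor of a synchronous goto-unwind ladder. A self-contained sketch of that ladder with the bus calls stubbed out; flip a stub's return value to watch the reverse-order cleanup run:

#include <stdio.h>

/* Stubs standing in for brcmf_sdio/usb/pcie_register() and the exits. */
static int sdio_register(void) { return 0; }
static int usb_register(void)  { return 0; }
static int pcie_register(void) { return -1; }

static void sdio_exit(void) { puts("sdio_exit"); }
static void usb_exit(void)  { puts("usb_exit"); }

static int core_init(void)
{
	int err;

	err = sdio_register();
	if (err)
		return err;

	err = usb_register();
	if (err)
		goto error_usb_register;

	err = pcie_register();
	if (err)
		goto error_pcie_register;
	return 0;

error_pcie_register:
	usb_exit();	/* undo in reverse order of registration */
error_usb_register:
	sdio_exit();
	return err;
}

int main(void)
{
	printf("core_init: %d\n", core_init());
	return 0;
}
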
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index ad79e3b7e74a3..143a705b5cb3a 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -2140,15 +2140,10 @@ static struct pci_driver brcmf_pciedrvr = {
+ };
+ 
+ 
+-void brcmf_pcie_register(void)
++int brcmf_pcie_register(void)
+ {
+-	int err;
+-
+ 	brcmf_dbg(PCIE, "Enter\n");
+-	err = pci_register_driver(&brcmf_pciedrvr);
+-	if (err)
+-		brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
+-			  err);
++	return pci_register_driver(&brcmf_pciedrvr);
+ }
+ 
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
+index d026401d20010..8e6c227e8315c 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
+@@ -11,9 +11,4 @@ struct brcmf_pciedev {
+ 	struct brcmf_pciedev_info *devinfo;
+ };
+ 
+-
+-void brcmf_pcie_exit(void);
+-void brcmf_pcie_register(void);
+-
+-
+ #endif /* BRCMFMAC_PCIE_H */
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+index 586f4dfc638b9..9fb68c2dc7e39 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+@@ -1584,12 +1584,8 @@ void brcmf_usb_exit(void)
+ 	usb_deregister(&brcmf_usbdrvr);
+ }
+ 
+-void brcmf_usb_register(void)
++int brcmf_usb_register(void)
+ {
+-	int ret;
+-
+ 	brcmf_dbg(USB, "Enter\n");
+-	ret = usb_register(&brcmf_usbdrvr);
+-	if (ret)
+-		brcmf_err("usb_register failed %d\n", ret);
++	return usb_register(&brcmf_usbdrvr);
+ }
+diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
+index f5b78257d5518..c68814841583f 100644
+--- a/drivers/net/wireless/marvell/libertas/mesh.c
++++ b/drivers/net/wireless/marvell/libertas/mesh.c
+@@ -801,24 +801,6 @@ static const struct attribute_group mesh_ie_group = {
+ 	.attrs = mesh_ie_attrs,
+ };
+ 
+-static void lbs_persist_config_init(struct net_device *dev)
+-{
+-	int ret;
+-	ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
+-	if (ret)
+-		pr_err("failed to create boot_opts_group.\n");
+-
+-	ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
+-	if (ret)
+-		pr_err("failed to create mesh_ie_group.\n");
+-}
+-
+-static void lbs_persist_config_remove(struct net_device *dev)
+-{
+-	sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
+-	sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
+-}
+-
+ 
+ /***************************************************************************
+  * Initializing and starting, stopping mesh
+@@ -1014,6 +996,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
+ 	SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
+ 
+ 	mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
++	mesh_dev->sysfs_groups[0] = &lbs_mesh_attr_group;
++	mesh_dev->sysfs_groups[1] = &boot_opts_group;
++	mesh_dev->sysfs_groups[2] = &mesh_ie_group;
++
+ 	/* Register virtual mesh interface */
+ 	ret = register_netdev(mesh_dev);
+ 	if (ret) {
+@@ -1021,19 +1007,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
+ 		goto err_free_netdev;
+ 	}
+ 
+-	ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
+-	if (ret)
+-		goto err_unregister;
+-
+-	lbs_persist_config_init(mesh_dev);
+-
+ 	/* Everything successful */
+ 	ret = 0;
+ 	goto done;
+ 
+-err_unregister:
+-	unregister_netdev(mesh_dev);
+-
+ err_free_netdev:
+ 	free_netdev(mesh_dev);
+ 
+@@ -1054,8 +1031,6 @@ void lbs_remove_mesh(struct lbs_private *priv)
+ 
+ 	netif_stop_queue(mesh_dev);
+ 	netif_carrier_off(mesh_dev);
+-	sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
+-	lbs_persist_config_remove(mesh_dev);
+ 	unregister_netdev(mesh_dev);
+ 	priv->mesh_dev = NULL;
+ 	kfree(mesh_dev->ieee80211_ptr);
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index d958b5da9b88a..4df4f37e6b895 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -538,7 +538,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
+ 		 * nvmet_req_init is completed.
+ 		 */
+ 		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+-		    len && len < cmd->req.port->inline_data_size &&
++		    len && len <= cmd->req.port->inline_data_size &&
+ 		    nvme_is_write(cmd->req.cmd))
+ 			return;
+ 	}
+diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
+index 12c31fd5d5ae2..0753ef18e7211 100644
+--- a/drivers/platform/x86/hp-wireless.c
++++ b/drivers/platform/x86/hp-wireless.c
+@@ -17,12 +17,14 @@ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Alex Hung");
+ MODULE_ALIAS("acpi*:HPQ6001:*");
+ MODULE_ALIAS("acpi*:WSTADEF:*");
++MODULE_ALIAS("acpi*:AMDI0051:*");
+ 
+ static struct input_dev *hpwl_input_dev;
+ 
+ static const struct acpi_device_id hpwl_ids[] = {
+ 	{"HPQ6001", 0},
+ 	{"WSTADEF", 0},
++	{"AMDI0051", 0},
+ 	{"", 0},
+ };
+ 
+diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
+index 799cbe2ffcf36..8c0867bda8280 100644
+--- a/drivers/platform/x86/hp_accel.c
++++ b/drivers/platform/x86/hp_accel.c
+@@ -88,6 +88,9 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
+ static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
+ {
+ 	struct acpi_device *dev = lis3->bus_priv;
++	if (!lis3->init_required)
++		return 0;
++
+ 	if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
+ 				 NULL, NULL) != AE_OK)
+ 		return -EINVAL;
+@@ -356,6 +359,7 @@ static int lis3lv02d_add(struct acpi_device *device)
+ 	}
+ 
+ 	/* call the core layer do its init */
++	lis3_dev.init_required = true;
+ 	ret = lis3lv02d_init_device(&lis3_dev);
+ 	if (ret)
+ 		return ret;
+@@ -403,11 +407,27 @@ static int lis3lv02d_suspend(struct device *dev)
+ 
+ static int lis3lv02d_resume(struct device *dev)
+ {
++	lis3_dev.init_required = false;
++	lis3lv02d_poweron(&lis3_dev);
++	return 0;
++}
++
++static int lis3lv02d_restore(struct device *dev)
++{
++	lis3_dev.init_required = true;
+ 	lis3lv02d_poweron(&lis3_dev);
+ 	return 0;
+ }
+ 
+-static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
++static const struct dev_pm_ops hp_accel_pm = {
++	.suspend = lis3lv02d_suspend,
++	.resume = lis3lv02d_resume,
++	.freeze = lis3lv02d_suspend,
++	.thaw = lis3lv02d_resume,
++	.poweroff = lis3lv02d_suspend,
++	.restore = lis3lv02d_restore,
++};
++
+ #define HP_ACCEL_PM (&hp_accel_pm)
+ #else
+ #define HP_ACCEL_PM NULL
+diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
+index 05cced59e251a..f58b8543f6ac5 100644
+--- a/drivers/platform/x86/intel_punit_ipc.c
++++ b/drivers/platform/x86/intel_punit_ipc.c
+@@ -312,6 +312,7 @@ static const struct acpi_device_id punit_ipc_acpi_ids[] = {
+ 	{ "INT34D4", 0 },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids);
+ 
+ static struct platform_driver intel_punit_ipc_driver = {
+ 	.probe = intel_punit_ipc_probe,
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index c44a6e8dceb8c..8618c44106c2b 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -115,6 +115,32 @@ static const struct ts_dmi_data chuwi_hi10_plus_data = {
+ 	.properties     = chuwi_hi10_plus_props,
+ };
+ 
++static const struct property_entry chuwi_hi10_pro_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1912),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1272),
++	PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	PROPERTY_ENTRY_BOOL("silead,home-button"),
++	{ }
++};
++
++static const struct ts_dmi_data chuwi_hi10_pro_data = {
++	.embedded_fw = {
++		.name	= "silead/gsl1680-chuwi-hi10-pro.fw",
++		.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
++		.length	= 42504,
++		.sha256	= { 0xdb, 0x92, 0x68, 0xa8, 0xdb, 0x81, 0x31, 0x00,
++			    0x1f, 0x58, 0x89, 0xdb, 0x19, 0x1b, 0x15, 0x8c,
++			    0x05, 0x14, 0xf4, 0x95, 0xba, 0x15, 0x45, 0x98,
++			    0x42, 0xa3, 0xbb, 0x65, 0xe3, 0x30, 0xa5, 0x93 },
++	},
++	.acpi_name      = "MSSL1680:00",
++	.properties     = chuwi_hi10_pro_props,
++};
++
+ static const struct property_entry chuwi_vi8_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+ 	PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
+@@ -889,6 +915,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ 		},
+ 	},
++	{
++		/* Chuwi Hi10 Pro (CWI529) */
++		.driver_data = (void *)&chuwi_hi10_pro_data,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
++			DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
++		},
++	},
+ 	{
+ 		/* Chuwi Vi8 (CWI506) */
+ 		.driver_data = (void *)&chuwi_vi8_data,
+@@ -1070,6 +1105,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
+ 		},
+ 	},
++	{
++		/* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
++		.driver_data = (void *)&trekstor_surftab_wintron70_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "WinPad 7 W10 - WPW700"),
++		},
++	},
+ 	{
+ 		/* Mediacom Flexbook Edge 11 (same hw as TS Primebook C11) */
+ 		.driver_data = (void *)&trekstor_primebook_c11_data,
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 530e5f90095e6..0d1034e3ed0f2 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -324,7 +324,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (!bp->base) {
+ 		dev_err(&pdev->dev, "io_remap bar0\n");
+ 		err = -ENOMEM;
+-		goto out;
++		goto out_release_regions;
+ 	}
+ 	bp->reg = bp->base + OCP_REGISTER_OFFSET;
+ 	bp->tod = bp->base + TOD_REGISTER_OFFSET;
+@@ -347,6 +347,8 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	return 0;
+ 
+ out:
++	pci_iounmap(pdev, bp->base);
++out_release_regions:
+ 	pci_release_regions(pdev);
+ out_disable:
+ 	pci_disable_device(pdev);
+diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
+index 1b9e1442e6a50..fd42a5fffaed1 100644
+--- a/drivers/s390/block/dasd_diag.c
++++ b/drivers/s390/block/dasd_diag.c
+@@ -642,12 +642,18 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
+ 	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+ }
+ 
++static int dasd_diag_pe_handler(struct dasd_device *device,
++				__u8 tbvpm, __u8 fcsecpm)
++{
++	return dasd_generic_verify_path(device, tbvpm);
++}
++
+ static struct dasd_discipline dasd_diag_discipline = {
+ 	.owner = THIS_MODULE,
+ 	.name = "DIAG",
+ 	.ebcname = "DIAG",
+ 	.check_device = dasd_diag_check_device,
+-	.verify_path = dasd_generic_verify_path,
++	.pe_handler = dasd_diag_pe_handler,
+ 	.fill_geometry = dasd_diag_fill_geometry,
+ 	.setup_blk_queue = dasd_diag_setup_blk_queue,
+ 	.start_IO = dasd_start_diag,
+diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
+index 1aeb68794ce8b..417437844d343 100644
+--- a/drivers/s390/block/dasd_fba.c
++++ b/drivers/s390/block/dasd_fba.c
+@@ -800,13 +800,19 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
+ 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ }
+ 
++static int dasd_fba_pe_handler(struct dasd_device *device,
++			       __u8 tbvpm, __u8 fcsecpm)
++{
++	return dasd_generic_verify_path(device, tbvpm);
++}
++
+ static struct dasd_discipline dasd_fba_discipline = {
+ 	.owner = THIS_MODULE,
+ 	.name = "FBA ",
+ 	.ebcname = "FBA ",
+ 	.check_device = dasd_fba_check_characteristics,
+ 	.do_analysis = dasd_fba_do_analysis,
+-	.verify_path = dasd_generic_verify_path,
++	.pe_handler = dasd_fba_pe_handler,
+ 	.setup_blk_queue = dasd_fba_setup_blk_queue,
+ 	.fill_geometry = dasd_fba_fill_geometry,
+ 	.start_IO = dasd_start_IO,
+diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
+index b8a04c42d1d2e..015239c7d5139 100644
+--- a/drivers/s390/block/dasd_int.h
++++ b/drivers/s390/block/dasd_int.h
+@@ -297,7 +297,6 @@ struct dasd_discipline {
+ 	 * e.g. verify that new path is compatible with the current
+ 	 * configuration.
+ 	 */
+-	int (*verify_path)(struct dasd_device *, __u8);
+ 	int (*pe_handler)(struct dasd_device *, __u8, __u8);
+ 
+ 	/*
+diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
+index b9febc581b1f4..8d1b2771c1aa0 100644
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -638,6 +638,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
+ 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
+ 	int ret;
+ 
++	/* this is an error in the caller */
++	if (cp->initialized)
++		return -EBUSY;
++
+ 	/*
+ 	 * We only support prefetching the channel program. We assume all channel
+ 	 * programs executed by supported guests likewise support prefetching.
+diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
+index ccb061ab0a0ad..7231de2767a96 100644
+--- a/drivers/scsi/BusLogic.c
++++ b/drivers/scsi/BusLogic.c
+@@ -3078,11 +3078,11 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
+ 		ccb->opcode = BLOGIC_INITIATOR_CCB_SG;
+ 		ccb->datalen = count * sizeof(struct blogic_sg_seg);
+ 		if (blogic_multimaster_type(adapter))
+-			ccb->data = (void *)((unsigned int) ccb->dma_handle +
++			ccb->data = (unsigned int) ccb->dma_handle +
+ 					((unsigned long) &ccb->sglist -
+-					(unsigned long) ccb));
++					(unsigned long) ccb);
+ 		else
+-			ccb->data = ccb->sglist;
++			ccb->data = virt_to_32bit_virt(ccb->sglist);
+ 
+ 		scsi_for_each_sg(command, sg, count, i) {
+ 			ccb->sglist[i].segbytes = sg_dma_len(sg);
+diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
+index 6182cc8a0344a..e081ad47d1cf4 100644
+--- a/drivers/scsi/BusLogic.h
++++ b/drivers/scsi/BusLogic.h
+@@ -814,7 +814,7 @@ struct blogic_ccb {
+ 	unsigned char cdblen;				/* Byte 2 */
+ 	unsigned char sense_datalen;			/* Byte 3 */
+ 	u32 datalen;					/* Bytes 4-7 */
+-	void *data;					/* Bytes 8-11 */
++	u32 data;					/* Bytes 8-11 */
+ 	unsigned char:8;				/* Byte 12 */
+ 	unsigned char:8;				/* Byte 13 */
+ 	enum blogic_adapter_status adapter_status;	/* Byte 14 */
+diff --git a/drivers/scsi/aic7xxx/scsi_message.h b/drivers/scsi/aic7xxx/scsi_message.h
+index a7515c3039edb..53343a6d8ae19 100644
+--- a/drivers/scsi/aic7xxx/scsi_message.h
++++ b/drivers/scsi/aic7xxx/scsi_message.h
+@@ -3,6 +3,17 @@
+  * $FreeBSD: src/sys/cam/scsi/scsi_message.h,v 1.2 2000/05/01 20:21:29 peter Exp $
+  */
+ 
++/* Messages (1 byte) */		     /* I/T (M)andatory or (O)ptional */
++#define MSG_SAVEDATAPOINTER	0x02 /* O/O */
++#define MSG_RESTOREPOINTERS	0x03 /* O/O */
++#define MSG_DISCONNECT		0x04 /* O/O */
++#define MSG_MESSAGE_REJECT	0x07 /* M/M */
++#define MSG_NOOP		0x08 /* M/M */
++
++/* Messages (2 byte) */
++#define MSG_SIMPLE_Q_TAG	0x20 /* O/O */
++#define MSG_IGN_WIDE_RESIDUE	0x23 /* O/O */
++
+ /* Identify message */		     /* M/M */	
+ #define MSG_IDENTIFYFLAG	0x80 
+ #define MSG_IDENTIFY_DISCFLAG	0x40 
+diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
+index 19cf418928faa..e3d03d744713d 100644
+--- a/drivers/scsi/libsas/sas_port.c
++++ b/drivers/scsi/libsas/sas_port.c
+@@ -25,7 +25,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
+ 
+ static void sas_resume_port(struct asd_sas_phy *phy)
+ {
+-	struct domain_device *dev;
++	struct domain_device *dev, *n;
+ 	struct asd_sas_port *port = phy->port;
+ 	struct sas_ha_struct *sas_ha = phy->ha;
+ 	struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
+@@ -44,7 +44,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
+ 	 * 1/ presume every device came back
+ 	 * 2/ force the next revalidation to check all expander phys
+ 	 */
+-	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
++	list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
+ 		int i, rc;
+ 
+ 		rc = sas_notify_lldd_dev_found(dev);
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 1b1a57f46989a..c2a38a172904c 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -3709,11 +3709,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 	case HW_EVENT_PHY_START_STATUS:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n",
+ 			   status);
+-		if (status == 0) {
++		if (status == 0)
+ 			phy->phy_state = 1;
+-			if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+-					phy->enable_completion != NULL)
+-				complete(phy->enable_completion);
++
++		if (pm8001_ha->flags == PM8001F_RUN_TIME &&
++				phy->enable_completion != NULL) {
++			complete(phy->enable_completion);
++			phy->enable_completion = NULL;
+ 		}
+ 		break;
+ 	case HW_EVENT_SAS_PHY_UP:
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index bd626ef876dac..4f3ec2bba8c95 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -1144,8 +1144,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
+ 		goto err_out_shost;
+ 	}
+ 	list_add_tail(&pm8001_ha->list, &hba_list);
+-	scsi_scan_host(pm8001_ha->shost);
+ 	pm8001_ha->flags = PM8001F_RUN_TIME;
++	scsi_scan_host(pm8001_ha->shost);
+ 	return 0;
+ 
+ err_out_shost:
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index a98d4496ff8b6..0a637609504e8 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -264,12 +264,17 @@ void pm8001_scan_start(struct Scsi_Host *shost)
+ 	int i;
+ 	struct pm8001_hba_info *pm8001_ha;
+ 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	DECLARE_COMPLETION_ONSTACK(completion);
+ 	pm8001_ha = sha->lldd_ha;
+ 	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
+ 	if (pm8001_ha->chip_id == chip_8001)
+ 		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
+-	for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
++	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
++		pm8001_ha->phy[i].enable_completion = &completion;
+ 		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
++		wait_for_completion(&completion);
++		msleep(300);
++	}
+ }
+ 
+ int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
+index c6b0834e38061..5de7adfabd57b 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.c
++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
+@@ -3485,13 +3485,13 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	pm8001_dbg(pm8001_ha, INIT,
+ 		   "phy start resp status:0x%x, phyid:0x%x\n",
+ 		   status, phy_id);
+-	if (status == 0) {
++	if (status == 0)
+ 		phy->phy_state = PHY_LINK_DOWN;
+-		if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+-				phy->enable_completion != NULL) {
+-			complete(phy->enable_completion);
+-			phy->enable_completion = NULL;
+-		}
++
++	if (pm8001_ha->flags == PM8001F_RUN_TIME &&
++			phy->enable_completion != NULL) {
++		complete(phy->enable_completion);
++		phy->enable_completion = NULL;
+ 	}
+ 	return 0;
+ 
+diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
+index a981f261b3043..aee3cfc7142a4 100644
+--- a/drivers/scsi/ufs/ufs-mediatek.c
++++ b/drivers/scsi/ufs/ufs-mediatek.c
+@@ -922,6 +922,7 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
+ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ {
+ 	int err;
++	struct arm_smccc_res res;
+ 
+ 	if (ufshcd_is_link_hibern8(hba)) {
+ 		err = ufs_mtk_link_set_lpm(hba);
+@@ -941,6 +942,9 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ 			goto fail;
+ 	}
+ 
++	if (ufshcd_is_link_off(hba))
++		ufs_mtk_device_reset_ctrl(0, res);
++
+ 	return 0;
+ fail:
+ 	/*
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index 0287366874882..fb45e6af66381 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -1375,11 +1375,13 @@ poll_mode:
+ 	ret = spi_register_controller(ctlr);
+ 	if (ret != 0) {
+ 		dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
+-		goto out_free_irq;
++		goto out_release_dma;
+ 	}
+ 
+ 	return ret;
+ 
++out_release_dma:
++	dspi_release_dma(dspi);
+ out_free_irq:
+ 	if (dspi->irq)
+ 		free_irq(dspi->irq, dspi);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 8da4fe475b84b..6ae7418f648ca 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -823,16 +823,29 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
+ 
+ 	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
+ 		if (!(spi->mode & SPI_NO_CS)) {
+-			if (spi->cs_gpiod)
+-				/* polarity handled by gpiolib */
+-				gpiod_set_value_cansleep(spi->cs_gpiod,
+-							 enable1);
+-			else
++			if (spi->cs_gpiod) {
++				/*
++				 * Historically ACPI has no means of the GPIO polarity and
++				 * thus the SPISerialBus() resource defines it on the per-chip
++				 * basis. In order to avoid a chain of negations, the GPIO
++				 * polarity is considered being Active High. Even for the cases
++				 * when _DSD() is involved (in the updated versions of ACPI)
++				 * the GPIO CS polarity must be defined Active High to avoid
++				 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
++				 * into account.
++				 */
++				if (has_acpi_companion(&spi->dev))
++					gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
++				else
++					/* Polarity handled by GPIO library */
++					gpiod_set_value_cansleep(spi->cs_gpiod, enable1);
++			} else {
+ 				/*
+ 				 * invert the enable line, as active low is
+ 				 * default for SPI.
+ 				 */
+ 				gpio_set_value_cansleep(spi->cs_gpio, !enable);
++			}
+ 		}
+ 		/* Some SPI masters need both GPIO CS & slave_select */
+ 		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+@@ -3463,9 +3476,12 @@ int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
+ 
+ 	if (spi->controller->set_cs_timing &&
+ 	    !(spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) {
++		mutex_lock(&spi->controller->io_mutex);
++
+ 		if (spi->controller->auto_runtime_pm) {
+ 			status = pm_runtime_get_sync(parent);
+ 			if (status < 0) {
++				mutex_unlock(&spi->controller->io_mutex);
+ 				pm_runtime_put_noidle(parent);
+ 				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ 					status);
+@@ -3476,11 +3492,13 @@ int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
+ 								hold, inactive);
+ 			pm_runtime_mark_last_busy(parent);
+ 			pm_runtime_put_autosuspend(parent);
+-			return status;
+ 		} else {
+-			return spi->controller->set_cs_timing(spi, setup, hold,
++			status = spi->controller->set_cs_timing(spi, setup, hold,
+ 							      inactive);
+ 		}
++
++		mutex_unlock(&spi->controller->io_mutex);
++		return status;
+ 	}
+ 
+ 	if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
+diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
+index 3536c03ff5235..0d50c1e190bf4 100644
+--- a/drivers/staging/emxx_udc/emxx_udc.c
++++ b/drivers/staging/emxx_udc/emxx_udc.c
+@@ -2065,7 +2065,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
+ 			struct nbu2ss_ep *ep,
+ 			int status)
+ {
+-	struct nbu2ss_req *req;
++	struct nbu2ss_req *req, *n;
+ 
+ 	/* Endpoint Disable */
+ 	_nbu2ss_epn_exit(udc, ep);
+@@ -2077,7 +2077,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
+ 		return 0;
+ 
+ 	/* called with irqs blocked */
+-	list_for_each_entry(req, &ep->queue, queue) {
++	list_for_each_entry_safe(req, n, &ep->queue, queue) {
+ 		_nbu2ss_ep_done(ep, req, status);
+ 	}
+ 
+diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
+index dfd71e99e872e..eab534dc4bcc0 100644
+--- a/drivers/staging/iio/cdc/ad7746.c
++++ b/drivers/staging/iio/cdc/ad7746.c
+@@ -700,7 +700,6 @@ static int ad7746_probe(struct i2c_client *client,
+ 		indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
+ 	else
+ 		indio_dev->num_channels =  ARRAY_SIZE(ad7746_channels) - 2;
+-	indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+ 
+ 	if (pdata) {
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 5ecb9f18a53de..9e8cd07179d71 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1400,7 +1400,7 @@ void transport_init_se_cmd(
+ 	cmd->orig_fe_lun = unpacked_lun;
+ 
+ 	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
+-		cmd->cpuid = smp_processor_id();
++		cmd->cpuid = raw_smp_processor_id();
+ 
+ 	cmd->state_active = false;
+ }
+diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+index d1248ba943a4e..62c0aa5d07837 100644
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+@@ -237,6 +237,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
+ 	if (ACPI_FAILURE(status))
+ 		trip_cnt = 0;
+ 	else {
++		int i;
++
+ 		int34x_thermal_zone->aux_trips =
+ 			kcalloc(trip_cnt,
+ 				sizeof(*int34x_thermal_zone->aux_trips),
+@@ -247,6 +249,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
+ 		}
+ 		trip_mask = BIT(trip_cnt) - 1;
+ 		int34x_thermal_zone->aux_trip_nr = trip_cnt;
++		for (i = 0; i < trip_cnt; ++i)
++			int34x_thermal_zone->aux_trips[i] = THERMAL_TEMP_INVALID;
+ 	}
+ 
+ 	trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone);
+diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
+index 295742e839602..4d8edc61a78b2 100644
+--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
+@@ -166,7 +166,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
+ 	if (thres_reg_value)
+ 		*temp = zonedev->tj_max - thres_reg_value * 1000;
+ 	else
+-		*temp = 0;
++		*temp = THERMAL_TEMP_INVALID;
+ 	pr_debug("sys_get_trip_temp %d\n", *temp);
+ 
+ 	return 0;
+diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+index b460b56e981cc..232fd0b333251 100644
+--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
++++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+@@ -441,7 +441,7 @@ static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm,
+ 
+ 	if (args.args_count != 1 || args.args[0] >= ADC5_MAX_CHANNEL) {
+ 		dev_err(dev, "%s: invalid ADC channel number %d\n", name, chan);
+-		return ret;
++		return -EINVAL;
+ 	}
+ 	channel->adc_channel = args.args[0];
+ 
+diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
+index 7288aaf01ae6a..5631319f7b205 100644
+--- a/drivers/thunderbolt/dma_port.c
++++ b/drivers/thunderbolt/dma_port.c
+@@ -366,15 +366,15 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
+ 			void *buf, size_t size)
+ {
+ 	unsigned int retries = DMA_PORT_RETRIES;
+-	unsigned int offset;
+-
+-	offset = address & 3;
+-	address = address & ~3;
+ 
+ 	do {
+-		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
++		unsigned int offset;
++		size_t nbytes;
+ 		int ret;
+ 
++		offset = address & 3;
++		nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);
++
+ 		ret = dma_port_flash_read_block(dma, address, dma->buf,
+ 						ALIGN(nbytes, 4));
+ 		if (ret) {
+@@ -386,6 +386,7 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
+ 			return ret;
+ 		}
+ 
++		nbytes -= offset;
+ 		memcpy(buf, dma->buf + offset, nbytes);
+ 
+ 		size -= nbytes;
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index 680bc738dd66d..671d72af8ba13 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -68,15 +68,15 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
+ 	unsigned int retries = USB4_DATA_RETRIES;
+ 	unsigned int offset;
+ 
+-	offset = address & 3;
+-	address = address & ~3;
+-
+ 	do {
+-		size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
+ 		unsigned int dwaddress, dwords;
+ 		u8 data[USB4_DATA_DWORDS * 4];
++		size_t nbytes;
+ 		int ret;
+ 
++		offset = address & 3;
++		nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4);
++
+ 		dwaddress = address / 4;
+ 		dwords = ALIGN(nbytes, 4) / 4;
+ 
+@@ -87,6 +87,7 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
+ 			return ret;
+ 		}
+ 
++		nbytes -= offset;
+ 		memcpy(buf, data + offset, nbytes);
+ 
+ 		size -= nbytes;
+diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
+index 52bb21205bb68..34aa2714f3c93 100644
+--- a/drivers/tty/serial/8250/8250.h
++++ b/drivers/tty/serial/8250/8250.h
+@@ -88,6 +88,7 @@ struct serial8250_config {
+ #define UART_BUG_NOMSR	(1 << 2)	/* UART has buggy MSR status bits (Au1x00) */
+ #define UART_BUG_THRE	(1 << 3)	/* UART has buggy THRE reassertion */
+ #define UART_BUG_PARITY	(1 << 4)	/* UART mishandles parity if FIFO enabled */
++#define UART_BUG_TXRACE	(1 << 5)	/* UART Tx fails to set remote DR */
+ 
+ 
+ #ifdef CONFIG_SERIAL_8250_SHARE_IRQ
+diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+index c33e02cbde930..ec0d1da71a203 100644
+--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+@@ -403,6 +403,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
+ 	port.port.status = UPSTAT_SYNC_FIFO;
+ 	port.port.dev = &pdev->dev;
+ 	port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
++	port.bugs |= UART_BUG_TXRACE;
+ 
+ 	rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
+ 	if (rc < 0)
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 9e204f9b799a1..a3a0154da567d 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -714,6 +714,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
+ 	{ "APMC0D08", 0},
+ 	{ "AMD0020", 0 },
+ 	{ "AMDI0020", 0 },
++	{ "AMDI0022", 0 },
+ 	{ "BRCM2032", 0 },
+ 	{ "HISI0031", 0 },
+ 	{ },
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 689d8227f95f7..780cc99732b62 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -56,6 +56,8 @@ struct serial_private {
+ 	int			line[];
+ };
+ 
++#define PCI_DEVICE_ID_HPE_PCI_SERIAL	0x37e
++
+ static const struct pci_device_id pci_use_msi[] = {
+ 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+ 			 0xA000, 0x1000) },
+@@ -63,6 +65,8 @@ static const struct pci_device_id pci_use_msi[] = {
+ 			 0xA000, 0x1000) },
+ 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922,
+ 			 0xA000, 0x1000) },
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
++			 PCI_ANY_ID, PCI_ANY_ID) },
+ 	{ }
+ };
+ 
+@@ -1997,6 +2001,16 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
+ 		.init		= pci_hp_diva_init,
+ 		.setup		= pci_hp_diva_setup,
+ 	},
++	/*
++	 * HPE PCI serial device
++	 */
++	{
++		.vendor         = PCI_VENDOR_ID_HP_3PAR,
++		.device         = PCI_DEVICE_ID_HPE_PCI_SERIAL,
++		.subvendor      = PCI_ANY_ID,
++		.subdevice      = PCI_ANY_ID,
++		.setup		= pci_hp_diva_setup,
++	},
+ 	/*
+ 	 * Intel
+ 	 */
+@@ -3944,21 +3958,26 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
+ 	uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+ 	uart.port.uartclk = board->base_baud * 16;
+ 
+-	if (pci_match_id(pci_use_msi, dev)) {
+-		dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
+-		pci_set_master(dev);
+-		rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
++	if (board->flags & FL_NOIRQ) {
++		uart.port.irq = 0;
+ 	} else {
+-		dev_dbg(&dev->dev, "Using legacy interrupts\n");
+-		rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+-	}
+-	if (rc < 0) {
+-		kfree(priv);
+-		priv = ERR_PTR(rc);
+-		goto err_deinit;
++		if (pci_match_id(pci_use_msi, dev)) {
++			dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
++			pci_set_master(dev);
++			rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
++		} else {
++			dev_dbg(&dev->dev, "Using legacy interrupts\n");
++			rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
++		}
++		if (rc < 0) {
++			kfree(priv);
++			priv = ERR_PTR(rc);
++			goto err_deinit;
++		}
++
++		uart.port.irq = pci_irq_vector(dev, 0);
+ 	}
+ 
+-	uart.port.irq = pci_irq_vector(dev, 0);
+ 	uart.port.dev = &dev->dev;
+ 
+ 	for (i = 0; i < nr_ports; i++) {
+@@ -4973,6 +4992,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	{	PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+ 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 		pbn_b2_1_115200 },
++	/* HPE PCI serial device */
++	{	PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_b1_1_115200 },
+ 
+ 	{	PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
+ 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index b0af13074cd36..6e141429c9808 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1815,6 +1815,18 @@ void serial8250_tx_chars(struct uart_8250_port *up)
+ 	count = up->tx_loadsz;
+ 	do {
+ 		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
++		if (up->bugs & UART_BUG_TXRACE) {
++			/*
++			 * The Aspeed BMC virtual UARTs have a bug where data
++			 * may get stuck in the BMC's Tx FIFO from bursts of
++			 * writes on the APB interface.
++			 *
++			 * Delay back-to-back writes by a read cycle to avoid
++			 * stalling the VUART. Read a register that won't have
++			 * side-effects and discard the result.
++			 */
++			serial_in(up, UART_SCR);
++		}
+ 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ 		port->icount.tx++;
+ 		if (uart_circ_empty(xmit))
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index 1b61d26bb7afe..43e55e6abea66 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -1519,6 +1519,8 @@ static int __init max310x_uart_init(void)
+ 
+ #ifdef CONFIG_SPI_MASTER
+ 	ret = spi_register_driver(&max310x_spi_driver);
++	if (ret)
++		uart_unregister_driver(&max310x_uart);
+ #endif
+ 
+ 	return ret;
+diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
+index 5690c09cc0417..944a4c0105795 100644
+--- a/drivers/tty/serial/rp2.c
++++ b/drivers/tty/serial/rp2.c
+@@ -195,7 +195,6 @@ struct rp2_card {
+ 	void __iomem			*bar0;
+ 	void __iomem			*bar1;
+ 	spinlock_t			card_lock;
+-	struct completion		fw_loaded;
+ };
+ 
+ #define RP_ID(prod) PCI_VDEVICE(RP, (prod))
+@@ -664,17 +663,10 @@ static void rp2_remove_ports(struct rp2_card *card)
+ 	card->initialized_ports = 0;
+ }
+ 
+-static void rp2_fw_cb(const struct firmware *fw, void *context)
++static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw)
+ {
+-	struct rp2_card *card = context;
+ 	resource_size_t phys_base;
+-	int i, rc = -ENOENT;
+-
+-	if (!fw) {
+-		dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n",
+-			RP2_FW_NAME);
+-		goto no_fw;
+-	}
++	int i, rc = 0;
+ 
+ 	phys_base = pci_resource_start(card->pdev, 1);
+ 
+@@ -720,23 +712,13 @@ static void rp2_fw_cb(const struct firmware *fw, void *context)
+ 		card->initialized_ports++;
+ 	}
+ 
+-	release_firmware(fw);
+-no_fw:
+-	/*
+-	 * rp2_fw_cb() is called from a workqueue long after rp2_probe()
+-	 * has already returned success.  So if something failed here,
+-	 * we'll just leave the now-dormant device in place until somebody
+-	 * unbinds it.
+-	 */
+-	if (rc)
+-		dev_warn(&card->pdev->dev, "driver initialization failed\n");
+-
+-	complete(&card->fw_loaded);
++	return rc;
+ }
+ 
+ static int rp2_probe(struct pci_dev *pdev,
+ 				   const struct pci_device_id *id)
+ {
++	const struct firmware *fw;
+ 	struct rp2_card *card;
+ 	struct rp2_uart_port *ports;
+ 	void __iomem * const *bars;
+@@ -747,7 +729,6 @@ static int rp2_probe(struct pci_dev *pdev,
+ 		return -ENOMEM;
+ 	pci_set_drvdata(pdev, card);
+ 	spin_lock_init(&card->card_lock);
+-	init_completion(&card->fw_loaded);
+ 
+ 	rc = pcim_enable_device(pdev);
+ 	if (rc)
+@@ -780,21 +761,23 @@ static int rp2_probe(struct pci_dev *pdev,
+ 		return -ENOMEM;
+ 	card->ports = ports;
+ 
+-	rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
+-			      IRQF_SHARED, DRV_NAME, card);
+-	if (rc)
++	rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev);
++	if (rc < 0) {
++		dev_err(&pdev->dev, "cannot find '%s' firmware image\n",
++			RP2_FW_NAME);
+ 		return rc;
++	}
+ 
+-	/*
+-	 * Only catastrophic errors (e.g. ENOMEM) are reported here.
+-	 * If the FW image is missing, we'll find out in rp2_fw_cb()
+-	 * and print an error message.
+-	 */
+-	rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev,
+-				     GFP_KERNEL, card, rp2_fw_cb);
++	rc = rp2_load_firmware(card, fw);
++
++	release_firmware(fw);
++	if (rc < 0)
++		return rc;
++
++	rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
++			      IRQF_SHARED, DRV_NAME, card);
+ 	if (rc)
+ 		return rc;
+-	dev_dbg(&pdev->dev, "waiting for firmware blob...\n");
+ 
+ 	return 0;
+ }
+@@ -803,7 +786,6 @@ static void rp2_remove(struct pci_dev *pdev)
+ {
+ 	struct rp2_card *card = pci_get_drvdata(pdev);
+ 
+-	wait_for_completion(&card->fw_loaded);
+ 	rp2_remove_ports(card);
+ }
+ 
+diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
+index bbae072a125db..222032792d6c2 100644
+--- a/drivers/tty/serial/serial-tegra.c
++++ b/drivers/tty/serial/serial-tegra.c
+@@ -338,7 +338,7 @@ static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
+ 
+ 	do {
+ 		lsr = tegra_uart_read(tup, UART_LSR);
+-		if ((lsr | UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
++		if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
+ 			break;
+ 		udelay(1);
+ 	} while (--tmout);
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 43f02ed055d5e..18255eb90df3d 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -865,9 +865,11 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
+ 		goto check_and_exit;
+ 	}
+ 
+-	retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
+-	if (retval && (change_irq || change_port))
+-		goto exit;
++	if (change_irq || change_port) {
++		retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
++		if (retval)
++			goto exit;
++	}
+ 
+ 	/*
+ 	 * Ask the low level driver to verify the settings.
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index e1179e74a2b89..3b1aaa93d750e 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -1023,10 +1023,10 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
+ {
+ 	unsigned int bits;
+ 
++	if (rx_trig >= port->fifosize)
++		rx_trig = port->fifosize - 1;
+ 	if (rx_trig < 1)
+ 		rx_trig = 1;
+-	if (rx_trig >= port->fifosize)
+-		rx_trig = port->fifosize;
+ 
+ 	/* HSCIF can be set to an arbitrary level. */
+ 	if (sci_getreg(port, HSRTRGR)->size) {
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
+index 56707b6b0f57c..c083985e387b2 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.c
++++ b/drivers/usb/cdns3/cdnsp-gadget.c
+@@ -422,17 +422,17 @@ unmap:
+ int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
+ {
+ 	struct cdnsp_device *pdev = pep->pdev;
+-	int ret;
++	int ret_stop = 0;
++	int ret_rem;
+ 
+ 	trace_cdnsp_request_dequeue(preq);
+ 
+-	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
+-		ret = cdnsp_cmd_stop_ep(pdev, pep);
+-		if (ret)
+-			return ret;
+-	}
++	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
++		ret_stop = cdnsp_cmd_stop_ep(pdev, pep);
++
++	ret_rem = cdnsp_remove_request(pdev, preq, pep);
+ 
+-	return cdnsp_remove_request(pdev, preq, pep);
++	return ret_rem ? ret_rem : ret_stop;
+ }
+ 
+ static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 533236366a03b..2218941d35a3f 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1218,7 +1218,12 @@ static int do_proc_bulk(struct usb_dev_state *ps,
+ 	ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
+ 	if (ret)
+ 		return ret;
+-	tbuf = kmalloc(len1, GFP_KERNEL);
++
++	/*
++	 * len1 can be almost arbitrarily large.  Don't WARN if it's
++	 * too big, just fail the request.
++	 */
++	tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN);
+ 	if (!tbuf) {
+ 		ret = -ENOMEM;
+ 		goto done;
+@@ -1696,7 +1701,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ 	if (num_sgs) {
+ 		as->urb->sg = kmalloc_array(num_sgs,
+ 					    sizeof(struct scatterlist),
+-					    GFP_KERNEL);
++					    GFP_KERNEL | __GFP_NOWARN);
+ 		if (!as->urb->sg) {
+ 			ret = -ENOMEM;
+ 			goto error;
+@@ -1731,7 +1736,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ 					(uurb_start - as->usbm->vm_start);
+ 		} else {
+ 			as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
+-					GFP_KERNEL);
++					GFP_KERNEL | __GFP_NOWARN);
+ 			if (!as->urb->transfer_buffer) {
+ 				ret = -ENOMEM;
+ 				goto error;
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index 73f4482d833a7..22ea1f4f2d66d 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -148,8 +148,10 @@ static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)
+ {
+ 	unsigned delay = hub->descriptor->bPwrOn2PwrGood * 2;
+ 
+-	/* Wait at least 100 msec for power to become stable */
+-	return max(delay, 100U);
++	if (!hub->hdev->parent)	/* root hub */
++		return delay;
++	else /* Wait at least 100 msec for power to become stable */
++		return max(delay, 100U);
+ }
+ 
+ static inline int hub_port_debounce_be_connected(struct usb_hub *hub,
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 8585b56d9f2df..a721d0e4d9994 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1236,6 +1236,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
+ 			req->start_sg = sg_next(s);
+ 
+ 		req->num_queued_sgs++;
++		req->num_pending_sgs--;
+ 
+ 		/*
+ 		 * The number of pending SG entries may not correspond to the
+@@ -1243,7 +1244,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
+ 		 * don't include unused SG entries.
+ 		 */
+ 		if (length == 0) {
+-			req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
++			req->num_pending_sgs = 0;
+ 			break;
+ 		}
+ 
+@@ -2839,15 +2840,15 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
+ 	struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
+ 	struct scatterlist *sg = req->sg;
+ 	struct scatterlist *s;
+-	unsigned int pending = req->num_pending_sgs;
++	unsigned int num_queued = req->num_queued_sgs;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+-	for_each_sg(sg, s, pending, i) {
++	for_each_sg(sg, s, num_queued, i) {
+ 		trb = &dep->trb_pool[dep->trb_dequeue];
+ 
+ 		req->sg = sg_next(s);
+-		req->num_pending_sgs--;
++		req->num_queued_sgs--;
+ 
+ 		ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
+ 				trb, event, status, true);
+@@ -2870,7 +2871,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
+ 
+ static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
+ {
+-	return req->num_pending_sgs == 0;
++	return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
+ }
+ 
+ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
+@@ -2879,7 +2880,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
+ {
+ 	int ret;
+ 
+-	if (req->num_pending_sgs)
++	if (req->request.num_mapped_sgs)
+ 		ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
+ 				status);
+ 	else
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index 0c418ce50ba0f..f1b35a39d1ba8 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -1488,7 +1488,7 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
+ 			     struct renesas_usb3_request *usb3_req)
+ {
+ 	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+-	struct renesas_usb3_request *usb3_req_first = usb3_get_request(usb3_ep);
++	struct renesas_usb3_request *usb3_req_first;
+ 	unsigned long flags;
+ 	int ret = -EAGAIN;
+ 	u32 enable_bits = 0;
+@@ -1496,7 +1496,8 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
+ 	spin_lock_irqsave(&usb3->lock, flags);
+ 	if (usb3_ep->halt || usb3_ep->started)
+ 		goto out;
+-	if (usb3_req != usb3_req_first)
++	usb3_req_first = __usb3_get_request(usb3_ep);
++	if (!usb3_req_first || usb3_req != usb3_req_first)
+ 		goto out;
+ 
+ 	if (usb3_pn_change(usb3, usb3_ep->num) < 0)
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6cdea0d00d194..82de51c3db676 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -829,14 +829,10 @@ static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
+ 	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+ 				 cancelled_td_list) {
+ 
+-		/*
+-		 * Doesn't matter what we pass for status, since the core will
+-		 * just overwrite it (because the URB has been unlinked).
+-		 */
+ 		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+ 
+ 		if (td->cancel_status == TD_CLEARED)
+-			xhci_td_cleanup(ep->xhci, td, ring, 0);
++			xhci_td_cleanup(ep->xhci, td, ring, td->status);
+ 
+ 		if (ep->xhci->xhc_state & XHCI_STATE_DYING)
+ 			return;
+@@ -938,14 +934,18 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+ 			continue;
+ 		}
+ 		/*
+-		 * If ring stopped on the TD we need to cancel, then we have to
++		 * If a ring stopped on the TD we need to cancel then we have to
+ 		 * move the xHC endpoint ring dequeue pointer past this TD.
++		 * Rings halted due to STALL may show hw_deq is past the stalled
++		 * TD, but still require a set TR Deq command to flush xHC cache.
+ 		 */
+ 		hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
+ 					 td->urb->stream_id);
+ 		hw_deq &= ~0xf;
+ 
+-		if (trb_in_td(xhci, td->start_seg, td->first_trb,
++		if (td->cancel_status == TD_HALTED) {
++			cached_td = td;
++		} else if (trb_in_td(xhci, td->start_seg, td->first_trb,
+ 			      td->last_trb, hw_deq, false)) {
+ 			switch (td->cancel_status) {
+ 			case TD_CLEARED: /* TD is already no-op */
+diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
+index a3dfc77578ea1..26baba3ab7d73 100644
+--- a/drivers/usb/misc/trancevibrator.c
++++ b/drivers/usb/misc/trancevibrator.c
+@@ -61,9 +61,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
+ 	/* Set speed */
+ 	retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
+ 				 0x01, /* vendor request: set speed */
+-				 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
++				 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ 				 tv->speed, /* speed value */
+-				 0, NULL, 0, USB_CTRL_GET_TIMEOUT);
++				 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ 	if (retval) {
+ 		tv->speed = old;
+ 		dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
+diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
+index b5d6616442635..748139d262633 100644
+--- a/drivers/usb/misc/uss720.c
++++ b/drivers/usb/misc/uss720.c
+@@ -736,6 +736,7 @@ static int uss720_probe(struct usb_interface *intf,
+ 	parport_announce_port(pp);
+ 
+ 	usb_set_intfdata(intf, pp);
++	usb_put_dev(usbdev);
+ 	return 0;
+ 
+ probe_abort:
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index c867592477c9c..5a725ffb76756 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1034,6 +1034,9 @@ static const struct usb_device_id id_table_combined[] = {
+ 	/* Sienna devices */
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
+ 	{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
++	/* IDS GmbH devices */
++	{ USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
++	{ USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
+ 	/* U-Blox devices */
+ 	{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
+ 	{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 3d47c6d72256e..d854e04a4286e 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1567,6 +1567,13 @@
+ #define UNJO_VID			0x22B7
+ #define UNJO_ISODEBUG_V1_PID		0x150D
+ 
++/*
++ * IDS GmbH
++ */
++#define IDS_VID				0x2CAF
++#define IDS_SI31A_PID			0x13A2
++#define IDS_CM31A_PID			0x13A3
++
+ /*
+  * U-Blox products (http://www.u-blox.com).
+  */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c6969ca728390..61d94641ddc08 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1240,6 +1240,10 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),	/* Telit LN940 (MBIM) */
+ 	  .driver_info = NCTRL(0) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff),	/* Telit LE910-S1 (RNDIS) */
++	  .driver_info = NCTRL(2) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff),	/* Telit LE910-S1 (ECM) */
++	  .driver_info = NCTRL(2) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010),				/* Telit SBL FN980 flashing device */
+ 	  .driver_info = NCTRL(0) | ZLP },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index eed9acd1ae089..f2895c274e19d 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -113,6 +113,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
+ 	{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
+ 	{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
++	{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) },
+ 	{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
+ 	{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
+ 	{ }					/* Terminating entry */
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 0f681ddbfd288..6097ee8fccb25 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -158,6 +158,7 @@
+ /* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
+ #define ADLINK_VENDOR_ID		0x0b63
+ #define ADLINK_ND6530_PRODUCT_ID	0x6530
++#define ADLINK_ND6530GC_PRODUCT_ID	0x653a
+ 
+ /* SMART USB Serial Adapter */
+ #define SMART_VENDOR_ID	0x0b8c
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index fe1c13a8849cc..f94d7b20d32e7 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -37,6 +37,7 @@
+ /* Vendor and product ids */
+ #define TI_VENDOR_ID			0x0451
+ #define IBM_VENDOR_ID			0x04b3
++#define STARTECH_VENDOR_ID		0x14b0
+ #define TI_3410_PRODUCT_ID		0x3410
+ #define IBM_4543_PRODUCT_ID		0x4543
+ #define IBM_454B_PRODUCT_ID		0x454b
+@@ -372,6 +373,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
+ 	{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
+ 	{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
+ 	{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
++	{ USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
+ 	{ }	/* terminator */
+ };
+ 
+@@ -410,6 +412,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
+ 	{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
+ 	{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
+ 	{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
++	{ USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
+ 	{ }	/* terminator */
+ };
+ 
+diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
+index cf720e944aaaa..42acdc8b684fe 100644
+--- a/drivers/usb/typec/mux.c
++++ b/drivers/usb/typec/mux.c
+@@ -191,6 +191,7 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
+ 	bool match;
+ 	int nval;
+ 	u16 *val;
++	int ret;
+ 	int i;
+ 
+ 	/*
+@@ -218,10 +219,10 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
+ 	if (!val)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	nval = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
+-	if (nval < 0) {
++	ret = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
++	if (ret < 0) {
+ 		kfree(val);
+-		return ERR_PTR(nval);
++		return ERR_PTR(ret);
+ 	}
+ 
+ 	for (i = 0; i < nval; i++) {
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 52acc884a61f1..b3e3dff464917 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1534,6 +1534,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
+ 			if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
+ 				typec_partner_set_svdm_version(port->partner,
+ 							       PD_VDO_SVDM_VER(p[0]));
++
++			tcpm_ams_start(port, DISCOVER_IDENTITY);
+ 			/* 6.4.4.3.1: Only respond as UFP (device) */
+ 			if (port->data_role == TYPEC_DEVICE &&
+ 			    port->nr_snk_vdo) {
+@@ -1552,14 +1554,19 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
+ 			}
+ 			break;
+ 		case CMD_DISCOVER_SVID:
++			tcpm_ams_start(port, DISCOVER_SVIDS);
+ 			break;
+ 		case CMD_DISCOVER_MODES:
++			tcpm_ams_start(port, DISCOVER_MODES);
+ 			break;
+ 		case CMD_ENTER_MODE:
++			tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
+ 			break;
+ 		case CMD_EXIT_MODE:
++			tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
+ 			break;
+ 		case CMD_ATTENTION:
++			tcpm_ams_start(port, ATTENTION);
+ 			/* Attention command does not have response */
+ 			*adev_action = ADEV_ATTENTION;
+ 			return 0;
+@@ -2267,6 +2274,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
+ 	bool frs_enable;
+ 	int ret;
+ 
++	if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
++		port->vdm_state = VDM_STATE_ERR_BUSY;
++		tcpm_ams_finish(port);
++		mod_vdm_delayed_work(port, 0);
++	}
++
+ 	switch (type) {
+ 	case PD_DATA_SOURCE_CAP:
+ 		for (i = 0; i < cnt; i++)
+@@ -2397,7 +2410,10 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
+ 					   NONE_AMS);
+ 		break;
+ 	case PD_DATA_VENDOR_DEF:
+-		tcpm_handle_vdm_request(port, msg->payload, cnt);
++		if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
++			tcpm_handle_vdm_request(port, msg->payload, cnt);
++		else if (port->negotiated_rev > PD_REV20)
++			tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
+ 		break;
+ 	case PD_DATA_BIST:
+ 		port->bist_request = le32_to_cpu(msg->payload[0]);
+@@ -2439,6 +2455,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
+ 	enum tcpm_state next_state;
+ 
++	/*
++	 * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
++	 * VDM AMS if waiting for VDM responses and will be handled later.
++	 */
++	if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
++		port->vdm_state = VDM_STATE_ERR_BUSY;
++		tcpm_ams_finish(port);
++		mod_vdm_delayed_work(port, 0);
++	}
++
+ 	switch (type) {
+ 	case PD_CTRL_GOOD_CRC:
+ 	case PD_CTRL_PING:
+@@ -2697,7 +2723,14 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
+ 	enum pd_ext_msg_type type = pd_header_type_le(msg->header);
+ 	unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
+ 
+-	if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) {
++	/* stopping VDM state machine if interrupted by other Messages */
++	if (tcpm_vdm_ams(port)) {
++		port->vdm_state = VDM_STATE_ERR_BUSY;
++		tcpm_ams_finish(port);
++		mod_vdm_delayed_work(port, 0);
++	}
++
++	if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
+ 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
+ 		tcpm_log(port, "Unchunked extended messages unsupported");
+ 		return;
+@@ -2791,7 +2824,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
+ 				 "Data role mismatch, initiating error recovery");
+ 			tcpm_set_state(port, ERROR_RECOVERY, 0);
+ 		} else {
+-			if (msg->header & PD_HEADER_EXT_HDR)
++			if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
+ 				tcpm_pd_ext_msg_request(port, msg);
+ 			else if (cnt)
+ 				tcpm_pd_data_request(port, msg);
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 1e266f083bf8a..7104ddb9696d6 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -717,8 +717,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 	ucsi_send_command(con->ucsi, command, NULL, 0);
+ 
+ 	/* 3. ACK connector change */
+-	clear_bit(EVENT_PENDING, &ucsi->flags);
+ 	ret = ucsi_acknowledge_connector_change(ucsi);
++	clear_bit(EVENT_PENDING, &ucsi->flags);
+ 	if (ret) {
+ 		dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
+ 		goto out_unlock;
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 4d2809c7d4e32..a0e86c5d7cd7a 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -15,6 +15,7 @@
+ #include <linux/mlx5/vport.h>
+ #include <linux/mlx5/fs.h>
+ #include <linux/mlx5/mlx5_ifc_vdpa.h>
++#include <linux/mlx5/mpfs.h>
+ #include "mlx5_vdpa.h"
+ 
+ MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
+@@ -1854,11 +1855,16 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
+ static void mlx5_vdpa_free(struct vdpa_device *vdev)
+ {
+ 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
++	struct mlx5_core_dev *pfmdev;
+ 	struct mlx5_vdpa_net *ndev;
+ 
+ 	ndev = to_mlx5_vdpa_ndev(mvdev);
+ 
+ 	free_resources(ndev);
++	if (!is_zero_ether_addr(ndev->config.mac)) {
++		pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
++		mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
++	}
+ 	mlx5_vdpa_free_resources(&ndev->mvdev);
+ 	mutex_destroy(&ndev->reslock);
+ }
+@@ -1980,6 +1986,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
+ 	struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
+ 	struct mlx5_core_dev *mdev = madev->mdev;
+ 	struct virtio_net_config *config;
++	struct mlx5_core_dev *pfmdev;
+ 	struct mlx5_vdpa_dev *mvdev;
+ 	struct mlx5_vdpa_net *ndev;
+ 	u32 max_vqs;
+@@ -2008,10 +2015,17 @@ static int mlx5v_probe(struct auxiliary_device *adev,
+ 	if (err)
+ 		goto err_mtu;
+ 
++	if (!is_zero_ether_addr(config->mac)) {
++		pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
++		err = mlx5_mpfs_add_mac(pfmdev, config->mac);
++		if (err)
++			goto err_mtu;
++	}
++
+ 	mvdev->vdev.dma_dev = mdev->device;
+ 	err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
+ 	if (err)
+-		goto err_mtu;
++		goto err_mpfs;
+ 
+ 	err = alloc_resources(ndev);
+ 	if (err)
+@@ -2028,6 +2042,9 @@ err_reg:
+ 	free_resources(ndev);
+ err_res:
+ 	mlx5_vdpa_free_resources(&ndev->mvdev);
++err_mpfs:
++	if (!is_zero_ether_addr(config->mac))
++		mlx5_mpfs_del_mac(pfmdev, config->mac);
+ err_mtu:
+ 	mutex_destroy(&ndev->reslock);
+ 	put_device(&mvdev->vdev.dev);
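
The mlx5_vnet changes follow the kernel's usual goto-unwind convention: each new acquisition (here, the MPFS MAC entry) gets its own error label so later failures release it in reverse order. A generic sketch of the pattern, with made-up helpers standing in for the mlx5 calls:

#include <stdio.h>

static int add_mac(void)   { printf("mpfs: add mac\n"); return 0; }
static void del_mac(void)  { printf("mpfs: del mac\n"); }
static int alloc_res(void) { printf("alloc resources\n"); return -1; }

static int probe(void)
{
	int err;

	err = add_mac();
	if (err)
		goto err_mtu;

	err = alloc_res();	/* fails in this demo */
	if (err)
		goto err_mpfs;	/* was err_mtu, which leaked the MAC entry */

	return 0;

err_mpfs:
	del_mac();
err_mtu:
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}
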
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 31251d11d576c..a3d42f29d0235 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -1842,7 +1842,9 @@ static void afs_rename_edit_dir(struct afs_operation *op)
+ 	new_inode = d_inode(new_dentry);
+ 	if (new_inode) {
+ 		spin_lock(&new_inode->i_lock);
+-		if (new_inode->i_nlink > 0)
++		if (S_ISDIR(new_inode->i_mode))
++			clear_nlink(new_inode);
++		else if (new_inode->i_nlink > 0)
+ 			drop_nlink(new_inode);
+ 		spin_unlock(&new_inode->i_lock);
+ 	}
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index a5a6a7930e5ed..389609bc5aa3f 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1244,6 +1244,9 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate)
+ 
+ 	lockdep_assert_held(&bdev->bd_mutex);
+ 
++	if (!(disk->flags & GENHD_FL_UP))
++		return -ENXIO;
++
+ rescan:
+ 	ret = blk_drop_partitions(bdev);
+ 	if (ret)
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 910769d5fcdb4..1eb5d22d53735 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4975,7 +4975,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ 		  u64 start, u64 len)
+ {
+ 	int ret = 0;
+-	u64 off = start;
++	u64 off;
+ 	u64 max = start + len;
+ 	u32 flags = 0;
+ 	u32 found_type;
+@@ -5010,6 +5010,11 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ 		goto out_free_ulist;
+ 	}
+ 
++	/*
++	 * We can't initialize 'off' to 'start', as that could miss extents
++	 * due to extent item merging.
++	 */
++	off = 0;
+ 	start = round_down(start, btrfs_inode_sectorsize(inode));
+ 	len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
+ 
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
+index 0abbf050580d1..53ee17f5e382c 100644
+--- a/fs/btrfs/reflink.c
++++ b/fs/btrfs/reflink.c
+@@ -285,6 +285,11 @@ copy_inline_extent:
+ 	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
+ out:
+ 	if (!ret && !trans) {
++		/*
++		 * Release path before starting a new transaction so we don't
++		 * hold locks that would confuse lockdep.
++		 */
++		btrfs_release_path(path);
+ 		/*
+ 		 * No transaction here means we copied the inline extent into a
+ 		 * page of the destination inode.
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 53624fca0747a..d7f1599e69b1f 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1858,8 +1858,6 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
+ 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ 	} else if (ret == -EEXIST) {
+ 		ret = 0;
+-	} else {
+-		BUG(); /* Logic Error */
+ 	}
+ 	iput(inode);
+ 
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 29272d99102c5..7606ce344de56 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -958,6 +958,13 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 	/* Internal types */
+ 	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
+ 
++	/*
++	 * SMB3.0 supports only one cipher and doesn't have an encryption
++	 * negotiation context. Set the cipher type manually.
++	 */
++	if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
++		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
++
+ 	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
+ 					       (struct smb2_sync_hdr *)rsp);
+ 	/*
+@@ -3900,10 +3907,10 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
+ 			 * Related requests use info from previous read request
+ 			 * in chain.
+ 			 */
+-			shdr->SessionId = 0xFFFFFFFF;
++			shdr->SessionId = 0xFFFFFFFFFFFFFFFF;
+ 			shdr->TreeId = 0xFFFFFFFF;
+-			req->PersistentFileId = 0xFFFFFFFF;
+-			req->VolatileFileId = 0xFFFFFFFF;
++			req->PersistentFileId = 0xFFFFFFFFFFFFFFFF;
++			req->VolatileFileId = 0xFFFFFFFFFFFFFFFF;
+ 		}
+ 	}
+ 	if (remaining_bytes > io_parms->length)
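
The smb2_new_read_req() hunk fixes a truncated constant: SessionId, PersistentFileId and VolatileFileId are 64-bit fields, and 0xFFFFFFFF only fills the low half instead of producing the all-ones marker a compounded request needs. A two-line demo of the difference:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t id32 = 0xFFFFFFFF;		/* old constant */
	uint64_t id64 = 0xFFFFFFFFFFFFFFFF;	/* fixed constant */

	/* Prints 0x00000000ffffffff vs 0xffffffffffffffff. */
	printf("0x%016" PRIx64 " vs 0x%016" PRIx64 "\n", id32, id64);
	return 0;
}
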
+diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
+index d6df908dccade..dafcb6ab050dd 100644
+--- a/fs/cifs/trace.h
++++ b/fs/cifs/trace.h
+@@ -12,6 +12,11 @@
+ 
+ #include <linux/tracepoint.h>
+ 
++/*
++ * Please use this 3-part article as a reference for writing new tracepoints:
++ * https://lwn.net/Articles/379903/
++ */
++
+ /* For logging errors in read or write */
+ DECLARE_EVENT_CLASS(smb3_rw_err_class,
+ 	TP_PROTO(unsigned int xid,
+@@ -529,16 +534,16 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class,
+ 	TP_ARGS(xid, func_name, rc),
+ 	TP_STRUCT__entry(
+ 		__field(unsigned int, xid)
+-		__field(const char *, func_name)
++		__string(func_name, func_name)
+ 		__field(int, rc)
+ 	),
+ 	TP_fast_assign(
+ 		__entry->xid = xid;
+-		__entry->func_name = func_name;
++		__assign_str(func_name, func_name);
+ 		__entry->rc = rc;
+ 	),
+ 	TP_printk("\t%s: xid=%u rc=%d",
+-		__entry->func_name, __entry->xid, __entry->rc)
++		__get_str(func_name), __entry->xid, __entry->rc)
+ )
+ 
+ #define DEFINE_SMB3_EXIT_ERR_EVENT(name)          \
+@@ -583,14 +588,14 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class,
+ 	TP_ARGS(xid, func_name),
+ 	TP_STRUCT__entry(
+ 		__field(unsigned int, xid)
+-		__field(const char *, func_name)
++		__string(func_name, func_name)
+ 	),
+ 	TP_fast_assign(
+ 		__entry->xid = xid;
+-		__entry->func_name = func_name;
++		__assign_str(func_name, func_name);
+ 	),
+ 	TP_printk("\t%s: xid=%u",
+-		__entry->func_name, __entry->xid)
++		__get_str(func_name), __entry->xid)
+ )
+ 
+ #define DEFINE_SMB3_ENTER_EXIT_EVENT(name)        \
+@@ -857,16 +862,16 @@ DECLARE_EVENT_CLASS(smb3_reconnect_class,
+ 	TP_STRUCT__entry(
+ 		__field(__u64, currmid)
+ 		__field(__u64, conn_id)
+-		__field(char *, hostname)
++		__string(hostname, hostname)
+ 	),
+ 	TP_fast_assign(
+ 		__entry->currmid = currmid;
+ 		__entry->conn_id = conn_id;
+-		__entry->hostname = hostname;
++		__assign_str(hostname, hostname);
+ 	),
+ 	TP_printk("conn_id=0x%llx server=%s current_mid=%llu",
+ 		__entry->conn_id,
+-		__entry->hostname,
++		__get_str(hostname),
+ 		__entry->currmid)
+ )
+ 
+@@ -891,7 +896,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
+ 	TP_STRUCT__entry(
+ 		__field(__u64, currmid)
+ 		__field(__u64, conn_id)
+-		__field(char *, hostname)
++		__string(hostname, hostname)
+ 		__field(int, credits)
+ 		__field(int, credits_to_add)
+ 		__field(int, in_flight)
+@@ -899,7 +904,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
+ 	TP_fast_assign(
+ 		__entry->currmid = currmid;
+ 		__entry->conn_id = conn_id;
+-		__entry->hostname = hostname;
++		__assign_str(hostname, hostname);
+ 		__entry->credits = credits;
+ 		__entry->credits_to_add = credits_to_add;
+ 		__entry->in_flight = in_flight;
+@@ -907,7 +912,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
+ 	TP_printk("conn_id=0x%llx server=%s current_mid=%llu "
+ 			"credits=%d credit_change=%d in_flight=%d",
+ 		__entry->conn_id,
+-		__entry->hostname,
++		__get_str(hostname),
+ 		__entry->currmid,
+ 		__entry->credits,
+ 		__entry->credits_to_add,
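
The trace.h conversion from __field(char *, ...) to __string()/__assign_str() matters because the ring buffer record outlives the strings it used to point at. A userspace analogue of pointer capture versus copying (an illustration, not the tracing API itself):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event_by_ptr  { const char *hostname; };
struct event_by_copy { char hostname[32]; };

int main(void)
{
	char *hostname = strdup("server01");
	struct event_by_ptr  bad  = { .hostname = hostname };
	struct event_by_copy good;

	snprintf(good.hostname, sizeof(good.hostname), "%s", hostname);

	free(hostname);	/* e.g. the connection is torn down */

	/* bad.hostname now dangles; good.hostname is self-contained. */
	printf("%s\n", good.hostname);
	(void)bad;
	return 0;
}
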
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 1d252164d97b6..8129a430d789d 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -45,10 +45,13 @@ static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
+ static int debugfs_setattr(struct user_namespace *mnt_userns,
+ 			   struct dentry *dentry, struct iattr *ia)
+ {
+-	int ret = security_locked_down(LOCKDOWN_DEBUGFS);
++	int ret;
+ 
+-	if (ret && (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
+-		return ret;
++	if (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) {
++		ret = security_locked_down(LOCKDOWN_DEBUGFS);
++		if (ret)
++			return ret;
++	}
+ 	return simple_setattr(&init_user_ns, dentry, ia);
+ }
+ 
+diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
+index d158a500c25c6..d2103852475fa 100644
+--- a/fs/nfs/filelayout/filelayout.c
++++ b/fs/nfs/filelayout/filelayout.c
+@@ -718,7 +718,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
+ 		if (unlikely(!p))
+ 			goto out_err;
+ 		fl->fh_array[i]->size = be32_to_cpup(p++);
+-		if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
++		if (fl->fh_array[i]->size > NFS_MAXFHSIZE) {
+ 			printk(KERN_ERR "NFS: Too big fh %d received %d\n",
+ 			       i, fl->fh_array[i]->size);
+ 			goto out_err;
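
The filelayout bound is tightened because struct nfs_fh carries a size field in front of its data array, so validating a peer-supplied length against sizeof(struct nfs_fh) lets it exceed data[] by the size of that header. A sketch with the structure as defined in the kernel's nfs headers (NFS_MAXFHSIZE is 128):

#include <stdio.h>

#define NFS_MAXFHSIZE 128	/* value from the kernel headers */

struct nfs_fh {
	unsigned short	size;
	unsigned char	data[NFS_MAXFHSIZE];
};

int main(void)
{
	/* The old check allowed a length up to sizeof(struct nfs_fh),
	 * which is larger than the data[] array it is copied into. */
	printf("sizeof(struct nfs_fh) = %zu, data[] bound = %d\n",
	       sizeof(struct nfs_fh), NFS_MAXFHSIZE);
	return 0;
}
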
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index 441a2fa073c8f..2bcbafded19ec 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -211,7 +211,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
+ 	case SEEK_HOLE:
+ 	case SEEK_DATA:
+ 		ret = nfs42_proc_llseek(filep, offset, whence);
+-		if (ret != -ENOTSUPP)
++		if (ret != -EOPNOTSUPP)
+ 			return ret;
+ 		fallthrough;
+ 	default:
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 820abae88cf04..0b809cc6ad1df 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1682,7 +1682,7 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
+ 		rcu_read_unlock();
+ 		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
+ 
+-		if (!signal_pending(current)) {
++		if (!fatal_signal_pending(current)) {
+ 			if (schedule_timeout(5*HZ) == 0)
+ 				status = -EAGAIN;
+ 			else
+@@ -3458,7 +3458,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
+ 		write_sequnlock(&state->seqlock);
+ 		trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
+ 
+-		if (signal_pending(current))
++		if (fatal_signal_pending(current))
+ 			status = -EINTR;
+ 		else
+ 			if (schedule_timeout(5*HZ) != 0)
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 78c9c4bdef2b6..98b9c1ed366ee 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -1094,15 +1094,16 @@ nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
+ 	struct nfs_page *prev = NULL;
+ 	unsigned int size;
+ 
+-	if (mirror->pg_count != 0) {
+-		prev = nfs_list_entry(mirror->pg_list.prev);
+-	} else {
++	if (list_empty(&mirror->pg_list)) {
+ 		if (desc->pg_ops->pg_init)
+ 			desc->pg_ops->pg_init(desc, req);
+ 		if (desc->pg_error < 0)
+ 			return 0;
+ 		mirror->pg_base = req->wb_pgbase;
+-	}
++		mirror->pg_count = 0;
++		mirror->pg_recoalesce = 0;
++	} else
++		prev = nfs_list_entry(mirror->pg_list.prev);
+ 
+ 	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
+ 		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
+@@ -1127,17 +1128,16 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
+ {
+ 	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+ 
+-
+ 	if (!list_empty(&mirror->pg_list)) {
+ 		int error = desc->pg_ops->pg_doio(desc);
+ 		if (error < 0)
+ 			desc->pg_error = error;
+-		else
++		if (list_empty(&mirror->pg_list)) {
+ 			mirror->pg_bytes_written += mirror->pg_count;
+-	}
+-	if (list_empty(&mirror->pg_list)) {
+-		mirror->pg_count = 0;
+-		mirror->pg_base = 0;
++			mirror->pg_count = 0;
++			mirror->pg_base = 0;
++			mirror->pg_recoalesce = 0;
++		}
+ 	}
+ }
+ 
+@@ -1227,7 +1227,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
+ 
+ 	do {
+ 		list_splice_init(&mirror->pg_list, &head);
+-		mirror->pg_bytes_written -= mirror->pg_count;
+ 		mirror->pg_count = 0;
+ 		mirror->pg_base = 0;
+ 		mirror->pg_recoalesce = 0;
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index f726f8b12b7e8..a5e3628a52733 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1317,6 +1317,11 @@ _pnfs_return_layout(struct inode *ino)
+ {
+ 	struct pnfs_layout_hdr *lo = NULL;
+ 	struct nfs_inode *nfsi = NFS_I(ino);
++	struct pnfs_layout_range range = {
++		.iomode		= IOMODE_ANY,
++		.offset		= 0,
++		.length		= NFS4_MAX_UINT64,
++	};
+ 	LIST_HEAD(tmp_list);
+ 	const struct cred *cred;
+ 	nfs4_stateid stateid;
+@@ -1344,16 +1349,10 @@ _pnfs_return_layout(struct inode *ino)
+ 	}
+ 	valid_layout = pnfs_layout_is_valid(lo);
+ 	pnfs_clear_layoutcommit(ino, &tmp_list);
+-	pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
++	pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
+ 
+-	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
+-		struct pnfs_layout_range range = {
+-			.iomode		= IOMODE_ANY,
+-			.offset		= 0,
+-			.length		= NFS4_MAX_UINT64,
+-		};
++	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
+ 		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
+-	}
+ 
+ 	/* Don't send a LAYOUTRETURN if list was initially empty */
+ 	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 3851bfcdba56e..58bbf334265b7 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -2703,6 +2703,10 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
+ 	void *page;
+ 	int rv;
+ 
++	/* A task may only write when it was the opener of the file. */
++	if (file->f_cred != current_real_cred())
++		return -EPERM;
++
+ 	rcu_read_lock();
+ 	task = pid_task(proc_pid(inode), PIDTYPE_PID);
+ 	if (!task) {
+diff --git a/include/linux/bits.h b/include/linux/bits.h
+index 7f475d59a0974..87d112650dfbb 100644
+--- a/include/linux/bits.h
++++ b/include/linux/bits.h
+@@ -22,7 +22,7 @@
+ #include <linux/build_bug.h>
+ #define GENMASK_INPUT_CHECK(h, l) \
+ 	(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+-		__builtin_constant_p((l) > (h)), (l) > (h), 0)))
++		__is_constexpr((l) > (h)), (l) > (h), 0)))
+ #else
+ /*
+  * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
+diff --git a/include/linux/const.h b/include/linux/const.h
+index 81b8aae5a8559..435ddd72d2c46 100644
+--- a/include/linux/const.h
++++ b/include/linux/const.h
+@@ -3,4 +3,12 @@
+ 
+ #include <vdso/const.h>
+ 
++/*
++ * This returns a constant expression while determining if an argument is
++ * a constant expression, most importantly without evaluating the argument.
++ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
++ */
++#define __is_constexpr(x) \
++	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
++
+ #endif /* _LINUX_CONST_H */
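
The __is_constexpr() helper moved into const.h above relies on a C subtlety: for an integer constant expression x, (long)(x) * 0l is a null pointer constant, so the conditional expression takes the int * type and sizeof matches sizeof(int); otherwise the void * operand dominates. A standalone demo, assuming GCC or Clang (where sizeof(void) is 1 as an extension):

#include <stdio.h>

#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

int main(void)
{
	int n = 5;

	printf("%d\n", __is_constexpr(5));	/* 1: constant expression */
	printf("%d\n", __is_constexpr(n));	/* 0: runtime value */
	printf("%d\n", __is_constexpr(n * 0));	/* 0: still not constant */
	return 0;
}

Unlike the __builtin_constant_p() it replaces in GENMASK_INPUT_CHECK(), this evaluates to a constant expression itself, so the BUILD_BUG_ON_ZERO() check cannot be silently skipped.
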
+diff --git a/include/linux/device.h b/include/linux/device.h
+index ba660731bd258..bec8d09014daa 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -566,7 +566,7 @@ struct device {
+  * @flags: Link flags.
+  * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+  * @kref: Count repeated addition of the same link.
+- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
++ * @rm_work: Work structure used for removing the link.
+  * @supplier_preactivated: Supplier has been made active before consumer probe.
+  */
+ struct device_link {
+@@ -579,9 +579,7 @@ struct device_link {
+ 	u32 flags;
+ 	refcount_t rpm_active;
+ 	struct kref kref;
+-#ifdef CONFIG_SRCU
+-	struct rcu_head rcu_head;
+-#endif
++	struct work_struct rm_work;
+ 	bool supplier_preactivated; /* Owned by consumer probe. */
+ };
+ 
+diff --git a/include/linux/minmax.h b/include/linux/minmax.h
+index c0f57b0c64d90..5433c08fcc685 100644
+--- a/include/linux/minmax.h
++++ b/include/linux/minmax.h
+@@ -2,6 +2,8 @@
+ #ifndef _LINUX_MINMAX_H
+ #define _LINUX_MINMAX_H
+ 
++#include <linux/const.h>
++
+ /*
+  * min()/max()/clamp() macros must accomplish three things:
+  *
+@@ -17,14 +19,6 @@
+ #define __typecheck(x, y) \
+ 	(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
+ 
+-/*
+- * This returns a constant expression while determining if an argument is
+- * a constant expression, most importantly without evaluating the argument.
+- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+- */
+-#define __is_constexpr(x) \
+-	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+-
+ #define __no_side_effects(x, y) \
+ 		(__is_constexpr(x) && __is_constexpr(y))
+ 
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index ab07f09f2bad9..133967c40214b 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -698,6 +698,27 @@ struct mlx5_hv_vhca;
+ #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
+ #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+ 
++enum {
++	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
++	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
++};
++
++enum {
++	MR_CACHE_LAST_STD_ENTRY = 20,
++	MLX5_IMR_MTT_CACHE_ENTRY,
++	MLX5_IMR_KSM_CACHE_ENTRY,
++	MAX_MR_CACHE_ENTRIES
++};
++
++struct mlx5_profile {
++	u64	mask;
++	u8	log_max_qp;
++	struct {
++		int	size;
++		int	limit;
++	} mr_cache[MAX_MR_CACHE_ENTRIES];
++};
++
+ struct mlx5_core_dev {
+ 	struct device *device;
+ 	enum mlx5_coredev_type coredev_type;
+@@ -726,7 +747,7 @@ struct mlx5_core_dev {
+ 	struct mutex		intf_state_mutex;
+ 	unsigned long		intf_state;
+ 	struct mlx5_priv	priv;
+-	struct mlx5_profile	*profile;
++	struct mlx5_profile	profile;
+ 	u32			issi;
+ 	struct mlx5e_resources  mlx5e_res;
+ 	struct mlx5_dm          *dm;
+@@ -1073,18 +1094,6 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
+ 	return mkey & 0xff;
+ }
+ 
+-enum {
+-	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
+-	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
+-};
+-
+-enum {
+-	MR_CACHE_LAST_STD_ENTRY = 20,
+-	MLX5_IMR_MTT_CACHE_ENTRY,
+-	MLX5_IMR_KSM_CACHE_ENTRY,
+-	MAX_MR_CACHE_ENTRIES
+-};
+-
+ /* Async-atomic event notifier used by mlx5 core to forward FW
+  * events received from event queue to mlx5 consumers.
+  * Optimise event queue dispatching.
+@@ -1138,15 +1147,6 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+ 			    struct ib_device *device,
+ 			    struct rdma_netdev_alloc_params *params);
+ 
+-struct mlx5_profile {
+-	u64	mask;
+-	u8	log_max_qp;
+-	struct {
+-		int	size;
+-		int	limit;
+-	} mr_cache[MAX_MR_CACHE_ENTRIES];
+-};
+-
+ enum {
+ 	MLX5_PCI_DEV_IS_VF		= 1 << 0,
+ };
+diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h
+new file mode 100644
+index 0000000000000..bf700c8d55164
+--- /dev/null
++++ b/include/linux/mlx5/mpfs.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
++ * Copyright (c) 2021 Mellanox Technologies Ltd.
++ */
++
++#ifndef _MLX5_MPFS_
++#define _MLX5_MPFS_
++
++struct mlx5_core_dev;
++
++#ifdef CONFIG_MLX5_MPFS
++int  mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
++int  mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
++#else /* #ifndef CONFIG_MLX5_MPFS */
++static inline int  mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
++static inline int  mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
++#endif
++
++#endif
+diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
+index d2e97ee802af5..8a063b2b944d1 100644
+--- a/include/linux/sunrpc/xprt.h
++++ b/include/linux/sunrpc/xprt.h
+@@ -367,6 +367,8 @@ struct rpc_xprt *	xprt_alloc(struct net *net, size_t size,
+ 				unsigned int num_prealloc,
+ 				unsigned int max_req);
+ void			xprt_free(struct rpc_xprt *);
++void			xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
++bool			xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
+ 
+ static inline int
+ xprt_enable_swap(struct rpc_xprt *xprt)
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 911fae42b0c0e..394c4269901cb 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -5756,7 +5756,7 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
+  */
+ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ 				  const u8 *addr, enum nl80211_iftype iftype,
+-				  u8 data_offset);
++				  u8 data_offset, bool is_amsdu);
+ 
+ /**
+  * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
+@@ -5768,7 +5768,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ 					 enum nl80211_iftype iftype)
+ {
+-	return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0);
++	return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
+ }
+ 
+ /**
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index 54c4d5c908a52..233f47f4189c7 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -130,7 +130,6 @@ enum nf_flow_flags {
+ 	NF_FLOW_HW,
+ 	NF_FLOW_HW_DYING,
+ 	NF_FLOW_HW_DEAD,
+-	NF_FLOW_HW_REFRESH,
+ 	NF_FLOW_HW_PENDING,
+ };
+ 
+diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
+index 255e4f4b521f4..ec7823921bd26 100644
+--- a/include/net/pkt_cls.h
++++ b/include/net/pkt_cls.h
+@@ -709,6 +709,17 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
+ 		cls_common->extack = extack;
+ }
+ 
++#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
++static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
++{
++	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
++
++	if (tc_skb_ext)
++		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
++	return tc_skb_ext;
++}
++#endif
++
+ enum tc_matchall_command {
+ 	TC_CLSMATCHALL_REPLACE,
+ 	TC_CLSMATCHALL_DESTROY,
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index 15b1b30f454e4..4ba757aba9b2d 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -128,12 +128,7 @@ void __qdisc_run(struct Qdisc *q);
+ static inline void qdisc_run(struct Qdisc *q)
+ {
+ 	if (qdisc_run_begin(q)) {
+-		/* NOLOCK qdisc must check 'state' under the qdisc seqlock
+-		 * to avoid racing with dev_qdisc_reset()
+-		 */
+-		if (!(q->flags & TCQ_F_NOLOCK) ||
+-		    likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+-			__qdisc_run(q);
++		__qdisc_run(q);
+ 		qdisc_run_end(q);
+ 	}
+ }
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 2d6eb60c58c81..2c4f3527cc098 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -36,6 +36,7 @@ struct qdisc_rate_table {
+ enum qdisc_state_t {
+ 	__QDISC_STATE_SCHED,
+ 	__QDISC_STATE_DEACTIVATED,
++	__QDISC_STATE_MISSED,
+ };
+ 
+ struct qdisc_size_table {
+@@ -159,8 +160,33 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ {
+ 	if (qdisc->flags & TCQ_F_NOLOCK) {
++		if (spin_trylock(&qdisc->seqlock))
++			goto nolock_empty;
++
++		/* If the MISSED flag is set, another thread has already
++		 * set it before the second spin_trylock(), so we can
++		 * return false here to avoid multiple CPUs doing the
++		 * set_bit() and the second spin_trylock() concurrently.
++		 */
++		if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
++			return false;
++
++		/* Set the MISSED flag before the second spin_trylock();
++		 * if the second spin_trylock() returns false, the CPU
++		 * holding the lock will either do the dequeuing for us,
++		 * or it will see the MISSED flag set after releasing the
++		 * lock and reschedule net_tx_action() to do the
++		 * dequeuing.
++		 */
++		set_bit(__QDISC_STATE_MISSED, &qdisc->state);
++
++		/* Retry in case the other CPU did not see the new flag
++		 * before releasing the lock at the end of qdisc_run_end().
++		 */
+ 		if (!spin_trylock(&qdisc->seqlock))
+ 			return false;
++
++nolock_empty:
+ 		WRITE_ONCE(qdisc->empty, false);
+ 	} else if (qdisc_is_running(qdisc)) {
+ 		return false;
+@@ -176,8 +202,15 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ static inline void qdisc_run_end(struct Qdisc *qdisc)
+ {
+ 	write_seqcount_end(&qdisc->running);
+-	if (qdisc->flags & TCQ_F_NOLOCK)
++	if (qdisc->flags & TCQ_F_NOLOCK) {
+ 		spin_unlock(&qdisc->seqlock);
++
++		if (unlikely(test_bit(__QDISC_STATE_MISSED,
++				      &qdisc->state))) {
++			clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
++			__netif_schedule(qdisc);
++		}
++	}
+ }
+ 
+ static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
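
The MISSED-flag handshake added to qdisc_run_begin()/qdisc_run_end() above can be sketched in userspace. This is an illustrative analogue with pthread spinlocks and C11 atomics; run_begin and run_end are made-up stand-ins, not the kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t seqlock;
static atomic_bool missed;

static bool run_begin(void)
{
	if (pthread_spin_trylock(&seqlock) == 0)
		return true;

	/* A miss is already recorded; the lock holder will act on it. */
	if (atomic_load(&missed))
		return false;

	/* Record the miss before retrying, so the holder cannot unlock
	 * without seeing it. */
	atomic_store(&missed, true);

	return pthread_spin_trylock(&seqlock) == 0;
}

static void run_end(void)
{
	pthread_spin_unlock(&seqlock);

	/* A recorded miss means work was deferred; reschedule it. */
	if (atomic_exchange(&missed, false))
		printf("rescheduling deferred dequeue\n");
}

int main(void)
{
	pthread_spin_init(&seqlock, PTHREAD_PROCESS_PRIVATE);
	if (run_begin()) {
		/* ...dequeue packets here... */
		run_end();
	}
	pthread_spin_destroy(&seqlock);
	return 0;
}

Either the contender gets the lock on the retry, or the holder observes the flag in run_end() and reschedules, so no enqueue is silently lost between the two trylocks.
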
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 8487f58da36d2..62e3811e95a78 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2225,13 +2225,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+ 	sk_mem_charge(sk, skb->truesize);
+ }
+ 
+-static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
++static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+ {
+ 	if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+ 		skb_orphan(skb);
+ 		skb->destructor = sock_efree;
+ 		skb->sk = sk;
++		return true;
+ 	}
++	return false;
+ }
+ 
+ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index f6afee209620d..26e6d94d64ed4 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -8,6 +8,7 @@
+  * Note: you must update KVM_API_VERSION if you change this interface.
+  */
+ 
++#include <linux/const.h>
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+ #include <linux/ioctl.h>
+@@ -1834,8 +1835,8 @@ struct kvm_hyperv_eventfd {
+  * conversion after harvesting an entry.  Also, it must not skip any
+  * dirty bits, so that dirty bits are always harvested in sequence.
+  */
+-#define KVM_DIRTY_GFN_F_DIRTY           BIT(0)
+-#define KVM_DIRTY_GFN_F_RESET           BIT(1)
++#define KVM_DIRTY_GFN_F_DIRTY           _BITUL(0)
++#define KVM_DIRTY_GFN_F_RESET           _BITUL(1)
+ #define KVM_DIRTY_GFN_F_MASK            0x3
+ 
+ /*
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 21247e49fe82b..b186d852fe3df 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -12714,12 +12714,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
+ 	if (is_priv)
+ 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
+ 
+-	if (bpf_prog_is_dev_bound(env->prog->aux)) {
+-		ret = bpf_prog_offload_verifier_prep(env->prog);
+-		if (ret)
+-			goto skip_full_check;
+-	}
+-
+ 	env->explored_states = kvcalloc(state_htab_size(env),
+ 				       sizeof(struct bpf_verifier_state_list *),
+ 				       GFP_USER);
+@@ -12743,6 +12737,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
+ 	if (ret < 0)
+ 		goto skip_full_check;
+ 
++	if (bpf_prog_is_dev_bound(env->prog->aux)) {
++		ret = bpf_prog_offload_verifier_prep(env->prog);
++		if (ret)
++			goto skip_full_check;
++	}
++
+ 	ret = check_cfg(env);
+ 	if (ret < 0)
+ 		goto skip_full_check;
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 1d60fc2c99871..93684cc63285a 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -1098,28 +1098,30 @@ static int seccomp_do_user_notification(int this_syscall,
+ 
+ 	up(&match->notif->request);
+ 	wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM);
+-	mutex_unlock(&match->notify_lock);
+ 
+ 	/*
+ 	 * This is where we wait for a reply from userspace.
+ 	 */
+-wait:
+-	err = wait_for_completion_interruptible(&n.ready);
+-	mutex_lock(&match->notify_lock);
+-	if (err == 0) {
+-		/* Check if we were woken up by a addfd message */
++	do {
++		mutex_unlock(&match->notify_lock);
++		err = wait_for_completion_interruptible(&n.ready);
++		mutex_lock(&match->notify_lock);
++		if (err != 0)
++			goto interrupted;
++
+ 		addfd = list_first_entry_or_null(&n.addfd,
+ 						 struct seccomp_kaddfd, list);
+-		if (addfd && n.state != SECCOMP_NOTIFY_REPLIED) {
++		/* Check if we were woken up by an addfd message */
++		if (addfd)
+ 			seccomp_handle_addfd(addfd);
+-			mutex_unlock(&match->notify_lock);
+-			goto wait;
+-		}
+-		ret = n.val;
+-		err = n.error;
+-		flags = n.flags;
+-	}
+ 
++	}  while (n.state != SECCOMP_NOTIFY_REPLIED);
++
++	ret = n.val;
++	err = n.error;
++	flags = n.flags;
++
++interrupted:
+ 	/* If there were any pending addfd calls, clear them out */
+ 	list_for_each_entry_safe(addfd, tmp, &n.addfd, list) {
+ 		/* The process went away before we got a chance to handle it */
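
The seccomp rework replaces the wait/goto dance with a single do/while that keeps sleeping until the notification reaches SECCOMP_NOTIFY_REPLIED, servicing any addfd request that woke it. A stubbed userspace sketch of that control flow (wait_for_reply() and the canned wakeups are assumptions, not the kernel code):

#include <stdio.h>

enum state { SENT, ADDFD, REPLIED };

/* Canned wakeups standing in for wait_for_completion_interruptible(). */
static enum state wakeups[] = { ADDFD, ADDFD, REPLIED };
static unsigned int n_wakeups;
static enum state notif_state = SENT;

static int wait_for_reply(void)
{
	notif_state = wakeups[n_wakeups++];
	return 0;	/* nonzero would mean a signal interrupted us */
}

int main(void)
{
	do {
		if (wait_for_reply() != 0)
			goto interrupted;
		if (notif_state == ADDFD)
			printf("servicing an addfd request\n");
	} while (notif_state != REPLIED);

	printf("reply received\n");
	return 0;

interrupted:
	printf("interrupted; clearing pending addfd requests\n");
	return 1;
}
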
+diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
+index 07cfa3249f83a..0a2d78e811cf5 100644
+--- a/net/bluetooth/cmtp/core.c
++++ b/net/bluetooth/cmtp/core.c
+@@ -392,6 +392,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
+ 	if (!(session->flags & BIT(CMTP_LOOPBACK))) {
+ 		err = cmtp_attach_device(session);
+ 		if (err < 0) {
++			/* The caller will call fput() on failure, and so
++			 * will the cmtp_session kthread.
++			 */
++			get_file(session->sock->file);
++
+ 			atomic_inc(&session->terminate);
+ 			wake_up_interruptible(sk_sleep(session->sock->sk));
+ 			up_write(&cmtp_session_sem);
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 9f94ad3caee92..253b24417c8e5 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -1062,27 +1062,31 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 	if (len < ISOTP_MIN_NAMELEN)
+ 		return -EINVAL;
+ 
++	if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
++		return -EADDRNOTAVAIL;
++
++	if (!addr->can_ifindex)
++		return -ENODEV;
++
++	lock_sock(sk);
++
+ 	/* do not register frame reception for functional addressing */
+ 	if (so->opt.flags & CAN_ISOTP_SF_BROADCAST)
+ 		do_rx_reg = 0;
+ 
+ 	/* do not validate rx address for functional addressing */
+ 	if (do_rx_reg) {
+-		if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id)
+-			return -EADDRNOTAVAIL;
++		if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) {
++			err = -EADDRNOTAVAIL;
++			goto out;
++		}
+ 
+-		if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
+-			return -EADDRNOTAVAIL;
++		if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) {
++			err = -EADDRNOTAVAIL;
++			goto out;
++		}
+ 	}
+ 
+-	if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
+-		return -EADDRNOTAVAIL;
+-
+-	if (!addr->can_ifindex)
+-		return -ENODEV;
+-
+-	lock_sock(sk);
+-
+ 	if (so->bound && addr->can_ifindex == so->ifindex &&
+ 	    addr->can_addr.tp.rx_id == so->rxid &&
+ 	    addr->can_addr.tp.tx_id == so->txid)
+@@ -1164,16 +1168,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
+ 	return ISOTP_MIN_NAMELEN;
+ }
+ 
+-static int isotp_setsockopt(struct socket *sock, int level, int optname,
++static int isotp_setsockopt_locked(struct socket *sock, int level, int optname,
+ 			    sockptr_t optval, unsigned int optlen)
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct isotp_sock *so = isotp_sk(sk);
+ 	int ret = 0;
+ 
+-	if (level != SOL_CAN_ISOTP)
+-		return -EINVAL;
+-
+ 	if (so->bound)
+ 		return -EISCONN;
+ 
+@@ -1248,6 +1249,22 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
+ 	return ret;
+ }
+ 
++static int isotp_setsockopt(struct socket *sock, int level, int optname,
++			    sockptr_t optval, unsigned int optlen)
++
++{
++	struct sock *sk = sock->sk;
++	int ret;
++
++	if (level != SOL_CAN_ISOTP)
++		return -EINVAL;
++
++	lock_sock(sk);
++	ret = isotp_setsockopt_locked(sock, level, optname, optval, optlen);
++	release_sock(sk);
++	return ret;
++}
++
+ static int isotp_getsockopt(struct socket *sock, int level, int optname,
+ 			    char __user *optval, int __user *optlen)
+ {
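
The isotp split above follows a common shape: the logic moves into a *_locked() helper and the public entry point brackets it with lock_sock()/release_sock(), so option parsing can no longer race with a concurrent bind(). A minimal pthread sketch of the same wrapper pattern (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static int bound;

static int setsockopt_locked(int optname, int optval)
{
	if (bound)
		return -1;	/* -EISCONN in the kernel version */
	printf("option %d = %d applied\n", optname, optval);
	return 0;
}

static int do_setsockopt(int optname, int optval)
{
	int ret;

	pthread_mutex_lock(&sk_lock);
	ret = setsockopt_locked(optname, optval);
	pthread_mutex_unlock(&sk_lock);
	return ret;
}

int main(void)
{
	return do_setsockopt(1, 42) ? 1 : 0;
}
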
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 70829c568645c..9631944740586 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3804,7 +3804,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+ 
+ 	if (q->flags & TCQ_F_NOLOCK) {
+ 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+-		qdisc_run(q);
++		if (likely(!netif_xmit_frozen_or_stopped(txq)))
++			qdisc_run(q);
+ 
+ 		if (unlikely(to_free))
+ 			kfree_skb_list(to_free);
+@@ -4974,25 +4975,43 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
+ 		sd->output_queue_tailp = &sd->output_queue;
+ 		local_irq_enable();
+ 
++		rcu_read_lock();
++
+ 		while (head) {
+ 			struct Qdisc *q = head;
+ 			spinlock_t *root_lock = NULL;
+ 
+ 			head = head->next_sched;
+ 
+-			if (!(q->flags & TCQ_F_NOLOCK)) {
+-				root_lock = qdisc_lock(q);
+-				spin_lock(root_lock);
+-			}
+ 			/* We need to make sure head->next_sched is read
+ 			 * before clearing __QDISC_STATE_SCHED
+ 			 */
+ 			smp_mb__before_atomic();
++
++			if (!(q->flags & TCQ_F_NOLOCK)) {
++				root_lock = qdisc_lock(q);
++				spin_lock(root_lock);
++			} else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
++						     &q->state))) {
++				/* There is a synchronize_net() between the
++				 * STATE_DEACTIVATED flag being set and
++				 * qdisc_reset()/some_qdisc_is_busy() in
++				 * dev_deactivate(), so we can safely bail
++				 * out early here to avoid a data race
++				 * between qdisc_deactivate() and
++				 * some_qdisc_is_busy() for a lockless qdisc.
++				 */
++				clear_bit(__QDISC_STATE_SCHED, &q->state);
++				continue;
++			}
++
+ 			clear_bit(__QDISC_STATE_SCHED, &q->state);
+ 			qdisc_run(q);
+ 			if (root_lock)
+ 				spin_unlock(root_lock);
+ 		}
++
++		rcu_read_unlock();
+ 	}
+ 
+ 	xfrm_dev_backlog(sd);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 9323d34d34ccc..52f4359efbd2b 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3782,6 +3782,7 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
+ 		__skb_push(skb, head_room);
+ 		memset(skb->data, 0, head_room);
+ 		skb_reset_mac_header(skb);
++		skb_reset_mac_len(skb);
+ 	}
+ 
+ 	return ret;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 8379719d1dcef..98f20efbfadf2 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -131,6 +131,9 @@ static void neigh_update_gc_list(struct neighbour *n)
+ 	write_lock_bh(&n->tbl->lock);
+ 	write_lock(&n->lock);
+ 
++	if (n->dead)
++		goto out;
++
+ 	/* remove from the gc list if new state is permanent or if neighbor
+ 	 * is externally learned; otherwise entry should be on the gc list
+ 	 */
+@@ -147,6 +150,7 @@ static void neigh_update_gc_list(struct neighbour *n)
+ 		atomic_inc(&n->tbl->gc_entries);
+ 	}
+ 
++out:
+ 	write_unlock(&n->lock);
+ 	write_unlock_bh(&n->tbl->lock);
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 5ec90f99e1028..9c7b143e7a964 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2132,10 +2132,10 @@ void skb_orphan_partial(struct sk_buff *skb)
+ 	if (skb_is_tcp_pure_ack(skb))
+ 		return;
+ 
+-	if (can_skb_orphan_partial(skb))
+-		skb_set_owner_sk_safe(skb, skb->sk);
+-	else
+-		skb_orphan(skb);
++	if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
++		return;
++
++	skb_orphan(skb);
+ }
+ EXPORT_SYMBOL(skb_orphan_partial);
+ 
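skb_set_owner_sk_safe() now tells its caller whether it actually took a socket reference, and skb_orphan_partial() above falls back to a plain skb_orphan() when it did not. A small analogue of that __must_check-style contract (the struct and helpers are made up for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sock { atomic_int refcnt; };

/* Take a reference only if the object is still live. */
static bool refcount_inc_not_zero(atomic_int *r)
{
	int old = atomic_load(r);

	while (old != 0) {
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return true;
	}
	return false;
}

static bool set_owner_safe(struct sock *sk)
{
	return sk && refcount_inc_not_zero(&sk->refcnt);
}

int main(void)
{
	struct sock live = { .refcnt = 1 };
	struct sock dead = { .refcnt = 0 };

	printf("live: took ref: %d\n", set_owner_safe(&live));
	if (!set_owner_safe(&dead))
		printf("dead: falling back to plain orphan\n");
	return 0;
}
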
+diff --git a/net/dsa/master.c b/net/dsa/master.c
+index 052a977914a6d..63adbc21a735a 100644
+--- a/net/dsa/master.c
++++ b/net/dsa/master.c
+@@ -147,8 +147,7 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
+ 	struct dsa_switch *ds = cpu_dp->ds;
+ 	int port = cpu_dp->index;
+ 	int len = ETH_GSTRING_LEN;
+-	int mcount = 0, count;
+-	unsigned int i;
++	int mcount = 0, count, i;
+ 	uint8_t pfx[4];
+ 	uint8_t *ndata;
+ 
+@@ -178,6 +177,8 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
+ 		 */
+ 		ds->ops->get_strings(ds, port, stringset, ndata);
+ 		count = ds->ops->get_sset_count(ds, port, stringset);
++		if (count < 0)
++			return;
+ 		for (i = 0; i < count; i++) {
+ 			memmove(ndata + (i * len + sizeof(pfx)),
+ 				ndata + i * len, len - sizeof(pfx));
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 992fcab4b5529..8dd7c8e84a65a 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -787,13 +787,15 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
+ 	struct dsa_switch *ds = dp->ds;
+ 
+ 	if (sset == ETH_SS_STATS) {
+-		int count;
++		int count = 0;
+ 
+-		count = 4;
+-		if (ds->ops->get_sset_count)
+-			count += ds->ops->get_sset_count(ds, dp->index, sset);
++		if (ds->ops->get_sset_count) {
++			count = ds->ops->get_sset_count(ds, dp->index, sset);
++			if (count < 0)
++				return count;
++		}
+ 
+-		return count;
++		return count + 4;
+ 	}
+ 
+ 	return -EOPNOTSUPP;
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index bfcdc75fc01e6..26c32407f0290 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -218,6 +218,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (master) {
+ 		skb->dev = master->dev;
+ 		skb_reset_mac_header(skb);
++		skb_reset_mac_len(skb);
+ 		hsr_forward_skb(skb, master);
+ 	} else {
+ 		atomic_long_inc(&dev->tx_dropped);
+@@ -259,6 +260,7 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master)
+ 		goto out;
+ 
+ 	skb_reset_mac_header(skb);
++	skb_reset_mac_len(skb);
+ 	skb_reset_network_header(skb);
+ 	skb_reset_transport_header(skb);
+ 
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index 6852e9bccf5b8..ceb8afb2a62f4 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -474,8 +474,8 @@ static void handle_std_frame(struct sk_buff *skb,
+ 	}
+ }
+ 
+-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+-			 struct hsr_frame_info *frame)
++int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
++			struct hsr_frame_info *frame)
+ {
+ 	struct hsr_port *port = frame->port_rcv;
+ 	struct hsr_priv *hsr = port->hsr;
+@@ -483,20 +483,26 @@ void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ 	/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
+ 	if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
+ 	    proto == htons(ETH_P_HSR)) {
++		/* Check if skb contains hsr_ethhdr */
++		if (skb->mac_len < sizeof(struct hsr_ethhdr))
++			return -EINVAL;
++
+ 		/* HSR tagged frame :- Data or Supervision */
+ 		frame->skb_std = NULL;
+ 		frame->skb_prp = NULL;
+ 		frame->skb_hsr = skb;
+ 		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
+-		return;
++		return 0;
+ 	}
+ 
+ 	/* Standard frame or PRP from master port */
+ 	handle_std_frame(skb, frame);
++
++	return 0;
+ }
+ 
+-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+-			 struct hsr_frame_info *frame)
++int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
++			struct hsr_frame_info *frame)
+ {
+ 	/* Supervision frame */
+ 	struct prp_rct *rct = skb_get_PRP_rct(skb);
+@@ -507,9 +513,11 @@ void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ 		frame->skb_std = NULL;
+ 		frame->skb_prp = skb;
+ 		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
+-		return;
++		return 0;
+ 	}
+ 	handle_std_frame(skb, frame);
++
++	return 0;
+ }
+ 
+ static int fill_frame_info(struct hsr_frame_info *frame,
+@@ -519,9 +527,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+ 	struct hsr_vlan_ethhdr *vlan_hdr;
+ 	struct ethhdr *ethhdr;
+ 	__be16 proto;
++	int ret;
+ 
+-	/* Check if skb contains hsr_ethhdr */
+-	if (skb->mac_len < sizeof(struct hsr_ethhdr))
++	/* Check if skb contains ethhdr */
++	if (skb->mac_len < sizeof(struct ethhdr))
+ 		return -EINVAL;
+ 
+ 	memset(frame, 0, sizeof(*frame));
+@@ -548,7 +557,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+ 
+ 	frame->is_from_san = false;
+ 	frame->port_rcv = port;
+-	hsr->proto_ops->fill_frame_info(proto, skb, frame);
++	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
++	if (ret)
++		return ret;
++
+ 	check_local_dest(port->hsr, skb, frame);
+ 
+ 	return 0;
+diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h
+index b6acaafa83fc2..206636750b300 100644
+--- a/net/hsr/hsr_forward.h
++++ b/net/hsr/hsr_forward.h
+@@ -24,8 +24,8 @@ struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
+ 				       struct hsr_port *port);
+ bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
+ bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
+-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+-			 struct hsr_frame_info *frame);
+-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+-			 struct hsr_frame_info *frame);
++int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
++			struct hsr_frame_info *frame);
++int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
++			struct hsr_frame_info *frame);
+ #endif /* __HSR_FORWARD_H */
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 8f264672b70bd..53d1f7a824630 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -186,8 +186,8 @@ struct hsr_proto_ops {
+ 					       struct hsr_port *port);
+ 	struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame,
+ 						struct hsr_port *port);
+-	void (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
+-				struct hsr_frame_info *frame);
++	int (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
++			       struct hsr_frame_info *frame);
+ 	bool (*invalid_dan_ingress_frame)(__be16 protocol);
+ 	void (*update_san_info)(struct hsr_node *node, bool is_sup);
+ };
+diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
+index c5227d42faf56..b70e6bbf6021f 100644
+--- a/net/hsr/hsr_slave.c
++++ b/net/hsr/hsr_slave.c
+@@ -60,12 +60,11 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
+ 		goto finish_pass;
+ 
+ 	skb_push(skb, ETH_HLEN);
+-
+-	if (skb_mac_header(skb) != skb->data) {
+-		WARN_ONCE(1, "%s:%d: Malformed frame at source port %s)\n",
+-			  __func__, __LINE__, port->dev->name);
+-		goto finish_consume;
+-	}
++	skb_reset_mac_header(skb);
++	if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
++	    protocol == htons(ETH_P_HSR))
++		skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
++	skb_reset_mac_len(skb);
+ 
+ 	hsr_forward_skb(skb, port);
+ 
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 6c8604390266f..2cab0c563214c 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1601,10 +1601,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
+ 		     IPV6_TLV_PADN, 0 };
+ 
+ 	/* we assume size > sizeof(ra) here */
+-	/* limit our allocations to order-0 page */
+-	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+ 	skb = sock_alloc_send_skb(sk, size, 1, &err);
+-
+ 	if (!skb)
+ 		return NULL;
+ 
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 47a0dc46cbdb0..28e44782c94d1 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -343,7 +343,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ 	hdr = ipv6_hdr(skb);
+ 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
+ 
+-	if (!(fhdr->frag_off & htons(0xFFF9))) {
++	if (!(fhdr->frag_off & htons(IP6_OFFSET | IP6_MF))) {
+ 		/* It is not a fragmented frame */
+ 		skb->transport_header += sizeof(struct frag_hdr);
+ 		__IP6_INC_STATS(net,
+@@ -351,6 +351,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ 
+ 		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+ 		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
++		IP6CB(skb)->frag_max_size = ntohs(hdr->payload_len) +
++					    sizeof(struct ipv6hdr);
+ 		return 1;
+ 	}
+ 
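The reassembly hunk replaces the magic htons(0xFFF9) with the named bits it was encoding; a one-liner confirms they are the same mask (IP6_OFFSET and IP6_MF values as in the kernel's ipv6 headers):

#include <stdio.h>

#define IP6_OFFSET 0xFFF8	/* fragment offset mask */
#define IP6_MF     0x0001	/* more-fragments bit */

int main(void)
{
	/* Prints 0xFFF9: the named bits spell out the old magic number. */
	printf("IP6_OFFSET | IP6_MF = 0x%04X\n", IP6_OFFSET | IP6_MF);
	return 0;
}
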
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index ecda126a70266..02e818d740f60 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -50,12 +50,6 @@ struct ieee80211_local;
+ #define IEEE80211_ENCRYPT_HEADROOM 8
+ #define IEEE80211_ENCRYPT_TAILROOM 18
+ 
+-/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent
+- * reception of at least three fragmented frames. This limit can be increased
+- * by changing this define, at the cost of slower frame reassembly and
+- * increased memory use (about 2 kB of RAM per entry). */
+-#define IEEE80211_FRAGMENT_MAX 4
+-
+ /* power level hasn't been configured (or set to automatic) */
+ #define IEEE80211_UNSET_POWER_LEVEL	INT_MIN
+ 
+@@ -88,18 +82,6 @@ extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS];
+ 
+ #define IEEE80211_MAX_NAN_INSTANCE_ID 255
+ 
+-struct ieee80211_fragment_entry {
+-	struct sk_buff_head skb_list;
+-	unsigned long first_frag_time;
+-	u16 seq;
+-	u16 extra_len;
+-	u16 last_frag;
+-	u8 rx_queue;
+-	bool check_sequential_pn; /* needed for CCMP/GCMP */
+-	u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
+-};
+-
+-
+ struct ieee80211_bss {
+ 	u32 device_ts_beacon, device_ts_presp;
+ 
+@@ -241,8 +223,15 @@ struct ieee80211_rx_data {
+ 	 */
+ 	int security_idx;
+ 
+-	u32 tkip_iv32;
+-	u16 tkip_iv16;
++	union {
++		struct {
++			u32 iv32;
++			u16 iv16;
++		} tkip;
++		struct {
++			u8 pn[IEEE80211_CCMP_PN_LEN];
++		} ccm_gcm;
++	};
+ };
+ 
+ struct ieee80211_csa_settings {
+@@ -902,9 +891,7 @@ struct ieee80211_sub_if_data {
+ 
+ 	char name[IFNAMSIZ];
+ 
+-	/* Fragment table for host-based reassembly */
+-	struct ieee80211_fragment_entry	fragments[IEEE80211_FRAGMENT_MAX];
+-	unsigned int fragment_next;
++	struct ieee80211_fragment_cache frags;
+ 
+ 	/* TID bitmap for NoAck policy */
+ 	u16 noack_map;
+@@ -2318,4 +2305,7 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
+ #define debug_noinline
+ #endif
+ 
++void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
++void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
++
+ #endif /* IEEE80211_I_H */
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index b80c9b016b2be..6f8885766cbaa 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -8,7 +8,7 @@
+  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright (c) 2016        Intel Deutschland GmbH
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+@@ -676,16 +676,12 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
+  */
+ static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
+ {
+-	int i;
+-
+ 	/* free extra data */
+ 	ieee80211_free_keys(sdata, false);
+ 
+ 	ieee80211_debugfs_remove_netdev(sdata);
+ 
+-	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
+-		__skb_queue_purge(&sdata->fragments[i].skb_list);
+-	sdata->fragment_next = 0;
++	ieee80211_destroy_frag_cache(&sdata->frags);
+ 
+ 	if (ieee80211_vif_is_mesh(&sdata->vif))
+ 		ieee80211_mesh_teardown_sdata(sdata);
+@@ -1928,8 +1924,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ 	sdata->wdev.wiphy = local->hw.wiphy;
+ 	sdata->local = local;
+ 
+-	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
+-		skb_queue_head_init(&sdata->fragments[i].skb_list);
++	ieee80211_init_frag_cache(&sdata->frags);
+ 
+ 	INIT_LIST_HEAD(&sdata->key_list);
+ 
+diff --git a/net/mac80211/key.c b/net/mac80211/key.c
+index 56c068cb49c4d..f695fc80088bc 100644
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -799,6 +799,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
+ 		       struct ieee80211_sub_if_data *sdata,
+ 		       struct sta_info *sta)
+ {
++	static atomic_t key_color = ATOMIC_INIT(0);
+ 	struct ieee80211_key *old_key;
+ 	int idx = key->conf.keyidx;
+ 	bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
+@@ -850,6 +851,12 @@ int ieee80211_key_link(struct ieee80211_key *key,
+ 	key->sdata = sdata;
+ 	key->sta = sta;
+ 
++	/*
++	 * Assign a unique ID to every key so we can easily prevent mixed
++	 * key and fragment cache attacks.
++	 */
++	key->color = atomic_inc_return(&key_color);
++
+ 	increment_tailroom_need_count(sdata);
+ 
+ 	ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
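
The key "color" introduced above is a generation tag drawn from a global atomic counter; cached fragment state remembers the tag so anything reassembled across a rekey can be rejected. A userspace sketch of the idea (the types and helpers here are illustrative, not mac80211's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint key_color;

struct key {
	unsigned int color;
};

struct frag_entry {
	unsigned int key_color;
};

static void key_install(struct key *k)
{
	/* Every installed key gets a fresh, never-reused tag. */
	k->color = atomic_fetch_add(&key_color, 1) + 1;
}

static bool frag_matches_key(const struct frag_entry *e, const struct key *k)
{
	return e->key_color == k->color;
}

int main(void)
{
	struct key old_key, new_key;
	struct frag_entry entry;

	key_install(&old_key);
	entry.key_color = old_key.color;	/* first fragment arrives */

	key_install(&new_key);			/* rekey happens mid-frame */
	printf("fragment accepted: %s\n",
	       frag_matches_key(&entry, &new_key) ? "yes" : "no");
	return 0;
}
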
+diff --git a/net/mac80211/key.h b/net/mac80211/key.h
+index 7ad72e9b4991d..1e326c89d7217 100644
+--- a/net/mac80211/key.h
++++ b/net/mac80211/key.h
+@@ -128,6 +128,8 @@ struct ieee80211_key {
+ 	} debugfs;
+ #endif
+ 
++	unsigned int color;
++
+ 	/*
+ 	 * key config, must be last because it contains key
+ 	 * material as variable length member
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index c1343c028b767..59de7a86599dc 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -6,7 +6,7 @@
+  * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ 
+ #include <linux/jiffies.h>
+@@ -2122,19 +2122,34 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
+ 	return result;
+ }
+ 
++void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
++		skb_queue_head_init(&cache->entries[i].skb_list);
++}
++
++void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
++		__skb_queue_purge(&cache->entries[i].skb_list);
++}
++
+ static inline struct ieee80211_fragment_entry *
+-ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
++ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
+ 			 unsigned int frag, unsigned int seq, int rx_queue,
+ 			 struct sk_buff **skb)
+ {
+ 	struct ieee80211_fragment_entry *entry;
+ 
+-	entry = &sdata->fragments[sdata->fragment_next++];
+-	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
+-		sdata->fragment_next = 0;
++	entry = &cache->entries[cache->next++];
++	if (cache->next >= IEEE80211_FRAGMENT_MAX)
++		cache->next = 0;
+ 
+-	if (!skb_queue_empty(&entry->skb_list))
+-		__skb_queue_purge(&entry->skb_list);
++	__skb_queue_purge(&entry->skb_list);
+ 
+ 	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
+ 	*skb = NULL;
+@@ -2149,14 +2164,14 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
+ }
+ 
+ static inline struct ieee80211_fragment_entry *
+-ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
++ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
+ 			  unsigned int frag, unsigned int seq,
+ 			  int rx_queue, struct ieee80211_hdr *hdr)
+ {
+ 	struct ieee80211_fragment_entry *entry;
+ 	int i, idx;
+ 
+-	idx = sdata->fragment_next;
++	idx = cache->next;
+ 	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
+ 		struct ieee80211_hdr *f_hdr;
+ 		struct sk_buff *f_skb;
+@@ -2165,7 +2180,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
+ 		if (idx < 0)
+ 			idx = IEEE80211_FRAGMENT_MAX - 1;
+ 
+-		entry = &sdata->fragments[idx];
++		entry = &cache->entries[idx];
+ 		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
+ 		    entry->rx_queue != rx_queue ||
+ 		    entry->last_frag + 1 != frag)
+@@ -2193,15 +2208,27 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
+ 	return NULL;
+ }
+ 
++static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
++{
++	return rx->key &&
++		(rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
++		 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
++		 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
++		 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
++		ieee80211_has_protected(fc);
++}
++
+ static ieee80211_rx_result debug_noinline
+ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ {
++	struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
+ 	struct ieee80211_hdr *hdr;
+ 	u16 sc;
+ 	__le16 fc;
+ 	unsigned int frag, seq;
+ 	struct ieee80211_fragment_entry *entry;
+ 	struct sk_buff *skb;
++	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ 
+ 	hdr = (struct ieee80211_hdr *)rx->skb->data;
+ 	fc = hdr->frame_control;
+@@ -2217,6 +2244,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 		goto out_no_led;
+ 	}
+ 
++	if (rx->sta)
++		cache = &rx->sta->frags;
++
+ 	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+ 		goto out;
+ 
+@@ -2235,20 +2265,17 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 
+ 	if (frag == 0) {
+ 		/* This is the first fragment of a new frame. */
+-		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
++		entry = ieee80211_reassemble_add(cache, frag, seq,
+ 						 rx->seqno_idx, &(rx->skb));
+-		if (rx->key &&
+-		    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
+-		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
+-		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
+-		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
+-		    ieee80211_has_protected(fc)) {
++		if (requires_sequential_pn(rx, fc)) {
+ 			int queue = rx->security_idx;
+ 
+ 			/* Store CCMP/GCMP PN so that we can verify that the
+ 			 * next fragment has a sequential PN value.
+ 			 */
+ 			entry->check_sequential_pn = true;
++			entry->is_protected = true;
++			entry->key_color = rx->key->color;
+ 			memcpy(entry->last_pn,
+ 			       rx->key->u.ccmp.rx_pn[queue],
+ 			       IEEE80211_CCMP_PN_LEN);
+@@ -2260,6 +2287,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 				     sizeof(rx->key->u.gcmp.rx_pn[queue]));
+ 			BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
+ 				     IEEE80211_GCMP_PN_LEN);
++		} else if (rx->key &&
++			   (ieee80211_has_protected(fc) ||
++			    (status->flag & RX_FLAG_DECRYPTED))) {
++			entry->is_protected = true;
++			entry->key_color = rx->key->color;
+ 		}
+ 		return RX_QUEUED;
+ 	}
+@@ -2267,7 +2299,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 	/* This is a fragment for a frame that should already be pending in
+ 	 * fragment cache. Add this fragment to the end of the pending entry.
+ 	 */
+-	entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
++	entry = ieee80211_reassemble_find(cache, frag, seq,
+ 					  rx->seqno_idx, hdr);
+ 	if (!entry) {
+ 		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
+@@ -2282,25 +2314,39 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 	if (entry->check_sequential_pn) {
+ 		int i;
+ 		u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
+-		int queue;
+ 
+-		if (!rx->key ||
+-		    (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
+-		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
+-		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
+-		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
++		if (!requires_sequential_pn(rx, fc))
++			return RX_DROP_UNUSABLE;
++
++		/* Prevent mixed key and fragment cache attacks */
++		if (entry->key_color != rx->key->color)
+ 			return RX_DROP_UNUSABLE;
++
+ 		memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
+ 		for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
+ 			pn[i]++;
+ 			if (pn[i])
+ 				break;
+ 		}
+-		queue = rx->security_idx;
+-		rpn = rx->key->u.ccmp.rx_pn[queue];
++
++		rpn = rx->ccm_gcm.pn;
+ 		if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
+ 			return RX_DROP_UNUSABLE;
+ 		memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
++	} else if (entry->is_protected &&
++		   (!rx->key ||
++		    (!ieee80211_has_protected(fc) &&
++		     !(status->flag & RX_FLAG_DECRYPTED)) ||
++		    rx->key->color != entry->key_color)) {
++		/* Drop this as a mixed key or fragment cache attack, even
++		 * though for TKIP the Michael MIC should protect us, and
++		 * WEP is a lost cause anyway.
++		 */
++		return RX_DROP_UNUSABLE;
++	} else if (entry->is_protected && rx->key &&
++		   entry->key_color != rx->key->color &&
++		   (status->flag & RX_FLAG_DECRYPTED)) {
++		return RX_DROP_UNUSABLE;
+ 	}
+ 
+ 	skb_pull(rx->skb, ieee80211_hdrlen(fc));
+@@ -2493,13 +2539,13 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
+ 	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
+ 
+ 	/*
+-	 * Allow EAPOL frames to us/the PAE group address regardless
+-	 * of whether the frame was encrypted or not.
++	 * Allow EAPOL frames to us/the PAE group address regardless of
++	 * whether the frame was encrypted or not, and always disallow
++	 * all other destination addresses for them.
+ 	 */
+-	if (ehdr->h_proto == rx->sdata->control_port_protocol &&
+-	    (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
+-	     ether_addr_equal(ehdr->h_dest, pae_group_addr)))
+-		return true;
++	if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
++		return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
++		       ether_addr_equal(ehdr->h_dest, pae_group_addr);
+ 
+ 	if (ieee80211_802_1x_port_control(rx) ||
+ 	    ieee80211_drop_unencrypted(rx, fc))
+@@ -2524,8 +2570,28 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
+ 		cfg80211_rx_control_port(dev, skb, noencrypt);
+ 		dev_kfree_skb(skb);
+ 	} else {
++		struct ethhdr *ehdr = (void *)skb_mac_header(skb);
++
+ 		memset(skb->cb, 0, sizeof(skb->cb));
+ 
++		/*
++		 * 802.1X over 802.11 requires that the authenticator address
++		 * be used for EAPOL frames. However, 802.1X allows the use of
++		 * the PAE group address instead. If the interface is part of
++		 * a bridge and we pass the frame with the PAE group address,
++		 * then the bridge will forward it to the network (even if the
++		 * client was not associated yet), which isn't supposed to
++		 * happen.
++		 * To avoid that, rewrite the destination address to our own
++		 * address, so that the authenticator (e.g. hostapd) will see
++		 * the frame, but the bridge won't forward it anywhere else. Note
++		 * that due to earlier filtering, the only other address can
++		 * be the PAE group address.
++		 */
++		if (unlikely(skb->protocol == sdata->control_port_protocol &&
++			     !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
++			ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
++
+ 		/* deliver to local stack */
+ 		if (rx->list)
+ 			list_add_tail(&skb->list, rx->list);
+@@ -2565,6 +2631,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
+ 	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
+ 	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
+ 	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
++	    ehdr->h_proto != rx->sdata->control_port_protocol &&
+ 	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
+ 		if (is_multicast_ether_addr(ehdr->h_dest) &&
+ 		    ieee80211_vif_get_num_mcast_if(sdata) != 0) {
+@@ -2674,7 +2741,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
+ 	if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
+ 					  rx->sdata->vif.addr,
+ 					  rx->sdata->vif.type,
+-					  data_offset))
++					  data_offset, true))
+ 		return RX_DROP_UNUSABLE;
+ 
+ 	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
+@@ -2731,6 +2798,23 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
+ 	if (is_multicast_ether_addr(hdr->addr1))
+ 		return RX_DROP_UNUSABLE;
+ 
++	if (rx->key) {
++		/*
++		 * We should not receive A-MSDUs on pre-HT connections,
++		 * and HT connections cannot use old ciphers. Thus drop
++		 * them, as in those cases we couldn't even have SPP
++		 * A-MSDUs or such.
++		 */
++		switch (rx->key->conf.cipher) {
++		case WLAN_CIPHER_SUITE_WEP40:
++		case WLAN_CIPHER_SUITE_WEP104:
++		case WLAN_CIPHER_SUITE_TKIP:
++			return RX_DROP_UNUSABLE;
++		default:
++			break;
++		}
++	}
++
+ 	return __ieee80211_rx_h_amsdu(rx, 0);
+ }
+ 
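+
The defragmentation code above treats the CCMP/GCMP packet number as a
48-bit big-endian counter: the expected PN of the next fragment is the
previous fragment's PN plus one, computed with a manual ripple carry.
A standalone sketch of that check (user-space approximation, not the
kernel code itself):

	#include <string.h>

	#define PN_LEN 6	/* IEEE80211_CCMP_PN_LEN */

	/* Return 1 if rx_pn equals last_pn + 1 as a big-endian counter. */
	static int frag_pn_is_sequential(const unsigned char *last_pn,
					 const unsigned char *rx_pn)
	{
		unsigned char pn[PN_LEN];
		int i;

		memcpy(pn, last_pn, PN_LEN);
		for (i = PN_LEN - 1; i >= 0; i--) {
			pn[i]++;
			if (pn[i])	/* this octet did not wrap: carry stops */
				break;
		}
		return memcmp(pn, rx_pn, PN_LEN) == 0;
	}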
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index ec6973ee88ef4..f2fb69da9b6e1 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -4,7 +4,7 @@
+  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ 
+ #include <linux/module.h>
+@@ -392,6 +392,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
+ 
+ 	u64_stats_init(&sta->rx_stats.syncp);
+ 
++	ieee80211_init_frag_cache(&sta->frags);
++
+ 	sta->sta_state = IEEE80211_STA_NONE;
+ 
+ 	/* Mark TID as unreserved */
+@@ -1102,6 +1104,8 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
+ 
+ 	ieee80211_sta_debugfs_remove(sta);
+ 
++	ieee80211_destroy_frag_cache(&sta->frags);
++
+ 	cleanup_single_sta(sta);
+ }
+ 
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index 78b9d0c7cc583..0333072ebd982 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -3,7 +3,7 @@
+  * Copyright 2002-2005, Devicescape Software, Inc.
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright(c) 2015-2017 Intel Deutschland GmbH
+- * Copyright(c) 2020 Intel Corporation
++ * Copyright(c) 2020-2021 Intel Corporation
+  */
+ 
+ #ifndef STA_INFO_H
+@@ -438,6 +438,34 @@ struct ieee80211_sta_rx_stats {
+ 	u64 msdu[IEEE80211_NUM_TIDS + 1];
+ };
+ 
++/*
++ * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent
++ * reception of at least one MSDU per access category per associated STA"
++ * on APs, or "at least one MSDU per access category" on other interface types.
++ *
++ * This limit can be increased by changing this define, at the cost of slower
++ * frame reassembly and increased memory use while fragments are pending.
++ */
++#define IEEE80211_FRAGMENT_MAX 4
++
++struct ieee80211_fragment_entry {
++	struct sk_buff_head skb_list;
++	unsigned long first_frag_time;
++	u16 seq;
++	u16 extra_len;
++	u16 last_frag;
++	u8 rx_queue;
++	u8 check_sequential_pn:1, /* needed for CCMP/GCMP */
++	   is_protected:1;
++	u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
++	unsigned int key_color;
++};
++
++struct ieee80211_fragment_cache {
++	struct ieee80211_fragment_entry	entries[IEEE80211_FRAGMENT_MAX];
++	unsigned int next;
++};
++
+ /*
+  * The bandwidth threshold below which the per-station CoDel parameters will be
+  * scaled to be more lenient (to prevent starvation of slow stations). This
+@@ -531,6 +559,7 @@ struct ieee80211_sta_rx_stats {
+  * @status_stats.last_ack_signal: last ACK signal
+  * @status_stats.ack_signal_filled: last ACK signal validity
+  * @status_stats.avg_ack_signal: average ACK signal
++ * @frags: fragment cache
+  */
+ struct sta_info {
+ 	/* General information, mostly static */
+@@ -639,6 +668,8 @@ struct sta_info {
+ 
+ 	struct cfg80211_chan_def tdls_chandef;
+ 
++	struct ieee80211_fragment_cache frags;
++
+ 	/* keep last! */
+ 	struct ieee80211_sta sta;
+ };
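+
A fixed-size cache like the one declared here is typically managed
round-robin: a new first fragment claims entries[next], evicting
whatever reassembly was still pending in that slot. A minimal sketch of
that slot selection (illustrative only; the kernel helper that performs
it is not part of this hunk):

	struct frag_entry {
		int in_use;	/* stand-in for the per-entry state above */
	};

	struct frag_cache {
		struct frag_entry entries[IEEE80211_FRAGMENT_MAX];
		unsigned int next;
	};

	static struct frag_entry *frag_cache_take_slot(struct frag_cache *c)
	{
		struct frag_entry *e = &c->entries[c->next];

		c->next = (c->next + 1) % IEEE80211_FRAGMENT_MAX;
		/* the caller flushes any stale fragments queued in *e */
		return e;
	}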
+diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
+index 91bf32af55e9a..bca47fad5a162 100644
+--- a/net/mac80211/wpa.c
++++ b/net/mac80211/wpa.c
+@@ -3,6 +3,7 @@
+  * Copyright 2002-2004, Instant802 Networks, Inc.
+  * Copyright 2008, Jouni Malinen <j@w1.fi>
+  * Copyright (C) 2016-2017 Intel Deutschland GmbH
++ * Copyright (C) 2020-2021 Intel Corporation
+  */
+ 
+ #include <linux/netdevice.h>
+@@ -167,8 +168,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
+ 
+ update_iv:
+ 	/* update IV in key information to be able to detect replays */
+-	rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32;
+-	rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16;
++	rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32;
++	rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16;
+ 
+ 	return RX_CONTINUE;
+ 
+@@ -294,8 +295,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
+ 					  key, skb->data + hdrlen,
+ 					  skb->len - hdrlen, rx->sta->sta.addr,
+ 					  hdr->addr1, hwaccel, rx->security_idx,
+-					  &rx->tkip_iv32,
+-					  &rx->tkip_iv16);
++					  &rx->tkip.iv32,
++					  &rx->tkip.iv16);
+ 	if (res != TKIP_DECRYPT_OK)
+ 		return RX_DROP_UNUSABLE;
+ 
+@@ -553,6 +554,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
+ 		}
+ 
+ 		memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
++		if (unlikely(ieee80211_is_frag(hdr)))
++			memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
+ 	}
+ 
+ 	/* Remove CCMP header and MIC */
+@@ -781,6 +784,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
+ 		}
+ 
+ 		memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
++		if (unlikely(ieee80211_is_frag(hdr)))
++			memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
+ 	}
+ 
+ 	/* Remove GCMP header and MIC */
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 89a4225ed3211..8848a9e2a95b1 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -127,7 +127,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+ 			pr_debug("MP_JOIN hmac");
+ 		} else {
+-			pr_warn("MP_JOIN bad option size");
+ 			mp_opt->mp_join = 0;
+ 		}
+ 		break;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 65e5d3eb10789..228dd40828c4b 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -869,12 +869,18 @@ static bool mptcp_skb_can_collapse_to(u64 write_seq,
+ 	       !mpext->frozen;
+ }
+ 
++/* we can append data to the given data frag if:
++ * - there is space available in the backing page_frag
++ * - the data frag tail matches the current page_frag free offset
++ * - the data frag end sequence number matches the current write seq
++ */
+ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
+ 				       const struct page_frag *pfrag,
+ 				       const struct mptcp_data_frag *df)
+ {
+ 	return df && pfrag->page == df->page &&
+ 		pfrag->size - pfrag->offset > 0 &&
++		pfrag->offset == (df->offset + df->data_len) &&
+ 		df->data_seq + df->data_len == msk->write_seq;
+ }
+ 
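+
A worked example of why the added offset test matters (values are
illustrative):

	/*
	 *   df->offset = 0, df->data_len = 1000  -> frag tail at offset 1000
	 *   pfrag->offset = 1500                  (another user took 500 bytes)
	 *
	 * Space is still available and the sequence numbers still line up,
	 * so the old predicate would allow the collapse -- but the appended
	 * bytes would start at page offset 1500, leaving a 500-byte hole
	 * after the frag's tail. The added condition
	 *
	 *   pfrag->offset == df->offset + df->data_len   (1500 != 1000)
	 *
	 * rejects the collapse, and a fresh data frag is created instead.
	 */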
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 4fe7acaa472f2..1936db3574d2e 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -839,7 +839,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 
+ 	data_len = mpext->data_len;
+ 	if (data_len == 0) {
+-		pr_err("Infinite mapping not handled");
+ 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
+ 		return MAPPING_INVALID;
+ 	}
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index c77ba8690ed8d..434a50bfb5b7a 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -259,8 +259,7 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
+ {
+ 	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
+ 
+-	if (likely(!nf_flowtable_hw_offload(flow_table) ||
+-		   !test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags)))
++	if (likely(!nf_flowtable_hw_offload(flow_table)))
+ 		return;
+ 
+ 	nf_flow_offload_add(flow_table, flow);
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
+index 1c5460e7bce87..92047cea3c170 100644
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -753,10 +753,11 @@ static void flow_offload_work_add(struct flow_offload_work *offload)
+ 
+ 	err = flow_offload_rule_add(offload, flow_rule);
+ 	if (err < 0)
+-		set_bit(NF_FLOW_HW_REFRESH, &offload->flow->flags);
+-	else
+-		set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
++		goto out;
++
++	set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
+ 
++out:
+ 	nf_flow_offload_destroy(flow_rule);
+ }
+ 
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 9944523f5c2c3..2d73f265b12c9 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -408,8 +408,8 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
+  *
+  * Return: true on match, false otherwise.
+  */
+-static bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+-			      const u32 *key, const struct nft_set_ext **ext)
++bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
++		       const u32 *key, const struct nft_set_ext **ext)
+ {
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+ 	unsigned long *res_map, *fill_map;
+diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
+index 25a75591583eb..d84afb8fa79a1 100644
+--- a/net/netfilter/nft_set_pipapo.h
++++ b/net/netfilter/nft_set_pipapo.h
+@@ -178,6 +178,8 @@ struct nft_pipapo_elem {
+ 
+ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
+ 		  union nft_pipapo_map_bucket *mt, bool match_only);
++bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
++		       const u32 *key, const struct nft_set_ext **ext);
+ 
+ /**
+  * pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index d65ae0e23028d..eabdb8d552eef 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -1131,6 +1131,9 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 	bool map_index;
+ 	int i, ret = 0;
+ 
++	if (unlikely(!irq_fpu_usable()))
++		return nft_pipapo_lookup(net, set, key, ext);
++
+ 	m = rcu_dereference(priv->match);
+ 
+ 	/* This also protects access to all data related to scratch maps */
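+
The early return restores the usual kernel SIMD discipline: vector code
may only run when irq_fpu_usable() allows it, bracketed by
kernel_fpu_begin()/kernel_fpu_end(), with a scalar fallback otherwise.
In outline (a sketch; do_avx2_lookup() is a hypothetical stand-in for
the body of this function):

	bool nft_pipapo_avx2_lookup(const struct net *net,
				    const struct nft_set *set,
				    const u32 *key,
				    const struct nft_set_ext **ext)
	{
		bool ret;

		if (!irq_fpu_usable())	/* e.g. called from hard IRQ */
			return nft_pipapo_lookup(net, set, key, ext);

		kernel_fpu_begin();
		ret = do_avx2_lookup(net, set, key, ext);
		kernel_fpu_end();
		return ret;
	}

Exporting the scalar nft_pipapo_lookup() (the pipapo.c/pipapo.h hunks
above) is what makes this fallback possible.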
+diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
+index 15424d26e85df..ca3c37f2f1e67 100644
+--- a/net/openvswitch/meter.c
++++ b/net/openvswitch/meter.c
+@@ -611,6 +611,14 @@ bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
+ 	spin_lock(&meter->lock);
+ 
+ 	long_delta_ms = (now_ms - meter->used); /* ms */
++	if (long_delta_ms < 0) {
++		/* A negative delta means that several threads raced for the
++		 * meter lock and the one that received its packets a bit
++		 * later won. Treat all racing threads as having received
++		 * their packets at the same time, so the bucket cannot
++		 * wrap around below.
++		 */
++		long_delta_ms = 0;
++	}
+ 
+ 	/* Make sure delta_ms will not be too large, so that bucket will not
+ 	 * wrap around below.
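+
The clamp keeps the token-bucket refill monotonic. A user-space sketch
of the refill step with the clamp applied (field names mirror the code
above; the rate scaling is simplified):

	static void meter_refill(long long now_ms, long long *used_ms,
				 unsigned long long *bucket,
				 unsigned long long rate_per_ms,
				 unsigned long long burst)
	{
		long long delta_ms = now_ms - *used_ms;

		if (delta_ms < 0)	/* lost the race for the lock */
			delta_ms = 0;	/* treat as "same arrival time" */

		*bucket += (unsigned long long)delta_ms * rate_per_ms;
		if (*bucket > burst)
			*bucket = burst;
		*used_ms = now_ms;
	}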
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 9611e41c7b8be..c52557ec7fb33 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -422,7 +422,8 @@ static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
+ 	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
+ 		return TP_STATUS_TS_RAW_HARDWARE;
+ 
+-	if (ktime_to_timespec64_cond(skb->tstamp, ts))
++	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
++	    ktime_to_timespec64_cond(skb->tstamp, ts))
+ 		return TP_STATUS_TS_SOFTWARE;
+ 
+ 	return 0;
+@@ -2340,7 +2341,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
+ 
+-	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
++	/* Always timestamp; prefer an existing software timestamp taken
++	 * closer to the time of capture.
++	 */
++	ts_status = tpacket_get_timestamp(skb, &ts,
++					  po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
++	if (!ts_status)
+ 		ktime_get_real_ts64(&ts);
+ 
+ 	status |= ts_status;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 340d5af86e87f..94f6942d7ec11 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1624,7 +1624,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
+ 
+ 	/* If we missed on some chain */
+ 	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+-		ext = skb_ext_add(skb, TC_SKB_EXT);
++		ext = tc_skb_ext_alloc(skb);
+ 		if (WARN_ON_ONCE(!ext))
+ 			return TC_ACT_SHOT;
+ 		ext->chain = last_executed_chain;
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index cd2748e2d4a20..d320bcfb2da2c 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -407,7 +407,8 @@ static void dsmark_reset(struct Qdisc *sch)
+ 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+ 
+ 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+-	qdisc_reset(p->q);
++	if (p->q)
++		qdisc_reset(p->q);
+ 	sch->qstats.backlog = 0;
+ 	sch->q.qlen = 0;
+ }
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index 949163fe68afd..cac684952edc5 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -138,8 +138,15 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 
+ 	/* Classifies packet into corresponding flow */
+ 	idx = fq_pie_classify(skb, sch, &ret);
+-	sel_flow = &q->flows[idx];
++	if (idx == 0) {
++		if (ret & __NET_XMIT_BYPASS)
++			qdisc_qstats_drop(sch);
++		__qdisc_drop(skb, to_free);
++		return ret;
++	}
++	idx--;
+ 
++	sel_flow = &q->flows[idx];
+ 	/* Checks whether adding a new packet would exceed memory limit */
+ 	get_pie_cb(skb)->mem_usage = skb->truesize;
+ 	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
+@@ -297,9 +304,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
+ 			goto flow_error;
+ 		}
+ 		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
+-		if (!q->flows_cnt || q->flows_cnt >= 65536) {
++		if (!q->flows_cnt || q->flows_cnt > 65536) {
+ 			NL_SET_ERR_MSG_MOD(extack,
+-					   "Number of flows must range in [1..65535]");
++					   "Number of flows must range in [1..65536]");
+ 			goto flow_error;
+ 		}
+ 	}
+@@ -367,7 +374,7 @@ static void fq_pie_timer(struct timer_list *t)
+ 	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
+ 	struct Qdisc *sch = q->sch;
+ 	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
+-	u16 idx;
++	u32 idx;
+ 
+ 	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ 	spin_lock(root_lock);
+@@ -388,7 +395,7 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
+ {
+ 	struct fq_pie_sched_data *q = qdisc_priv(sch);
+ 	int err;
+-	u16 idx;
++	u32 idx;
+ 
+ 	pie_params_init(&q->p_params);
+ 	sch->limit = 10 * 1024;
+@@ -500,7 +507,7 @@ static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+ static void fq_pie_reset(struct Qdisc *sch)
+ {
+ 	struct fq_pie_sched_data *q = qdisc_priv(sch);
+-	u16 idx;
++	u32 idx;
+ 
+ 	INIT_LIST_HEAD(&q->new_flows);
+ 	INIT_LIST_HEAD(&q->old_flows);
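+
Taken together, these fq_pie hunks adopt the usual tc convention for
classification results:

	/*
	 * fq_pie_classify() result:
	 *   0               -> no flow selected: count the drop, free skb
	 *   1..q->flows_cnt -> flow chosen: sel_flow = &q->flows[idx - 1]
	 *
	 * q->flows[] is therefore never indexed out of bounds, and since
	 * flows_cnt may now legitimately be 65536, idx is widened from
	 * u16 to u32 everywhere it is used as a loop counter.
	 */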
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 49eae93d1489d..854d2b38db856 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -35,6 +35,25 @@
+ const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
+ EXPORT_SYMBOL(default_qdisc_ops);
+ 
++static void qdisc_maybe_clear_missed(struct Qdisc *q,
++				     const struct netdev_queue *txq)
++{
++	clear_bit(__QDISC_STATE_MISSED, &q->state);
++
++	/* Make sure the netif_xmit_frozen_or_stopped() check below
++	 * happens after clearing STATE_MISSED.
++	 */
++	smp_mb__after_atomic();
++
++	/* Check netif_xmit_frozen_or_stopped() again so that a
++	 * STATE_MISSED set by netif_tx_wake_queue()'s rescheduling
++	 * of net_tx_action(), and wiped by the clear_bit() above,
++	 * is set again rather than lost.
++	 */
++	if (!netif_xmit_frozen_or_stopped(txq))
++		set_bit(__QDISC_STATE_MISSED, &q->state);
++}
++
+ /* Main transmission queue. */
+ 
+ /* Modifications to data participating in scheduling must be protected with
+@@ -74,6 +93,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+ 			}
+ 		} else {
+ 			skb = SKB_XOFF_MAGIC;
++			qdisc_maybe_clear_missed(q, txq);
+ 		}
+ 	}
+ 
+@@ -242,6 +262,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
+ 			}
+ 		} else {
+ 			skb = NULL;
++			qdisc_maybe_clear_missed(q, txq);
+ 		}
+ 		if (lock)
+ 			spin_unlock(lock);
+@@ -251,8 +272,10 @@ validate:
+ 	*validate = true;
+ 
+ 	if ((q->flags & TCQ_F_ONETXQUEUE) &&
+-	    netif_xmit_frozen_or_stopped(txq))
++	    netif_xmit_frozen_or_stopped(txq)) {
++		qdisc_maybe_clear_missed(q, txq);
+ 		return skb;
++	}
+ 
+ 	skb = qdisc_dequeue_skb_bad_txq(q);
+ 	if (unlikely(skb)) {
+@@ -311,6 +334,8 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ 		HARD_TX_LOCK(dev, txq, smp_processor_id());
+ 		if (!netif_xmit_frozen_or_stopped(txq))
+ 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
++		else
++			qdisc_maybe_clear_missed(q, txq);
+ 
+ 		HARD_TX_UNLOCK(dev, txq);
+ 	} else {
+@@ -640,8 +665,10 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
+ {
+ 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+ 	struct sk_buff *skb = NULL;
++	bool need_retry = true;
+ 	int band;
+ 
++retry:
+ 	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
+ 		struct skb_array *q = band2list(priv, band);
+ 
+@@ -652,6 +679,23 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
+ 	}
+ 	if (likely(skb)) {
+ 		qdisc_update_stats_at_dequeue(qdisc, skb);
++	} else if (need_retry &&
++		   test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
++		/* Delay clearing the STATE_MISSED here to reduce
++		 * the overhead of the second spin_trylock() in
++		 * qdisc_run_begin() and of the __netif_schedule() call
++		 * in qdisc_run_end().
++		 */
++		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
++
++		/* Make sure dequeuing happens after clearing
++		 * STATE_MISSED.
++		 */
++		smp_mb__after_atomic();
++
++		need_retry = false;
++
++		goto retry;
+ 	} else {
+ 		WRITE_ONCE(qdisc->empty, true);
+ 	}
+@@ -1158,8 +1202,10 @@ static void dev_reset_queue(struct net_device *dev,
+ 	qdisc_reset(qdisc);
+ 
+ 	spin_unlock_bh(qdisc_lock(qdisc));
+-	if (nolock)
++	if (nolock) {
++		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ 		spin_unlock_bh(&qdisc->seqlock);
++	}
+ }
+ 
+ static bool some_qdisc_is_busy(struct net_device *dev)
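+
Both sch_generic hunk groups above are instances of the classic
lost-wakeup idiom for a lockless flag: clear, full barrier, recheck.
Its generic shape (a sketch, not tied to the qdisc specifics):

	clear_bit(FLAG, &state);
	smp_mb__after_atomic();		/* order the clear before the recheck */
	if (still_pending())		/* condition the waker signalled */
		set_bit(FLAG, &state);	/* waker's set raced with our clear */

Without the barrier and the recheck, a set_bit() from the waker landing
between the flag test and the clear_bit() would be silently wiped out,
and the queue could stall until the next unrelated event.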
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 4ae428f2f2c57..413266eb83b64 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4473,6 +4473,7 @@ static int sctp_setsockopt_encap_port(struct sock *sk,
+ 				    transports)
+ 			t->encap_port = encap_port;
+ 
++		asoc->encap_port = encap_port;
+ 		return 0;
+ 	}
+ 
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index e92df779af733..55871b277f475 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -307,7 +307,7 @@ static struct ctl_table sctp_net_table[] = {
+ 		.data		= &init_net.sctp.encap_port,
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= SYSCTL_ZERO,
+ 		.extra2		= &udp_port_max,
+ 	},
+diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
+index 9c6e95882553e..967712ba52a0d 100644
+--- a/net/smc/smc_ism.c
++++ b/net/smc/smc_ism.c
+@@ -402,6 +402,14 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
+ 		return NULL;
+ 	}
+ 
++	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
++						 WQ_MEM_RECLAIM, name);
++	if (!smcd->event_wq) {
++		kfree(smcd->conn);
++		kfree(smcd);
++		return NULL;
++	}
++
+ 	smcd->dev.parent = parent;
+ 	smcd->dev.release = smcd_release;
+ 	device_initialize(&smcd->dev);
+@@ -415,19 +423,14 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
+ 	INIT_LIST_HEAD(&smcd->vlan);
+ 	INIT_LIST_HEAD(&smcd->lgr_list);
+ 	init_waitqueue_head(&smcd->lgrs_deleted);
+-	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
+-						 WQ_MEM_RECLAIM, name);
+-	if (!smcd->event_wq) {
+-		kfree(smcd->conn);
+-		kfree(smcd);
+-		return NULL;
+-	}
+ 	return smcd;
+ }
+ EXPORT_SYMBOL_GPL(smcd_alloc_dev);
+ 
+ int smcd_register_dev(struct smcd_dev *smcd)
+ {
++	int rc;
++
+ 	mutex_lock(&smcd_dev_list.mutex);
+ 	if (list_empty(&smcd_dev_list.list)) {
+ 		u8 *system_eid = NULL;
+@@ -447,7 +450,14 @@ int smcd_register_dev(struct smcd_dev *smcd)
+ 			    dev_name(&smcd->dev), smcd->pnetid,
+ 			    smcd->pnetid_by_user ? " (user defined)" : "");
+ 
+-	return device_add(&smcd->dev);
++	rc = device_add(&smcd->dev);
++	if (rc) {
++		mutex_lock(&smcd_dev_list.mutex);
++		list_del(&smcd->list);
++		mutex_unlock(&smcd_dev_list.mutex);
++	}
++
++	return rc;
+ }
+ EXPORT_SYMBOL_GPL(smcd_register_dev);
+ 
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index f555d335e910d..42623d6b8f0ec 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1677,13 +1677,6 @@ call_reserveresult(struct rpc_task *task)
+ 		return;
+ 	}
+ 
+-	/*
+-	 * Even though there was an error, we may have acquired
+-	 * a request slot somehow.  Make sure not to leak it.
+-	 */
+-	if (task->tk_rqstp)
+-		xprt_release(task);
+-
+ 	switch (status) {
+ 	case -ENOMEM:
+ 		rpc_delay(task, HZ >> 2);
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 20fe31b1b776f..dc9207c1ff078 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -70,6 +70,7 @@
+ static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
+ static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
+ static void	 xprt_destroy(struct rpc_xprt *xprt);
++static void	 xprt_request_init(struct rpc_task *task);
+ 
+ static DEFINE_SPINLOCK(xprt_list_lock);
+ static LIST_HEAD(xprt_list);
+@@ -1602,17 +1603,40 @@ xprt_transmit(struct rpc_task *task)
+ 	spin_unlock(&xprt->queue_lock);
+ }
+ 
+-static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
++static void xprt_complete_request_init(struct rpc_task *task)
++{
++	if (task->tk_rqstp)
++		xprt_request_init(task);
++}
++
++void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
+ {
+ 	set_bit(XPRT_CONGESTED, &xprt->state);
+-	rpc_sleep_on(&xprt->backlog, task, NULL);
++	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
++}
++EXPORT_SYMBOL_GPL(xprt_add_backlog);
++
++static bool __xprt_set_rq(struct rpc_task *task, void *data)
++{
++	struct rpc_rqst *req = data;
++
++	if (task->tk_rqstp == NULL) {
++		memset(req, 0, sizeof(*req));	/* mark unused */
++		task->tk_rqstp = req;
++		return true;
++	}
++	return false;
+ }
+ 
+-static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
++bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
+ {
+-	if (rpc_wake_up_next(&xprt->backlog) == NULL)
++	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
+ 		clear_bit(XPRT_CONGESTED, &xprt->state);
++		return false;
++	}
++	return true;
+ }
++EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
+ 
+ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
+ {
+@@ -1622,7 +1646,7 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task
+ 		goto out;
+ 	spin_lock(&xprt->reserve_lock);
+ 	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
+-		rpc_sleep_on(&xprt->backlog, task, NULL);
++		xprt_add_backlog(xprt, task);
+ 		ret = true;
+ 	}
+ 	spin_unlock(&xprt->reserve_lock);
+@@ -1699,11 +1723,11 @@ EXPORT_SYMBOL_GPL(xprt_alloc_slot);
+ void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+ {
+ 	spin_lock(&xprt->reserve_lock);
+-	if (!xprt_dynamic_free_slot(xprt, req)) {
++	if (!xprt_wake_up_backlog(xprt, req) &&
++	    !xprt_dynamic_free_slot(xprt, req)) {
+ 		memset(req, 0, sizeof(*req));	/* mark unused */
+ 		list_add(&req->rq_list, &xprt->free);
+ 	}
+-	xprt_wake_up_backlog(xprt);
+ 	spin_unlock(&xprt->reserve_lock);
+ }
+ EXPORT_SYMBOL_GPL(xprt_free_slot);
+@@ -1890,10 +1914,10 @@ void xprt_release(struct rpc_task *task)
+ 	xdr_free_bvec(&req->rq_snd_buf);
+ 	if (req->rq_cred != NULL)
+ 		put_rpccred(req->rq_cred);
+-	task->tk_rqstp = NULL;
+ 	if (req->rq_release_snd_buf)
+ 		req->rq_release_snd_buf(req);
+ 
++	task->tk_rqstp = NULL;
+ 	if (likely(!bc_prealloc(req)))
+ 		xprt->ops->free_slot(xprt, req);
+ 	else
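+
rpc_wake_up_first() with the __xprt_set_rq callback hands the freed
slot directly to the first backlogged task instead of waking it up to
re-race against fresh allocators. The general pattern (a sketch with
hypothetical names, not the sunrpc code):

	void resource_free(struct pool *p, struct resource *res)
	{
		spin_lock(&p->lock);
		/* returns true if a waiter accepted the resource */
		if (!offer_to_first_waiter(p, res))
			list_add(&res->list, &p->free);
		spin_unlock(&p->lock);
	}

Because the hand-off completes before any new allocator can see the
slot, a long-waiting task can no longer starve behind new requests.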
+diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
+index 21ddd78a8c351..15e0c4e7b24ad 100644
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -628,8 +628,9 @@ out_mapping_err:
+ 	return false;
+ }
+ 
+-/* The tail iovec might not reside in the same page as the
+- * head iovec.
++/* The tail iovec may include an XDR pad for the page list,
++ * as well as additional content, and may not reside in the
++ * same page as the head iovec.
+  */
+ static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
+ 				     struct xdr_buf *xdr,
+@@ -747,19 +748,27 @@ static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
+ 				   struct rpcrdma_req *req,
+ 				   struct xdr_buf *xdr)
+ {
+-	struct kvec *tail = &xdr->tail[0];
+-
+ 	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
+ 		return false;
+ 
+-	/* If there is a Read chunk, the page list is handled
++	/* If there is a Read chunk, the page list is being handled
+ 	 * via explicit RDMA, and thus is skipped here.
+ 	 */
+ 
+-	if (tail->iov_len) {
+-		if (!rpcrdma_prepare_tail_iov(req, xdr,
+-					      offset_in_page(tail->iov_base),
+-					      tail->iov_len))
++	/* Do not include the tail if it is only an XDR pad */
++	if (xdr->tail[0].iov_len > 3) {
++		unsigned int page_base, len;
++
++		/* If the content in the page list is an odd length,
++		 * xdr_write_pages() adds a pad at the beginning of
++		 * the tail iovec. Force the tail's non-pad content to
++		 * land at the next XDR position in the Send message.
++		 */
++		page_base = offset_in_page(xdr->tail[0].iov_base);
++		len = xdr->tail[0].iov_len;
++		page_base += len & 3;
++		len -= len & 3;
++		if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
+ 			return false;
+ 		kref_get(&req->rl_kref);
+ 	}
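+
A worked example of the pad arithmetic: XDR aligns page-list content to
4 bytes, so xdr_write_pages() may place up to 3 pad bytes at the start
of the tail. With tail->iov_len == 7:

	len & 3    == 3		/* 3 leading pad bytes         */
	page_base += 3		/* skip over the pad           */
	len       -= 3		/* send the 4 content bytes    */

A tail of 3 bytes or fewer is pure pad, which is why the "iov_len > 3"
test skips it entirely.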
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index 09953597d055a..19a49d26b1e41 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -520,9 +520,8 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+ 	return;
+ 
+ out_sleep:
+-	set_bit(XPRT_CONGESTED, &xprt->state);
+-	rpc_sleep_on(&xprt->backlog, task, NULL);
+ 	task->tk_status = -EAGAIN;
++	xprt_add_backlog(xprt, task);
+ }
+ 
+ /**
+@@ -537,10 +536,11 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
+ 	struct rpcrdma_xprt *r_xprt =
+ 		container_of(xprt, struct rpcrdma_xprt, rx_xprt);
+ 
+-	memset(rqst, 0, sizeof(*rqst));
+-	rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+-	if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
+-		clear_bit(XPRT_CONGESTED, &xprt->state);
++	rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
++	if (!xprt_wake_up_backlog(xprt, rqst)) {
++		memset(rqst, 0, sizeof(*rqst));
++		rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
++	}
+ }
+ 
+ static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index f3fffc74ab0fa..0731b4756c491 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -1184,6 +1184,20 @@ rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
+ 	return mr;
+ }
+ 
++/**
++ * rpcrdma_reply_put - Put reply buffers back into pool
++ * @buffers: buffer pool
++ * @req: object to return
++ *
++ */
++void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
++{
++	if (req->rl_reply) {
++		rpcrdma_rep_put(buffers, req->rl_reply);
++		req->rl_reply = NULL;
++	}
++}
++
+ /**
+  * rpcrdma_buffer_get - Get a request buffer
+  * @buffers: Buffer pool from which to obtain a buffer
+@@ -1212,9 +1226,7 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
+  */
+ void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
+ {
+-	if (req->rl_reply)
+-		rpcrdma_rep_put(buffers, req->rl_reply);
+-	req->rl_reply = NULL;
++	rpcrdma_reply_put(buffers, req);
+ 
+ 	spin_lock(&buffers->rb_lock);
+ 	list_add(&req->rl_list, &buffers->rb_send_bufs);
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index 28af11fbe6438..ac13b93af9bd1 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -481,6 +481,7 @@ struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
+ void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
+ 			struct rpcrdma_req *req);
+ void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
++void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);
+ 
+ bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
+ 			    gfp_t flags);
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 5cc1f03072150..72f3ac73779bf 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -119,6 +119,8 @@ static void __net_exit tipc_exit_net(struct net *net)
+ #ifdef CONFIG_TIPC_CRYPTO
+ 	tipc_crypto_stop(&tipc_net(net)->crypto_tx);
+ #endif
++	while (atomic_read(&tn->wq_count))
++		cond_resched();
+ }
+ 
+ static void __net_exit tipc_pernet_pre_exit(struct net *net)
+diff --git a/net/tipc/core.h b/net/tipc/core.h
+index 03de7b213f553..5741ae488bb56 100644
+--- a/net/tipc/core.h
++++ b/net/tipc/core.h
+@@ -149,6 +149,8 @@ struct tipc_net {
+ #endif
+ 	/* Work item for net finalize */
+ 	struct tipc_net_work final_work;
++	/* Number of cleanup work items currently scheduled */
++	atomic_t wq_count;
+ };
+ 
+ static inline struct tipc_net *tipc_net(struct net *net)
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index e9263280a2d4a..d0fc5fadbc680 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -149,18 +149,13 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ 		if (unlikely(head))
+ 			goto err;
+ 		*buf = NULL;
++		if (skb_has_frag_list(frag) && __skb_linearize(frag))
++			goto err;
+ 		frag = skb_unshare(frag, GFP_ATOMIC);
+ 		if (unlikely(!frag))
+ 			goto err;
+ 		head = *headbuf = frag;
+ 		TIPC_SKB_CB(head)->tail = NULL;
+-		if (skb_is_nonlinear(head)) {
+-			skb_walk_frags(head, tail) {
+-				TIPC_SKB_CB(head)->tail = tail;
+-			}
+-		} else {
+-			skb_frag_list_init(head);
+-		}
+ 		return 0;
+ 	}
+ 
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 022999e0202d7..5792f4891dc18 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1265,7 +1265,10 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
+ 		spin_lock_bh(&inputq->lock);
+ 		if (skb_peek(arrvq) == skb) {
+ 			skb_queue_splice_tail_init(&tmpq, inputq);
+-			__skb_dequeue(arrvq);
++			/* Drop the extra skb reference that was taken in
++			 * tipc_skb_peek()
++			 */
++			kfree_skb(__skb_dequeue(arrvq));
+ 		}
+ 		spin_unlock_bh(&inputq->lock);
+ 		__skb_queue_purge(&tmpq);
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index 21e75e28e86ad..4c66e22b52f77 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -812,6 +812,7 @@ static void cleanup_bearer(struct work_struct *work)
+ 		kfree_rcu(rcast, rcu);
+ 	}
+ 
++	atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
+ 	dst_cache_destroy(&ub->rcast.dst_cache);
+ 	udp_tunnel_sock_release(ub->ubsock);
+ 	synchronize_net();
+@@ -832,6 +833,7 @@ static void tipc_udp_disable(struct tipc_bearer *b)
+ 	RCU_INIT_POINTER(ub->bearer, NULL);
+ 
+ 	/* sock_release need to be done outside of rtnl lock */
++	atomic_inc(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
+ 	INIT_WORK(&ub->work, cleanup_bearer);
+ 	schedule_work(&ub->work);
+ }
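+
The three TIPC hunks cooperate as one lifecycle (sketch of the flow,
names as in the code):

	/*
	 * tipc_udp_disable():	atomic_inc(&tn->wq_count);
	 *			schedule_work(&ub->work);
	 *
	 * cleanup_bearer():	atomic_dec(&tn->wq_count);
	 *			then releases dst cache and UDP socket
	 *
	 * tipc_exit_net():	while (atomic_read(&tn->wq_count))
	 *				cond_resched();
	 */

so the network namespace cannot finish tearing down while a bearer
cleanup work item is still queued against one of its sockets.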
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 01d933ae5f164..6086cf4f10a7c 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -37,6 +37,7 @@
+ 
+ #include <linux/sched/signal.h>
+ #include <linux/module.h>
++#include <linux/splice.h>
+ #include <crypto/aead.h>
+ 
+ #include <net/strparser.h>
+@@ -1281,7 +1282,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
+ }
+ 
+ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
+-				     int flags, long timeo, int *err)
++				     bool nonblock, long timeo, int *err)
+ {
+ 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+ 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+@@ -1306,7 +1307,7 @@ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
+ 		if (sock_flag(sk, SOCK_DONE))
+ 			return NULL;
+ 
+-		if ((flags & MSG_DONTWAIT) || !timeo) {
++		if (nonblock || !timeo) {
+ 			*err = -EAGAIN;
+ 			return NULL;
+ 		}
+@@ -1786,7 +1787,7 @@ int tls_sw_recvmsg(struct sock *sk,
+ 		bool async_capable;
+ 		bool async = false;
+ 
+-		skb = tls_wait_data(sk, psock, flags, timeo, &err);
++		skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
+ 		if (!skb) {
+ 			if (psock) {
+ 				int ret = __tcp_bpf_recvmsg(sk, psock,
+@@ -1990,9 +1991,9 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
+ 
+ 	lock_sock(sk);
+ 
+-	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
++	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
+ 
+-	skb = tls_wait_data(sk, NULL, flags, timeo, &err);
++	skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
+ 	if (!skb)
+ 		goto splice_read_end;
+ 
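+
MSG_DONTWAIT and SPLICE_F_NONBLOCK live in different flag namespaces
with different bit values, so masking splice flags with MSG_DONTWAIT
(which the old tls_wait_data() effectively did for the splice path)
tests the wrong bit. The fix moves the interpretation to each caller,
which reduces its own flag set to a plain bool:

	tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);	 /* recvmsg */
	tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err); /* splice  */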
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 1bf0200f562ab..f342b61476754 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -542,7 +542,7 @@ EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
+ 
+ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ 				  const u8 *addr, enum nl80211_iftype iftype,
+-				  u8 data_offset)
++				  u8 data_offset, bool is_amsdu)
+ {
+ 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ 	struct {
+@@ -629,7 +629,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ 	skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
+ 	tmp.h_proto = payload.proto;
+ 
+-	if (likely((ether_addr_equal(payload.hdr, rfc1042_header) &&
++	if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
+ 		    tmp.h_proto != htons(ETH_P_AARP) &&
+ 		    tmp.h_proto != htons(ETH_P_IPX)) ||
+ 		   ether_addr_equal(payload.hdr, bridge_tunnel_header)))
+@@ -771,6 +771,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ 		remaining = skb->len - offset;
+ 		if (subframe_len > remaining)
+ 			goto purge;
++		/* mitigate A-MSDU aggregation injection attacks */
++		if (ether_addr_equal(eth.h_dest, rfc1042_header))
++			goto purge;
+ 
+ 		offset += sizeof(struct ethhdr);
+ 		last = remaining <= subframe_len + padding;
+diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
+index 1e2a1105d0e67..1278b8f03fc45 100644
+--- a/samples/bpf/xdpsock_user.c
++++ b/samples/bpf/xdpsock_user.c
+@@ -1282,7 +1282,7 @@ static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
+ 	for (i = 0; i < batch_size; i++) {
+ 		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
+ 								  idx + i);
+-		tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
++		tx_desc->addr = (*frame_nb + i) * opt_xsk_frame_size;
+ 		tx_desc->len = PKT_SIZE;
+ 	}
+ 
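+
A worked example for the addressing fix (illustrative numbers): with a
UMEM configured for 2048-byte frames, frame 3 must start at byte
3 * 2048 = 6144. The old shift baked in the default 4096-byte frame
size:

	3 << XSK_UMEM__DEFAULT_FRAME_SHIFT  ==  3 * 4096  ==  12288

which points into the wrong frame -- or past the end of the UMEM for
high frame numbers. Multiplying by opt_xsk_frame_size is correct for
every configured frame size.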
+diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
+index af8a90ee40f39..a69ca1111b033 100644
+--- a/sound/firewire/dice/dice-pcm.c
++++ b/sound/firewire/dice/dice-pcm.c
+@@ -218,7 +218,7 @@ static int pcm_open(struct snd_pcm_substream *substream)
+ 
+ 		if (frames_per_period > 0) {
+ 			// For double_pcm_frame quirk.
+-			if (rate > 96000) {
++			if (rate > 96000 && !dice->disable_double_pcm_frames) {
+ 				frames_per_period *= 2;
+ 				frames_per_buffer *= 2;
+ 			}
+@@ -273,7 +273,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
+ 
+ 		mutex_lock(&dice->mutex);
+ 		// For double_pcm_frame quirk.
+-		if (rate > 96000) {
++		if (rate > 96000 && !dice->disable_double_pcm_frames) {
+ 			events_per_period /= 2;
+ 			events_per_buffer /= 2;
+ 		}
+diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
+index 1a14c083e8cea..c4dfe76500c29 100644
+--- a/sound/firewire/dice/dice-stream.c
++++ b/sound/firewire/dice/dice-stream.c
+@@ -181,7 +181,7 @@ static int keep_resources(struct snd_dice *dice, struct amdtp_stream *stream,
+ 	// as 'Dual Wire'.
+ 	// For this quirk, blocking mode is required and PCM buffer size should
+ 	// be aligned to SYT_INTERVAL.
+-	double_pcm_frames = rate > 96000;
++	double_pcm_frames = (rate > 96000 && !dice->disable_double_pcm_frames);
+ 	if (double_pcm_frames) {
+ 		rate /= 2;
+ 		pcm_chs *= 2;
+diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
+index 107a81691f0e8..239d164b0eea8 100644
+--- a/sound/firewire/dice/dice.c
++++ b/sound/firewire/dice/dice.c
+@@ -21,6 +21,7 @@ MODULE_LICENSE("GPL v2");
+ #define OUI_SSL			0x0050c2	// Actually ID reserved by IEEE.
+ #define OUI_PRESONUS		0x000a92
+ #define OUI_HARMAN		0x000fd7
++#define OUI_AVID		0x00a07e
+ 
+ #define DICE_CATEGORY_ID	0x04
+ #define WEISS_CATEGORY_ID	0x00
+@@ -222,6 +223,14 @@ static int dice_probe(struct fw_unit *unit,
+ 				(snd_dice_detect_formats_t)entry->driver_data;
+ 	}
+ 
++	// The models below are compliant with IEC 61883-1/6 and have no quirk at high
++	// sampling transfer frequency.
++	// * Avid M-Box 3 Pro
++	// * M-Audio Profire 610
++	// * M-Audio Profire 2626
++	if (entry->vendor_id == OUI_MAUDIO || entry->vendor_id == OUI_AVID)
++		dice->disable_double_pcm_frames = true;
++
+ 	spin_lock_init(&dice->lock);
+ 	mutex_init(&dice->mutex);
+ 	init_completion(&dice->clock_accepted);
+@@ -278,7 +287,22 @@ static void dice_bus_reset(struct fw_unit *unit)
+ 
+ #define DICE_INTERFACE	0x000001
+ 
++#define DICE_DEV_ENTRY_TYPICAL(vendor, model, data) \
++	{ \
++		.match_flags	= IEEE1394_MATCH_VENDOR_ID | \
++				  IEEE1394_MATCH_MODEL_ID | \
++				  IEEE1394_MATCH_SPECIFIER_ID | \
++				  IEEE1394_MATCH_VERSION, \
++		.vendor_id	= (vendor), \
++		.model_id	= (model), \
++		.specifier_id	= (vendor), \
++		.version	= DICE_INTERFACE, \
++		.driver_data = (kernel_ulong_t)(data), \
++	}
++
+ static const struct ieee1394_device_id dice_id_table[] = {
++	// Avid M-Box 3 Pro. To match in probe function.
++	DICE_DEV_ENTRY_TYPICAL(OUI_AVID, 0x000004, snd_dice_detect_extension_formats),
+ 	/* M-Audio Profire 2626 has a different value in version field. */
+ 	{
+ 		.match_flags	= IEEE1394_MATCH_VENDOR_ID |
+diff --git a/sound/firewire/dice/dice.h b/sound/firewire/dice/dice.h
+index adc6f7c844609..3c967d1b3605d 100644
+--- a/sound/firewire/dice/dice.h
++++ b/sound/firewire/dice/dice.h
+@@ -109,7 +109,8 @@ struct snd_dice {
+ 	struct fw_iso_resources rx_resources[MAX_STREAMS];
+ 	struct amdtp_stream tx_stream[MAX_STREAMS];
+ 	struct amdtp_stream rx_stream[MAX_STREAMS];
+-	bool global_enabled;
++	bool global_enabled:1;
++	bool disable_double_pcm_frames:1;
+ 	struct completion clock_accepted;
+ 	unsigned int substreams_counter;
+ 
+diff --git a/sound/isa/gus/gus_main.c b/sound/isa/gus/gus_main.c
+index afc088f0377ce..b7518122a10d6 100644
+--- a/sound/isa/gus/gus_main.c
++++ b/sound/isa/gus/gus_main.c
+@@ -77,17 +77,8 @@ static const struct snd_kcontrol_new snd_gus_joystick_control = {
+ 
+ static void snd_gus_init_control(struct snd_gus_card *gus)
+ {
+-	int ret;
+-
+-	if (!gus->ace_flag) {
+-		ret =
+-			snd_ctl_add(gus->card,
+-					snd_ctl_new1(&snd_gus_joystick_control,
+-						gus));
+-		if (ret)
+-			snd_printk(KERN_ERR "gus: snd_ctl_add failed: %d\n",
+-					ret);
+-	}
++	if (!gus->ace_flag)
++		snd_ctl_add(gus->card, snd_ctl_new1(&snd_gus_joystick_control, gus));
+ }
+ 
+ /*
+diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
+index 38dc1fde25f3c..aa48705310231 100644
+--- a/sound/isa/sb/sb16_main.c
++++ b/sound/isa/sb/sb16_main.c
+@@ -846,14 +846,10 @@ int snd_sb16dsp_pcm(struct snd_sb *chip, int device)
+ 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sb16_playback_ops);
+ 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sb16_capture_ops);
+ 
+-	if (chip->dma16 >= 0 && chip->dma8 != chip->dma16) {
+-		err = snd_ctl_add(card, snd_ctl_new1(
+-					&snd_sb16_dma_control, chip));
+-		if (err)
+-			return err;
+-	} else {
++	if (chip->dma16 >= 0 && chip->dma8 != chip->dma16)
++		snd_ctl_add(card, snd_ctl_new1(&snd_sb16_dma_control, chip));
++	else
+ 		pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX;
+-	}
+ 
+ 	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
+ 				       card->dev, 64*1024, 128*1024);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 43a63db4ab6ad..d8424d226714f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2603,6 +2603,28 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
+ 	{}
+ };
+ 
++static const struct snd_hda_pin_quirk alc882_pin_fixup_tbl[] = {
++	SND_HDA_PIN_QUIRK(0x10ec1220, 0x1043, "ASUS", ALC1220_FIXUP_CLEVO_P950,
++		{0x14, 0x01014010},
++		{0x15, 0x01011012},
++		{0x16, 0x01016011},
++		{0x18, 0x01a19040},
++		{0x19, 0x02a19050},
++		{0x1a, 0x0181304f},
++		{0x1b, 0x0221401f},
++		{0x1e, 0x01456130}),
++	SND_HDA_PIN_QUIRK(0x10ec1220, 0x1462, "MS-7C35", ALC1220_FIXUP_CLEVO_P950,
++		{0x14, 0x01015010},
++		{0x15, 0x01011012},
++		{0x16, 0x01011011},
++		{0x18, 0x01a11040},
++		{0x19, 0x02a19050},
++		{0x1a, 0x0181104f},
++		{0x1b, 0x0221401f},
++		{0x1e, 0x01451130}),
++	{}
++};
++
+ /*
+  * BIOS auto configuration
+  */
+@@ -2644,6 +2666,7 @@ static int patch_alc882(struct hda_codec *codec)
+ 
+ 	snd_hda_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl,
+ 		       alc882_fixups);
++	snd_hda_pick_pin_fixup(codec, alc882_pin_fixup_tbl, alc882_fixups, true);
+ 	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+ 
+ 	alc_auto_parse_customize_define(codec);
+@@ -6535,6 +6558,8 @@ enum {
+ 	ALC295_FIXUP_ASUS_DACS,
+ 	ALC295_FIXUP_HP_OMEN,
+ 	ALC285_FIXUP_HP_SPECTRE_X360,
++	ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
++	ALC623_FIXUP_LENOVO_THINKSTATION_P340,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -8095,6 +8120,18 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
+ 	},
++	[ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_ideapad_s740_coef,
++		.chained = true,
++		.chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
++	},
++	[ALC623_FIXUP_LENOVO_THINKSTATION_P340] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_no_shutup,
++		.chained = true,
++		.chain_id = ALC283_FIXUP_HEADSET_MIC,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -8277,6 +8314,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ 	SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
++	SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
++	SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
++	SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -8412,7 +8453,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
+-	SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+@@ -8462,6 +8503,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
+ 	SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
++	SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+@@ -8677,6 +8719,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
+ 	{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
+ 	{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
++	{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
++	{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
+index 7ad7b733af9b6..e8f3dcfd144da 100644
+--- a/sound/soc/codecs/cs35l33.c
++++ b/sound/soc/codecs/cs35l33.c
+@@ -1201,6 +1201,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
+ 		dev_err(&i2c_client->dev,
+ 			"CS35L33 Device ID (%X). Expected ID %X\n",
+ 			devid, CS35L33_CHIP_ID);
++		ret = -EINVAL;
+ 		goto err_enable;
+ 	}
+ 
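+
The one-line cs35l33 change fixes a stale-errno bug:

	/*
	 * At the point of the ID mismatch, `ret` was last assigned by a
	 * successful regmap read, so it still holds 0. Without the added
	 * `ret = -EINVAL`, `goto err_enable` unwinds the error path but
	 * the probe function returns 0, telling the driver core that a
	 * device which failed its ID check probed successfully.
	 */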
+diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
+index 811b7b1c9732e..323a4f7f384cd 100644
+--- a/sound/soc/codecs/cs42l42.c
++++ b/sound/soc/codecs/cs42l42.c
+@@ -398,6 +398,9 @@ static const struct regmap_config cs42l42_regmap = {
+ 	.reg_defaults = cs42l42_reg_defaults,
+ 	.num_reg_defaults = ARRAY_SIZE(cs42l42_reg_defaults),
+ 	.cache_type = REGCACHE_RBTREE,
++
++	.use_single_read = true,
++	.use_single_write = true,
+ };
+ 
+ static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
+index 80bc7c10ed757..80cd3ea0c1577 100644
+--- a/sound/soc/codecs/cs43130.c
++++ b/sound/soc/codecs/cs43130.c
+@@ -1735,6 +1735,14 @@ static DEVICE_ATTR(hpload_dc_r, 0444, cs43130_show_dc_r, NULL);
+ static DEVICE_ATTR(hpload_ac_l, 0444, cs43130_show_ac_l, NULL);
+ static DEVICE_ATTR(hpload_ac_r, 0444, cs43130_show_ac_r, NULL);
+ 
++static struct attribute *hpload_attrs[] = {
++	&dev_attr_hpload_dc_l.attr,
++	&dev_attr_hpload_dc_r.attr,
++	&dev_attr_hpload_ac_l.attr,
++	&dev_attr_hpload_ac_r.attr,
++	NULL,
++};
++ATTRIBUTE_GROUPS(hpload);
++
+ static struct reg_sequence hp_en_cal_seq[] = {
+ 	{CS43130_INT_MASK_4, CS43130_INT_MASK_ALL},
+ 	{CS43130_HP_MEAS_LOAD_1, 0},
+@@ -2302,25 +2310,15 @@ static int cs43130_probe(struct snd_soc_component *component)
+ 
+ 	cs43130->hpload_done = false;
+ 	if (cs43130->dc_meas) {
+-		ret = device_create_file(component->dev, &dev_attr_hpload_dc_l);
+-		if (ret < 0)
+-			return ret;
+-
+-		ret = device_create_file(component->dev, &dev_attr_hpload_dc_r);
+-		if (ret < 0)
+-			return ret;
+-
+-		ret = device_create_file(component->dev, &dev_attr_hpload_ac_l);
+-		if (ret < 0)
+-			return ret;
+-
+-		ret = device_create_file(component->dev, &dev_attr_hpload_ac_r);
+-		if (ret < 0)
++		ret = sysfs_create_groups(&component->dev->kobj, hpload_groups);
++		if (ret)
+ 			return ret;
+ 
+ 		cs43130->wq = create_singlethread_workqueue("cs43130_hp");
+-		if (!cs43130->wq)
++		if (!cs43130->wq) {
++			sysfs_remove_groups(&component->dev->kobj, hpload_groups);
+ 			return -ENOMEM;
++		}
+ 		INIT_WORK(&cs43130->work, cs43130_imp_meas);
+ 	}
+ 
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index be360a402b67f..936384a94f25e 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -835,18 +835,8 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
+ 		if (dai_id == LPASS_DP_RX)
+ 			continue;
+ 
+-		drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(dev,
++		drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
+ 					     variant->dai_osr_clk_names[i]);
+-		if (IS_ERR(drvdata->mi2s_osr_clk[dai_id])) {
+-			dev_warn(dev,
+-				"%s() error getting optional %s: %ld\n",
+-				__func__,
+-				variant->dai_osr_clk_names[i],
+-				PTR_ERR(drvdata->mi2s_osr_clk[dai_id]));
+-
+-			drvdata->mi2s_osr_clk[dai_id] = NULL;
+-		}
+-
+ 		drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
+ 						variant->dai_bit_clk_names[i]);
+ 		if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index e6ff317a67852..2287f8c653150 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -436,7 +436,7 @@ static bool check_valid_altsetting_v2v3(struct snd_usb_audio *chip, int iface,
+ 	if (snd_BUG_ON(altsetting >= 64 - 8))
+ 		return false;
+ 
+-	err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR,
++	err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
+ 			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ 			      UAC2_AS_VAL_ALT_SETTINGS << 8,
+ 			      iface, &raw_data, sizeof(raw_data));
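+
This is one of two places in this patch (see also scarlett2_usb()
below) fixing the same mismatch: a control read queued on the send
pipe. The rule, in outline:

	/*
	 * Control transfer direction and pipe must agree:
	 *   usb_rcvctrlpipe(dev, 0)  pairs with  USB_DIR_IN  (device-to-host)
	 *   usb_sndctrlpipe(dev, 0)  pairs with  USB_DIR_OUT (host-to-device)
	 * Recent kernels warn about such a "bogus" control direction at
	 * submit time, and the transfer itself may misbehave.
	 */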
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index ffd922327ae4a..805873d0cd553 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -3017,7 +3017,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ 	case USB_ID(0x1235, 0x8203): /* Focusrite Scarlett 6i6 2nd Gen */
+ 	case USB_ID(0x1235, 0x8204): /* Focusrite Scarlett 18i8 2nd Gen */
+ 	case USB_ID(0x1235, 0x8201): /* Focusrite Scarlett 18i20 2nd Gen */
+-		err = snd_scarlett_gen2_controls_create(mixer);
++		err = snd_scarlett_gen2_init(mixer);
+ 		break;
+ 
+ 	case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
+diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
+index 560c2ade829d0..4caf379d5b991 100644
+--- a/sound/usb/mixer_scarlett_gen2.c
++++ b/sound/usb/mixer_scarlett_gen2.c
+@@ -635,7 +635,7 @@ static int scarlett2_usb(
+ 	/* send a second message to get the response */
+ 
+ 	err = snd_usb_ctl_msg(mixer->chip->dev,
+-			usb_sndctrlpipe(mixer->chip->dev, 0),
++			usb_rcvctrlpipe(mixer->chip->dev, 0),
+ 			SCARLETT2_USB_VENDOR_SPECIFIC_CMD_RESP,
+ 			USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ 			0,
+@@ -1997,38 +1997,11 @@ static int scarlett2_mixer_status_create(struct usb_mixer_interface *mixer)
+ 	return usb_submit_urb(mixer->urb, GFP_KERNEL);
+ }
+ 
+-/* Entry point */
+-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
++static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
++					     const struct scarlett2_device_info *info)
+ {
+-	const struct scarlett2_device_info *info;
+ 	int err;
+ 
+-	/* only use UAC_VERSION_2 */
+-	if (!mixer->protocol)
+-		return 0;
+-
+-	switch (mixer->chip->usb_id) {
+-	case USB_ID(0x1235, 0x8203):
+-		info = &s6i6_gen2_info;
+-		break;
+-	case USB_ID(0x1235, 0x8204):
+-		info = &s18i8_gen2_info;
+-		break;
+-	case USB_ID(0x1235, 0x8201):
+-		info = &s18i20_gen2_info;
+-		break;
+-	default: /* device not (yet) supported */
+-		return -EINVAL;
+-	}
+-
+-	if (!(mixer->chip->setup & SCARLETT2_ENABLE)) {
+-		usb_audio_err(mixer->chip,
+-			"Focusrite Scarlett Gen 2 Mixer Driver disabled; "
+-			"use options snd_usb_audio device_setup=1 "
+-			"to enable and report any issues to g@b4.vu");
+-		return 0;
+-	}
+-
+ 	/* Initialise private data, routing, sequence number */
+ 	err = scarlett2_init_private(mixer, info);
+ 	if (err < 0)
+@@ -2073,3 +2046,51 @@ int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
+ 
+ 	return 0;
+ }
++
++int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer)
++{
++	struct snd_usb_audio *chip = mixer->chip;
++	const struct scarlett2_device_info *info;
++	int err;
++
++	/* only use UAC_VERSION_2 */
++	if (!mixer->protocol)
++		return 0;
++
++	switch (chip->usb_id) {
++	case USB_ID(0x1235, 0x8203):
++		info = &s6i6_gen2_info;
++		break;
++	case USB_ID(0x1235, 0x8204):
++		info = &s18i8_gen2_info;
++		break;
++	case USB_ID(0x1235, 0x8201):
++		info = &s18i20_gen2_info;
++		break;
++	default: /* device not (yet) supported */
++		return -EINVAL;
++	}
++
++	if (!(chip->setup & SCARLETT2_ENABLE)) {
++		usb_audio_info(chip,
++			"Focusrite Scarlett Gen 2 Mixer Driver disabled; "
++			"use options snd_usb_audio vid=0x%04x pid=0x%04x "
++			"device_setup=1 to enable and report any issues "
++			"to g@b4.vu",
++			USB_ID_VENDOR(chip->usb_id),
++			USB_ID_PRODUCT(chip->usb_id));
++		return 0;
++	}
++
++	usb_audio_info(chip,
++		"Focusrite Scarlett Gen 2 Mixer Driver enabled pid=0x%04x",
++		USB_ID_PRODUCT(chip->usb_id));
++
++	err = snd_scarlett_gen2_controls_create(mixer, info);
++	if (err < 0)
++		usb_audio_err(mixer->chip,
++			      "Error initialising Scarlett Mixer Driver: %d",
++			      err);
++
++	return err;
++}
+diff --git a/sound/usb/mixer_scarlett_gen2.h b/sound/usb/mixer_scarlett_gen2.h
+index 52e1dad77afd4..668c6b0cb50a6 100644
+--- a/sound/usb/mixer_scarlett_gen2.h
++++ b/sound/usb/mixer_scarlett_gen2.h
+@@ -2,6 +2,6 @@
+ #ifndef __USB_MIXER_SCARLETT_GEN2_H
+ #define __USB_MIXER_SCARLETT_GEN2_H
+ 
+-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer);
++int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer);
+ 
+ #endif /* __USB_MIXER_SCARLETT_GEN2_H */
+diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+index 790944c356025..baee8591ac76a 100644
+--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
++++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+@@ -30,7 +30,8 @@ CGROUP COMMANDS
+ |	*ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
+ |		**bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
+ |               **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
+-|               **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** }
++|               **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
++|               **sock_release** }
+ |	*ATTACH_FLAGS* := { **multi** | **override** }
+ 
+ DESCRIPTION
+@@ -106,6 +107,7 @@ DESCRIPTION
+ 		  **getpeername6** call to getpeername(2) for an inet6 socket (since 5.8);
+ 		  **getsockname4** call to getsockname(2) for an inet4 socket (since 5.8);
+ 		  **getsockname6** call to getsockname(2) for an inet6 socket (since 5.8).
++		  **sock_release** closing a userspace inet socket (since 5.9).
+ 
+ 	**bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+ 		  Detach *PROG* from the cgroup *CGROUP* and attach type
+diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+index 358c7309d4191..fe1b38e7e887d 100644
+--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
++++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+@@ -44,7 +44,7 @@ PROG COMMANDS
+ |		**cgroup/connect4** | **cgroup/connect6** | **cgroup/getpeername4** | **cgroup/getpeername6** |
+ |               **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
+ |		**cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** |
+-|		**cgroup/getsockopt** | **cgroup/setsockopt** |
++|		**cgroup/getsockopt** | **cgroup/setsockopt** | **cgroup/sock_release** |
+ |		**struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup**
+ |	}
+ |       *ATTACH_TYPE* := {
+diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
+index fdffbc64c65c6..c10807914f7b6 100644
+--- a/tools/bpf/bpftool/bash-completion/bpftool
++++ b/tools/bpf/bpftool/bash-completion/bpftool
+@@ -478,7 +478,7 @@ _bpftool()
+                                 cgroup/recvmsg4 cgroup/recvmsg6 \
+                                 cgroup/post_bind4 cgroup/post_bind6 \
+                                 cgroup/sysctl cgroup/getsockopt \
+-                                cgroup/setsockopt struct_ops \
++                                cgroup/setsockopt cgroup/sock_release struct_ops \
+                                 fentry fexit freplace sk_lookup" -- \
+                                                    "$cur" ) )
+                             return 0
+@@ -1008,7 +1008,7 @@ _bpftool()
+                         device bind4 bind6 post_bind4 post_bind6 connect4 connect6 \
+                         getpeername4 getpeername6 getsockname4 getsockname6 \
+                         sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl getsockopt \
+-                        setsockopt'
++                        setsockopt sock_release'
+                     local ATTACH_FLAGS='multi override'
+                     local PROG_TYPE='id pinned tag name'
+                     case $prev in
+@@ -1019,7 +1019,7 @@ _bpftool()
+                         ingress|egress|sock_create|sock_ops|device|bind4|bind6|\
+                         post_bind4|post_bind6|connect4|connect6|getpeername4|\
+                         getpeername6|getsockname4|getsockname6|sendmsg4|sendmsg6|\
+-                        recvmsg4|recvmsg6|sysctl|getsockopt|setsockopt)
++                        recvmsg4|recvmsg6|sysctl|getsockopt|setsockopt|sock_release)
+                             COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
+                                 "$cur" ) )
+                             return 0
+diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
+index d901cc1b904af..6e53b1d393f4a 100644
+--- a/tools/bpf/bpftool/cgroup.c
++++ b/tools/bpf/bpftool/cgroup.c
+@@ -28,7 +28,8 @@
+ 	"                        connect6 | getpeername4 | getpeername6 |\n"   \
+ 	"                        getsockname4 | getsockname6 | sendmsg4 |\n"   \
+ 	"                        sendmsg6 | recvmsg4 | recvmsg6 |\n"           \
+-	"                        sysctl | getsockopt | setsockopt }"
++	"                        sysctl | getsockopt | setsockopt |\n"	       \
++	"                        sock_release }"
+ 
+ static unsigned int query_flags;
+ 
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index f2b915b205464..0430dec6e288e 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -2137,7 +2137,7 @@ static int do_help(int argc, char **argv)
+ 		"                 cgroup/getpeername4 | cgroup/getpeername6 |\n"
+ 		"                 cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
+ 		"                 cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
+-		"                 cgroup/getsockopt | cgroup/setsockopt |\n"
++		"                 cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
+ 		"                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
+ 		"       ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
+ 		"                        flow_dissector }\n"
+diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
+index 7f475d59a0974..87d112650dfbb 100644
+--- a/tools/include/linux/bits.h
++++ b/tools/include/linux/bits.h
+@@ -22,7 +22,7 @@
+ #include <linux/build_bug.h>
+ #define GENMASK_INPUT_CHECK(h, l) \
+ 	(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+-		__builtin_constant_p((l) > (h)), (l) > (h), 0)))
++		__is_constexpr((l) > (h)), (l) > (h), 0)))
+ #else
+ /*
+  * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
+diff --git a/tools/include/linux/const.h b/tools/include/linux/const.h
+index 81b8aae5a8559..435ddd72d2c46 100644
+--- a/tools/include/linux/const.h
++++ b/tools/include/linux/const.h
+@@ -3,4 +3,12 @@
+ 
+ #include <vdso/const.h>
+ 
++/*
++ * This returns a constant expression while determining if an argument is
++ * a constant expression, most importantly without evaluating the argument.
++ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
++ */
++#define __is_constexpr(x) \
++	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
++
+ #endif /* _LINUX_CONST_H */
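
For reference, the __is_constexpr() trick above leans on C's null-pointer-constant
rules: when x is an integer constant expression, (void *)((long)(x) * 0l) is a
null pointer constant, so the conditional takes the type int * and sizeof(*...)
equals sizeof(int); otherwise the result type is void * and, under GNU C,
sizeof(void) is 1. A minimal standalone sketch (the test values are illustrative,
and it assumes the GNU sizeof(void) == 1 extension, as the kernel does):

#include <stdio.h>

#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

int main(void)
{
	int runtime_val = 5;

	/* The macro itself is a constant expression, so it can even feed
	 * a static assertion. */
	_Static_assert(__is_constexpr(42), "42 is a constant expression");

	/* A variable is not a constant expression, so the second test is 0. */
	printf("%d %d\n", __is_constexpr(42), __is_constexpr(runtime_val));
	return 0;
}
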
+diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
+index f6afee209620d..26e6d94d64ed4 100644
+--- a/tools/include/uapi/linux/kvm.h
++++ b/tools/include/uapi/linux/kvm.h
+@@ -8,6 +8,7 @@
+  * Note: you must update KVM_API_VERSION if you change this interface.
+  */
+ 
++#include <linux/const.h>
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+ #include <linux/ioctl.h>
+@@ -1834,8 +1835,8 @@ struct kvm_hyperv_eventfd {
+  * conversion after harvesting an entry.  Also, it must not skip any
+  * dirty bits, so that dirty bits are always harvested in sequence.
+  */
+-#define KVM_DIRTY_GFN_F_DIRTY           BIT(0)
+-#define KVM_DIRTY_GFN_F_RESET           BIT(1)
++#define KVM_DIRTY_GFN_F_DIRTY           _BITUL(0)
++#define KVM_DIRTY_GFN_F_RESET           _BITUL(1)
+ #define KVM_DIRTY_GFN_F_MASK            0x3
+ 
+ /*
+diff --git a/tools/perf/perf.c b/tools/perf/perf.c
+index 20cb91ef06ffc..2f6b67189b426 100644
+--- a/tools/perf/perf.c
++++ b/tools/perf/perf.c
+@@ -443,6 +443,8 @@ int main(int argc, const char **argv)
+ 	const char *cmd;
+ 	char sbuf[STRERR_BUFSIZE];
+ 
++	perf_debug_setup();
++
+ 	/* libsubcmd init */
+ 	exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
+ 	pager_init(PERF_PAGER_ENVIRONMENT);
+@@ -531,8 +533,6 @@ int main(int argc, const char **argv)
+ 	 */
+ 	pthread__block_sigwinch();
+ 
+-	perf_debug_setup();
+-
+ 	while (1) {
+ 		static int done_help;
+ 
+diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
+index e1f3f5c8c550c..9cda076693aee 100644
+--- a/tools/perf/pmu-events/jevents.c
++++ b/tools/perf/pmu-events/jevents.c
+@@ -958,7 +958,7 @@ static int get_maxfds(void)
+ 	struct rlimit rlim;
+ 
+ 	if (getrlimit(RLIMIT_NOFILE, &rlim) == 0)
+-		return min((int)rlim.rlim_max / 2, 512);
++		return min(rlim.rlim_max / 2, (rlim_t)512);
+ 
+ 	return 512;
+ }
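
The jevents one-liner above fixes an integer truncation: casting rlim_max to
int before halving turns RLIM_INFINITY into -1, so the old expression clamped
the fd budget to 0. A small userspace demo of the failure mode, assuming a
64-bit rlim_t as on Linux/glibc:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	rlim_t rlim_max = RLIM_INFINITY;	/* ~0UL on Linux */

	/* Old: cast to int first - RLIM_INFINITY becomes -1, halved to 0 */
	int old_limit = (int)rlim_max / 2 < 512 ? (int)rlim_max / 2 : 512;

	/* Fixed: do the math in rlim_t and compare against (rlim_t)512 */
	rlim_t new_limit = rlim_max / 2 < (rlim_t)512 ? rlim_max / 2
						      : (rlim_t)512;

	printf("old: %d, fixed: %llu\n",	/* old: 0, fixed: 512 */
	       old_limit, (unsigned long long)new_limit);
	return 0;
}
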
+diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
+index 7daa8bb70a5a0..711d4f9f5645c 100755
+--- a/tools/perf/scripts/python/exported-sql-viewer.py
++++ b/tools/perf/scripts/python/exported-sql-viewer.py
+@@ -91,6 +91,11 @@
+ from __future__ import print_function
+ 
+ import sys
++# Only change warnings if the python -W option was not used
++if not sys.warnoptions:
++	import warnings
++	# PySide2 causes deprecation warnings, ignore them.
++	warnings.filterwarnings("ignore", category=DeprecationWarning)
+ import argparse
+ import weakref
+ import threading
+@@ -125,8 +130,9 @@ if pyside_version_1:
+ 	from PySide.QtGui import *
+ 	from PySide.QtSql import *
+ 
+-from decimal import *
+-from ctypes import *
++from decimal import Decimal, ROUND_HALF_UP
++from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
++		   c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong
+ from multiprocessing import Process, Array, Value, Event
+ 
+ # xrange is range in Python3
+@@ -3868,7 +3874,7 @@ def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
+ 	if with_hdr:
+ 		model = indexes[0].model()
+ 		for col in range(min_col, max_col + 1):
+-			val = model.headerData(col, Qt.Horizontal)
++			val = model.headerData(col, Qt.Horizontal, Qt.DisplayRole)
+ 			if as_csv:
+ 				text += sep + ToCSValue(val)
+ 				sep = ","
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index 8c59677bee130..20ad663978cc4 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1146,6 +1146,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
+ 		decoder->set_fup_tx_flags = false;
+ 		decoder->tx_flags = decoder->fup_tx_flags;
+ 		decoder->state.type = INTEL_PT_TRANSACTION;
++		if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
++			decoder->state.type |= INTEL_PT_BRANCH;
+ 		decoder->state.from_ip = decoder->ip;
+ 		decoder->state.to_ip = 0;
+ 		decoder->state.flags = decoder->fup_tx_flags;
+@@ -1220,8 +1222,10 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
+ 			return 0;
+ 		if (err == -EAGAIN ||
+ 		    intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
++			bool no_tip = decoder->pkt_state != INTEL_PT_STATE_FUP;
++
+ 			decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+-			if (intel_pt_fup_event(decoder))
++			if (intel_pt_fup_event(decoder) && no_tip)
+ 				return 0;
+ 			return -EAGAIN;
+ 		}
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index f6e28ac231b79..05a83637c9758 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -707,8 +707,10 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
+ 
+ 			*ip += intel_pt_insn->length;
+ 
+-			if (to_ip && *ip == to_ip)
++			if (to_ip && *ip == to_ip) {
++				intel_pt_insn->length = 0;
+ 				goto out_no_cache;
++			}
+ 
+ 			if (*ip >= al.map->end)
+ 				break;
+@@ -1198,6 +1200,7 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
+ 
+ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
+ {
++	ptq->insn_len = 0;
+ 	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
+ 		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
+ 	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
+diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
+index 0f4258eaa629e..61ab17238715a 100644
+--- a/tools/testing/selftests/kvm/include/kvm_util.h
++++ b/tools/testing/selftests/kvm/include/kvm_util.h
+@@ -295,7 +295,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm);
+ 
+ unsigned int vm_get_page_size(struct kvm_vm *vm);
+ unsigned int vm_get_page_shift(struct kvm_vm *vm);
+-unsigned int vm_get_max_gfn(struct kvm_vm *vm);
++uint64_t vm_get_max_gfn(struct kvm_vm *vm);
+ int vm_get_fd(struct kvm_vm *vm);
+ 
+ unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
+diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
+index b8849a1aca792..2f0e4365f61bd 100644
+--- a/tools/testing/selftests/kvm/lib/kvm_util.c
++++ b/tools/testing/selftests/kvm/lib/kvm_util.c
+@@ -1969,7 +1969,7 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm)
+ 	return vm->page_shift;
+ }
+ 
+-unsigned int vm_get_max_gfn(struct kvm_vm *vm)
++uint64_t vm_get_max_gfn(struct kvm_vm *vm)
+ {
+ 	return vm->max_gfn;
+ }
+diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
+index 81490b9b4e32a..abf381800a590 100644
+--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
++++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
+@@ -2,6 +2,7 @@
+ /*
+  * Copyright (C) 2020, Google LLC.
+  */
++#include <inttypes.h>
+ 
+ #include "kvm_util.h"
+ #include "perf_test_util.h"
+@@ -80,7 +81,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+ 	 */
+ 	TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
+ 		    "Requested more guest memory than address space allows.\n"
+-		    "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
++		    "    guest pages: %" PRIx64 " max gfn: %" PRIx64
++		    " vcpus: %d wss: %" PRIx64 "]\n",
+ 		    guest_num_pages, vm_get_max_gfn(vm), vcpus,
+ 		    vcpu_memory_bytes);
+ 
+diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+index 6096bf0a5b34f..98351ba0933cd 100644
+--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
++++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+@@ -71,14 +71,22 @@ struct memslot_antagonist_args {
+ };
+ 
+ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
+-			      uint64_t nr_modifications, uint64_t gpa)
++			       uint64_t nr_modifications)
+ {
++	const uint64_t pages = 1;
++	uint64_t gpa;
+ 	int i;
+ 
++	/*
++	 * Add the dummy memslot just below the perf_test_util memslot, which is
++	 * at the top of the guest physical address space.
++	 */
++	gpa = guest_test_phys_mem - pages * vm_get_page_size(vm);
++
+ 	for (i = 0; i < nr_modifications; i++) {
+ 		usleep(delay);
+ 		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
+-					    DUMMY_MEMSLOT_INDEX, 1, 0);
++					    DUMMY_MEMSLOT_INDEX, pages, 0);
+ 
+ 		vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
+ 	}
+@@ -120,11 +128,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+ 	pr_info("Started all vCPUs\n");
+ 
+ 	add_remove_memslot(vm, p->memslot_modification_delay,
+-			   p->nr_memslot_modifications,
+-			   guest_test_phys_mem +
+-			   (guest_percpu_mem_size * nr_vcpus) +
+-			   perf_test_args.host_page_size +
+-			   perf_test_args.guest_page_size);
++			   p->nr_memslot_modifications);
+ 
+ 	run_vcpus = false;
+ 
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
+index 1cda2e11b3ad9..773c5027553d2 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
+@@ -9,11 +9,11 @@
+         "setup": [
+             "$IP link add dev $DUMMY type dummy || /bin/true"
+         ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536",
+-        "expExitCode": "2",
++        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_pie flows 65536",
++        "expExitCode": "0",
+         "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc",
+-        "matchCount": "0",
++        "matchPattern": "qdisc fq_pie 1: root refcnt 2 limit 10240p flows 65536",
++        "matchCount": "1",
+         "teardown": [
+             "$IP link del dev $DUMMY"
+         ]
+diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
+index c9bb3957f58a7..28fda42e471bb 100644
+--- a/virt/lib/irqbypass.c
++++ b/virt/lib/irqbypass.c
+@@ -40,21 +40,17 @@ static int __connect(struct irq_bypass_producer *prod,
+ 	if (prod->add_consumer)
+ 		ret = prod->add_consumer(prod, cons);
+ 
+-	if (ret)
+-		goto err_add_consumer;
+-
+-	ret = cons->add_producer(cons, prod);
+-	if (ret)
+-		goto err_add_producer;
++	if (!ret) {
++		ret = cons->add_producer(cons, prod);
++		if (ret && prod->del_consumer)
++			prod->del_consumer(prod, cons);
++	}
+ 
+ 	if (cons->start)
+ 		cons->start(cons);
+ 	if (prod->start)
+ 		prod->start(prod);
+-err_add_producer:
+-	if (prod->del_consumer)
+-		prod->del_consumer(prod, cons);
+-err_add_consumer:
++
+ 	return ret;
+ }
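
For readers skimming the irqbypass hunk above: the old layout was not merely
verbose, it fell through the err_add_producer label on the success path and
tore down the consumer it had just started. A stripped-down, runnable
illustration of the two shapes (the stub names here are invented for the demo):

#include <stdio.h>

static int add_consumer(void) { return 0; }	/* pretend both succeed */
static int add_producer(void) { return 0; }

static int connect_old(void)			/* the buggy shape */
{
	int ret = add_consumer();
	if (ret)
		goto err_add_consumer;
	ret = add_producer();
	if (ret)
		goto err_add_producer;
	printf("started\n");			/* success ... */
err_add_producer:				/* ... falls through anyway */
	printf("del_consumer called\n");
err_add_consumer:
	return ret;
}

static int connect_new(void)			/* the fixed shape */
{
	int ret = add_consumer();

	if (!ret) {
		ret = add_producer();
		if (ret)
			printf("del_consumer called\n");
	}
	printf("started\n");	/* start callbacks run unconditionally, as in the fix */
	return ret;
}

int main(void)
{
	connect_old();	/* prints "started" AND "del_consumer called" */
	connect_new();	/* prints "started" only */
	return 0;
}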
+ 


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-05-28 12:17 Alice Ferrazzi
  0 siblings, 0 replies; 33+ messages in thread
From: Alice Ferrazzi @ 2021-05-28 12:17 UTC (permalink / raw
  To: gentoo-commits

commit:     e6d9be74b3e7338b6962aabb831d42f3b594f44f
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri May 28 12:17:14 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri May 28 12:17:25 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e6d9be74

Linux patch 5.12.8

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |   4 +
 1007_linux-5.12.8.patch | 288 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 292 insertions(+)

diff --git a/0000_README b/0000_README
index 22c40ca..90784e9 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-5.12.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.7
 
+Patch:  1007_linux-5.12.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-5.12.8.patch b/1007_linux-5.12.8.patch
new file mode 100644
index 0000000..1cfbe69
--- /dev/null
+++ b/1007_linux-5.12.8.patch
@@ -0,0 +1,288 @@
+diff --git a/Makefile b/Makefile
+index 6a73dee7c2219..a20afcb7d2bf4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 48ee3deab64b1..9a6825feaf53f 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3815,15 +3815,15 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ 	 * have them in state 'on' as recorded before entering guest mode.
+ 	 * Same as enter_from_user_mode().
+ 	 *
+-	 * guest_exit_irqoff() restores host context and reinstates RCU if
+-	 * enabled and required.
++	 * context_tracking_guest_exit() restores host context and reinstates
++	 * RCU if enabled and required.
+ 	 *
+ 	 * This needs to be done before the below as native_read_msr()
+ 	 * contains a tracepoint and x86_spec_ctrl_restore_host() calls
+ 	 * into world and some more.
+ 	 */
+ 	lockdep_hardirqs_off(CALLER_ADDR0);
+-	guest_exit_irqoff();
++	context_tracking_guest_exit();
+ 
+ 	instrumentation_begin();
+ 	trace_hardirqs_off_finish();
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index f68ed9a1abcc9..ae63d59be38c7 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6701,15 +6701,15 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ 	 * have them in state 'on' as recorded before entering guest mode.
+ 	 * Same as enter_from_user_mode().
+ 	 *
+-	 * guest_exit_irqoff() restores host context and reinstates RCU if
+-	 * enabled and required.
++	 * context_tracking_guest_exit() restores host context and reinstates
++	 * RCU if enabled and required.
+ 	 *
+ 	 * This needs to be done before the below as native_read_msr()
+ 	 * contains a tracepoint and x86_spec_ctrl_restore_host() calls
+ 	 * into world and some more.
+ 	 */
+ 	lockdep_hardirqs_off(CALLER_ADDR0);
+-	guest_exit_irqoff();
++	context_tracking_guest_exit();
+ 
+ 	instrumentation_begin();
+ 	trace_hardirqs_off_finish();
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 87311d39f9145..86678f8b35020 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9236,6 +9236,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 	local_irq_disable();
+ 	kvm_after_interrupt(vcpu);
+ 
++	/*
++	 * Wait until after servicing IRQs to account guest time so that any
++	 * ticks that occurred while running the guest are properly accounted
++	 * to the guest.  Waiting until IRQs are enabled degrades the accuracy
++	 * of accounting via context tracking, but the loss of accuracy is
++	 * acceptable for all known use cases.
++	 */
++	vtime_account_guest_exit();
++
+ 	if (lapic_in_kernel(vcpu)) {
+ 		s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
+ 		if (delta != S64_MIN) {
+diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
+index bceb064985214..4f4556232dcf7 100644
+--- a/include/linux/context_tracking.h
++++ b/include/linux/context_tracking.h
+@@ -131,16 +131,26 @@ static __always_inline void guest_enter_irqoff(void)
+ 	}
+ }
+ 
+-static __always_inline void guest_exit_irqoff(void)
++static __always_inline void context_tracking_guest_exit(void)
+ {
+ 	if (context_tracking_enabled())
+ 		__context_tracking_exit(CONTEXT_GUEST);
++}
+ 
+-	instrumentation_begin();
++static __always_inline void vtime_account_guest_exit(void)
++{
+ 	if (vtime_accounting_enabled_this_cpu())
+ 		vtime_guest_exit(current);
+ 	else
+ 		current->flags &= ~PF_VCPU;
++}
++
++static __always_inline void guest_exit_irqoff(void)
++{
++	context_tracking_guest_exit();
++
++	instrumentation_begin();
++	vtime_account_guest_exit();
+ 	instrumentation_end();
+ }
+ 
+@@ -159,12 +169,19 @@ static __always_inline void guest_enter_irqoff(void)
+ 	instrumentation_end();
+ }
+ 
++static __always_inline void context_tracking_guest_exit(void) { }
++
++static __always_inline void vtime_account_guest_exit(void)
++{
++	vtime_account_kernel(current);
++	current->flags &= ~PF_VCPU;
++}
++
+ static __always_inline void guest_exit_irqoff(void)
+ {
+ 	instrumentation_begin();
+ 	/* Flush the guest cputime we spent on the guest */
+-	vtime_account_kernel(current);
+-	current->flags &= ~PF_VCPU;
++	vtime_account_guest_exit();
+ 	instrumentation_end();
+ }
+ #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
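
Taken together with the svm.c, vmx.c and x86.c hunks above, this split lets the
vcpu run loop leave guest context immediately on exit but defer the vtime
accounting until pending host IRQs have been serviced, so their ticks are
charged to the guest. A schematic of the resulting ordering (a sketch only;
the helpers around the two new calls are placeholders for the real KVM exit
path, not actual code):

static void vcpu_exit_path_sketch(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	context_tracking_guest_exit();	/* leave CONTEXT_GUEST, RCU watching */

	/* ... restore host state, trace_hardirqs_off_finish(), ... */

	local_irq_enable();		/* let pending host IRQs run ... */
	local_irq_disable();		/* ... so their ticks hit the guest */

	vtime_account_guest_exit();	/* account guest time, clear PF_VCPU */
}
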
+diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
+index 43c9c5d2bedbd..33979017b7824 100644
+--- a/include/net/nfc/nci_core.h
++++ b/include/net/nfc/nci_core.h
+@@ -298,6 +298,7 @@ int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
+ 		      struct sk_buff **resp);
+ 
+ struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
++void nci_hci_deallocate(struct nci_dev *ndev);
+ int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
+ 		       const u8 *param, size_t param_len);
+ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 7fa6fc6bedf1f..21247e49fe82b 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5863,18 +5863,10 @@ enum {
+ };
+ 
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+-			      const struct bpf_reg_state *off_reg,
+-			      u32 *alu_limit, u8 opcode)
++			      u32 *alu_limit, bool mask_to_left)
+ {
+-	bool off_is_neg = off_reg->smin_value < 0;
+-	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+-			    (opcode == BPF_SUB && !off_is_neg);
+ 	u32 max = 0, ptr_limit = 0;
+ 
+-	if (!tnum_is_const(off_reg->var_off) &&
+-	    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+-		return REASON_BOUNDS;
+-
+ 	switch (ptr_reg->type) {
+ 	case PTR_TO_STACK:
+ 		/* Offset 0 is out-of-bounds, but acceptable start for the
+@@ -5940,15 +5932,20 @@ static bool sanitize_needed(u8 opcode)
+ 	return opcode == BPF_ADD || opcode == BPF_SUB;
+ }
+ 
++struct bpf_sanitize_info {
++	struct bpf_insn_aux_data aux;
++	bool mask_to_left;
++};
++
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 			    struct bpf_insn *insn,
+ 			    const struct bpf_reg_state *ptr_reg,
+ 			    const struct bpf_reg_state *off_reg,
+ 			    struct bpf_reg_state *dst_reg,
+-			    struct bpf_insn_aux_data *tmp_aux,
++			    struct bpf_sanitize_info *info,
+ 			    const bool commit_window)
+ {
+-	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
++	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
+ 	struct bpf_verifier_state *vstate = env->cur_state;
+ 	bool off_is_imm = tnum_is_const(off_reg->var_off);
+ 	bool off_is_neg = off_reg->smin_value < 0;
+@@ -5969,7 +5966,16 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 	if (vstate->speculative)
+ 		goto do_sim;
+ 
+-	err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
++	if (!commit_window) {
++		if (!tnum_is_const(off_reg->var_off) &&
++		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
++			return REASON_BOUNDS;
++
++		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
++				     (opcode == BPF_SUB && !off_is_neg);
++	}
++
++	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -5977,8 +5983,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 		/* In commit phase we narrow the masking window based on
+ 		 * the observed pointer move after the simulated operation.
+ 		 */
+-		alu_state = tmp_aux->alu_state;
+-		alu_limit = abs(tmp_aux->alu_limit - alu_limit);
++		alu_state = info->aux.alu_state;
++		alu_limit = abs(info->aux.alu_limit - alu_limit);
+ 	} else {
+ 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+ 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
+@@ -5993,8 +5999,12 @@ do_sim:
+ 	/* If we're in commit phase, we're done here given we already
+ 	 * pushed the truncated dst_reg into the speculative verification
+ 	 * stack.
++	 *
++	 * Also, when register is a known constant, we rewrite register-based
++	 * operation to immediate-based, and thus do not need masking (and as
++	 * a consequence, do not need to simulate the zero-truncation either).
+ 	 */
+-	if (commit_window)
++	if (commit_window || off_is_imm)
+ 		return 0;
+ 
+ 	/* Simulate and find potential out-of-bounds access under
+@@ -6139,7 +6149,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
+ 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
+ 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
+-	struct bpf_insn_aux_data tmp_aux = {};
++	struct bpf_sanitize_info info = {};
+ 	u8 opcode = BPF_OP(insn->code);
+ 	u32 dst = insn->dst_reg;
+ 	int ret;
+@@ -6208,7 +6218,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 
+ 	if (sanitize_needed(opcode)) {
+ 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
+-				       &tmp_aux, false);
++				       &info, false);
+ 		if (ret < 0)
+ 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
+ 	}
+@@ -6349,7 +6359,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 		return -EACCES;
+ 	if (sanitize_needed(opcode)) {
+ 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
+-				       &tmp_aux, true);
++				       &info, true);
+ 		if (ret < 0)
+ 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
+ 	}
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 59257400697d1..142d71c8d6521 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1191,6 +1191,7 @@ EXPORT_SYMBOL(nci_allocate_device);
+ void nci_free_device(struct nci_dev *ndev)
+ {
+ 	nfc_free_device(ndev->nfc_dev);
++	nci_hci_deallocate(ndev);
+ 	kfree(ndev);
+ }
+ EXPORT_SYMBOL(nci_free_device);
+diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
+index 6b275a387a92a..96865142104f4 100644
+--- a/net/nfc/nci/hci.c
++++ b/net/nfc/nci/hci.c
+@@ -792,3 +792,8 @@ struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev)
+ 
+ 	return hdev;
+ }
++
++void nci_hci_deallocate(struct nci_dev *ndev)
++{
++	kfree(ndev->hci_dev);
++}


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-05-26 12:08 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-05-26 12:08 UTC (permalink / raw
  To: gentoo-commits

commit:     3e880daf8c1c2089abdf0b092cb6143b35fddb3c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 26 12:08:06 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 26 12:08:06 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3e880daf

Linux patch 5.12.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1006_linux-5.12.7.patch | 5240 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5244 insertions(+)

diff --git a/0000_README b/0000_README
index 528cc11..22c40ca 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-5.12.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.6
 
+Patch:  1006_linux-5.12.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-5.12.7.patch b/1006_linux-5.12.7.patch
new file mode 100644
index 0000000..6bf34f2
--- /dev/null
+++ b/1006_linux-5.12.7.patch
@@ -0,0 +1,5240 @@
+diff --git a/Documentation/powerpc/syscall64-abi.rst b/Documentation/powerpc/syscall64-abi.rst
+index dabee3729e5a5..56490c4c0c07a 100644
+--- a/Documentation/powerpc/syscall64-abi.rst
++++ b/Documentation/powerpc/syscall64-abi.rst
+@@ -109,6 +109,16 @@ auxiliary vector.
+ 
+ scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.
+ 
++ptrace
++------
++When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
++the system call type, which can be used to distinguish between sc and scv 0
++system calls so that the different register conventions can be accounted for.
++
++If the value of (pt_regs.trap & 0xfff0) is 0xc00 then the system call was
++performed with the sc instruction, if it is 0x3000 then the system call was
++performed with the scv 0 instruction.
++
+ vsyscall
+ ========
+ 
+diff --git a/Makefile b/Makefile
+index dd021135838b8..6a73dee7c2219 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
+index 2416a9f915330..c6f9e7b9f7cb2 100644
+--- a/arch/openrisc/kernel/setup.c
++++ b/arch/openrisc/kernel/setup.c
+@@ -278,6 +278,8 @@ void calibrate_delay(void)
+ 	pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
+ 		loops_per_jiffy / (500000 / HZ),
+ 		(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
++
++	of_node_put(cpu);
+ }
+ 
+ void __init setup_arch(char **cmdline_p)
+diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
+index bf9b2310fc936..f3fa02b8838af 100644
+--- a/arch/openrisc/mm/init.c
++++ b/arch/openrisc/mm/init.c
+@@ -75,7 +75,6 @@ static void __init map_ram(void)
+ 	/* These mark extents of read-only kernel pages...
+ 	 * ...from vmlinux.lds.S
+ 	 */
+-	struct memblock_region *region;
+ 
+ 	v = PAGE_OFFSET;
+ 
+@@ -121,7 +120,7 @@ static void __init map_ram(void)
+ 		}
+ 
+ 		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
+-		       region->base, region->base + region->size);
++		       start, end);
+ 	}
+ }
+ 
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index ed6086d57b22e..0c92b01a3c3c1 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -446,6 +446,9 @@
+  */
+ long plpar_hcall_norets(unsigned long opcode, ...);
+ 
++/* Variant which does not do hcall tracing */
++long plpar_hcall_norets_notrace(unsigned long opcode, ...);
++
+ /**
+  * plpar_hcall: - Make a pseries hypervisor call
+  * @opcode: The hypervisor call to make.
+diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
+index 5d1726bb28e79..bcb7b5f917be6 100644
+--- a/arch/powerpc/include/asm/paravirt.h
++++ b/arch/powerpc/include/asm/paravirt.h
+@@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
+ 	return be32_to_cpu(yield_count);
+ }
+ 
++/*
++ * Spinlock code confers and prods, so don't trace the hcalls because the
++ * tracing code takes spinlocks which can cause recursion deadlocks.
++ *
++ * These calls are made while the lock is not held: the lock slowpath yields if
++ * it cannot acquire the lock, and the unlock slow path might prod if a waiter
++ * has yielded. So this may not be a problem for simple spin locks because the
++ * tracing does not technically recurse on the lock, but we avoid it anyway.
++ *
++ * However the queued spin lock contended path is more strictly ordered: the
++ * H_CONFER hcall is made after the task has queued itself on the lock, so then
++ * recursing on that lock will cause the task to then queue up again behind the
++ * first instance (or worse: queued spinlocks use tricks that assume a context
++ * never waits on more than one spinlock, so such recursion may cause random
++ * corruption in the lock code).
++ */
+ static inline void yield_to_preempted(int cpu, u32 yield_count)
+ {
+-	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
++	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+ }
+ 
+ static inline void prod_cpu(int cpu)
+ {
+-	plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
++	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
+ }
+ 
+ static inline void yield_to_any(void)
+ {
+-	plpar_hcall_norets(H_CONFER, -1, 0);
++	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
+ }
+ #else
+ static inline bool is_shared_processor(void)
+diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
+index 1499e928ea6a6..5d8d397e928a0 100644
+--- a/arch/powerpc/include/asm/ptrace.h
++++ b/arch/powerpc/include/asm/ptrace.h
+@@ -19,6 +19,7 @@
+ #ifndef _ASM_POWERPC_PTRACE_H
+ #define _ASM_POWERPC_PTRACE_H
+ 
++#include <linux/err.h>
+ #include <uapi/asm/ptrace.h>
+ #include <asm/asm-const.h>
+ 
+@@ -152,25 +153,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
+ long do_syscall_trace_enter(struct pt_regs *regs);
+ void do_syscall_trace_leave(struct pt_regs *regs);
+ 
+-#define kernel_stack_pointer(regs) ((regs)->gpr[1])
+-static inline int is_syscall_success(struct pt_regs *regs)
+-{
+-	return !(regs->ccr & 0x10000000);
+-}
+-
+-static inline long regs_return_value(struct pt_regs *regs)
+-{
+-	if (is_syscall_success(regs))
+-		return regs->gpr[3];
+-	else
+-		return -regs->gpr[3];
+-}
+-
+-static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+-{
+-	regs->gpr[3] = rc;
+-}
+-
+ #ifdef __powerpc64__
+ #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
+ #else
+@@ -252,6 +234,31 @@ static inline void set_trap_norestart(struct pt_regs *regs)
+ 	regs->trap |= 0x10;
+ }
+ 
++#define kernel_stack_pointer(regs) ((regs)->gpr[1])
++static inline int is_syscall_success(struct pt_regs *regs)
++{
++	if (trap_is_scv(regs))
++		return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
++	else
++		return !(regs->ccr & 0x10000000);
++}
++
++static inline long regs_return_value(struct pt_regs *regs)
++{
++	if (trap_is_scv(regs))
++		return regs->gpr[3];
++
++	if (is_syscall_success(regs))
++		return regs->gpr[3];
++	else
++		return -regs->gpr[3];
++}
++
++static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
++{
++	regs->gpr[3] = rc;
++}
++
+ #define arch_has_single_step()	(1)
+ #define arch_has_block_step()	(true)
+ #define ARCH_HAS_USER_SINGLE_STEP_REPORT
+diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
+index fd1b518eed17c..ba0f88f3a30da 100644
+--- a/arch/powerpc/include/asm/syscall.h
++++ b/arch/powerpc/include/asm/syscall.h
+@@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
+ static inline long syscall_get_error(struct task_struct *task,
+ 				     struct pt_regs *regs)
+ {
+-	/*
+-	 * If the system call failed,
+-	 * regs->gpr[3] contains a positive ERRORCODE.
+-	 */
+-	return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
++	if (trap_is_scv(regs)) {
++		unsigned long error = regs->gpr[3];
++
++		return IS_ERR_VALUE(error) ? error : 0;
++	} else {
++		/*
++		 * If the system call failed,
++		 * regs->gpr[3] contains a positive ERRORCODE.
++		 */
++		return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
++	}
+ }
+ 
+ static inline long syscall_get_return_value(struct task_struct *task,
+@@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
+ 					    struct pt_regs *regs,
+ 					    int error, long val)
+ {
+-	/*
+-	 * In the general case it's not obvious that we must deal with CCR
+-	 * here, as the syscall exit path will also do that for us. However
+-	 * there are some places, eg. the signal code, which check ccr to
+-	 * decide if the value in r3 is actually an error.
+-	 */
+-	if (error) {
+-		regs->ccr |= 0x10000000L;
+-		regs->gpr[3] = error;
++	if (trap_is_scv(regs)) {
++		regs->gpr[3] = (long) error ?: val;
+ 	} else {
+-		regs->ccr &= ~0x10000000L;
+-		regs->gpr[3] = val;
++		/*
++		 * In the general case it's not obvious that we must deal with
++		 * CCR here, as the syscall exit path will also do that for us.
++		 * However there are some places, eg. the signal code, which
++		 * check ccr to decide if the value in r3 is actually an error.
++		 */
++		if (error) {
++			regs->ccr |= 0x10000000L;
++			regs->gpr[3] = error;
++		} else {
++			regs->ccr &= ~0x10000000L;
++			regs->gpr[3] = val;
++		}
+ 	}
+ }
+ 
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 830fee91b2d99..c914fe8a2c67f 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -369,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
+ 	apply_feature_fixups();
+ 	setup_feature_keys();
+ 
+-	early_ioremap_setup();
+-
+ 	/* Initialize the hash table or TLB handling */
+ 	early_init_mmu();
+ 
++	early_ioremap_setup();
++
+ 	/*
+ 	 * After firmware and early platform setup code has set things up,
+ 	 * we note the SPR values for configurable control/performance
+diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
+index 2136e42833af3..8a2b8d64265bc 100644
+--- a/arch/powerpc/platforms/pseries/hvCall.S
++++ b/arch/powerpc/platforms/pseries/hvCall.S
+@@ -102,6 +102,16 @@ END_FTR_SECTION(0, 1);						\
+ #define HCALL_BRANCH(LABEL)
+ #endif
+ 
++_GLOBAL_TOC(plpar_hcall_norets_notrace)
++	HMT_MEDIUM
++
++	mfcr	r0
++	stw	r0,8(r1)
++	HVSC				/* invoke the hypervisor */
++	lwz	r0,8(r1)
++	mtcrf	0xff,r0
++	blr				/* return r3 = status */
++
+ _GLOBAL_TOC(plpar_hcall_norets)
+ 	HMT_MEDIUM
+ 
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index cd38bd421f381..d4aa6a46e1fa6 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -1830,8 +1830,7 @@ void hcall_tracepoint_unregfunc(void)
+ 
+ /*
+  * Since the tracing code might execute hcalls we need to guard against
+- * recursion. One example of this are spinlocks calling H_YIELD on
+- * shared processor partitions.
++ * recursion.
+  */
+ static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+ 
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 78faf9c7e3aed..1f2e5bfb9bb03 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -170,11 +170,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
+ 	KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
+ endif
+ 
+-ifdef CONFIG_LTO_CLANG
+-KBUILD_LDFLAGS	+= -plugin-opt=-code-model=kernel \
+-		   -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+-endif
+-
+ # Workaround for a gcc prerelease that unfortunately was shipped in a suse release
+ KBUILD_CFLAGS += -Wno-sign-compare
+ #
+@@ -194,7 +189,12 @@ ifdef CONFIG_RETPOLINE
+   endif
+ endif
+ 
+-KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
++KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
++
++ifdef CONFIG_LTO_CLANG
++KBUILD_LDFLAGS	+= -plugin-opt=-code-model=kernel \
++		   -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
++endif
+ 
+ ifdef CONFIG_X86_NEED_RELOCS
+ LDFLAGS_vmlinux := --emit-relocs --discard-none
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index e94874f4bbc1d..ae1fe558a2d88 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -172,11 +172,21 @@ SYM_FUNC_START(startup_32)
+ 	 */
+ 	call	get_sev_encryption_bit
+ 	xorl	%edx, %edx
++#ifdef	CONFIG_AMD_MEM_ENCRYPT
+ 	testl	%eax, %eax
+ 	jz	1f
+ 	subl	$32, %eax	/* Encryption bit is always above bit 31 */
+ 	bts	%eax, %edx	/* Set encryption mask for page tables */
++	/*
++	 * Mark SEV as active in sev_status so that startup32_check_sev_cbit()
++	 * will do a check. The sev_status memory will be fully initialized
++	 * with the contents of MSR_AMD_SEV_STATUS later in
++	 * set_sev_encryption_mask(). For now it is sufficient to know that SEV
++	 * is active.
++	 */
++	movl	$1, rva(sev_status)(%ebp)
+ 1:
++#endif
+ 
+ 	/* Initialize Page tables to 0 */
+ 	leal	rva(pgtable)(%ebx), %edi
+@@ -261,6 +271,9 @@ SYM_FUNC_START(startup_32)
+ 	movl	%esi, %edx
+ 1:
+ #endif
++	/* Check if the C-bit position is correct when SEV is active */
++	call	startup32_check_sev_cbit
++
+ 	pushl	$__KERNEL_CS
+ 	pushl	%eax
+ 
+@@ -786,6 +799,78 @@ SYM_DATA_START_LOCAL(loaded_image_proto)
+ SYM_DATA_END(loaded_image_proto)
+ #endif
+ 
++/*
++ * Check for the correct C-bit position when the startup_32 boot-path is used.
++ *
++ * The check makes use of the fact that all memory is encrypted when paging is
++ * disabled. The function creates 64 bits of random data using the RDRAND
++ * instruction. RDRAND is mandatory for SEV guests, so always available. If the
++ * hypervisor violates that, the kernel will crash right here.
++ *
++ * The 64 bits of random data are stored to a memory location and at the same
++ * time kept in the %eax and %ebx registers. Since encryption is always active
++ * when paging is off the random data will be stored encrypted in main memory.
++ *
++ * Then paging is enabled. When the C-bit position is correct all memory is
++ * still mapped encrypted and comparing the register values with memory will
++ * succeed. An incorrect C-bit position will map all memory unencrypted, so that
++ * the compare will use the encrypted random data and fail.
++ */
++	__HEAD
++	.code32
++SYM_FUNC_START(startup32_check_sev_cbit)
++#ifdef CONFIG_AMD_MEM_ENCRYPT
++	pushl	%eax
++	pushl	%ebx
++	pushl	%ecx
++	pushl	%edx
++
++	/* Check for non-zero sev_status */
++	movl	rva(sev_status)(%ebp), %eax
++	testl	%eax, %eax
++	jz	4f
++
++	/*
++	 * Get two 32-bit random values - don't bail out if RDRAND fails,
++	 * because it is better to block forward progress than to continue
++	 * without a random value.
++	 */
++1:	rdrand	%eax
++	jnc	1b
++2:	rdrand	%ebx
++	jnc	2b
++
++	/* Store to memory and keep it in the registers */
++	movl	%eax, rva(sev_check_data)(%ebp)
++	movl	%ebx, rva(sev_check_data+4)(%ebp)
++
++	/* Enable paging to see if encryption is active */
++	movl	%cr0, %edx			 /* Backup %cr0 in %edx */
++	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
++	movl	%ecx, %cr0
++
++	cmpl	%eax, rva(sev_check_data)(%ebp)
++	jne	3f
++	cmpl	%ebx, rva(sev_check_data+4)(%ebp)
++	jne	3f
++
++	movl	%edx, %cr0	/* Restore previous %cr0 */
++
++	jmp	4f
++
++3:	/* Check failed - hlt the machine */
++	hlt
++	jmp	3b
++
++4:
++	popl	%edx
++	popl	%ecx
++	popl	%ebx
++	popl	%eax
++#endif
++	ret
++SYM_FUNC_END(startup32_check_sev_cbit)
++
+ /*
+  * Stack and heap for uncompression
+  */
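
For readers who do not speak 32-bit assembly, the startup32_check_sev_cbit()
comment block above fully specifies the algorithm; a C-shaped restatement
follows (pseudo-code only - the real check runs with paging off and cannot be
written in C, and every helper name here is notional):

/* Pseudo-code mirror of startup32_check_sev_cbit() */
static void check_sev_cbit_sketch(void)
{
	u64 random;

	if (!sev_status)
		return;			/* SEV not active: nothing to check */

	random = rdrand64_or_spin();	/* retry RDRAND until it succeeds */
	sev_check_data = random;	/* stored encrypted: paging is off */

	enable_paging();		/* reads now honour the C-bit */
	if (sev_check_data != random)
		halt_forever();		/* wrong C-bit: read saw ciphertext */
	restore_previous_cr0();
}
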
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index c57ec8e279078..4c18e7fb58f58 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -5741,7 +5741,7 @@ __init int intel_pmu_init(void)
+ 	 * Check all LBT MSR here.
+ 	 * Disable LBR access if any LBR MSRs can not be accessed.
+ 	 */
+-	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
++	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+ 		x86_pmu.lbr_nr = 0;
+ 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ 		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
+index 387b716698187..ecb20b17b7df6 100644
+--- a/arch/x86/kernel/sev-es-shared.c
++++ b/arch/x86/kernel/sev-es-shared.c
+@@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
+ 
+ static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
+ {
++	ghcb->save.sw_exit_code = 0;
+ 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+ }
+ 
+diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
+index 04a780abb512d..e0cdab7cb632b 100644
+--- a/arch/x86/kernel/sev-es.c
++++ b/arch/x86/kernel/sev-es.c
+@@ -191,8 +191,18 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+ 	if (unlikely(data->ghcb_active)) {
+ 		/* GHCB is already in use - save its contents */
+ 
+-		if (unlikely(data->backup_ghcb_active))
+-			return NULL;
++		if (unlikely(data->backup_ghcb_active)) {
++			/*
++			 * Backup-GHCB is also already in use. There is no way
++			 * to continue here so just kill the machine. To make
++			 * panic() work, mark GHCBs inactive so that messages
++			 * can be printed out.
++			 */
++			data->ghcb_active        = false;
++			data->backup_ghcb_active = false;
++
++			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
++		}
+ 
+ 		/* Mark backup_ghcb active before writing to it */
+ 		data->backup_ghcb_active = true;
+@@ -209,24 +219,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+ 	return ghcb;
+ }
+ 
+-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+-{
+-	struct sev_es_runtime_data *data;
+-	struct ghcb *ghcb;
+-
+-	data = this_cpu_read(runtime_data);
+-	ghcb = &data->ghcb_page;
+-
+-	if (state->ghcb) {
+-		/* Restore GHCB from Backup */
+-		*ghcb = *state->ghcb;
+-		data->backup_ghcb_active = false;
+-		state->ghcb = NULL;
+-	} else {
+-		data->ghcb_active = false;
+-	}
+-}
+-
+ /* Needed in vc_early_forward_exception */
+ void do_early_exception(struct pt_regs *regs, int trapnr);
+ 
+@@ -296,31 +288,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
+ 	u16 d2;
+ 	u8  d1;
+ 
+-	/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+-	if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
+-		memcpy(dst, buf, size);
+-		return ES_OK;
+-	}
+-
++	/*
++	 * This function uses __put_user() independent of whether kernel or user
++	 * memory is accessed. This works fine because __put_user() does no
++	 * sanity checks of the pointer being accessed. All it does is
++	 * report whether the access failed.
++	 *
++	 * Also, this function runs in atomic context, so __put_user() is not
++	 * allowed to sleep. The page-fault handler detects that it is running
++	 * in atomic context and will not try to take mmap_sem and handle the
++	 * fault, so additional pagefault_enable()/disable() calls are not
++	 * needed.
++	 *
++	 * The access can't be done via copy_to_user() here because
++	 * vc_write_mem() must not use string instructions to access unsafe
++	 * memory. The reason is that MOVS is emulated by the #VC handler by
++	 * splitting the move up into a read and a write and taking a nested #VC
++	 * exception on whatever of them is the MMIO access. Using string
++	 * instructions here would cause infinite nesting.
++	 */
+ 	switch (size) {
+ 	case 1:
+ 		memcpy(&d1, buf, 1);
+-		if (put_user(d1, target))
++		if (__put_user(d1, target))
+ 			goto fault;
+ 		break;
+ 	case 2:
+ 		memcpy(&d2, buf, 2);
+-		if (put_user(d2, target))
++		if (__put_user(d2, target))
+ 			goto fault;
+ 		break;
+ 	case 4:
+ 		memcpy(&d4, buf, 4);
+-		if (put_user(d4, target))
++		if (__put_user(d4, target))
+ 			goto fault;
+ 		break;
+ 	case 8:
+ 		memcpy(&d8, buf, 8);
+-		if (put_user(d8, target))
++		if (__put_user(d8, target))
+ 			goto fault;
+ 		break;
+ 	default:
+@@ -351,30 +356,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ 	u16 d2;
+ 	u8  d1;
+ 
+-	/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+-	if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
+-		memcpy(buf, src, size);
+-		return ES_OK;
+-	}
+-
++	/*
++	 * This function uses __get_user() independent of whether kernel or user
++	 * memory is accessed. This works fine because __get_user() does no
++	 * sanity checks of the pointer being accessed. All it does is
++	 * report whether the access failed.
++	 *
++	 * Also, this function runs in atomic context, so __get_user() is not
++	 * allowed to sleep. The page-fault handler detects that it is running
++	 * in atomic context and will not try to take mmap_sem and handle the
++	 * fault, so additional pagefault_enable()/disable() calls are not
++	 * needed.
++	 *
++	 * The access can't be done via copy_from_user() here because
++	 * vc_read_mem() must not use string instructions to access unsafe
++	 * memory. The reason is that MOVS is emulated by the #VC handler by
++	 * splitting the move up into a read and a write and taking a nested #VC
++	 * exception on whatever of them is the MMIO access. Using string
++	 * instructions here would cause infinite nesting.
++	 */
+ 	switch (size) {
+ 	case 1:
+-		if (get_user(d1, s))
++		if (__get_user(d1, s))
+ 			goto fault;
+ 		memcpy(buf, &d1, 1);
+ 		break;
+ 	case 2:
+-		if (get_user(d2, s))
++		if (__get_user(d2, s))
+ 			goto fault;
+ 		memcpy(buf, &d2, 2);
+ 		break;
+ 	case 4:
+-		if (get_user(d4, s))
++		if (__get_user(d4, s))
+ 			goto fault;
+ 		memcpy(buf, &d4, 4);
+ 		break;
+ 	case 8:
+-		if (get_user(d8, s))
++		if (__get_user(d8, s))
+ 			goto fault;
+ 		memcpy(buf, &d8, 8);
+ 		break;
+@@ -434,6 +452,29 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
+ /* Include code shared with pre-decompression boot stage */
+ #include "sev-es-shared.c"
+ 
++static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
++{
++	struct sev_es_runtime_data *data;
++	struct ghcb *ghcb;
++
++	data = this_cpu_read(runtime_data);
++	ghcb = &data->ghcb_page;
++
++	if (state->ghcb) {
++		/* Restore GHCB from Backup */
++		*ghcb = *state->ghcb;
++		data->backup_ghcb_active = false;
++		state->ghcb = NULL;
++	} else {
++		/*
++		 * Invalidate the GHCB so a VMGEXIT instruction issued
++		 * from userspace won't appear to be valid.
++		 */
++		vc_ghcb_invalidate(ghcb);
++		data->ghcb_active = false;
++	}
++}
++
+ void noinstr __sev_es_nmi_complete(void)
+ {
+ 	struct ghcb_state state;
+@@ -1228,6 +1269,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
+ 	case X86_TRAP_UD:
+ 		exc_invalid_op(ctxt->regs);
+ 		break;
++	case X86_TRAP_PF:
++		write_cr2(ctxt->fi.cr2);
++		exc_page_fault(ctxt->regs, error_code);
++		break;
+ 	case X86_TRAP_AC:
+ 		exc_alignment_check(ctxt->regs, error_code);
+ 		break;
+@@ -1257,7 +1302,6 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
+  */
+ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+ {
+-	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+ 	irqentry_state_t irq_state;
+ 	struct ghcb_state state;
+ 	struct es_em_ctxt ctxt;
+@@ -1283,16 +1327,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+ 	 */
+ 
+ 	ghcb = sev_es_get_ghcb(&state);
+-	if (!ghcb) {
+-		/*
+-		 * Mark GHCBs inactive so that panic() is able to print the
+-		 * message.
+-		 */
+-		data->ghcb_active        = false;
+-		data->backup_ghcb_active = false;
+-
+-		panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+-	}
+ 
+ 	vc_ghcb_invalidate(ghcb);
+ 	result = vc_init_em_ctxt(&ctxt, regs, error_code);
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index dc0a337f985b6..8183ddb3700c4 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1276,16 +1276,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ 	/* Get mfn list */
+ 	xen_build_dynamic_phys_to_machine();
+ 
++	/* Work out if we support NX */
++	get_cpu_cap(&boot_cpu_data);
++	x86_configure_nx();
++
+ 	/*
+ 	 * Set up kernel GDT and segment registers, mainly so that
+ 	 * -fstack-protector code can be executed.
+ 	 */
+ 	xen_setup_gdt(0);
+ 
+-	/* Work out if we support NX */
+-	get_cpu_cap(&boot_cpu_data);
+-	x86_configure_nx();
+-
+ 	/* Determine virtual and physical address sizes */
+ 	get_cpu_address_sizes(&boot_cpu_data);
+ 
+diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
+index 9874fc1c815b5..1831099306aa9 100644
+--- a/drivers/cdrom/gdrom.c
++++ b/drivers/cdrom/gdrom.c
+@@ -743,6 +743,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
+ static int probe_gdrom(struct platform_device *devptr)
+ {
+ 	int err;
++
++	/*
++	 * Ensure our "one" device is initialized properly in case it was
++	 * used by a previous probe/remove cycle
++	 */
++	memset(&gd, 0, sizeof(gd));
++
+ 	/* Start the device */
+ 	if (gdrom_execute_diagnostic() != 1) {
+ 		pr_warn("ATA Probe for GDROM failed\n");
+@@ -831,6 +838,8 @@ static int remove_gdrom(struct platform_device *devptr)
+ 	if (gdrom_major)
+ 		unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
+ 	unregister_cdrom(gd.cd_info);
++	kfree(gd.cd_info);
++	kfree(gd.toc);
+ 
+ 	return 0;
+ }
+@@ -846,7 +855,7 @@ static struct platform_driver gdrom_driver = {
+ static int __init init_gdrom(void)
+ {
+ 	int rc;
+-	gd.toc = NULL;
++
+ 	rc = platform_driver_register(&gdrom_driver);
+ 	if (rc)
+ 		return rc;
+@@ -862,8 +871,6 @@ static void __exit exit_gdrom(void)
+ {
+ 	platform_device_unregister(pd);
+ 	platform_driver_unregister(&gdrom_driver);
+-	kfree(gd.toc);
+-	kfree(gd.cd_info);
+ }
+ 
+ module_init(init_gdrom);
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index f264b70c383eb..eadd1eaa2fb54 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -760,7 +760,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ 
+ 		if (dma_buf_is_dynamic(attach->dmabuf)) {
+ 			dma_resv_lock(attach->dmabuf->resv, NULL);
+-			ret = dma_buf_pin(attach);
++			ret = dmabuf->ops->pin(attach);
+ 			if (ret)
+ 				goto err_unlock;
+ 		}
+@@ -786,7 +786,7 @@ err_attach:
+ 
+ err_unpin:
+ 	if (dma_buf_is_dynamic(attach->dmabuf))
+-		dma_buf_unpin(attach);
++		dmabuf->ops->unpin(attach);
+ 
+ err_unlock:
+ 	if (dma_buf_is_dynamic(attach->dmabuf))
+@@ -843,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
+ 		__unmap_dma_buf(attach, attach->sgt, attach->dir);
+ 
+ 		if (dma_buf_is_dynamic(attach->dmabuf)) {
+-			dma_buf_unpin(attach);
++			dmabuf->ops->unpin(attach);
+ 			dma_resv_unlock(attach->dmabuf->resv);
+ 		}
+ 	}
+@@ -956,7 +956,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ 	if (dma_buf_is_dynamic(attach->dmabuf)) {
+ 		dma_resv_assert_held(attach->dmabuf->resv);
+ 		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+-			r = dma_buf_pin(attach);
++			r = attach->dmabuf->ops->pin(attach);
+ 			if (r)
+ 				return ERR_PTR(r);
+ 		}
+@@ -968,7 +968,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ 
+ 	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+ 	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+-		dma_buf_unpin(attach);
++		attach->dmabuf->ops->unpin(attach);
+ 
+ 	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+ 		attach->sgt = sg_table;
+diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
+index d0dee37ad5228..4ceba5ef78958 100644
+--- a/drivers/firmware/arm_scpi.c
++++ b/drivers/firmware/arm_scpi.c
+@@ -552,8 +552,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
+ 
+ 	ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
+ 				sizeof(le_clk_id), &rate, sizeof(rate));
++	if (ret)
++		return 0;
+ 
+-	return ret ? ret : le32_to_cpu(rate);
++	return le32_to_cpu(rate);
+ }
+ 
+ static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
+diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
+index 1bd9e44df7184..05974b760796b 100644
+--- a/drivers/gpio/gpio-tegra186.c
++++ b/drivers/gpio/gpio-tegra186.c
+@@ -444,16 +444,6 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
+ 	return 0;
+ }
+ 
+-static int tegra186_irq_set_affinity(struct irq_data *data,
+-				     const struct cpumask *dest,
+-				     bool force)
+-{
+-	if (data->parent_data)
+-		return irq_chip_set_affinity_parent(data, dest, force);
+-
+-	return -EINVAL;
+-}
+-
+ static void tegra186_gpio_irq(struct irq_desc *desc)
+ {
+ 	struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
+@@ -700,7 +690,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
+ 	gpio->intc.irq_unmask = tegra186_irq_unmask;
+ 	gpio->intc.irq_set_type = tegra186_irq_set_type;
+ 	gpio->intc.irq_set_wake = tegra186_irq_set_wake;
+-	gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
+ 
+ 	irq = &gpio->gpio.irq;
+ 	irq->chip = &gpio->intc;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 383c178cf0746..6b14626c148ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -267,7 +267,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+ 	*addr += offset & ~PAGE_MASK;
+ 
+ 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+-	num_bytes = num_pages * 8;
++	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ 
+ 	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+ 				     AMDGPU_IB_POOL_DELAYED, &job);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 63691deb7df3c..2342c5d216f9b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -1391,9 +1391,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
+-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
+@@ -1411,12 +1412,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
+-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
+ };
+ 
+ static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 65db88bb6cbcd..d2c020a91c0be 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4864,7 +4864,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+ 	amdgpu_gfx_rlc_enter_safe_mode(adev);
+ 
+ 	/* Enable 3D CGCG/CGLS */
+-	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
++	if (enable) {
+ 		/* write cmd to clear cgcg/cgls ov */
+ 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+ 		/* unset CGCG override */
+@@ -4876,8 +4876,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+ 		/* enable 3Dcgcg FSM(0x0000363f) */
+ 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+ 
+-		data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+-			RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
++		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
++			data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
++				RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
++		else
++			data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
++
+ 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
+ 			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
+ 				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+index d345e324837dd..2a27fe26232b6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+@@ -123,6 +123,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
+ 
+ static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 1221aa6b40a9f..d1045a9b37d98 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1151,7 +1151,6 @@ static int soc15_common_early_init(void *handle)
+ 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ 				AMD_CG_SUPPORT_GFX_MGLS |
+ 				AMD_CG_SUPPORT_GFX_CP_LS |
+-				AMD_CG_SUPPORT_GFX_3D_CGCG |
+ 				AMD_CG_SUPPORT_GFX_3D_CGLS |
+ 				AMD_CG_SUPPORT_GFX_CGCG |
+ 				AMD_CG_SUPPORT_GFX_CGLS |
+@@ -1170,7 +1169,6 @@ static int soc15_common_early_init(void *handle)
+ 				AMD_CG_SUPPORT_GFX_MGLS |
+ 				AMD_CG_SUPPORT_GFX_RLC_LS |
+ 				AMD_CG_SUPPORT_GFX_CP_LS |
+-				AMD_CG_SUPPORT_GFX_3D_CGCG |
+ 				AMD_CG_SUPPORT_GFX_3D_CGLS |
+ 				AMD_CG_SUPPORT_GFX_CGCG |
+ 				AMD_CG_SUPPORT_GFX_CGLS |
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index 71e2d5e025710..9b33182f3abd5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -826,10 +826,11 @@ static const struct dc_plane_cap plane_cap = {
+ 			.fp16 = 16000
+ 	},
+ 
++	/* 6:1 downscaling ratio: 1000/6 = 166.666 */
+ 	.max_downscale_factor = {
+-			.argb8888 = 600,
+-			.nv12 = 600,
+-			.fp16 = 600
++			.argb8888 = 167,
++			.nv12 = 167,
++			.fp16 = 167
+ 	}
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+index c494235016e09..00f066f1da0c7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+@@ -843,10 +843,11 @@ static const struct dc_plane_cap plane_cap = {
+ 			.fp16 = 16000
+ 	},
+ 
++	/* 6:1 downscaling ratio: 1000/6 = 166.666 */
+ 	.max_downscale_factor = {
+-			.argb8888 = 600,
+-			.nv12 = 600,
+-			.fp16 = 600
++			.argb8888 = 167,
++			.nv12 = 167,
++			.fp16 = 167
+ 	},
+ 	64,
+ 	64
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+index d03b1975e4178..7d9d591de411b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+@@ -282,10 +282,11 @@ static const struct dc_plane_cap plane_cap = {
+ 				.nv12 = 16000,
+ 				.fp16 = 16000
+ 		},
++		/* 6:1 downscaling ratio: 1000/6 = 166.666 */
+ 		.max_downscale_factor = {
+-				.argb8888 = 600,
+-				.nv12 = 600,
+-				.fp16 = 600
++				.argb8888 = 167,
++				.nv12 = 167,
++				.fp16 = 167
+ 		},
+ 		16,
+ 		16
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+index 43028f3539a6d..76574e2459161 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+@@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ 		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ 		i915_gem_object_set_tiling_quirk(obj);
++		GEM_BUG_ON(!list_empty(&obj->mm.link));
++		atomic_inc(&obj->mm.shrink_pin);
+ 		shrinkable = false;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+index de575fdb033f5..21f08e53889c3 100644
+--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
++++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+@@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
+ 	gen7_emit_pipeline_invalidate(&cmds);
+ 	batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+-	batch_add(&cmds, 0xffff0000);
++	batch_add(&cmds, 0xffff0000 |
++			((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
++			 HIZ_RAW_STALL_OPT_DISABLE :
++			 0));
+ 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ 	batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ 	gen7_emit_pipeline_invalidate(&cmds);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index aa44909344694..19351addb68c4 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -972,12 +972,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ 		obj->mm.madv = args->madv;
+ 
+ 	if (i915_gem_object_has_pages(obj)) {
+-		struct list_head *list;
++		unsigned long flags;
+ 
+-		if (i915_gem_object_is_shrinkable(obj)) {
+-			unsigned long flags;
+-
+-			spin_lock_irqsave(&i915->mm.obj_lock, flags);
++		spin_lock_irqsave(&i915->mm.obj_lock, flags);
++		if (!list_empty(&obj->mm.link)) {
++			struct list_head *list;
+ 
+ 			if (obj->mm.madv != I915_MADV_WILLNEED)
+ 				list = &i915->mm.purge_list;
+@@ -985,8 +984,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ 				list = &i915->mm.shrink_list;
+ 			list_move_tail(&obj->mm.link, list);
+ 
+-			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ 		}
++		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ 	}
+ 
+ 	/* if the object is no longer attached, discard its backing storage */
+diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
+index 3808a753127bc..04109a2a6fd76 100644
+--- a/drivers/gpu/drm/radeon/radeon_gart.c
++++ b/drivers/gpu/drm/radeon/radeon_gart.c
+@@ -301,7 +301,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+ 
+ 	for (i = 0; i < pages; i++, p++) {
+-		rdev->gart.pages[p] = pagelist[i];
++		rdev->gart.pages[p] = pagelist ? pagelist[i] :
++			rdev->dummy_page.page;
+ 		page_base = dma_addr[i];
+ 		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ 			page_entry = radeon_gart_get_page_entry(page_base, flags);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 101a68dc615b6..799ec7a7caa4d 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -153,6 +153,8 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+ 
+ 		swap = &ttm_bo_glob.swap_lru[bo->priority];
+ 		list_move_tail(&bo->swap, swap);
++	} else {
++		list_del_init(&bo->swap);
+ 	}
+ 
+ 	if (bdev->driver->del_from_lru_notify)
+diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
+index ac4adb44b224d..97ab491d2922c 100644
+--- a/drivers/hwmon/lm80.c
++++ b/drivers/hwmon/lm80.c
+@@ -596,7 +596,6 @@ static int lm80_probe(struct i2c_client *client)
+ 	struct device *dev = &client->dev;
+ 	struct device *hwmon_dev;
+ 	struct lm80_data *data;
+-	int rv;
+ 
+ 	data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
+ 	if (!data)
+@@ -609,14 +608,8 @@ static int lm80_probe(struct i2c_client *client)
+ 	lm80_init_client(client);
+ 
+ 	/* A few vars need to be filled upon startup */
+-	rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+-	if (rv < 0)
+-		return rv;
+-	data->fan[f_min][0] = rv;
+-	rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+-	if (rv < 0)
+-		return rv;
+-	data->fan[f_min][1] = rv;
++	data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
++	data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+ 
+ 	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ 							   data, lm80_groups);
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 6ac07911a17bd..5b9022a8c9ece 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -482,6 +482,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
+ 	list_del(&id_priv->list);
+ 	cma_dev_put(id_priv->cma_dev);
+ 	id_priv->cma_dev = NULL;
++	id_priv->id.device = NULL;
+ 	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
+ 		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
+ 		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
+@@ -1864,6 +1865,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
+ 				iw_destroy_cm_id(id_priv->cm_id.iw);
+ 		}
+ 		cma_leave_mc_groups(id_priv);
++		rdma_restrack_del(&id_priv->res);
+ 		cma_release_dev(id_priv);
+ 	}
+ 
+@@ -1877,7 +1879,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
+ 	kfree(id_priv->id.route.path_rec);
+ 
+ 	put_net(id_priv->id.route.addr.dev_addr.net);
+-	rdma_restrack_del(&id_priv->res);
+ 	kfree(id_priv);
+ }
+ 
+@@ -3740,7 +3741,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
+ 	}
+ 
+ 	id_priv->backlog = backlog;
+-	if (id->device) {
++	if (id_priv->cma_dev) {
+ 		if (rdma_cap_ib_cm(id->device, 1)) {
+ 			ret = cma_ib_listen(id_priv);
+ 			if (ret)
+diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
+index 9ec6971056fa8..049684880ae03 100644
+--- a/drivers/infiniband/core/uverbs_std_types_device.c
++++ b/drivers/infiniband/core/uverbs_std_types_device.c
+@@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
+ 		return ret;
+ 
+ 	uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
+-	if (!uapi_object)
+-		return -EINVAL;
++	if (IS_ERR(uapi_object))
++		return PTR_ERR(uapi_object);
+ 
+ 	handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
+ 					out_len, &total);
+@@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
+ 	if (ret)
+ 		return ret;
+ 
++	if (!user_entry_size)
++		return -EINVAL;
++
+ 	max_entries = uverbs_attr_ptr_get_array_size(
+ 		attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
+ 		user_entry_size);
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index 07b8350929cd6..81276b4247f8e 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
+ 	case UVERBS_OBJECT_QP:
+ 	{
+ 		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
+-		enum ib_qp_type	qp_type = qp->ibqp.qp_type;
+ 
+-		if (qp_type == IB_QPT_RAW_PACKET ||
++		if (qp->type == IB_QPT_RAW_PACKET ||
+ 		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
+ 			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
+ 							 &qp->raw_packet_qp;
+@@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
+ 					       sq->tisn) == obj_id);
+ 		}
+ 
+-		if (qp_type == MLX5_IB_QPT_DCT)
++		if (qp->type == MLX5_IB_QPT_DCT)
+ 			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ 					      qp->dct.mdct.mqp.qpn) == obj_id;
+-
+ 		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ 				      qp->ibqp.qp_num) == obj_id;
+ 	}
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 4be7bccefaa40..59ffbbdda3179 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4655,6 +4655,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
+ 
+ 		if (bound) {
+ 			rdma_roce_rescan_device(&dev->ib_dev);
++			mpi->ibdev->ib_active = true;
+ 			break;
+ 		}
+ 	}
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index 17a361b8dbb16..06b556169867a 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -345,14 +345,16 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
+ 
+ 	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
+ 			&wqe->dma, payload_addr(pkt),
+-			payload_size(pkt), to_mem_obj, NULL);
+-	if (ret)
++			payload_size(pkt), to_mr_obj, NULL);
++	if (ret) {
++		wqe->status = IB_WC_LOC_PROT_ERR;
+ 		return COMPST_ERROR;
++	}
+ 
+ 	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
+ 		return COMPST_COMP_ACK;
+-	else
+-		return COMPST_UPDATE_COMP;
++
++	return COMPST_UPDATE_COMP;
+ }
+ 
+ static inline enum comp_state do_atomic(struct rxe_qp *qp,
+@@ -365,11 +367,13 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
+ 
+ 	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
+ 			&wqe->dma, &atomic_orig,
+-			sizeof(u64), to_mem_obj, NULL);
+-	if (ret)
++			sizeof(u64), to_mr_obj, NULL);
++	if (ret) {
++		wqe->status = IB_WC_LOC_PROT_ERR;
+ 		return COMPST_ERROR;
+-	else
+-		return COMPST_COMP_ACK;
++	}
++
++	return COMPST_COMP_ACK;
+ }
+ 
+ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
+index 0d758760b9ae7..08e21fa9ec97e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_loc.h
++++ b/drivers/infiniband/sw/rxe/rxe_loc.h
+@@ -72,40 +72,37 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+ 
+ /* rxe_mr.c */
+ enum copy_direction {
+-	to_mem_obj,
+-	from_mem_obj,
++	to_mr_obj,
++	from_mr_obj,
+ };
+ 
+-void rxe_mem_init_dma(struct rxe_pd *pd,
+-		      int access, struct rxe_mem *mem);
++void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
+ 
+-int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
+-		      u64 length, u64 iova, int access, struct ib_udata *udata,
+-		      struct rxe_mem *mr);
++int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
++		     int access, struct ib_udata *udata, struct rxe_mr *mr);
+ 
+-int rxe_mem_init_fast(struct rxe_pd *pd,
+-		      int max_pages, struct rxe_mem *mem);
++int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
+ 
+-int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
+-		 int length, enum copy_direction dir, u32 *crcp);
++int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
++		enum copy_direction dir, u32 *crcp);
+ 
+ int copy_data(struct rxe_pd *pd, int access,
+ 	      struct rxe_dma_info *dma, void *addr, int length,
+ 	      enum copy_direction dir, u32 *crcp);
+ 
+-void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
++void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
+ 
+ enum lookup_type {
+ 	lookup_local,
+ 	lookup_remote,
+ };
+ 
+-struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
+-			   enum lookup_type type);
++struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
++			 enum lookup_type type);
+ 
+-int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
++int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
+ 
+-void rxe_mem_cleanup(struct rxe_pool_entry *arg);
++void rxe_mr_cleanup(struct rxe_pool_entry *arg);
+ 
+ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
+index 6e8c41567ba08..9f63947bab123 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mr.c
++++ b/drivers/infiniband/sw/rxe/rxe_mr.c
+@@ -24,16 +24,15 @@ static u8 rxe_get_key(void)
+ 	return key;
+ }
+ 
+-int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
++int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
+ {
+-	switch (mem->type) {
+-	case RXE_MEM_TYPE_DMA:
++	switch (mr->type) {
++	case RXE_MR_TYPE_DMA:
+ 		return 0;
+ 
+-	case RXE_MEM_TYPE_MR:
+-		if (iova < mem->iova ||
+-		    length > mem->length ||
+-		    iova > mem->iova + mem->length - length)
++	case RXE_MR_TYPE_MR:
++		if (iova < mr->iova || length > mr->length ||
++		    iova > mr->iova + mr->length - length)
+ 			return -EFAULT;
+ 		return 0;
+ 
+@@ -46,85 +45,83 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
+ 				| IB_ACCESS_REMOTE_WRITE	\
+ 				| IB_ACCESS_REMOTE_ATOMIC)
+ 
+-static void rxe_mem_init(int access, struct rxe_mem *mem)
++static void rxe_mr_init(int access, struct rxe_mr *mr)
+ {
+-	u32 lkey = mem->pelem.index << 8 | rxe_get_key();
++	u32 lkey = mr->pelem.index << 8 | rxe_get_key();
+ 	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
+ 
+-	mem->ibmr.lkey		= lkey;
+-	mem->ibmr.rkey		= rkey;
+-	mem->state		= RXE_MEM_STATE_INVALID;
+-	mem->type		= RXE_MEM_TYPE_NONE;
+-	mem->map_shift		= ilog2(RXE_BUF_PER_MAP);
++	mr->ibmr.lkey = lkey;
++	mr->ibmr.rkey = rkey;
++	mr->state = RXE_MR_STATE_INVALID;
++	mr->type = RXE_MR_TYPE_NONE;
++	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
+ }
+ 
+-void rxe_mem_cleanup(struct rxe_pool_entry *arg)
++void rxe_mr_cleanup(struct rxe_pool_entry *arg)
+ {
+-	struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
++	struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
+ 	int i;
+ 
+-	ib_umem_release(mem->umem);
++	ib_umem_release(mr->umem);
+ 
+-	if (mem->map) {
+-		for (i = 0; i < mem->num_map; i++)
+-			kfree(mem->map[i]);
++	if (mr->map) {
++		for (i = 0; i < mr->num_map; i++)
++			kfree(mr->map[i]);
+ 
+-		kfree(mem->map);
++		kfree(mr->map);
+ 	}
+ }
+ 
+-static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
++static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
+ {
+ 	int i;
+ 	int num_map;
+-	struct rxe_map **map = mem->map;
++	struct rxe_map **map = mr->map;
+ 
+ 	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
+ 
+-	mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+-	if (!mem->map)
++	mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
++	if (!mr->map)
+ 		goto err1;
+ 
+ 	for (i = 0; i < num_map; i++) {
+-		mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+-		if (!mem->map[i])
++		mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
++		if (!mr->map[i])
+ 			goto err2;
+ 	}
+ 
+ 	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
+ 
+-	mem->map_shift	= ilog2(RXE_BUF_PER_MAP);
+-	mem->map_mask	= RXE_BUF_PER_MAP - 1;
++	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
++	mr->map_mask = RXE_BUF_PER_MAP - 1;
+ 
+-	mem->num_buf = num_buf;
+-	mem->num_map = num_map;
+-	mem->max_buf = num_map * RXE_BUF_PER_MAP;
++	mr->num_buf = num_buf;
++	mr->num_map = num_map;
++	mr->max_buf = num_map * RXE_BUF_PER_MAP;
+ 
+ 	return 0;
+ 
+ err2:
+ 	for (i--; i >= 0; i--)
+-		kfree(mem->map[i]);
++		kfree(mr->map[i]);
+ 
+-	kfree(mem->map);
++	kfree(mr->map);
+ err1:
+ 	return -ENOMEM;
+ }
+ 
+-void rxe_mem_init_dma(struct rxe_pd *pd,
+-		      int access, struct rxe_mem *mem)
++void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
+ {
+-	rxe_mem_init(access, mem);
++	rxe_mr_init(access, mr);
+ 
+-	mem->ibmr.pd		= &pd->ibpd;
+-	mem->access		= access;
+-	mem->state		= RXE_MEM_STATE_VALID;
+-	mem->type		= RXE_MEM_TYPE_DMA;
++	mr->ibmr.pd = &pd->ibpd;
++	mr->access = access;
++	mr->state = RXE_MR_STATE_VALID;
++	mr->type = RXE_MR_TYPE_DMA;
+ }
+ 
+-int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
+-		      u64 length, u64 iova, int access, struct ib_udata *udata,
+-		      struct rxe_mem *mem)
++int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
++		     int access, struct ib_udata *udata, struct rxe_mr *mr)
+ {
+ 	struct rxe_map		**map;
+ 	struct rxe_phys_buf	*buf = NULL;
+@@ -142,23 +139,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
+ 		goto err1;
+ 	}
+ 
+-	mem->umem = umem;
++	mr->umem = umem;
+ 	num_buf = ib_umem_num_pages(umem);
+ 
+-	rxe_mem_init(access, mem);
++	rxe_mr_init(access, mr);
+ 
+-	err = rxe_mem_alloc(mem, num_buf);
++	err = rxe_mr_alloc(mr, num_buf);
+ 	if (err) {
+-		pr_warn("err %d from rxe_mem_alloc\n", err);
++		pr_warn("err %d from rxe_mr_alloc\n", err);
+ 		ib_umem_release(umem);
+ 		goto err1;
+ 	}
+ 
+-	mem->page_shift		= PAGE_SHIFT;
+-	mem->page_mask = PAGE_SIZE - 1;
++	mr->page_shift = PAGE_SHIFT;
++	mr->page_mask = PAGE_SIZE - 1;
+ 
+ 	num_buf			= 0;
+-	map			= mem->map;
++	map = mr->map;
+ 	if (length > 0) {
+ 		buf = map[0]->buf;
+ 
+@@ -185,15 +182,15 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
+ 		}
+ 	}
+ 
+-	mem->ibmr.pd		= &pd->ibpd;
+-	mem->umem		= umem;
+-	mem->access		= access;
+-	mem->length		= length;
+-	mem->iova		= iova;
+-	mem->va			= start;
+-	mem->offset		= ib_umem_offset(umem);
+-	mem->state		= RXE_MEM_STATE_VALID;
+-	mem->type		= RXE_MEM_TYPE_MR;
++	mr->ibmr.pd = &pd->ibpd;
++	mr->umem = umem;
++	mr->access = access;
++	mr->length = length;
++	mr->iova = iova;
++	mr->va = start;
++	mr->offset = ib_umem_offset(umem);
++	mr->state = RXE_MR_STATE_VALID;
++	mr->type = RXE_MR_TYPE_MR;
+ 
+ 	return 0;
+ 
+@@ -201,24 +198,23 @@ err1:
+ 	return err;
+ }
+ 
+-int rxe_mem_init_fast(struct rxe_pd *pd,
+-		      int max_pages, struct rxe_mem *mem)
++int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
+ {
+ 	int err;
+ 
+-	rxe_mem_init(0, mem);
++	rxe_mr_init(0, mr);
+ 
+ 	/* In fastreg, we also set the rkey */
+-	mem->ibmr.rkey = mem->ibmr.lkey;
++	mr->ibmr.rkey = mr->ibmr.lkey;
+ 
+-	err = rxe_mem_alloc(mem, max_pages);
++	err = rxe_mr_alloc(mr, max_pages);
+ 	if (err)
+ 		goto err1;
+ 
+-	mem->ibmr.pd		= &pd->ibpd;
+-	mem->max_buf		= max_pages;
+-	mem->state		= RXE_MEM_STATE_FREE;
+-	mem->type		= RXE_MEM_TYPE_MR;
++	mr->ibmr.pd = &pd->ibpd;
++	mr->max_buf = max_pages;
++	mr->state = RXE_MR_STATE_FREE;
++	mr->type = RXE_MR_TYPE_MR;
+ 
+ 	return 0;
+ 
+@@ -226,28 +222,24 @@ err1:
+ 	return err;
+ }
+ 
+-static void lookup_iova(
+-	struct rxe_mem	*mem,
+-	u64			iova,
+-	int			*m_out,
+-	int			*n_out,
+-	size_t			*offset_out)
++static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
++			size_t *offset_out)
+ {
+-	size_t			offset = iova - mem->iova + mem->offset;
++	size_t offset = iova - mr->iova + mr->offset;
+ 	int			map_index;
+ 	int			buf_index;
+ 	u64			length;
+ 
+-	if (likely(mem->page_shift)) {
+-		*offset_out = offset & mem->page_mask;
+-		offset >>= mem->page_shift;
+-		*n_out = offset & mem->map_mask;
+-		*m_out = offset >> mem->map_shift;
++	if (likely(mr->page_shift)) {
++		*offset_out = offset & mr->page_mask;
++		offset >>= mr->page_shift;
++		*n_out = offset & mr->map_mask;
++		*m_out = offset >> mr->map_shift;
+ 	} else {
+ 		map_index = 0;
+ 		buf_index = 0;
+ 
+-		length = mem->map[map_index]->buf[buf_index].size;
++		length = mr->map[map_index]->buf[buf_index].size;
+ 
+ 		while (offset >= length) {
+ 			offset -= length;
+@@ -257,7 +249,7 @@ static void lookup_iova(
+ 				map_index++;
+ 				buf_index = 0;
+ 			}
+-			length = mem->map[map_index]->buf[buf_index].size;
++			length = mr->map[map_index]->buf[buf_index].size;
+ 		}
+ 
+ 		*m_out = map_index;
+@@ -266,49 +258,49 @@ static void lookup_iova(
+ 	}
+ }
+ 
+-void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
++void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
+ {
+ 	size_t offset;
+ 	int m, n;
+ 	void *addr;
+ 
+-	if (mem->state != RXE_MEM_STATE_VALID) {
+-		pr_warn("mem not in valid state\n");
++	if (mr->state != RXE_MR_STATE_VALID) {
++		pr_warn("mr not in valid state\n");
+ 		addr = NULL;
+ 		goto out;
+ 	}
+ 
+-	if (!mem->map) {
++	if (!mr->map) {
+ 		addr = (void *)(uintptr_t)iova;
+ 		goto out;
+ 	}
+ 
+-	if (mem_check_range(mem, iova, length)) {
++	if (mr_check_range(mr, iova, length)) {
+ 		pr_warn("range violation\n");
+ 		addr = NULL;
+ 		goto out;
+ 	}
+ 
+-	lookup_iova(mem, iova, &m, &n, &offset);
++	lookup_iova(mr, iova, &m, &n, &offset);
+ 
+-	if (offset + length > mem->map[m]->buf[n].size) {
++	if (offset + length > mr->map[m]->buf[n].size) {
+ 		pr_warn("crosses page boundary\n");
+ 		addr = NULL;
+ 		goto out;
+ 	}
+ 
+-	addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
++	addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
+ 
+ out:
+ 	return addr;
+ }
+ 
+ /* copy data from a range (vaddr, vaddr+length-1) to or from
+- * a mem object starting at iova. Compute incremental value of
+- * crc32 if crcp is not zero. caller must hold a reference to mem
++ * an mr object starting at iova. Compute the incremental value of
++ * crc32 if crcp is not zero. The caller must hold a reference to mr
+  */
+-int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
+-		 enum copy_direction dir, u32 *crcp)
++int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
++		enum copy_direction dir, u32 *crcp)
+ {
+ 	int			err;
+ 	int			bytes;
+@@ -323,43 +315,41 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
+ 	if (length == 0)
+ 		return 0;
+ 
+-	if (mem->type == RXE_MEM_TYPE_DMA) {
++	if (mr->type == RXE_MR_TYPE_DMA) {
+ 		u8 *src, *dest;
+ 
+-		src  = (dir == to_mem_obj) ?
+-			addr : ((void *)(uintptr_t)iova);
++		src = (dir == to_mr_obj) ? addr : ((void *)(uintptr_t)iova);
+ 
+-		dest = (dir == to_mem_obj) ?
+-			((void *)(uintptr_t)iova) : addr;
++		dest = (dir == to_mr_obj) ? ((void *)(uintptr_t)iova) : addr;
+ 
+ 		memcpy(dest, src, length);
+ 
+ 		if (crcp)
+-			*crcp = rxe_crc32(to_rdev(mem->ibmr.device),
+-					*crcp, dest, length);
++			*crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
++					  length);
+ 
+ 		return 0;
+ 	}
+ 
+-	WARN_ON_ONCE(!mem->map);
++	WARN_ON_ONCE(!mr->map);
+ 
+-	err = mem_check_range(mem, iova, length);
++	err = mr_check_range(mr, iova, length);
+ 	if (err) {
+ 		err = -EFAULT;
+ 		goto err1;
+ 	}
+ 
+-	lookup_iova(mem, iova, &m, &i, &offset);
++	lookup_iova(mr, iova, &m, &i, &offset);
+ 
+-	map	= mem->map + m;
++	map = mr->map + m;
+ 	buf	= map[0]->buf + i;
+ 
+ 	while (length > 0) {
+ 		u8 *src, *dest;
+ 
+ 		va	= (u8 *)(uintptr_t)buf->addr + offset;
+-		src  = (dir == to_mem_obj) ? addr : va;
+-		dest = (dir == to_mem_obj) ? va : addr;
++		src = (dir == to_mr_obj) ? addr : va;
++		dest = (dir == to_mr_obj) ? va : addr;
+ 
+ 		bytes	= buf->size - offset;
+ 
+@@ -369,8 +359,8 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
+ 		memcpy(dest, src, bytes);
+ 
+ 		if (crcp)
+-			crc = rxe_crc32(to_rdev(mem->ibmr.device),
+-					crc, dest, bytes);
++			crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
++					bytes);
+ 
+ 		length	-= bytes;
+ 		addr	+= bytes;
+@@ -411,7 +401,7 @@ int copy_data(
+ 	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
+ 	int			offset	= dma->sge_offset;
+ 	int			resid	= dma->resid;
+-	struct rxe_mem		*mem	= NULL;
++	struct rxe_mr		*mr	= NULL;
+ 	u64			iova;
+ 	int			err;
+ 
+@@ -424,8 +414,8 @@ int copy_data(
+ 	}
+ 
+ 	if (sge->length && (offset < sge->length)) {
+-		mem = lookup_mem(pd, access, sge->lkey, lookup_local);
+-		if (!mem) {
++		mr = lookup_mr(pd, access, sge->lkey, lookup_local);
++		if (!mr) {
+ 			err = -EINVAL;
+ 			goto err1;
+ 		}
+@@ -435,9 +425,9 @@ int copy_data(
+ 		bytes = length;
+ 
+ 		if (offset >= sge->length) {
+-			if (mem) {
+-				rxe_drop_ref(mem);
+-				mem = NULL;
++			if (mr) {
++				rxe_drop_ref(mr);
++				mr = NULL;
+ 			}
+ 			sge++;
+ 			dma->cur_sge++;
+@@ -449,9 +439,9 @@ int copy_data(
+ 			}
+ 
+ 			if (sge->length) {
+-				mem = lookup_mem(pd, access, sge->lkey,
+-						 lookup_local);
+-				if (!mem) {
++				mr = lookup_mr(pd, access, sge->lkey,
++					       lookup_local);
++				if (!mr) {
+ 					err = -EINVAL;
+ 					goto err1;
+ 				}
+@@ -466,7 +456,7 @@ int copy_data(
+ 		if (bytes > 0) {
+ 			iova = sge->addr + offset;
+ 
+-			err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
++			err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp);
+ 			if (err)
+ 				goto err2;
+ 
+@@ -480,14 +470,14 @@ int copy_data(
+ 	dma->sge_offset = offset;
+ 	dma->resid	= resid;
+ 
+-	if (mem)
+-		rxe_drop_ref(mem);
++	if (mr)
++		rxe_drop_ref(mr);
+ 
+ 	return 0;
+ 
+ err2:
+-	if (mem)
+-		rxe_drop_ref(mem);
++	if (mr)
++		rxe_drop_ref(mr);
+ err1:
+ 	return err;
+ }
+@@ -525,31 +515,30 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
+ 	return 0;
+ }
+ 
+-/* (1) find the mem (mr or mw) corresponding to lkey/rkey
++/* (1) find the mr corresponding to lkey/rkey
+  *     depending on lookup_type
+- * (2) verify that the (qp) pd matches the mem pd
+- * (3) verify that the mem can support the requested access
+- * (4) verify that mem state is valid
++ * (2) verify that the (qp) pd matches the mr pd
++ * (3) verify that the mr can support the requested access
++ * (4) verify that mr state is valid
+  */
+-struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
+-			   enum lookup_type type)
++struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
++			 enum lookup_type type)
+ {
+-	struct rxe_mem *mem;
++	struct rxe_mr *mr;
+ 	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
+ 	int index = key >> 8;
+ 
+-	mem = rxe_pool_get_index(&rxe->mr_pool, index);
+-	if (!mem)
++	mr = rxe_pool_get_index(&rxe->mr_pool, index);
++	if (!mr)
+ 		return NULL;
+ 
+-	if (unlikely((type == lookup_local && mr_lkey(mem) != key) ||
+-		     (type == lookup_remote && mr_rkey(mem) != key) ||
+-		     mr_pd(mem) != pd ||
+-		     (access && !(access & mem->access)) ||
+-		     mem->state != RXE_MEM_STATE_VALID)) {
+-		rxe_drop_ref(mem);
+-		mem = NULL;
++	if (unlikely((type == lookup_local && mr_lkey(mr) != key) ||
++		     (type == lookup_remote && mr_rkey(mr) != key) ||
++		     mr_pd(mr) != pd || (access && !(access & mr->access)) ||
++		     mr->state != RXE_MR_STATE_VALID)) {
++		rxe_drop_ref(mr);
++		mr = NULL;
+ 	}
+ 
+-	return mem;
++	return mr;
+ }
+diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
+index 307d8986e7c9b..d24901f2af3fb 100644
+--- a/drivers/infiniband/sw/rxe/rxe_pool.c
++++ b/drivers/infiniband/sw/rxe/rxe_pool.c
+@@ -8,8 +8,6 @@
+ #include "rxe_loc.h"
+ 
+ /* info about object pools
+- * note that mr and mw share a single index space
+- * so that one can map an lkey to the correct type of object
+  */
+ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
+ 	[RXE_TYPE_UC] = {
+@@ -56,18 +54,18 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
+ 	},
+ 	[RXE_TYPE_MR] = {
+ 		.name		= "rxe-mr",
+-		.size		= sizeof(struct rxe_mem),
+-		.elem_offset	= offsetof(struct rxe_mem, pelem),
+-		.cleanup	= rxe_mem_cleanup,
++		.size		= sizeof(struct rxe_mr),
++		.elem_offset	= offsetof(struct rxe_mr, pelem),
++		.cleanup	= rxe_mr_cleanup,
+ 		.flags		= RXE_POOL_INDEX,
+ 		.max_index	= RXE_MAX_MR_INDEX,
+ 		.min_index	= RXE_MIN_MR_INDEX,
+ 	},
+ 	[RXE_TYPE_MW] = {
+ 		.name		= "rxe-mw",
+-		.size		= sizeof(struct rxe_mem),
+-		.elem_offset	= offsetof(struct rxe_mem, pelem),
+-		.flags		= RXE_POOL_INDEX,
++		.size		= sizeof(struct rxe_mw),
++		.elem_offset	= offsetof(struct rxe_mw, pelem),
++		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
+ 		.max_index	= RXE_MAX_MW_INDEX,
+ 		.min_index	= RXE_MIN_MW_INDEX,
+ 	},
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 34ae957a315ca..b0f350d674fdb 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -242,6 +242,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 	if (err) {
+ 		vfree(qp->sq.queue->buf);
+ 		kfree(qp->sq.queue);
++		qp->sq.queue = NULL;
+ 		return err;
+ 	}
+ 
+@@ -295,6 +296,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 		if (err) {
+ 			vfree(qp->rq.queue->buf);
+ 			kfree(qp->rq.queue);
++			qp->rq.queue = NULL;
+ 			return err;
+ 		}
+ 	}
+@@ -355,6 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+ err2:
+ 	rxe_queue_cleanup(qp->sq.queue);
+ err1:
++	qp->pd = NULL;
++	qp->rcq = NULL;
++	qp->scq = NULL;
++	qp->srq = NULL;
++
+ 	if (srq)
+ 		rxe_drop_ref(srq);
+ 	rxe_drop_ref(scq);
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 889290793d75b..3664cdae7e1f4 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -464,7 +464,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ 		} else {
+ 			err = copy_data(qp->pd, 0, &wqe->dma,
+ 					payload_addr(pkt), paylen,
+-					from_mem_obj,
++					from_mr_obj,
+ 					&crc);
+ 			if (err)
+ 				return err;
+@@ -596,7 +596,7 @@ next_wqe:
+ 	if (wqe->mask & WR_REG_MASK) {
+ 		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
+ 			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+-			struct rxe_mem *rmr;
++			struct rxe_mr *rmr;
+ 
+ 			rmr = rxe_pool_get_index(&rxe->mr_pool,
+ 						 wqe->wr.ex.invalidate_rkey >> 8);
+@@ -607,14 +607,14 @@ next_wqe:
+ 				wqe->status = IB_WC_MW_BIND_ERR;
+ 				goto exit;
+ 			}
+-			rmr->state = RXE_MEM_STATE_FREE;
++			rmr->state = RXE_MR_STATE_FREE;
+ 			rxe_drop_ref(rmr);
+ 			wqe->state = wqe_state_done;
+ 			wqe->status = IB_WC_SUCCESS;
+ 		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
+-			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
++			struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
+ 
+-			rmr->state = RXE_MEM_STATE_VALID;
++			rmr->state = RXE_MR_STATE_VALID;
+ 			rmr->access = wqe->wr.wr.reg.access;
+ 			rmr->ibmr.lkey = wqe->wr.wr.reg.key;
+ 			rmr->ibmr.rkey = wqe->wr.wr.reg.key;
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index 142f3d8014d83..8e237b623b316 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -391,7 +391,7 @@ static enum resp_states check_length(struct rxe_qp *qp,
+ static enum resp_states check_rkey(struct rxe_qp *qp,
+ 				   struct rxe_pkt_info *pkt)
+ {
+-	struct rxe_mem *mem = NULL;
++	struct rxe_mr *mr = NULL;
+ 	u64 va;
+ 	u32 rkey;
+ 	u32 resid;
+@@ -430,18 +430,18 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
+ 	resid	= qp->resp.resid;
+ 	pktlen	= payload_size(pkt);
+ 
+-	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
+-	if (!mem) {
++	mr = lookup_mr(qp->pd, access, rkey, lookup_remote);
++	if (!mr) {
+ 		state = RESPST_ERR_RKEY_VIOLATION;
+ 		goto err;
+ 	}
+ 
+-	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
++	if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
+ 		state = RESPST_ERR_RKEY_VIOLATION;
+ 		goto err;
+ 	}
+ 
+-	if (mem_check_range(mem, va, resid)) {
++	if (mr_check_range(mr, va, resid)) {
+ 		state = RESPST_ERR_RKEY_VIOLATION;
+ 		goto err;
+ 	}
+@@ -469,12 +469,12 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
+ 
+ 	WARN_ON_ONCE(qp->resp.mr);
+ 
+-	qp->resp.mr = mem;
++	qp->resp.mr = mr;
+ 	return RESPST_EXECUTE;
+ 
+ err:
+-	if (mem)
+-		rxe_drop_ref(mem);
++	if (mr)
++		rxe_drop_ref(mr);
+ 	return state;
+ }
+ 
+@@ -484,7 +484,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
+ 	int err;
+ 
+ 	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
+-			data_addr, data_len, to_mem_obj, NULL);
++			data_addr, data_len, to_mr_obj, NULL);
+ 	if (unlikely(err))
+ 		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
+ 					: RESPST_ERR_MALFORMED_WQE;
+@@ -499,8 +499,8 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
+ 	int	err;
+ 	int data_len = payload_size(pkt);
+ 
+-	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
+-			   data_len, to_mem_obj, NULL);
++	err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
++			  to_mr_obj, NULL);
+ 	if (err) {
+ 		rc = RESPST_ERR_RKEY_VIOLATION;
+ 		goto out;
+@@ -522,9 +522,9 @@ static enum resp_states process_atomic(struct rxe_qp *qp,
+ 	u64 iova = atmeth_va(pkt);
+ 	u64 *vaddr;
+ 	enum resp_states ret;
+-	struct rxe_mem *mr = qp->resp.mr;
++	struct rxe_mr *mr = qp->resp.mr;
+ 
+-	if (mr->state != RXE_MEM_STATE_VALID) {
++	if (mr->state != RXE_MR_STATE_VALID) {
+ 		ret = RESPST_ERR_RKEY_VIOLATION;
+ 		goto out;
+ 	}
+@@ -700,8 +700,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
+ 	if (!skb)
+ 		return RESPST_ERR_RNR;
+ 
+-	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
+-			   payload, from_mem_obj, &icrc);
++	err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
++			  payload, from_mr_obj, &icrc);
+ 	if (err)
+ 		pr_err("Failed copying memory\n");
+ 
+@@ -883,7 +883,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
+ 			}
+ 
+ 			if (pkt->mask & RXE_IETH_MASK) {
+-				struct rxe_mem *rmr;
++				struct rxe_mr *rmr;
+ 
+ 				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ 				wc->ex.invalidate_rkey = ieth_rkey(pkt);
+@@ -895,7 +895,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
+ 					       wc->ex.invalidate_rkey);
+ 					return RESPST_ERROR;
+ 				}
+-				rmr->state = RXE_MEM_STATE_FREE;
++				rmr->state = RXE_MR_STATE_FREE;
+ 				rxe_drop_ref(rmr);
+ 			}
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index dee5e0e919d28..38249c1a76a88 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -865,7 +865,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
+ {
+ 	struct rxe_dev *rxe = to_rdev(ibpd->device);
+ 	struct rxe_pd *pd = to_rpd(ibpd);
+-	struct rxe_mem *mr;
++	struct rxe_mr *mr;
+ 
+ 	mr = rxe_alloc(&rxe->mr_pool);
+ 	if (!mr)
+@@ -873,7 +873,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
+ 
+ 	rxe_add_index(mr);
+ 	rxe_add_ref(pd);
+-	rxe_mem_init_dma(pd, access, mr);
++	rxe_mr_init_dma(pd, access, mr);
+ 
+ 	return &mr->ibmr;
+ }
+@@ -887,7 +887,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
+ 	int err;
+ 	struct rxe_dev *rxe = to_rdev(ibpd->device);
+ 	struct rxe_pd *pd = to_rpd(ibpd);
+-	struct rxe_mem *mr;
++	struct rxe_mr *mr;
+ 
+ 	mr = rxe_alloc(&rxe->mr_pool);
+ 	if (!mr) {
+@@ -899,8 +899,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
+ 
+ 	rxe_add_ref(pd);
+ 
+-	err = rxe_mem_init_user(pd, start, length, iova,
+-				access, udata, mr);
++	err = rxe_mr_init_user(pd, start, length, iova, access, udata, mr);
+ 	if (err)
+ 		goto err3;
+ 
+@@ -916,9 +915,9 @@ err2:
+ 
+ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+ {
+-	struct rxe_mem *mr = to_rmr(ibmr);
++	struct rxe_mr *mr = to_rmr(ibmr);
+ 
+-	mr->state = RXE_MEM_STATE_ZOMBIE;
++	mr->state = RXE_MR_STATE_ZOMBIE;
+ 	rxe_drop_ref(mr_pd(mr));
+ 	rxe_drop_index(mr);
+ 	rxe_drop_ref(mr);
+@@ -930,7 +929,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ {
+ 	struct rxe_dev *rxe = to_rdev(ibpd->device);
+ 	struct rxe_pd *pd = to_rpd(ibpd);
+-	struct rxe_mem *mr;
++	struct rxe_mr *mr;
+ 	int err;
+ 
+ 	if (mr_type != IB_MR_TYPE_MEM_REG)
+@@ -946,7 +945,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ 
+ 	rxe_add_ref(pd);
+ 
+-	err = rxe_mem_init_fast(pd, max_num_sg, mr);
++	err = rxe_mr_init_fast(pd, max_num_sg, mr);
+ 	if (err)
+ 		goto err2;
+ 
+@@ -962,7 +961,7 @@ err1:
+ 
+ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+ {
+-	struct rxe_mem *mr = to_rmr(ibmr);
++	struct rxe_mr *mr = to_rmr(ibmr);
+ 	struct rxe_map *map;
+ 	struct rxe_phys_buf *buf;
+ 
+@@ -982,7 +981,7 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ 			 int sg_nents, unsigned int *sg_offset)
+ {
+-	struct rxe_mem *mr = to_rmr(ibmr);
++	struct rxe_mr *mr = to_rmr(ibmr);
+ 	int n;
+ 
+ 	mr->nbuf = 0;
+@@ -1110,6 +1109,7 @@ static const struct ib_device_ops rxe_dev_ops = {
+ 	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+ 	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
+ 	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
++	INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
+ };
+ 
+ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
+index 79e0a5a878da3..11eba7a3ba8f4 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
+@@ -156,7 +156,7 @@ struct resp_res {
+ 			struct sk_buff	*skb;
+ 		} atomic;
+ 		struct {
+-			struct rxe_mem	*mr;
++			struct rxe_mr	*mr;
+ 			u64		va_org;
+ 			u32		rkey;
+ 			u32		length;
+@@ -183,7 +183,7 @@ struct rxe_resp_info {
+ 
+ 	/* RDMA read / atomic only */
+ 	u64			va;
+-	struct rxe_mem		*mr;
++	struct rxe_mr		*mr;
+ 	u32			resid;
+ 	u32			rkey;
+ 	u32			length;
+@@ -262,18 +262,18 @@ struct rxe_qp {
+ 	struct execute_work	cleanup_work;
+ };
+ 
+-enum rxe_mem_state {
+-	RXE_MEM_STATE_ZOMBIE,
+-	RXE_MEM_STATE_INVALID,
+-	RXE_MEM_STATE_FREE,
+-	RXE_MEM_STATE_VALID,
++enum rxe_mr_state {
++	RXE_MR_STATE_ZOMBIE,
++	RXE_MR_STATE_INVALID,
++	RXE_MR_STATE_FREE,
++	RXE_MR_STATE_VALID,
+ };
+ 
+-enum rxe_mem_type {
+-	RXE_MEM_TYPE_NONE,
+-	RXE_MEM_TYPE_DMA,
+-	RXE_MEM_TYPE_MR,
+-	RXE_MEM_TYPE_MW,
++enum rxe_mr_type {
++	RXE_MR_TYPE_NONE,
++	RXE_MR_TYPE_DMA,
++	RXE_MR_TYPE_MR,
++	RXE_MR_TYPE_MW,
+ };
+ 
+ #define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))
+@@ -287,17 +287,14 @@ struct rxe_map {
+ 	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
+ };
+ 
+-struct rxe_mem {
++struct rxe_mr {
+ 	struct rxe_pool_entry	pelem;
+-	union {
+-		struct ib_mr		ibmr;
+-		struct ib_mw		ibmw;
+-	};
++	struct ib_mr		ibmr;
+ 
+ 	struct ib_umem		*umem;
+ 
+-	enum rxe_mem_state	state;
+-	enum rxe_mem_type	type;
++	enum rxe_mr_state	state;
++	enum rxe_mr_type	type;
+ 	u64			va;
+ 	u64			iova;
+ 	size_t			length;
+@@ -318,6 +315,17 @@ struct rxe_mem {
+ 	struct rxe_map		**map;
+ };
+ 
++enum rxe_mw_state {
++	RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
++	RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
++	RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
++};
++
++struct rxe_mw {
++	struct ib_mw ibmw;
++	struct rxe_pool_entry pelem;
++};
++
+ struct rxe_mc_grp {
+ 	struct rxe_pool_entry	pelem;
+ 	spinlock_t		mcg_lock; /* guard group */
+@@ -422,27 +430,27 @@ static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
+ 	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
+ }
+ 
+-static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
++static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
+ {
+-	return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
++	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
+ }
+ 
+-static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
++static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
+ {
+-	return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
++	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
+ }
+ 
+-static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
++static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
+ {
+ 	return to_rpd(mr->ibmr.pd);
+ }
+ 
+-static inline u32 mr_lkey(struct rxe_mem *mr)
++static inline u32 mr_lkey(struct rxe_mr *mr)
+ {
+ 	return mr->ibmr.lkey;
+ }
+ 
+-static inline u32 mr_rkey(struct rxe_mem *mr)
++static inline u32 mr_rkey(struct rxe_mr *mr)
+ {
+ 	return mr->ibmr.rkey;
+ }
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index e389d44e5591d..8a00c06e5f56f 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ 	struct siw_ucontext *uctx =
+ 		rdma_udata_to_drv_context(udata, struct siw_ucontext,
+ 					  base_ucontext);
+-	struct siw_cq *scq = NULL, *rcq = NULL;
+ 	unsigned long flags;
+ 	int num_sqe, num_rqe, rv = 0;
+ 	size_t length;
+@@ -343,10 +342,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ 		rv = -EINVAL;
+ 		goto err_out;
+ 	}
+-	scq = to_siw_cq(attrs->send_cq);
+-	rcq = to_siw_cq(attrs->recv_cq);
+ 
+-	if (!scq || (!rcq && !attrs->srq)) {
++	if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
+ 		siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
+ 		rv = -EINVAL;
+ 		goto err_out;
+@@ -378,7 +375,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ 	else {
+ 		/* Zero sized SQ is not supported */
+ 		rv = -EINVAL;
+-		goto err_out;
++		goto err_out_xa;
+ 	}
+ 	if (num_rqe)
+ 		num_rqe = roundup_pow_of_two(num_rqe);
+@@ -401,8 +398,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ 		}
+ 	}
+ 	qp->pd = pd;
+-	qp->scq = scq;
+-	qp->rcq = rcq;
++	qp->scq = to_siw_cq(attrs->send_cq);
++	qp->rcq = to_siw_cq(attrs->recv_cq);
+ 
+ 	if (attrs->srq) {
+ 		/*
+diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
+index fc433e63b1dc0..b1590cb4a1887 100644
+--- a/drivers/leds/leds-lp5523.c
++++ b/drivers/leds/leds-lp5523.c
+@@ -307,7 +307,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
+ 	usleep_range(3000, 6000);
+ 	ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 	status &= LP5523_ENG_STATUS_MASK;
+ 
+ 	if (status != LP5523_ENG_STATUS_MASK) {
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 11890db71f3fe..962f7df0691ef 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1408,6 +1408,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 
+ 	if (!s->store->chunk_size) {
+ 		ti->error = "Chunk size not set";
++		r = -EINVAL;
+ 		goto bad_read_metadata;
+ 	}
+ 
+diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
+index 83bd9a412a560..1e3b68a8743af 100644
+--- a/drivers/media/platform/rcar_drif.c
++++ b/drivers/media/platform/rcar_drif.c
+@@ -915,7 +915,6 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
+ {
+ 	struct rcar_drif_sdr *sdr = video_drvdata(file);
+ 
+-	memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ 	f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
+ 	f->fmt.sdr.buffersize = sdr->fmt->buffersize;
+ 
+diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
+index 926408b41270c..7a6f01ace78ac 100644
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -763,7 +763,8 @@ static int at24_probe(struct i2c_client *client)
+ 	at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+ 	if (IS_ERR(at24->nvmem)) {
+ 		pm_runtime_disable(dev);
+-		regulator_disable(at24->vcc_reg);
++		if (!pm_runtime_status_suspended(dev))
++			regulator_disable(at24->vcc_reg);
+ 		return PTR_ERR(at24->nvmem);
+ 	}
+ 
+@@ -774,7 +775,8 @@ static int at24_probe(struct i2c_client *client)
+ 	err = at24_read(at24, 0, &test_byte, 1);
+ 	if (err) {
+ 		pm_runtime_disable(dev);
+-		regulator_disable(at24->vcc_reg);
++		if (!pm_runtime_status_suspended(dev))
++			regulator_disable(at24->vcc_reg);
+ 		return -ENODEV;
+ 	}
+ 
+diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
+index 9152242778f5e..ecdedd87f8ccf 100644
+--- a/drivers/misc/habanalabs/gaudi/gaudi.c
++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
+@@ -5546,6 +5546,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
+ 	struct hl_cs_job *job;
+ 	u32 cb_size, ctl, err_cause;
+ 	struct hl_cb *cb;
++	u64 id;
+ 	int rc;
+ 
+ 	cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
+@@ -5612,8 +5613,9 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
+ 	}
+ 
+ release_cb:
++	id = cb->id;
+ 	hl_cb_put(cb);
+-	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
++	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
+ 
+ 	return rc;
+ }
+diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
+index 2bdf560ee681b..0f9ea75b0b189 100644
+--- a/drivers/misc/ics932s401.c
++++ b/drivers/misc/ics932s401.c
+@@ -134,7 +134,7 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
+ 	for (i = 0; i < NUM_MIRRORED_REGS; i++) {
+ 		temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
+ 		if (temp < 0)
+-			data->regs[regs_to_copy[i]] = 0;
++			temp = 0;
+ 		data->regs[regs_to_copy[i]] = temp >> 8;
+ 	}
+ 
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index b8b771b643cc8..016a6106151a5 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -236,7 +236,8 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
+ 	if (host->dram_access_quirk)
+ 		return;
+ 
+-	if (data->blocks > 1) {
++	/* SD_IO_RW_EXTENDED (CMD53) can also use block mode under the hood */
++	if (data->blocks > 1 || mrq->cmd->opcode == SD_IO_RW_EXTENDED) {
+ 		/*
+ 		 * In block mode DMA descriptor format, "length" field indicates
+ 		 * number of blocks and there is no way to pass DMA size that
+@@ -258,7 +259,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
+ 	for_each_sg(data->sg, sg, data->sg_len, i) {
+ 		/* check for 8 byte alignment */
+ 		if (sg->offset % 8) {
+-			WARN_ONCE(1, "unaligned scatterlist buffer\n");
++			dev_warn_once(mmc_dev(mmc),
++				      "unaligned sg offset %u, disabling descriptor DMA for transfer\n",
++				      sg->offset);
+ 			return;
+ 		}
+ 	}
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index 4a0f69b97a78f..7572119225068 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -587,8 +587,13 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
+ 	 *
+ 	 * Wait 5ms after set 1.8V signal enable in Host Control 2 register
+ 	 * to ensure 1.8V signal enable bit is set by GL9750/GL9755.
++	 *
++	 * ...however, the controller in the NUC10i3FNK4 (a 9755) requires
++	 * slightly longer than 5ms before the control register reports that
++	 * 1.8V is ready, and far longer still before the card will actually
++	 * work reliably.
+ 	 */
+-	usleep_range(5000, 5500);
++	usleep_range(100000, 110000);
+ }
+ 
+ static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+index d8a3ecaed3fc6..d8f0863b39342 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+@@ -1048,7 +1048,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+ 	for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
+ 		skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+ 		if (!skb)
+-			break;
++			goto error;
+ 		qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
+ 		skb_put(skb, QLCNIC_ILB_PKT_SIZE);
+ 		adapter->ahw->diag_cnt = 0;
+@@ -1072,6 +1072,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+ 			cnt++;
+ 	}
+ 	if (cnt != i) {
++error:
+ 		dev_err(&adapter->pdev->dev,
+ 			"LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
+ 		if (mode != QLCNIC_ILB_MODE)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+index 0e1ca2cba3c7c..e18dee7fe6876 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+@@ -30,7 +30,7 @@ struct sunxi_priv_data {
+ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+ {
+ 	struct sunxi_priv_data *gmac = priv;
+-	int ret;
++	int ret = 0;
+ 
+ 	if (gmac->regulator) {
+ 		ret = regulator_enable(gmac->regulator);
+@@ -51,11 +51,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+ 	} else {
+ 		clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ 		ret = clk_prepare(gmac->tx_clk);
+-		if (ret)
+-			return ret;
++		if (ret && gmac->regulator)
++			regulator_disable(gmac->regulator);
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index 707ccdd03b19e..74e748662ec01 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -8144,10 +8144,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
+ 				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
+ 				     namebuf, prop_len);
+ 			for (i = 0; i < prop_len; i++) {
+-				err = niu_pci_eeprom_read(np, off + i);
+-				if (err >= 0)
+-					*prop_buf = err;
+-				++prop_buf;
++				err = niu_pci_eeprom_read(np, off + i);
++				if (err < 0)
++					return err;
++				*prop_buf++ = err;
+ 			}
+ 		}
+ 
+@@ -8158,14 +8158,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
+ }
+ 
+ /* ESPC_PIO_EN_ENABLE must be set */
+-static void niu_pci_vpd_fetch(struct niu *np, u32 start)
++static int niu_pci_vpd_fetch(struct niu *np, u32 start)
+ {
+ 	u32 offset;
+ 	int err;
+ 
+ 	err = niu_pci_eeprom_read16_swp(np, start + 1);
+ 	if (err < 0)
+-		return;
++		return err;
+ 
+ 	offset = err + 3;
+ 
+@@ -8174,12 +8174,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
+ 		u32 end;
+ 
+ 		err = niu_pci_eeprom_read(np, here);
++		if (err < 0)
++			return err;
+ 		if (err != 0x90)
+-			return;
++			return -EINVAL;
+ 
+ 		err = niu_pci_eeprom_read16_swp(np, here + 1);
+ 		if (err < 0)
+-			return;
++			return err;
+ 
+ 		here = start + offset + 3;
+ 		end = start + offset + err;
+@@ -8187,9 +8189,12 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
+ 		offset += err;
+ 
+ 		err = niu_pci_vpd_scan_props(np, here, end);
+-		if (err < 0 || err == 1)
+-			return;
++		if (err < 0)
++			return err;
++		if (err == 1)
++			return -EINVAL;
+ 	}
++	return 0;
+ }
+ 
+ /* ESPC_PIO_EN_ENABLE must be set */
+@@ -9280,8 +9285,11 @@ static int niu_get_invariants(struct niu *np)
+ 		offset = niu_pci_vpd_offset(np);
+ 		netif_printk(np, probe, KERN_DEBUG, np->dev,
+ 			     "%s() VPD offset [%08x]\n", __func__, offset);
+-		if (offset)
+-			niu_pci_vpd_fetch(np, offset);
++		if (offset) {
++			err = niu_pci_vpd_fetch(np, offset);
++			if (err < 0)
++				return err;
++		}
+ 		nw64(ESPC_PIO_EN, 0);
+ 
+ 		if (np->flags & NIU_FLAGS_VPD_VALID) {
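
The niu hunks above convert a silent, best-effort VPD scan into one that propagates errors: the EEPROM reader returns either a non-negative data byte or a negative errno, and every caller now forwards the failure instead of ignoring it. A small self-contained sketch of that convention, with a hypothetical eeprom_read() stub in place of the real hardware accessor:

#include <stdio.h>
#include <errno.h>

/* hypothetical stand-in: non-negative data byte, or negative errno */
static int eeprom_read(unsigned int off)
{
	if (off >= 4)
		return -EIO;            /* simulate a failing device */
	return "VPD!"[off];
}

static int vpd_fetch(char *buf, unsigned int len)
{
	for (unsigned int i = 0; i < len; i++) {
		int err = eeprom_read(i);

		if (err < 0)
			return err;     /* propagate, don't truncate silently */
		buf[i] = (char)err;
	}
	return 0;
}

int main(void)
{
	char buf[8];

	printf("vpd_fetch(4) = %d\n", vpd_fetch(buf, 4));  /* 0 */
	printf("vpd_fetch(8) = %d\n", vpd_fetch(buf, 8));  /* -EIO */
	return 0;
}
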
+diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
+index 6e8bd99e8911d..1866f6c2acab1 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/base.c
++++ b/drivers/net/wireless/realtek/rtlwifi/base.c
+@@ -440,9 +440,14 @@ static void rtl_watchdog_wq_callback(struct work_struct *work);
+ static void rtl_fwevt_wq_callback(struct work_struct *work);
+ static void rtl_c2hcmd_wq_callback(struct work_struct *work);
+ 
+-static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
++static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
++	struct workqueue_struct *wq;
++
++	wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
++	if (!wq)
++		return -ENOMEM;
+ 
+ 	/* <1> timer */
+ 	timer_setup(&rtlpriv->works.watchdog_timer,
+@@ -451,11 +456,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+ 		    rtl_easy_concurrent_retrytimer_callback, 0);
+ 	/* <2> work queue */
+ 	rtlpriv->works.hw = hw;
+-	rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+-	if (unlikely(!rtlpriv->works.rtl_wq)) {
+-		pr_err("Failed to allocate work queue\n");
+-		return;
+-	}
++	rtlpriv->works.rtl_wq = wq;
+ 
+ 	INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
+ 			  rtl_watchdog_wq_callback);
+@@ -466,6 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+ 			  rtl_swlps_rfon_wq_callback);
+ 	INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
+ 	INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
++	return 0;
+ }
+ 
+ void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
+@@ -565,9 +567,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
+ 	rtlmac->link_state = MAC80211_NOLINK;
+ 
+ 	/* <6> init deferred work */
+-	_rtl_init_deferred_work(hw);
+-
+-	return 0;
++	return _rtl_init_deferred_work(hw);
+ }
+ EXPORT_SYMBOL_GPL(rtl_init_core);
+ 
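
The rtlwifi change above reorders initialization so the only fallible step, allocating the workqueue, happens before any timers or work items are set up; on failure the function can now simply return -ENOMEM with nothing to undo. A minimal sketch of the same ordering, using malloc() as a stand-in for alloc_workqueue():

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct works {
	void *wq;                      /* stand-in for the workqueue */
	int timers_armed;
};

static int init_deferred_work(struct works *w, int simulate_oom)
{
	void *wq = simulate_oom ? NULL : malloc(64);

	if (!wq)
		return -ENOMEM;        /* nothing set up yet, nothing to undo */

	w->timers_armed = 1;           /* infallible setup happens after */
	w->wq = wq;
	return 0;
}

int main(void)
{
	struct works w = { 0 };

	printf("init (ok)  = %d\n", init_deferred_work(&w, 0));
	free(w.wq);
	w.wq = NULL;
	printf("init (oom) = %d\n", init_deferred_work(&w, 1));
	return 0;
}
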
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index d5d7e0cdd78d8..091b2e77d39ba 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3190,7 +3190,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
+ 		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
+ 	}
+ 
+-	ret = nvme_mpath_init(ctrl, id);
++	ret = nvme_mpath_init_identify(ctrl, id);
+ 	kfree(id);
+ 
+ 	if (ret < 0)
+@@ -4580,6 +4580,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ 		min(default_ps_max_latency_us, (unsigned long)S32_MAX));
+ 
+ 	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
++	nvme_mpath_init_ctrl(ctrl);
+ 
+ 	return 0;
+ out_free_name:
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 6ffa8de2a0d77..5eee603bc2493 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2460,6 +2460,18 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+ static void
+ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
+ {
++	int q;
++
++	/*
++	 * If aborting I/O, the queues are no longer good; mark them
++	 * all as not live.
++	 */
++	if (ctrl->ctrl.queue_count > 1) {
++		for (q = 1; q < ctrl->ctrl.queue_count; q++)
++			clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
++	}
++	clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
++
+ 	/*
+ 	 * If io queues are present, stop them and terminate all outstanding
+ 	 * ios on them. As FC allocates FC exchange for each io, the
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index ec1e454848e58..56852e6edd81a 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -709,9 +709,18 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+ 	put_disk(head->disk);
+ }
+ 
+-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
++void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
+ {
+-	int error;
++	mutex_init(&ctrl->ana_lock);
++	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
++	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
++}
++
++int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
++{
++	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
++	size_t ana_log_size;
++	int error = 0;
+ 
+ 	/* check if multipath is enabled and we have the capability */
+ 	if (!multipath || !ctrl->subsys ||
+@@ -723,37 +732,31 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ 	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
+ 	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
+ 
+-	mutex_init(&ctrl->ana_lock);
+-	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
+-	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
+-		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
+-	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
+-
+-	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
++	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
++		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
++		ctrl->max_namespaces * sizeof(__le32);
++	if (ana_log_size > max_transfer_size) {
+ 		dev_err(ctrl->device,
+-			"ANA log page size (%zd) larger than MDTS (%d).\n",
+-			ctrl->ana_log_size,
+-			ctrl->max_hw_sectors << SECTOR_SHIFT);
++			"ANA log page size (%zd) larger than MDTS (%zd).\n",
++			ana_log_size, max_transfer_size);
+ 		dev_err(ctrl->device, "disabling ANA support.\n");
+-		return 0;
++		goto out_uninit;
+ 	}
+-
+-	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
+-	kfree(ctrl->ana_log_buf);
+-	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
+-	if (!ctrl->ana_log_buf) {
+-		error = -ENOMEM;
+-		goto out;
++	if (ana_log_size > ctrl->ana_log_size) {
++		nvme_mpath_stop(ctrl);
++		kfree(ctrl->ana_log_buf);
++		ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
++		if (!ctrl->ana_log_buf)
++			return -ENOMEM;
+ 	}
+-
++	ctrl->ana_log_size = ana_log_size;
+ 	error = nvme_read_ana_log(ctrl);
+ 	if (error)
+-		goto out_free_ana_log_buf;
++		goto out_uninit;
+ 	return 0;
+-out_free_ana_log_buf:
+-	kfree(ctrl->ana_log_buf);
+-	ctrl->ana_log_buf = NULL;
+-out:
++
++out_uninit:
++	nvme_mpath_uninit(ctrl);
+ 	return error;
+ }
+ 
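
The multipath rework above splits one-time controller state (mutex, timer, work item) out of the per-Identify path, which can run many times over a controller's life and now reallocates the ANA log buffer only when it needs to grow. A rough userspace model of that split; struct ctrl and the ctrl_init()/ctrl_identify() helpers are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>

struct ctrl {
	pthread_mutex_t lock;          /* one-time init */
	char *log_buf;                 /* (re)sized on each identify */
	size_t log_size;
};

static void ctrl_init(struct ctrl *c)
{
	pthread_mutex_init(&c->lock, NULL);   /* safe to do exactly once */
}

static int ctrl_identify(struct ctrl *c, size_t needed)
{
	int ret = 0;

	pthread_mutex_lock(&c->lock);
	if (needed > c->log_size) {    /* reallocate only when growing */
		char *buf = malloc(needed);

		if (!buf) {
			ret = -ENOMEM;
			goto out;
		}
		free(c->log_buf);
		c->log_buf = buf;
	}
	c->log_size = needed;
out:
	pthread_mutex_unlock(&c->lock);
	return ret;
}

int main(void)
{
	struct ctrl c = { .log_buf = NULL, .log_size = 0 };
	int ret;

	ctrl_init(&c);
	ret = ctrl_identify(&c, 128);
	printf("identify(128) = %d, size = %zu\n", ret, c.log_size);
	ret = ctrl_identify(&c, 64);
	printf("identify(64)  = %d, size = %zu\n", ret, c.log_size);
	free(c.log_buf);
	return 0;
}
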
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 07b34175c6ce6..447b0720aef5e 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -668,7 +668,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
+ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
+ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
+ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
+-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
++int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
++void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
+ void nvme_mpath_stop(struct nvme_ctrl *ctrl);
+ bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+@@ -742,7 +743,10 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+ static inline void nvme_trace_bio_complete(struct request *req)
+ {
+ }
+-static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
++static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
++{
++}
++static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
+ 		struct nvme_id_ctrl *id)
+ {
+ 	if (ctrl->subsys->cmic & (1 << 3))
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index d7d7c81d07014..8c2ae6284c3b2 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -940,7 +940,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
+ 		if (ret <= 0)
+ 			return ret;
+ 
+-		nvme_tcp_advance_req(req, ret);
+ 		if (queue->data_digest)
+ 			nvme_tcp_ddgst_update(queue->snd_hash, page,
+ 					offset, ret);
+@@ -957,6 +956,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
+ 			}
+ 			return 1;
+ 		}
++		nvme_tcp_advance_req(req, ret);
+ 	}
+ 	return -EAGAIN;
+ }
+@@ -1137,7 +1137,8 @@ static void nvme_tcp_io_work(struct work_struct *w)
+ 				pending = true;
+ 			else if (unlikely(result < 0))
+ 				break;
+-		}
++		} else
++			pending = !llist_empty(&queue->req_list);
+ 
+ 		result = nvme_tcp_try_recv(queue);
+ 		if (result > 0)
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index a027433b8be84..348057fdc568f 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1371,7 +1371,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+ 		goto out_free_changed_ns_list;
+ 
+ 	if (subsys->cntlid_min > subsys->cntlid_max)
+-		goto out_free_changed_ns_list;
++		goto out_free_sqs;
+ 
+ 	ret = ida_simple_get(&cntlid_ida,
+ 			     subsys->cntlid_min, subsys->cntlid_max,
+diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
+index 715d4376c9979..7fdbdc496597d 100644
+--- a/drivers/nvme/target/io-cmd-file.c
++++ b/drivers/nvme/target/io-cmd-file.c
+@@ -49,9 +49,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
+ 
+ 	ns->file = filp_open(ns->device_path, flags, 0);
+ 	if (IS_ERR(ns->file)) {
+-		pr_err("failed to open file %s: (%ld)\n",
+-				ns->device_path, PTR_ERR(ns->file));
+-		return PTR_ERR(ns->file);
++		ret = PTR_ERR(ns->file);
++		pr_err("failed to open file %s: (%d)\n",
++			ns->device_path, ret);
++		ns->file = NULL;
++		return ret;
+ 	}
+ 
+ 	ret = nvmet_file_ns_revalidate(ns);
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 3e189e753bcf5..14913a4588ecc 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -588,8 +588,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
+ 
+ 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
+ 				0 /* no quirks, we're perfect! */);
+-	if (ret)
++	if (ret) {
++		kfree(ctrl);
+ 		goto out;
++	}
+ 
+ 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+ 		WARN_ON_ONCE(1);
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index bbc4e71a16ff8..38800e86ed8ad 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -294,6 +294,9 @@ mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
+ 	if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
+ 		return NULL;
+ 
++	/* Make sure 'avail->idx' is visible already. */
++	virtio_rmb(false);
++
+ 	idx = vring->next_avail % vr->num;
+ 	head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
+ 	if (WARN_ON(head >= vr->num))
+@@ -322,7 +325,7 @@ static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
+ 	 * done or not. Add a memory barrier here to make sure the update above
+ 	 * completes before updating the idx.
+ 	 */
+-	mb();
++	virtio_mb(false);
+ 	vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
+ }
+ 
+@@ -733,6 +736,12 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ 		desc = NULL;
+ 		fifo->vring[is_rx] = NULL;
+ 
++		/*
++		 * Make sure the loads/stores are ordered before
++		 * returning to virtio.
++		 */
++		virtio_mb(false);
++
+ 		/* Notify upper layer that packet is done. */
+ 		spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
+ 		vring_interrupt(0, vring->vq);
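
The tmfifo hunks above are all about ordering ring accesses against index updates. In C11 terms the rule is acquire on the consumer's index read and release on the producer's index write; a minimal sketch using plain C11 atomics rather than the kernel's virtio barrier helpers:

#include <stdio.h>
#include <stdatomic.h>

static int ring[16];
static atomic_uint avail_idx;

static void produce(unsigned int slot, int value)
{
	ring[slot % 16] = value;
	/* release: the ring write above is visible before the new index */
	atomic_store_explicit(&avail_idx, slot + 1, memory_order_release);
}

static int consume(unsigned int *next)
{
	/* acquire: pairs with the release store in produce() */
	unsigned int idx = atomic_load_explicit(&avail_idx, memory_order_acquire);

	if (*next == idx)
		return -1;                     /* nothing new */
	return ring[(*next)++ % 16];           /* covered by the index read */
}

int main(void)
{
	unsigned int next = 0;

	produce(0, 42);
	printf("consumed: %d\n", consume(&next));
	return 0;
}
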
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 461ec61530ebf..205a096e9ceee 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -688,7 +688,7 @@ config INTEL_HID_EVENT
+ 
+ config INTEL_INT0002_VGPIO
+ 	tristate "Intel ACPI INT0002 Virtual GPIO driver"
+-	depends on GPIOLIB && ACPI
++	depends on GPIOLIB && ACPI && PM_SLEEP
+ 	select GPIOLIB_IRQCHIP
+ 	help
+ 	  Some peripherals on Bay Trail and Cherry Trail platforms signal a
+diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
+index 27a298b7c541b..c97bd4a452422 100644
+--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
++++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
+@@ -271,7 +271,8 @@ int init_dell_smbios_wmi(void)
+ 
+ void exit_dell_smbios_wmi(void)
+ {
+-	wmi_driver_unregister(&dell_smbios_wmi_driver);
++	if (wmi_supported)
++		wmi_driver_unregister(&dell_smbios_wmi_driver);
+ }
+ 
+ MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 6cb5ad4be231d..3878172909219 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -57,8 +57,8 @@ enum {
+ };
+ 
+ enum {
+-	SMBC_CONSERVATION_ON  = 3,
+-	SMBC_CONSERVATION_OFF = 5,
++	SBMC_CONSERVATION_ON  = 3,
++	SBMC_CONSERVATION_OFF = 5,
+ };
+ 
+ enum {
+@@ -182,9 +182,9 @@ static int eval_gbmd(acpi_handle handle, unsigned long *res)
+ 	return eval_int(handle, "GBMD", res);
+ }
+ 
+-static int exec_smbc(acpi_handle handle, unsigned long arg)
++static int exec_sbmc(acpi_handle handle, unsigned long arg)
+ {
+-	return exec_simple_method(handle, "SMBC", arg);
++	return exec_simple_method(handle, "SBMC", arg);
+ }
+ 
+ static int eval_hals(acpi_handle handle, unsigned long *res)
+@@ -477,7 +477,7 @@ static ssize_t conservation_mode_store(struct device *dev,
+ 	if (err)
+ 		return err;
+ 
+-	err = exec_smbc(priv->adev->handle, state ? SMBC_CONSERVATION_ON : SMBC_CONSERVATION_OFF);
++	err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
+ 	if (err)
+ 		return err;
+ 
+@@ -809,6 +809,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
+ {
+ 	struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+ 	struct ideapad_private *priv = dytc->priv;
++	unsigned long output;
+ 	int err;
+ 
+ 	err = mutex_lock_interruptible(&dytc->mutex);
+@@ -829,7 +830,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
+ 
+ 		/* Determine if we are in CQL mode. This alters the commands we do */
+ 		err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
+-				       NULL);
++				       &output);
+ 		if (err)
+ 			goto unlock;
+ 	}
+diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
+index 289c6655d425d..569342aa8926e 100644
+--- a/drivers/platform/x86/intel_int0002_vgpio.c
++++ b/drivers/platform/x86/intel_int0002_vgpio.c
+@@ -51,6 +51,12 @@
+ #define GPE0A_STS_PORT			0x420
+ #define GPE0A_EN_PORT			0x428
+ 
++struct int0002_data {
++	struct gpio_chip chip;
++	int parent_irq;
++	int wake_enable_count;
++};
++
+ /*
+  * As this is not a real GPIO at all, but just a hack to model an event in
+  * ACPI, the get / set functions are dummy functions.
+@@ -98,14 +104,16 @@ static void int0002_irq_mask(struct irq_data *data)
+ static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
+ {
+ 	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+-	struct platform_device *pdev = to_platform_device(chip->parent);
+-	int irq = platform_get_irq(pdev, 0);
++	struct int0002_data *int0002 = container_of(chip, struct int0002_data, chip);
+ 
+-	/* Propagate to parent irq */
++	/*
++	 * Applying the wakeup flag to our parent IRQ is deferred until system
++	 * suspend, because we only want to do this when using s2idle.
++	 */
+ 	if (on)
+-		enable_irq_wake(irq);
++		int0002->wake_enable_count++;
+ 	else
+-		disable_irq_wake(irq);
++		int0002->wake_enable_count--;
+ 
+ 	return 0;
+ }
+@@ -135,7 +143,7 @@ static bool int0002_check_wake(void *data)
+ 	return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
+ }
+ 
+-static struct irq_chip int0002_byt_irqchip = {
++static struct irq_chip int0002_irqchip = {
+ 	.name			= DRV_NAME,
+ 	.irq_ack		= int0002_irq_ack,
+ 	.irq_mask		= int0002_irq_mask,
+@@ -143,21 +151,9 @@ static struct irq_chip int0002_byt_irqchip = {
+ 	.irq_set_wake		= int0002_irq_set_wake,
+ };
+ 
+-static struct irq_chip int0002_cht_irqchip = {
+-	.name			= DRV_NAME,
+-	.irq_ack		= int0002_irq_ack,
+-	.irq_mask		= int0002_irq_mask,
+-	.irq_unmask		= int0002_irq_unmask,
+-	/*
+-	 * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
+-	 * and we don't want to mess with the ACPI SCI irq settings.
+-	 */
+-	.flags			= IRQCHIP_SKIP_SET_WAKE,
+-};
+-
+ static const struct x86_cpu_id int0002_cpu_ids[] = {
+-	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&int0002_byt_irqchip),
+-	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&int0002_cht_irqchip),
++	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
+ 	{}
+ };
+ 
+@@ -172,8 +168,9 @@ static int int0002_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	const struct x86_cpu_id *cpu_id;
+-	struct gpio_chip *chip;
++	struct int0002_data *int0002;
+ 	struct gpio_irq_chip *girq;
++	struct gpio_chip *chip;
+ 	int irq, ret;
+ 
+ 	/* Menlow has a different INT0002 device? <sigh> */
+@@ -185,10 +182,13 @@ static int int0002_probe(struct platform_device *pdev)
+ 	if (irq < 0)
+ 		return irq;
+ 
+-	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+-	if (!chip)
++	int0002 = devm_kzalloc(dev, sizeof(*int0002), GFP_KERNEL);
++	if (!int0002)
+ 		return -ENOMEM;
+ 
++	int0002->parent_irq = irq;
++
++	chip = &int0002->chip;
+ 	chip->label = DRV_NAME;
+ 	chip->parent = dev;
+ 	chip->owner = THIS_MODULE;
+@@ -214,7 +214,7 @@ static int int0002_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	girq = &chip->irq;
+-	girq->chip = (struct irq_chip *)cpu_id->driver_data;
++	girq->chip = &int0002_irqchip;
+ 	/* This lets us handle the parent IRQ in the driver */
+ 	girq->parent_handler = NULL;
+ 	girq->num_parents = 0;
+@@ -230,6 +230,7 @@ static int int0002_probe(struct platform_device *pdev)
+ 
+ 	acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
+ 	device_init_wakeup(dev, true);
++	dev_set_drvdata(dev, int0002);
+ 	return 0;
+ }
+ 
+@@ -240,6 +241,36 @@ static int int0002_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static int int0002_suspend(struct device *dev)
++{
++	struct int0002_data *int0002 = dev_get_drvdata(dev);
++
++	/*
++	 * The INT0002 parent IRQ is often shared with the ACPI GPE IRQ. Don't
++	 * muck with it when firmware-based suspend is used, otherwise we may
++	 * cause spurious wakeups from firmware-managed suspend.
++	 */
++	if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
++		enable_irq_wake(int0002->parent_irq);
++
++	return 0;
++}
++
++static int int0002_resume(struct device *dev)
++{
++	struct int0002_data *int0002 = dev_get_drvdata(dev);
++
++	if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
++		disable_irq_wake(int0002->parent_irq);
++
++	return 0;
++}
++
++static const struct dev_pm_ops int0002_pm_ops = {
++	.suspend = int0002_suspend,
++	.resume = int0002_resume,
++};
++
+ static const struct acpi_device_id int0002_acpi_ids[] = {
+ 	{ "INT0002", 0 },
+ 	{ },
+@@ -250,6 +281,7 @@ static struct platform_driver int0002_driver = {
+ 	.driver = {
+ 		.name			= DRV_NAME,
+ 		.acpi_match_table	= int0002_acpi_ids,
++		.pm			= &int0002_pm_ops,
+ 	},
+ 	.probe	= int0002_probe,
+ 	.remove	= int0002_remove,
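
The INT0002 rework above stops toggling IRQ wake from the set_wake callback and instead counts requests, arming the shared parent IRQ only at suspend time and only for non-firmware (s2idle) suspend. A stripped-down sketch of that deferral; all helpers here are hypothetical:

#include <stdio.h>
#include <stdbool.h>

static int wake_enable_count;
static bool parent_irq_wake_armed;

/* the set_wake callback only adjusts a counter; the real work is deferred */
static void irq_set_wake(bool on)
{
	wake_enable_count += on ? 1 : -1;
}

/* only arm the shared parent IRQ for s2idle-style (non-firmware) suspend */
static void suspend(bool firmware_suspend)
{
	if (!firmware_suspend && wake_enable_count)
		parent_irq_wake_armed = true;
}

static void resume(bool firmware_suspend)
{
	if (!firmware_suspend && wake_enable_count)
		parent_irq_wake_armed = false;
}

int main(void)
{
	irq_set_wake(true);
	suspend(false);
	printf("armed during s2idle: %d\n", parent_irq_wake_armed);
	resume(false);
	printf("armed after resume:  %d\n", parent_irq_wake_armed);
	return 0;
}
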
+diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
+index 50ec53d67a4c0..db4c265287ae6 100644
+--- a/drivers/rapidio/rio_cm.c
++++ b/drivers/rapidio/rio_cm.c
+@@ -2127,6 +2127,14 @@ static int riocm_add_mport(struct device *dev,
+ 		return -ENODEV;
+ 	}
+ 
++	cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
++	if (!cm->rx_wq) {
++		rio_release_inb_mbox(mport, cmbox);
++		rio_release_outb_mbox(mport, cmbox);
++		kfree(cm);
++		return -ENOMEM;
++	}
++
+ 	/*
+ 	 * Allocate and register inbound messaging buffers to be ready
+ 	 * to receive channel and system management requests
+@@ -2137,15 +2145,6 @@ static int riocm_add_mport(struct device *dev,
+ 	cm->rx_slots = RIOCM_RX_RING_SIZE;
+ 	mutex_init(&cm->rx_lock);
+ 	riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
+-	cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+-	if (!cm->rx_wq) {
+-		riocm_error("failed to allocate IBMBOX_%d on %s",
+-			    cmbox, mport->name);
+-		rio_release_outb_mbox(mport, cmbox);
+-		kfree(cm);
+-		return -ENOMEM;
+-	}
+-
+ 	INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
+ 
+ 	cm->tx_slot = 0;
+diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
+index aef6c1ee8bb0e..82becae142299 100644
+--- a/drivers/rtc/rtc-pcf85063.c
++++ b/drivers/rtc/rtc-pcf85063.c
+@@ -478,6 +478,7 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
+ {
+ 	struct clk *clk;
+ 	struct clk_init_data init;
++	struct device_node *node = pcf85063->rtc->dev.parent->of_node;
+ 
+ 	init.name = "pcf85063-clkout";
+ 	init.ops = &pcf85063_clkout_ops;
+@@ -487,15 +488,13 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
+ 	pcf85063->clkout_hw.init = &init;
+ 
+ 	/* optional override of the clockname */
+-	of_property_read_string(pcf85063->rtc->dev.of_node,
+-				"clock-output-names", &init.name);
++	of_property_read_string(node, "clock-output-names", &init.name);
+ 
+ 	/* register the clock */
+ 	clk = devm_clk_register(&pcf85063->rtc->dev, &pcf85063->clkout_hw);
+ 
+ 	if (!IS_ERR(clk))
+-		of_clk_add_provider(pcf85063->rtc->dev.of_node,
+-				    of_clk_src_simple_get, clk);
++		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ 
+ 	return clk;
+ }
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index cec27f2ef70d7..e5076f09d5ed4 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -536,7 +536,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
+ 	if (linkmode_intersects(link->supported_caps, sup_caps))
+ 		lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
+ 
+-	fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
++	if (lport->host && lport->host->shost_data)
++		fc_host_supported_speeds(lport->host) =
++			lport->link_supported_speeds;
+ }
+ 
+ static void qedf_bw_update(void *dev)
+diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
+index 0677295957bc5..615e44af1ca60 100644
+--- a/drivers/scsi/qla2xxx/qla_nx.c
++++ b/drivers/scsi/qla2xxx/qla_nx.c
+@@ -1063,7 +1063,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
+ 		return ret;
+ 	}
+ 
+-	if (qla82xx_flash_set_write_enable(ha))
++	ret = qla82xx_flash_set_write_enable(ha);
++	if (ret < 0)
+ 		goto done_write;
+ 
+ 	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
+diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
+index 0aa58131e7915..d0626773eb386 100644
+--- a/drivers/scsi/ufs/ufs-hisi.c
++++ b/drivers/scsi/ufs/ufs-hisi.c
+@@ -467,21 +467,24 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
+ 	host->hba = hba;
+ 	ufshcd_set_variant(hba, host);
+ 
+-	host->rst  = devm_reset_control_get(dev, "rst");
++	host->rst = devm_reset_control_get(dev, "rst");
+ 	if (IS_ERR(host->rst)) {
+ 		dev_err(dev, "%s: failed to get reset control\n", __func__);
+-		return PTR_ERR(host->rst);
++		err = PTR_ERR(host->rst);
++		goto error;
+ 	}
+ 
+ 	ufs_hisi_set_pm_lvl(hba);
+ 
+ 	err = ufs_hisi_get_resource(host);
+-	if (err) {
+-		ufshcd_set_variant(hba, NULL);
+-		return err;
+-	}
++	if (err)
++		goto error;
+ 
+ 	return 0;
++
++error:
++	ufshcd_set_variant(hba, NULL);
++	return err;
+ }
+ 
+ static int ufs_hi3660_init(struct ufs_hba *hba)
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 0c71a159d08f1..e1e510882ff42 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -2849,7 +2849,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
+  * ufshcd_exec_dev_cmd - API for sending device management requests
+  * @hba: UFS hba
+  * @cmd_type: specifies the type (NOP, Query...)
+- * @timeout: time in seconds
++ * @timeout: timeout in milliseconds
+  *
+  * NOTE: Since there is only one available tag for device management commands,
+  * it is expected you hold the hba->dev_cmd.lock mutex.
+@@ -2879,6 +2879,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
+ 	}
+ 	tag = req->tag;
+ 	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
++	/* Set the timeout such that the SCSI error handler is not activated. */
++	req->timeout = msecs_to_jiffies(2 * timeout);
++	blk_mq_start_request(req);
+ 
+ 	init_completion(&wait);
+ 	lrbp = &hba->lrb[tag];
+diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
+index 337c8d82f74eb..6d0f7062bb870 100644
+--- a/drivers/tee/amdtee/amdtee_private.h
++++ b/drivers/tee/amdtee/amdtee_private.h
+@@ -21,6 +21,7 @@
+ #define TEEC_SUCCESS			0x00000000
+ #define TEEC_ERROR_GENERIC		0xFFFF0000
+ #define TEEC_ERROR_BAD_PARAMETERS	0xFFFF0006
++#define TEEC_ERROR_OUT_OF_MEMORY	0xFFFF000C
+ #define TEEC_ERROR_COMMUNICATION	0xFFFF000E
+ 
+ #define TEEC_ORIGIN_COMMS		0x00000002
+@@ -93,6 +94,18 @@ struct amdtee_shm_data {
+ 	u32     buf_id;
+ };
+ 
++/**
++ * struct amdtee_ta_data - Keeps track of all TAs loaded in AMD Secure
++ *			   Processor
++ * @list_node:	List node for the list of loaded TAs
++ * @ta_handle:	Handle to TA loaded in TEE
++ * @refcount:	Reference count for the loaded TA
++ */
++struct amdtee_ta_data {
++	struct list_head list_node;
++	u32 ta_handle;
++	u32 refcount;
++};
++
+ #define LOWER_TWO_BYTE_MASK	0x0000FFFF
+ 
+ /**
+diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
+index 096dd4d92d39c..07f36ac834c88 100644
+--- a/drivers/tee/amdtee/call.c
++++ b/drivers/tee/amdtee/call.c
+@@ -121,15 +121,69 @@ static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
+ 	return ret;
+ }
+ 
++static DEFINE_MUTEX(ta_refcount_mutex);
++static struct list_head ta_list = LIST_HEAD_INIT(ta_list);
++
++static u32 get_ta_refcount(u32 ta_handle)
++{
++	struct amdtee_ta_data *ta_data;
++	u32 count = 0;
++
++	/* Caller must hold ta_refcount_mutex */
++	list_for_each_entry(ta_data, &ta_list, list_node)
++		if (ta_data->ta_handle == ta_handle)
++			return ++ta_data->refcount;
++
++	ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
++	if (ta_data) {
++		ta_data->ta_handle = ta_handle;
++		ta_data->refcount = 1;
++		count = ta_data->refcount;
++		list_add(&ta_data->list_node, &ta_list);
++	}
++
++	return count;
++}
++
++static u32 put_ta_refcount(u32 ta_handle)
++{
++	struct amdtee_ta_data *ta_data;
++	u32 count = 0;
++
++	/* Caller must hold ta_refcount_mutex */
++	list_for_each_entry(ta_data, &ta_list, list_node)
++		if (ta_data->ta_handle == ta_handle) {
++			count = --ta_data->refcount;
++			if (count == 0) {
++				list_del(&ta_data->list_node);
++				kfree(ta_data);
++				break;
++			}
++		}
++
++	return count;
++}
++
+ int handle_unload_ta(u32 ta_handle)
+ {
+ 	struct tee_cmd_unload_ta cmd = {0};
+-	u32 status;
++	u32 status, count;
+ 	int ret;
+ 
+ 	if (!ta_handle)
+ 		return -EINVAL;
+ 
++	mutex_lock(&ta_refcount_mutex);
++
++	count = put_ta_refcount(ta_handle);
++
++	if (count) {
++		pr_debug("unload ta: not unloading %u count %u\n",
++			 ta_handle, count);
++		ret = -EBUSY;
++		goto unlock;
++	}
++
+ 	cmd.ta_handle = ta_handle;
+ 
+ 	ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
+@@ -137,8 +191,12 @@ int handle_unload_ta(u32 ta_handle)
+ 	if (!ret && status != 0) {
+ 		pr_err("unload ta: status = 0x%x\n", status);
+ 		ret = -EBUSY;
++	} else {
++		pr_debug("unloaded ta handle %u\n", ta_handle);
+ 	}
+ 
++unlock:
++	mutex_unlock(&ta_refcount_mutex);
+ 	return ret;
+ }
+ 
+@@ -340,7 +398,8 @@ int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ 
+ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
+ {
+-	struct tee_cmd_load_ta cmd = {0};
++	struct tee_cmd_unload_ta unload_cmd = {};
++	struct tee_cmd_load_ta load_cmd = {};
+ 	phys_addr_t blob;
+ 	int ret;
+ 
+@@ -353,21 +412,36 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
+ 		return -EINVAL;
+ 	}
+ 
+-	cmd.hi_addr = upper_32_bits(blob);
+-	cmd.low_addr = lower_32_bits(blob);
+-	cmd.size = size;
++	load_cmd.hi_addr = upper_32_bits(blob);
++	load_cmd.low_addr = lower_32_bits(blob);
++	load_cmd.size = size;
+ 
+-	ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
+-				  sizeof(cmd), &arg->ret);
++	mutex_lock(&ta_refcount_mutex);
++
++	ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
++				  sizeof(load_cmd), &arg->ret);
+ 	if (ret) {
+ 		arg->ret_origin = TEEC_ORIGIN_COMMS;
+ 		arg->ret = TEEC_ERROR_COMMUNICATION;
+-	} else {
+-		set_session_id(cmd.ta_handle, 0, &arg->session);
++	} else if (arg->ret == TEEC_SUCCESS) {
++		ret = get_ta_refcount(load_cmd.ta_handle);
++		if (!ret) {
++			arg->ret_origin = TEEC_ORIGIN_COMMS;
++			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
++
++			/* Unload the TA on error */
++			unload_cmd.ta_handle = load_cmd.ta_handle;
++			psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
++					    (void *)&unload_cmd,
++					    sizeof(unload_cmd), &ret);
++		} else {
++			set_session_id(load_cmd.ta_handle, 0, &arg->session);
++		}
+ 	}
++	mutex_unlock(&ta_refcount_mutex);
+ 
+ 	pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
+-		 cmd.ta_handle, arg->ret_origin, arg->ret);
++		 load_cmd.ta_handle, arg->ret_origin, arg->ret);
+ 
+ 	return 0;
+ }
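
The core of the amdtee change above is the handle refcount: a mutex-protected list maps each TA handle to a use count, the first get allocates a node, and only the put that drops the count to zero removes it (the point at which the real driver actually issues the unload command). A self-contained userspace sketch of the same bookkeeping, using pthreads in place of the kernel mutex:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct ta_data {
	struct ta_data *next;
	unsigned int handle;
	unsigned int refcount;
};

static pthread_mutex_t ta_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ta_data *ta_list;

/* returns the new count, or 0 on allocation failure */
static unsigned int get_ta_refcount(unsigned int handle)
{
	pthread_mutex_lock(&ta_lock);
	for (struct ta_data *t = ta_list; t; t = t->next)
		if (t->handle == handle) {
			unsigned int c = ++t->refcount;

			pthread_mutex_unlock(&ta_lock);
			return c;
		}

	struct ta_data *t = calloc(1, sizeof(*t));
	unsigned int c = 0;

	if (t) {
		t->handle = handle;
		t->refcount = c = 1;
		t->next = ta_list;
		ta_list = t;
	}
	pthread_mutex_unlock(&ta_lock);
	return c;
}

/* returns the remaining count; 0 means the last user is gone */
static unsigned int put_ta_refcount(unsigned int handle)
{
	unsigned int c = 0;

	pthread_mutex_lock(&ta_lock);
	for (struct ta_data **p = &ta_list; *p; p = &(*p)->next)
		if ((*p)->handle == handle) {
			struct ta_data *t = *p;

			c = --t->refcount;
			if (!c) {
				*p = t->next;  /* unlink and free the node */
				free(t);
			}
			break;
		}
	pthread_mutex_unlock(&ta_lock);
	return c;
}

int main(void)
{
	printf("get = %u\n", get_ta_refcount(7));  /* 1 */
	printf("get = %u\n", get_ta_refcount(7));  /* 2 */
	printf("put = %u\n", put_ta_refcount(7));  /* 1 */
	printf("put = %u\n", put_ta_refcount(7));  /* 0: unload now */
	return 0;
}
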
+diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
+index 8a6a8f30bb427..da6b88e80dc07 100644
+--- a/drivers/tee/amdtee/core.c
++++ b/drivers/tee/amdtee/core.c
+@@ -59,10 +59,9 @@ static void release_session(struct amdtee_session *sess)
+ 			continue;
+ 
+ 		handle_close_session(sess->ta_handle, sess->session_info[i]);
++		handle_unload_ta(sess->ta_handle);
+ 	}
+ 
+-	/* Unload Trusted Application once all sessions are closed */
+-	handle_unload_ta(sess->ta_handle);
+ 	kfree(sess);
+ }
+ 
+@@ -224,8 +223,6 @@ static void destroy_session(struct kref *ref)
+ 	struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ 						   refcount);
+ 
+-	/* Unload the TA from TEE */
+-	handle_unload_ta(sess->ta_handle);
+ 	mutex_lock(&session_list_mutex);
+ 	list_del(&sess->list_node);
+ 	mutex_unlock(&session_list_mutex);
+@@ -238,7 +235,7 @@ int amdtee_open_session(struct tee_context *ctx,
+ {
+ 	struct amdtee_context_data *ctxdata = ctx->data;
+ 	struct amdtee_session *sess = NULL;
+-	u32 session_info;
++	u32 session_info, ta_handle;
+ 	size_t ta_size;
+ 	int rc, i;
+ 	void *ta;
+@@ -259,11 +256,14 @@ int amdtee_open_session(struct tee_context *ctx,
+ 	if (arg->ret != TEEC_SUCCESS)
+ 		goto out;
+ 
++	ta_handle = get_ta_handle(arg->session);
++
+ 	mutex_lock(&session_list_mutex);
+ 	sess = alloc_session(ctxdata, arg->session);
+ 	mutex_unlock(&session_list_mutex);
+ 
+ 	if (!sess) {
++		handle_unload_ta(ta_handle);
+ 		rc = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -277,6 +277,7 @@ int amdtee_open_session(struct tee_context *ctx,
+ 
+ 	if (i >= TEE_NUM_SESSIONS) {
+ 		pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
++		handle_unload_ta(ta_handle);
+ 		kref_put(&sess->refcount, destroy_session);
+ 		rc = -ENOMEM;
+ 		goto out;
+@@ -289,12 +290,13 @@ int amdtee_open_session(struct tee_context *ctx,
+ 		spin_lock(&sess->lock);
+ 		clear_bit(i, sess->sess_mask);
+ 		spin_unlock(&sess->lock);
++		handle_unload_ta(ta_handle);
+ 		kref_put(&sess->refcount, destroy_session);
+ 		goto out;
+ 	}
+ 
+ 	sess->session_info[i] = session_info;
+-	set_session_id(sess->ta_handle, i, &arg->session);
++	set_session_id(ta_handle, i, &arg->session);
+ out:
+ 	free_pages((u64)ta, get_order(ta_size));
+ 	return rc;
+@@ -329,6 +331,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
+ 
+ 	/* Close the session */
+ 	handle_close_session(ta_handle, session_info);
++	handle_unload_ta(ta_handle);
+ 
+ 	kref_put(&sess->refcount, destroy_session);
+ 
+diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
+index e0c00a1b07639..51b0ecabf2ec9 100644
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -818,9 +818,6 @@ static int mvebu_uart_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!match)
+-		return -ENODEV;
+-
+ 	/* Assume that all UART ports have a DT alias or none has */
+ 	id = of_alias_get_id(pdev->dev.of_node, "serial");
+ 	if (!pdev->dev.of_node || id < 0)
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 0cc360da5426a..53cbf2c3f0330 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -1171,7 +1171,7 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
+ 	/* Resizes the resolution of the display adapter */
+ 	int err = 0;
+ 
+-	if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
++	if (vc->vc_sw->con_resize)
+ 		err = vc->vc_sw->con_resize(vc, width, height, user);
+ 
+ 	return err;
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index 89aeaf3c1bca6..0e0cd9e9e589e 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -671,21 +671,58 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
+ 	if (copy_from_user(&v, cs, sizeof(struct vt_consize)))
+ 		return -EFAULT;
+ 
+-	if (v.v_vlin)
+-		pr_info_once("\"struct vt_consize\"->v_vlin is ignored. Please report if you need this.\n");
+-	if (v.v_clin)
+-		pr_info_once("\"struct vt_consize\"->v_clin is ignored. Please report if you need this.\n");
++	/* FIXME: Should check the copies properly */
++	if (!v.v_vlin)
++		v.v_vlin = vc->vc_scan_lines;
++
++	if (v.v_clin) {
++		int rows = v.v_vlin / v.v_clin;
++		if (v.v_rows != rows) {
++			if (v.v_rows) /* Parameters don't add up */
++				return -EINVAL;
++			v.v_rows = rows;
++		}
++	}
++
++	if (v.v_vcol && v.v_ccol) {
++		int cols = v.v_vcol / v.v_ccol;
++		if (v.v_cols != cols) {
++			if (v.v_cols)
++				return -EINVAL;
++			v.v_cols = cols;
++		}
++	}
++
++	if (v.v_clin > 32)
++		return -EINVAL;
+ 
+-	console_lock();
+ 	for (i = 0; i < MAX_NR_CONSOLES; i++) {
+-		vc = vc_cons[i].d;
++		struct vc_data *vcp;
+ 
+-		if (vc) {
+-			vc->vc_resize_user = 1;
+-			vc_resize(vc, v.v_cols, v.v_rows);
++		if (!vc_cons[i].d)
++			continue;
++		console_lock();
++		vcp = vc_cons[i].d;
++		if (vcp) {
++			int ret;
++			int save_scan_lines = vcp->vc_scan_lines;
++			int save_cell_height = vcp->vc_cell_height;
++
++			if (v.v_vlin)
++				vcp->vc_scan_lines = v.v_vlin;
++			if (v.v_clin)
++				vcp->vc_cell_height = v.v_clin;
++			vcp->vc_resize_user = 1;
++			ret = vc_resize(vcp, v.v_cols, v.v_rows);
++			if (ret) {
++				vcp->vc_scan_lines = save_scan_lines;
++				vcp->vc_cell_height = save_cell_height;
++				console_unlock();
++				return ret;
++			}
+ 		}
++		console_unlock();
+ 	}
+-	console_unlock();
+ 
+ 	return 0;
+ }
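
vt_resizex() above restores cross-checking of the user-supplied geometry: the total scan lines divided by the per-cell height must agree with any explicit row count (columns are checked the same way), and the cell height is capped at 32. A small sketch of just the row-side arithmetic, with a hypothetical check_consize() helper:

#include <stdio.h>
#include <errno.h>

struct consize {
	unsigned int rows;     /* character rows (0 = derive) */
	unsigned int vlin;     /* total scan lines */
	unsigned int clin;     /* scan lines per character cell */
};

/* mirrors the row-side checks; the column side works the same way */
static int check_consize(struct consize *v)
{
	if (v->clin) {
		unsigned int rows = v->vlin / v->clin;

		if (v->rows && v->rows != rows)
			return -EINVAL;   /* parameters don't add up */
		v->rows = rows;           /* fill in the implied value */
	}
	if (v->clin > 32)
		return -EINVAL;           /* cell height limit */
	return 0;
}

int main(void)
{
	struct consize ok  = { .rows = 0,  .vlin = 480, .clin = 16 };
	struct consize bad = { .rows = 25, .vlin = 480, .clin = 16 };
	int ret = check_consize(&ok);

	printf("ok:  %d, derived rows = %u\n", ret, ok.rows);
	printf("bad: %d (480 / 16 = 30 != 25)\n", check_consize(&bad));
	return 0;
}
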
+diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
+index 0330ba99730e2..652fe25475878 100644
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -291,13 +291,15 @@ hv_uio_probe(struct hv_device *dev,
+ 	pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
+ 	if (pdata->recv_buf == NULL) {
+ 		ret = -ENOMEM;
+-		goto fail_close;
++		goto fail_free_ring;
+ 	}
+ 
+ 	ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
+ 				    RECV_BUFFER_SIZE, &pdata->recv_gpadl);
+-	if (ret)
++	if (ret) {
++		vfree(pdata->recv_buf);
+ 		goto fail_close;
++	}
+ 
+ 	/* put Global Physical Address Label in name */
+ 	snprintf(pdata->recv_name, sizeof(pdata->recv_name),
+@@ -316,8 +318,10 @@ hv_uio_probe(struct hv_device *dev,
+ 
+ 	ret = vmbus_establish_gpadl(channel, pdata->send_buf,
+ 				    SEND_BUFFER_SIZE, &pdata->send_gpadl);
+-	if (ret)
++	if (ret) {
++		vfree(pdata->send_buf);
+ 		goto fail_close;
++	}
+ 
+ 	snprintf(pdata->send_name, sizeof(pdata->send_name),
+ 		 "send:%u", pdata->send_gpadl);
+@@ -347,6 +351,8 @@ hv_uio_probe(struct hv_device *dev,
+ 
+ fail_close:
+ 	hv_uio_cleanup(dev, pdata);
++fail_free_ring:
++	vmbus_free_ring(dev->channel);
+ 
+ 	return ret;
+ }
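
The hv_uio hunks above fix a leak in a probe error path: a buffer that was allocated but whose GPADL registration failed is not yet covered by the shared cleanup helper, so the error path must free it directly before jumping to the common label. A contrived but runnable sketch of that ownership rule; all names are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int register_buf(void *buf, int simulate_fail)
{
	(void)buf;
	return simulate_fail ? -EIO : 0;
}

static int probe(int fail_register)
{
	void *buf = malloc(4096);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = register_buf(buf, fail_register);
	if (ret) {
		free(buf);     /* not yet covered by the common cleanup */
		goto fail;
	}
	/* from here on, the common cleanup path would own buf */
	free(buf);
	return 0;
fail:
	/* shared label: undo everything set up before buf was allocated */
	return ret;
}

int main(void)
{
	printf("probe(ok)   = %d\n", probe(0));
	printf("probe(fail) = %d\n", probe(1));
	return 0;
}
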
+diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
+index c7d681fef198d..3bb0b00754679 100644
+--- a/drivers/uio/uio_pci_generic.c
++++ b/drivers/uio/uio_pci_generic.c
+@@ -82,7 +82,7 @@ static int probe(struct pci_dev *pdev,
+ 	}
+ 
+ 	if (pdev->irq && !pci_intx_mask_supported(pdev))
+-		return -ENOMEM;
++		return -ENODEV;
+ 
+ 	gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
+ 	if (!gdev)
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index 962c12be97741..631eb918f8e14 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -383,7 +383,7 @@ static void vgacon_init(struct vc_data *c, int init)
+ 		vc_resize(c, vga_video_num_columns, vga_video_num_lines);
+ 
+ 	c->vc_scan_lines = vga_scan_lines;
+-	c->vc_font.height = vga_video_font_height;
++	c->vc_font.height = c->vc_cell_height = vga_video_font_height;
+ 	c->vc_complement_mask = 0x7700;
+ 	if (vga_512_chars)
+ 		c->vc_hi_font_mask = 0x0800;
+@@ -518,32 +518,32 @@ static void vgacon_cursor(struct vc_data *c, int mode)
+ 		switch (CUR_SIZE(c->vc_cursor_type)) {
+ 		case CUR_UNDERLINE:
+ 			vgacon_set_cursor_size(c->state.x,
+-					       c->vc_font.height -
+-					       (c->vc_font.height <
++					       c->vc_cell_height -
++					       (c->vc_cell_height <
+ 						10 ? 2 : 3),
+-					       c->vc_font.height -
+-					       (c->vc_font.height <
++					       c->vc_cell_height -
++					       (c->vc_cell_height <
+ 						10 ? 1 : 2));
+ 			break;
+ 		case CUR_TWO_THIRDS:
+ 			vgacon_set_cursor_size(c->state.x,
+-					       c->vc_font.height / 3,
+-					       c->vc_font.height -
+-					       (c->vc_font.height <
++					       c->vc_cell_height / 3,
++					       c->vc_cell_height -
++					       (c->vc_cell_height <
+ 						10 ? 1 : 2));
+ 			break;
+ 		case CUR_LOWER_THIRD:
+ 			vgacon_set_cursor_size(c->state.x,
+-					       (c->vc_font.height * 2) / 3,
+-					       c->vc_font.height -
+-					       (c->vc_font.height <
++					       (c->vc_cell_height * 2) / 3,
++					       c->vc_cell_height -
++					       (c->vc_cell_height <
+ 						10 ? 1 : 2));
+ 			break;
+ 		case CUR_LOWER_HALF:
+ 			vgacon_set_cursor_size(c->state.x,
+-					       c->vc_font.height / 2,
+-					       c->vc_font.height -
+-					       (c->vc_font.height <
++					       c->vc_cell_height / 2,
++					       c->vc_cell_height -
++					       (c->vc_cell_height <
+ 						10 ? 1 : 2));
+ 			break;
+ 		case CUR_NONE:
+@@ -554,7 +554,7 @@ static void vgacon_cursor(struct vc_data *c, int mode)
+ 			break;
+ 		default:
+ 			vgacon_set_cursor_size(c->state.x, 1,
+-					       c->vc_font.height);
++					       c->vc_cell_height);
+ 			break;
+ 		}
+ 		break;
+@@ -565,13 +565,13 @@ static int vgacon_doresize(struct vc_data *c,
+ 		unsigned int width, unsigned int height)
+ {
+ 	unsigned long flags;
+-	unsigned int scanlines = height * c->vc_font.height;
++	unsigned int scanlines = height * c->vc_cell_height;
+ 	u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
+ 
+ 	raw_spin_lock_irqsave(&vga_lock, flags);
+ 
+ 	vgacon_xres = width * VGA_FONTWIDTH;
+-	vgacon_yres = height * c->vc_font.height;
++	vgacon_yres = height * c->vc_cell_height;
+ 	if (vga_video_type >= VIDEO_TYPE_VGAC) {
+ 		outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
+ 		max_scan = inb_p(vga_video_port_val);
+@@ -626,9 +626,9 @@ static int vgacon_doresize(struct vc_data *c,
+ static int vgacon_switch(struct vc_data *c)
+ {
+ 	int x = c->vc_cols * VGA_FONTWIDTH;
+-	int y = c->vc_rows * c->vc_font.height;
++	int y = c->vc_rows * c->vc_cell_height;
+ 	int rows = screen_info.orig_video_lines * vga_default_font_height/
+-		c->vc_font.height;
++		c->vc_cell_height;
+ 	/*
+ 	 * We need to save screen size here as it's the only way
+ 	 * we can spot the screen has been resized and we need to
+@@ -1041,7 +1041,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
+ 				cursor_size_lastto = 0;
+ 				c->vc_sw->con_cursor(c, CM_DRAW);
+ 			}
+-			c->vc_font.height = fontheight;
++			c->vc_font.height = c->vc_cell_height = fontheight;
+ 			vc_resize(c, 0, rows);	/* Adjust console size */
+ 		}
+ 	}
+@@ -1089,12 +1089,20 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
+ 	if ((width << 1) * height > vga_vram_size)
+ 		return -EINVAL;
+ 
++	if (user) {
++		/*
++		 * Ho ho!  Someone (svgatextmode, eh?) may have reprogrammed
++		 * the video mode!  Set the new defaults then and go away.
++		 */
++		screen_info.orig_video_cols = width;
++		screen_info.orig_video_lines = height;
++		vga_default_font_height = c->vc_cell_height;
++		return 0;
++	}
+ 	if (width % 2 || width > screen_info.orig_video_cols ||
+ 	    height > (screen_info.orig_video_lines * vga_default_font_height)/
+-	    c->vc_font.height)
+-		/* let svgatextmode tinker with video timings and
+-		   return success */
+-		return (user) ? 0 : -EINVAL;
++	    c->vc_cell_height)
++		return -EINVAL;
+ 
+ 	if (con_is_visible(c) && !vga_is_gfx) /* who knows */
+ 		vgacon_doresize(c, width, height);
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 3406067985b1f..22bb3892f6bd1 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2019,7 +2019,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ 			return -EINVAL;
+ 
+ 		pr_debug("resize now %ix%i\n", var.xres, var.yres);
+-		if (con_is_visible(vc)) {
++		if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
+ 			var.activate = FB_ACTIVATE_NOW |
+ 				FB_ACTIVATE_FORCE;
+ 			fb_set_var(info, &var);
+diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
+index 8bbac7182ad32..bd3d07aa4f0ec 100644
+--- a/drivers/video/fbdev/hgafb.c
++++ b/drivers/video/fbdev/hgafb.c
+@@ -286,7 +286,7 @@ static int hga_card_detect(void)
+ 
+ 	hga_vram = ioremap(0xb0000, hga_vram_len);
+ 	if (!hga_vram)
+-		goto error;
++		return -ENOMEM;
+ 
+ 	if (request_region(0x3b0, 12, "hgafb"))
+ 		release_io_ports = 1;
+@@ -346,13 +346,18 @@ static int hga_card_detect(void)
+ 			hga_type_name = "Hercules";
+ 			break;
+ 	}
+-	return 1;
++	return 0;
+ error:
+ 	if (release_io_ports)
+ 		release_region(0x3b0, 12);
+ 	if (release_io_port)
+ 		release_region(0x3bf, 1);
+-	return 0;
++
++	iounmap(hga_vram);
++
++	pr_err("hgafb: HGA card not detected.\n");
++
++	return -EINVAL;
+ }
+ 
+ /**
+@@ -550,13 +555,11 @@ static const struct fb_ops hgafb_ops = {
+ static int hgafb_probe(struct platform_device *pdev)
+ {
+ 	struct fb_info *info;
++	int ret;
+ 
+-	if (! hga_card_detect()) {
+-		printk(KERN_INFO "hgafb: HGA card not detected.\n");
+-		if (hga_vram)
+-			iounmap(hga_vram);
+-		return -EINVAL;
+-	}
++	ret = hga_card_detect();
++	if (ret)
++		return ret;
+ 
+ 	printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
+ 		hga_type_name, hga_vram_len/1024);
+diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
+index 3ac053b884958..e04411701ec85 100644
+--- a/drivers/video/fbdev/imsttfb.c
++++ b/drivers/video/fbdev/imsttfb.c
+@@ -1512,11 +1512,6 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	info->fix.smem_start = addr;
+ 	info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
+ 					    0x400000 : 0x800000);
+-	if (!info->screen_base) {
+-		release_mem_region(addr, size);
+-		framebuffer_release(info);
+-		return -ENOMEM;
+-	}
+ 	info->fix.mmio_start = addr + 0x800000;
+ 	par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+ 	par->cmap_regs_phys = addr + 0x840000;
+diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
+index 5447b5ab7c766..1221cfd914cb0 100644
+--- a/drivers/xen/xen-pciback/vpci.c
++++ b/drivers/xen/xen-pciback/vpci.c
+@@ -70,7 +70,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+ 				   struct pci_dev *dev, int devid,
+ 				   publish_pci_dev_cb publish_cb)
+ {
+-	int err = 0, slot, func = -1;
++	int err = 0, slot, func = PCI_FUNC(dev->devfn);
+ 	struct pci_dev_entry *t, *dev_entry;
+ 	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ 
+@@ -95,22 +95,25 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+ 
+ 	/*
+ 	 * Keep multi-function devices together on the virtual PCI bus, except
+-	 * virtual functions.
++	 * that we want to keep virtual functions at func 0 on their own. They
++	 * aren't multi-function devices and hence their presence at func 0
++	 * may cause guests to not scan the other functions.
+ 	 */
+-	if (!dev->is_virtfn) {
++	if (!dev->is_virtfn || func) {
+ 		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ 			if (list_empty(&vpci_dev->dev_list[slot]))
+ 				continue;
+ 
+ 			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
+ 				       struct pci_dev_entry, list);
++			if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
++				continue;
+ 
+ 			if (match_slot(dev, t->dev)) {
+ 				dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
+-					 slot, PCI_FUNC(dev->devfn));
++					 slot, func);
+ 				list_add_tail(&dev_entry->list,
+ 					      &vpci_dev->dev_list[slot]);
+-				func = PCI_FUNC(dev->devfn);
+ 				goto unlock;
+ 			}
+ 		}
+@@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+ 				 slot);
+ 			list_add_tail(&dev_entry->list,
+ 				      &vpci_dev->dev_list[slot]);
+-			func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
+ 			goto unlock;
+ 		}
+ 	}
+diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
+index 5188f02e75fb3..c09c7ebd6968d 100644
+--- a/drivers/xen/xen-pciback/xenbus.c
++++ b/drivers/xen/xen-pciback/xenbus.c
+@@ -359,7 +359,8 @@ out:
+ 	return err;
+ }
+ 
+-static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
++static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev,
++				 enum xenbus_state state)
+ {
+ 	int err = 0;
+ 	int num_devs;
+@@ -373,9 +374,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
+ 	dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
+ 
+ 	mutex_lock(&pdev->dev_lock);
+-	/* Make sure we only reconfigure once */
+-	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+-	    XenbusStateReconfiguring)
++	if (xenbus_read_driver_state(pdev->xdev->nodename) != state)
+ 		goto out;
+ 
+ 	err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
+@@ -500,6 +499,10 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
+ 		}
+ 	}
+ 
++	if (state != XenbusStateReconfiguring)
++		/* Make sure we only reconfigure once. */
++		goto out;
++
+ 	err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
+ 	if (err) {
+ 		xenbus_dev_fatal(pdev->xdev, err,
+@@ -525,7 +528,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
+ 		break;
+ 
+ 	case XenbusStateReconfiguring:
+-		xen_pcibk_reconfigure(pdev);
++		xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring);
+ 		break;
+ 
+ 	case XenbusStateConnected:
+@@ -664,6 +667,15 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch,
+ 		xen_pcibk_setup_backend(pdev);
+ 		break;
+ 
++	case XenbusStateInitialised:
++		/*
++		 * We typically move to Initialised when the first device is
++		 * added. Hence devices added subsequently may need
++		 * reconfiguring.
++		 */
++		xen_pcibk_reconfigure(pdev, XenbusStateInitialised);
++		break;
++
+ 	default:
+ 		break;
+ 	}
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 8c4d2eaa5d58b..81b93c9c659b7 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3253,6 +3253,7 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
+ 		inode = list_first_entry(&fs_info->delayed_iputs,
+ 				struct btrfs_inode, delayed_iput);
+ 		run_delayed_iput_locked(fs_info, inode);
++		cond_resched_lock(&fs_info->delayed_iput_lock);
+ 	}
+ 	spin_unlock(&fs_info->delayed_iput_lock);
+ }
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 47e76e79b3d6b..53624fca0747a 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -6457,6 +6457,24 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
+ 	    (!old_dir || old_dir->logged_trans < trans->transid))
+ 		return;
+ 
++	/*
++	 * If we are doing a rename (old_dir is not NULL) from a directory that
++	 * was previously logged, make sure the next log attempt on the directory
++	 * is not skipped and logs the inode again. This is because the log may
++	 * not currently be authoritative for a range including the old
++	 * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
++	 * sure after a log replay we do not end up with both the new and old
++	 * dentries around (in case the inode is a directory we would have a
++	 * directory with two hard links and 2 inode references for different
++	 * parents). The next log attempt of old_dir will happen at
++	 * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
++	 * below, because we have previously set inode->last_unlink_trans to the
++	 * current transaction ID, either here or at btrfs_record_unlink_dir() in
++	 * case inode is a directory.
++	 */
++	if (old_dir)
++		old_dir->logged_trans = 0;
++
+ 	btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
+ 	ctx.logging_new_name = true;
+ 	/*
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 5df6daacc230b..e9a530da4255c 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1822,6 +1822,8 @@ smb2_copychunk_range(const unsigned int xid,
+ 			cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
+ 
+ 		/* Request server copy to target from src identified by key */
++		kfree(retbuf);
++		retbuf = NULL;
+ 		rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
+ 			trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
+ 			true /* is_fsctl */, (char *)pcchunk,
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index 943e523f4c9df..3d8623139538b 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -296,10 +296,8 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
+ 	struct extent_crypt_result ecr;
+ 	int rc = 0;
+ 
+-	if (!crypt_stat || !crypt_stat->tfm
+-	       || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
+-		return -EINVAL;
+-
++	BUG_ON(!crypt_stat || !crypt_stat->tfm
++	       || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
+ 	if (unlikely(ecryptfs_verbosity > 0)) {
+ 		ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
+ 				crypt_stat->key_size);
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 99df69b848224..c63d0a7f7ba4f 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -532,7 +532,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
+ 			 * the subpool and global reserve usage count can need
+ 			 * to be adjusted.
+ 			 */
+-			VM_BUG_ON(PagePrivate(page));
++			VM_BUG_ON(HPageRestoreReserve(page));
+ 			remove_huge_page(page);
+ 			freed++;
+ 			if (!truncate_op) {
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 56bb5a5fdc0d0..4d2e827ddb598 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -3853,8 +3853,12 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
+ 	if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
+ 		return -EINVAL;
+ 
++	/* Don't yet support filesystem mountable in user namespaces. */
++	if (m->mnt_sb->s_user_ns != &init_user_ns)
++		return -EINVAL;
++
+ 	/* We're not controlling the superblock. */
+-	if (!ns_capable(m->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
++	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+ 	/* Mount has already been visible in the filesystem hierarchy. */
+diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
+index 153734816b49c..d5b9c8d40c18e 100644
+--- a/include/linux/console_struct.h
++++ b/include/linux/console_struct.h
+@@ -101,6 +101,7 @@ struct vc_data {
+ 	unsigned int	vc_rows;
+ 	unsigned int	vc_size_row;		/* Bytes per row */
+ 	unsigned int	vc_scan_lines;		/* # of scan lines */
++	unsigned int	vc_cell_height;		/* CRTC character cell height */
+ 	unsigned long	vc_origin;		/* [!] Start of real screen */
+ 	unsigned long	vc_scr_end;		/* [!] End of real screen */
+ 	unsigned long	vc_visible_origin;	/* [!] Top of visible window */
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index 8031464ed4ae2..4e4e61111500c 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -1004,12 +1004,14 @@ static inline void __pipelined_op(struct wake_q_head *wake_q,
+ 				  struct mqueue_inode_info *info,
+ 				  struct ext_wait_queue *this)
+ {
++	struct task_struct *task;
++
+ 	list_del(&this->list);
+-	get_task_struct(this->task);
++	task = get_task_struct(this->task);
+ 
+ 	/* see MQ_BARRIER for purpose/pairing */
+ 	smp_store_release(&this->state, STATE_READY);
+-	wake_q_add_safe(wake_q, this->task);
++	wake_q_add_safe(wake_q, task);
+ }
+ 
+ /* pipelined_send() - send a message directly to the task waiting in
+diff --git a/ipc/msg.c b/ipc/msg.c
+index acd1bc7af55a2..6e6c8e0c9380e 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -251,11 +251,13 @@ static void expunge_all(struct msg_queue *msq, int res,
+ 	struct msg_receiver *msr, *t;
+ 
+ 	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
+-		get_task_struct(msr->r_tsk);
++		struct task_struct *r_tsk;
++
++		r_tsk = get_task_struct(msr->r_tsk);
+ 
+ 		/* see MSG_BARRIER for purpose/pairing */
+ 		smp_store_release(&msr->r_msg, ERR_PTR(res));
+-		wake_q_add_safe(wake_q, msr->r_tsk);
++		wake_q_add_safe(wake_q, r_tsk);
+ 	}
+ }
+ 
+diff --git a/ipc/sem.c b/ipc/sem.c
+index f6c30a85dadf9..7d9c06b0ad6e2 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -784,12 +784,14 @@ would_block:
+ static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
+ 					     struct wake_q_head *wake_q)
+ {
+-	get_task_struct(q->sleeper);
++	struct task_struct *sleeper;
++
++	sleeper = get_task_struct(q->sleeper);
+ 
+ 	/* see SEM_BARRIER_2 for purpose/pairing */
+ 	smp_store_release(&q->status, error);
+ 
+-	wake_q_add_safe(wake_q, q->sleeper);
++	wake_q_add_safe(wake_q, sleeper);
+ }
+ 
+ static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
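
All three ipc hunks above fix the same race: once smp_store_release() publishes the ready state, the sleeping side may wake and free its queue entry, so the waker must take the task reference first and never touch the entry again. A C11 sketch of the ordering; get_task() and wake_prepare() are hypothetical stand-ins for get_task_struct() and the prepare helpers:

#include <stdio.h>
#include <stdatomic.h>

struct task { int refs; };

struct wait_entry {
	struct task *task;
	atomic_int state;      /* 0 = waiting, 1 = ready */
};

/* like get_task_struct(): takes a reference and returns its argument */
static struct task *get_task(struct task *t)
{
	t->refs++;
	return t;
}

static struct task *wake_prepare(struct wait_entry *e)
{
	struct task *t = get_task(e->task);

	/* in the real code the waiter may free *e once it sees this store */
	atomic_store_explicit(&e->state, 1, memory_order_release);
	return t;              /* use the saved pointer, never e->task */
}

int main(void)
{
	struct task task = { .refs = 1 };
	struct wait_entry e = { .task = &task };
	struct task *t = wake_prepare(&e);

	printf("woke task with refs = %d\n", t->refs);
	return 0;
}
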
+diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
+index 209ad8dcfcecf..62a52be8f6ba9 100644
+--- a/kernel/kcsan/debugfs.c
++++ b/kernel/kcsan/debugfs.c
+@@ -261,9 +261,10 @@ static const struct file_operations debugfs_ops =
+ 	.release = single_release
+ };
+ 
+-static void __init kcsan_debugfs_init(void)
++static int __init kcsan_debugfs_init(void)
+ {
+ 	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
++	return 0;
+ }
+ 
+ late_initcall(kcsan_debugfs_init);
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index f160f1c97ca1e..f39c383c71804 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -5731,7 +5731,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
+ {
+ 	unsigned long flags;
+ 
+-	trace_lock_acquired(lock, ip);
++	trace_lock_contended(lock, ip);
+ 
+ 	if (unlikely(!lock_stat || !lockdep_enabled()))
+ 		return;
+@@ -5749,7 +5749,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
+ {
+ 	unsigned long flags;
+ 
+-	trace_lock_contended(lock, ip);
++	trace_lock_acquired(lock, ip);
+ 
+ 	if (unlikely(!lock_stat || !lockdep_enabled()))
+ 		return;
+diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
+index a7276aaf2abc0..db9301591e3fc 100644
+--- a/kernel/locking/mutex-debug.c
++++ b/kernel/locking/mutex-debug.c
+@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 	task->blocked_on = waiter;
+ }
+ 
+-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
++void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 			 struct task_struct *task)
+ {
+ 	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
+@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 	DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+ 	task->blocked_on = NULL;
+ 
+-	list_del_init(&waiter->list);
++	INIT_LIST_HEAD(&waiter->list);
+ 	waiter->task = NULL;
+ }
+ 
+diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
+index 1edd3f45a4ecb..53e631e1d76da 100644
+--- a/kernel/locking/mutex-debug.h
++++ b/kernel/locking/mutex-debug.h
+@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+ extern void debug_mutex_add_waiter(struct mutex *lock,
+ 				   struct mutex_waiter *waiter,
+ 				   struct task_struct *task);
+-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
++extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 				struct task_struct *task);
+ extern void debug_mutex_unlock(struct mutex *lock);
+ extern void debug_mutex_init(struct mutex *lock, const char *name,
+diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
+index 622ebdfcd083b..3899157c13b10 100644
+--- a/kernel/locking/mutex.c
++++ b/kernel/locking/mutex.c
+@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
+  * Add @waiter to a given location in the lock wait_list and set the
+  * FLAG_WAITERS flag if it's the first waiter.
+  */
+-static void __sched
++static void
+ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 		   struct list_head *list)
+ {
+@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
+ }
+ 
++static void
++__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
++{
++	list_del(&waiter->list);
++	if (likely(list_empty(&lock->wait_list)))
++		__mutex_clear_flag(lock, MUTEX_FLAGS);
++
++	debug_mutex_remove_waiter(lock, waiter, current);
++}
++
+ /*
+  * Give up ownership to a specific task, when @task = NULL, this is equivalent
+  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
+@@ -1061,9 +1071,7 @@ acquired:
+ 			__ww_mutex_check_waiters(lock, ww_ctx);
+ 	}
+ 
+-	mutex_remove_waiter(lock, &waiter, current);
+-	if (likely(list_empty(&lock->wait_list)))
+-		__mutex_clear_flag(lock, MUTEX_FLAGS);
++	__mutex_remove_waiter(lock, &waiter);
+ 
+ 	debug_mutex_free_waiter(&waiter);
+ 
+@@ -1080,7 +1088,7 @@ skip_wait:
+ 
+ err:
+ 	__set_current_state(TASK_RUNNING);
+-	mutex_remove_waiter(lock, &waiter, current);
++	__mutex_remove_waiter(lock, &waiter);
+ err_early_kill:
+ 	spin_unlock(&lock->wait_lock);
+ 	debug_mutex_free_waiter(&waiter);
+diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
+index 1c2287d3fa719..f0c710b1d1927 100644
+--- a/kernel/locking/mutex.h
++++ b/kernel/locking/mutex.h
+@@ -10,12 +10,10 @@
+  * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
+  */
+ 
+-#define mutex_remove_waiter(lock, waiter, task) \
+-		__list_del((waiter)->list.prev, (waiter)->list.next)
+-
+ #define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
+ #define debug_mutex_free_waiter(waiter)			do { } while (0)
+ #define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
++#define debug_mutex_remove_waiter(lock, waiter, ti)     do { } while (0)
+ #define debug_mutex_unlock(lock)			do { } while (0)
+ #define debug_mutex_init(lock, name, key)		do { } while (0)
+ 
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 61db50f7ca866..5f50fdd1d855e 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -169,6 +169,21 @@ void __ptrace_unlink(struct task_struct *child)
+ 	spin_unlock(&child->sighand->siglock);
+ }
+ 
++static bool looks_like_a_spurious_pid(struct task_struct *task)
++{
++	if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
++		return false;
++
++	if (task_pid_vnr(task) == task->ptrace_message)
++		return false;
++	/*
++	 * The tracee changed its pid but the PTRACE_EVENT_EXEC event
++	 * was not wait()'ed, most probably the debugger targets the
++	 * old leader which was destroyed in de_thread().
++	 */
++	return true;
++}
++
+ /* Ensure that nothing can wake it up, even SIGKILL */
+ static bool ptrace_freeze_traced(struct task_struct *task)
+ {
+@@ -179,7 +194,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
+ 		return ret;
+ 
+ 	spin_lock_irq(&task->sighand->siglock);
+-	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
++	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
++	    !__fatal_signal_pending(task)) {
+ 		task->state = __TASK_TRACED;
+ 		ret = true;
+ 	}
+diff --git a/mm/gup.c b/mm/gup.c
+index 333f5dfd89423..4164a70160e31 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1535,10 +1535,6 @@ struct page *get_dump_page(unsigned long addr)
+ 				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
+ 	if (locked)
+ 		mmap_read_unlock(mm);
+-
+-	if (ret == 1 && is_page_poisoned(page))
+-		return NULL;
+-
+ 	return (ret == 1) ? page : NULL;
+ }
+ #endif /* CONFIG_ELF_CORE */
+diff --git a/mm/internal.h b/mm/internal.h
+index cb3c5e0a7799f..1432feec62df0 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -97,26 +97,6 @@ static inline void set_page_refcounted(struct page *page)
+ 	set_page_count(page, 1);
+ }
+ 
+-/*
+- * When kernel touch the user page, the user page may be have been marked
+- * poison but still mapped in user space, if without this page, the kernel
+- * can guarantee the data integrity and operation success, the kernel is
+- * better to check the posion status and avoid touching it, be good not to
+- * panic, coredump for process fatal signal is a sample case matching this
+- * scenario. Or if kernel can't guarantee the data integrity, it's better
+- * not to call this function, let kernel touch the poison page and get to
+- * panic.
+- */
+-static inline bool is_page_poisoned(struct page *page)
+-{
+-	if (PageHWPoison(page))
+-		return true;
+-	else if (PageHuge(page) && PageHWPoison(compound_head(page)))
+-		return true;
+-
+-	return false;
+-}
+-
+ extern unsigned long highest_memmap_pfn;
+ 
+ /*
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 9a3d451402d7b..28b2314f1cf12 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -362,38 +362,38 @@ out:
+ 		 * If a reservation for the page existed in the reservation
+ 		 * map of a private mapping, the map was modified to indicate
+ 		 * the reservation was consumed when the page was allocated.
+-		 * We clear the PagePrivate flag now so that the global
++		 * We clear the HPageRestoreReserve flag now so that the global
+ 		 * reserve count will not be incremented in free_huge_page.
+ 		 * The reservation map will still indicate the reservation
+ 		 * was consumed and possibly prevent later page allocation.
+ 		 * This is better than leaking a global reservation.  If no
+-		 * reservation existed, it is still safe to clear PagePrivate
+-		 * as no adjustments to reservation counts were made during
+-		 * allocation.
++		 * reservation existed, it is still safe to clear
++		 * HPageRestoreReserve as no adjustments to reservation counts
++		 * were made during allocation.
+ 		 *
+ 		 * The reservation map for shared mappings indicates which
+ 		 * pages have reservations.  When a huge page is allocated
+ 		 * for an address with a reservation, no change is made to
+-		 * the reserve map.  In this case PagePrivate will be set
+-		 * to indicate that the global reservation count should be
++		 * the reserve map.  In this case HPageRestoreReserve will be
++		 * set to indicate that the global reservation count should be
+ 		 * incremented when the page is freed.  This is the desired
+ 		 * behavior.  However, when a huge page is allocated for an
+ 		 * address without a reservation a reservation entry is added
+-		 * to the reservation map, and PagePrivate will not be set.
+-		 * When the page is freed, the global reserve count will NOT
+-		 * be incremented and it will appear as though we have leaked
+-		 * reserved page.  In this case, set PagePrivate so that the
+-		 * global reserve count will be incremented to match the
+-		 * reservation map entry which was created.
++		 * to the reservation map, and HPageRestoreReserve will not be
++		 * set. When the page is freed, the global reserve count will
++		 * NOT be incremented and it will appear as though we have
++		 * leaked a reserved page.  In this case, set HPageRestoreReserve
++		 * so that the global reserve count will be incremented to
++		 * match the reservation map entry which was created.
+ 		 *
+ 		 * Note that vm_alloc_shared is based on the flags of the vma
+ 		 * for which the page was originally allocated.  dst_vma could
+ 		 * be different or NULL on error.
+ 		 */
+ 		if (vm_alloc_shared)
+-			SetPagePrivate(page);
++			SetHPageRestoreReserve(page);
+ 		else
+-			ClearPagePrivate(page);
++			ClearHPageRestoreReserve(page);
+ 		put_page(page);
+ 	}
+ 	BUG_ON(copied < 0);
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index b0c1ee110eff9..e03cc284161c4 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -2732,6 +2732,15 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	if (skb->len < sizeof(*key))
+ 		return SMP_INVALID_PARAMS;
+ 
++	/* Check if remote and local public keys are the same and debug key is
++	 * not in use.
++	 */
++	if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) &&
++	    !crypto_memneq(key, smp->local_pk, 64)) {
++		bt_dev_err(hdev, "Remote and local public keys are identical");
++		return SMP_UNSPECIFIED;
++	}
++
+ 	memcpy(smp->remote_pk, key, 64);
+ 
+ 	if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) {
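
The check added above aborts pairing when the remote side simply reflects the local ECDH public key back, a hallmark of a reflection-style attack on SMP pairing. crypto_memneq() returns non-zero when the buffers differ and runs in constant time, so the negated call is a timing-safe "buffers are equal" test. A sketch of the shape of the check, with reject_reflected_key() as a made-up wrapper:

#include <crypto/algapi.h>	/* crypto_memneq() */

static int reject_reflected_key(const u8 remote_pk[64],
				const u8 local_pk[64])
{
	/* Equal buffers mean the peer echoed our own public key. */
	if (!crypto_memneq(remote_pk, local_pk, 64))
		return -EINVAL;
	return 0;
}
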
+diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
+index 25778765cbfe9..9897bd26a4388 100644
+--- a/sound/firewire/Kconfig
++++ b/sound/firewire/Kconfig
+@@ -38,7 +38,7 @@ config SND_OXFW
+ 	   * Mackie(Loud) Onyx 1640i (former model)
+ 	   * Mackie(Loud) Onyx Satellite
+ 	   * Mackie(Loud) Tapco Link.Firewire
+-	   * Mackie(Loud) d.2 pro/d.4 pro
++	   * Mackie(Loud) d.4 pro
+ 	   * Mackie(Loud) U.420/U.420d
+ 	   * TASCAM FireOne
+ 	   * Stanton Controllers & Systems 1 Deck/Mixer
+@@ -84,7 +84,7 @@ config SND_BEBOB
+ 	  * PreSonus FIREBOX/FIREPOD/FP10/Inspire1394
+ 	  * BridgeCo RDAudio1/Audio5
+ 	  * Mackie Onyx 1220/1620/1640 (FireWire I/O Card)
+-	  * Mackie d.2 (FireWire Option)
++	  * Mackie d.2 (FireWire Option) and d.2 Pro
+ 	  * Stanton FinalScratch 2 (ScratchAmp)
+ 	  * Tascam IF-FW/DM
+ 	  * Behringer XENIX UFX 1204/1604
+diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
+index 26e7cb555d3c5..aa53c13b89d34 100644
+--- a/sound/firewire/amdtp-stream-trace.h
++++ b/sound/firewire/amdtp-stream-trace.h
+@@ -14,8 +14,8 @@
+ #include <linux/tracepoint.h>
+ 
+ TRACE_EVENT(amdtp_packet,
+-	TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int index),
+-	TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, index),
++	TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int packet_index, unsigned int index),
++	TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, packet_index, index),
+ 	TP_STRUCT__entry(
+ 		__field(unsigned int, second)
+ 		__field(unsigned int, cycle)
+@@ -48,7 +48,7 @@ TRACE_EVENT(amdtp_packet,
+ 		__entry->payload_quadlets = payload_length / sizeof(__be32);
+ 		__entry->data_blocks = data_blocks;
+ 		__entry->data_block_counter = data_block_counter,
+-		__entry->packet_index = s->packet_index;
++		__entry->packet_index = packet_index;
+ 		__entry->irq = !!in_interrupt();
+ 		__entry->index = index;
+ 	),
+diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
+index 4e2f2bb7879fb..e0faa6601966c 100644
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -526,7 +526,7 @@ static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
+ 	}
+ 
+ 	trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
+-			   data_block_counter, index);
++			   data_block_counter, s->packet_index, index);
+ }
+ 
+ static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
+@@ -630,21 +630,27 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
+ 			       unsigned int *payload_length,
+ 			       unsigned int *data_blocks,
+ 			       unsigned int *data_block_counter,
+-			       unsigned int *syt, unsigned int index)
++			       unsigned int *syt, unsigned int packet_index, unsigned int index)
+ {
+ 	const __be32 *cip_header;
++	unsigned int cip_header_size;
+ 	int err;
+ 
+ 	*payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
+-	if (*payload_length > s->ctx_data.tx.ctx_header_size +
+-					s->ctx_data.tx.max_ctx_payload_length) {
++
++	if (!(s->flags & CIP_NO_HEADER))
++		cip_header_size = 8;
++	else
++		cip_header_size = 0;
++
++	if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
+ 		dev_err(&s->unit->device,
+ 			"Detect jumbo payload: %04x %04x\n",
+-			*payload_length, s->ctx_data.tx.max_ctx_payload_length);
++			*payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
+ 		return -EIO;
+ 	}
+ 
+-	if (!(s->flags & CIP_NO_HEADER)) {
++	if (cip_header_size > 0) {
+ 		cip_header = ctx_header + 2;
+ 		err = check_cip_header(s, cip_header, *payload_length,
+ 				       data_blocks, data_block_counter, syt);
+@@ -662,7 +668,7 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
+ 	}
+ 
+ 	trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
+-			   *data_block_counter, index);
++			   *data_block_counter, packet_index, index);
+ 
+ 	return err;
+ }
+@@ -701,12 +707,13 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
+ 				     unsigned int packets)
+ {
+ 	unsigned int dbc = s->data_block_counter;
++	unsigned int packet_index = s->packet_index;
++	unsigned int queue_size = s->queue_size;
+ 	int i;
+ 	int err;
+ 
+ 	for (i = 0; i < packets; ++i) {
+ 		struct pkt_desc *desc = descs + i;
+-		unsigned int index = (s->packet_index + i) % s->queue_size;
+ 		unsigned int cycle;
+ 		unsigned int payload_length;
+ 		unsigned int data_blocks;
+@@ -715,7 +722,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
+ 		cycle = compute_cycle_count(ctx_header[1]);
+ 
+ 		err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
+-					  &data_blocks, &dbc, &syt, i);
++					  &data_blocks, &dbc, &syt, packet_index, i);
+ 		if (err < 0)
+ 			return err;
+ 
+@@ -723,13 +730,15 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
+ 		desc->syt = syt;
+ 		desc->data_blocks = data_blocks;
+ 		desc->data_block_counter = dbc;
+-		desc->ctx_payload = s->buffer.packets[index].buffer;
++		desc->ctx_payload = s->buffer.packets[packet_index].buffer;
+ 
+ 		if (!(s->flags & CIP_DBC_IS_END_EVENT))
+ 			dbc = (dbc + desc->data_blocks) & 0xff;
+ 
+ 		ctx_header +=
+ 			s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
++
++		packet_index = (packet_index + 1) % queue_size;
+ 	}
+ 
+ 	s->data_block_counter = dbc;
+@@ -1065,23 +1074,22 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
+ 		s->data_block_counter = 0;
+ 	}
+ 
+-	/* initialize packet buffer */
++	// initialize packet buffer.
++	max_ctx_payload_size = amdtp_stream_get_max_payload(s);
+ 	if (s->direction == AMDTP_IN_STREAM) {
+ 		dir = DMA_FROM_DEVICE;
+ 		type = FW_ISO_CONTEXT_RECEIVE;
+-		if (!(s->flags & CIP_NO_HEADER))
++		if (!(s->flags & CIP_NO_HEADER)) {
++			max_ctx_payload_size -= 8;
+ 			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
+-		else
++		} else {
+ 			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
+-
+-		max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
+-				       ctx_header_size;
++		}
+ 	} else {
+ 		dir = DMA_TO_DEVICE;
+ 		type = FW_ISO_CONTEXT_TRANSMIT;
+ 		ctx_header_size = 0;	// No effect for IT context.
+ 
+-		max_ctx_payload_size = amdtp_stream_get_max_payload(s);
+ 		if (!(s->flags & CIP_NO_HEADER))
+ 			max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
+ 	}
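
Most of the amdtp-stream changes above thread the receive ring position through as an explicit parameter: generate_device_pkt_descs() now keeps one local cursor and advances it per packet instead of recomputing a modulo offset from s->packet_index for every entry. The walk reduces to roughly this shape (walk_ring(), process() and the parameter names are stand-ins):

struct pkt_desc;			/* stand-in element type */
void process(struct pkt_desc *p);	/* stand-in per-packet consumer */

static void walk_ring(struct pkt_desc *ring, unsigned int queue_size,
		      unsigned int start, unsigned int packets)
{
	unsigned int index = start;
	unsigned int i;

	for (i = 0; i < packets; ++i) {
		process(&ring[index]);
		/* wrap the cursor at the end of the ring */
		index = (index + 1) % queue_size;
	}
}
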
+diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
+index 2c8e3392a4903..daeecfa8b9aac 100644
+--- a/sound/firewire/bebob/bebob.c
++++ b/sound/firewire/bebob/bebob.c
+@@ -387,7 +387,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
+ 	SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal),
+ 	/* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */
+ 	SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal),
+-	/* Mackie, d.2 (Firewire Option) */
++	// Mackie, d.2 (Firewire option card) and d.2 Pro (the card is built-in).
+ 	SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal),
+ 	/* Stanton, ScratchAmp */
+ 	SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal),
+diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
+index 0916864511d50..27c13b9cc9efd 100644
+--- a/sound/firewire/dice/dice-alesis.c
++++ b/sound/firewire/dice/dice-alesis.c
+@@ -16,7 +16,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
+ static const unsigned int
+ alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
+ 	{10, 10, 4},	/* Tx0 = Analog + S/PDIF. */
+-	{16, 8, 0},	/* Tx1 = ADAT1 + ADAT2. */
++	{16, 4, 0},	/* Tx1 = ADAT1 + ADAT2 (available at low rate). */
+ };
+ 
+ int snd_dice_detect_alesis_formats(struct snd_dice *dice)
+diff --git a/sound/firewire/dice/dice-tcelectronic.c b/sound/firewire/dice/dice-tcelectronic.c
+index a8875d24ba2aa..43a3bcb15b3d1 100644
+--- a/sound/firewire/dice/dice-tcelectronic.c
++++ b/sound/firewire/dice/dice-tcelectronic.c
+@@ -38,8 +38,8 @@ static const struct dice_tc_spec konnekt_24d = {
+ };
+ 
+ static const struct dice_tc_spec konnekt_live = {
+-	.tx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
+-	.rx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
++	.tx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
++	.rx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
+ 	.has_midi = true,
+ };
+ 
+diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
+index 1f1e3236efb8e..9eea25c46dc7e 100644
+--- a/sound/firewire/oxfw/oxfw.c
++++ b/sound/firewire/oxfw/oxfw.c
+@@ -355,7 +355,6 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
+ 	 *  Onyx-i series (former models):	0x081216
+ 	 *  Mackie Onyx Satellite:		0x00200f
+ 	 *  Tapco LINK.firewire 4x6:		0x000460
+-	 *  d.2 pro:				Unknown
+ 	 *  d.4 pro:				Unknown
+ 	 *  U.420:				Unknown
+ 	 *  U.420d:				Unknown
+diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
+index 6c9d534ce8b61..95290ffe5c6e7 100644
+--- a/sound/isa/sb/sb8.c
++++ b/sound/isa/sb/sb8.c
+@@ -95,10 +95,6 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
+ 
+ 	/* block the 0x388 port to avoid PnP conflicts */
+ 	acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
+-	if (!acard->fm_res) {
+-		err = -EBUSY;
+-		goto _err;
+-	}
+ 
+ 	if (port[dev] != SNDRV_AUTO_PORT) {
+ 		if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1fe70f2fe4fe8..43a63db4ab6ad 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -395,7 +395,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 	case 0x10ec0282:
+ 	case 0x10ec0283:
+ 	case 0x10ec0286:
+-	case 0x10ec0287:
+ 	case 0x10ec0288:
+ 	case 0x10ec0285:
+ 	case 0x10ec0298:
+@@ -406,6 +405,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 	case 0x10ec0275:
+ 		alc_update_coef_idx(codec, 0xe, 0, 1<<0);
+ 		break;
++	case 0x10ec0287:
++		alc_update_coef_idx(codec, 0x10, 1<<9, 0);
++		alc_write_coef_idx(codec, 0x8, 0x4ab7);
++		break;
+ 	case 0x10ec0293:
+ 		alc_update_coef_idx(codec, 0xa, 1<<13, 0);
+ 		break;
+@@ -5717,6 +5720,18 @@ static void alc_fixup_tpt470_dacs(struct hda_codec *codec,
+ 		spec->gen.preferred_dacs = preferred_pairs;
+ }
+ 
++static void alc295_fixup_asus_dacs(struct hda_codec *codec,
++				   const struct hda_fixup *fix, int action)
++{
++	static const hda_nid_t preferred_pairs[] = {
++		0x17, 0x02, 0x21, 0x03, 0
++	};
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE)
++		spec->gen.preferred_dacs = preferred_pairs;
++}
++
+ static void alc_shutup_dell_xps13(struct hda_codec *codec)
+ {
+ 	struct alc_spec *spec = codec->spec;
+@@ -6232,6 +6247,35 @@ static void alc294_fixup_gx502_hp(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc294_gu502_toggle_output(struct hda_codec *codec,
++				       struct hda_jack_callback *cb)
++{
++	/* Windows sets 0x10 to 0x8420 for Node 0x20 which is
++	 * responsible for changes between speakers and headphones
++	 */
++	if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT)
++		alc_write_coef_idx(codec, 0x10, 0x8420);
++	else
++		alc_write_coef_idx(codec, 0x10, 0x0a20);
++}
++
++static void alc294_fixup_gu502_hp(struct hda_codec *codec,
++				  const struct hda_fixup *fix, int action)
++{
++	if (!is_jack_detectable(codec, 0x21))
++		return;
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		snd_hda_jack_detect_enable_callback(codec, 0x21,
++				alc294_gu502_toggle_output);
++		break;
++	case HDA_FIXUP_ACT_INIT:
++		alc294_gu502_toggle_output(codec, NULL);
++		break;
++	}
++}
++
+ static void  alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
+ 			      const struct hda_fixup *fix, int action)
+ {
+@@ -6449,6 +6493,9 @@ enum {
+ 	ALC294_FIXUP_ASUS_GX502_HP,
+ 	ALC294_FIXUP_ASUS_GX502_PINS,
+ 	ALC294_FIXUP_ASUS_GX502_VERBS,
++	ALC294_FIXUP_ASUS_GU502_HP,
++	ALC294_FIXUP_ASUS_GU502_PINS,
++	ALC294_FIXUP_ASUS_GU502_VERBS,
+ 	ALC285_FIXUP_HP_GPIO_LED,
+ 	ALC285_FIXUP_HP_MUTE_LED,
+ 	ALC236_FIXUP_HP_GPIO_LED,
+@@ -6485,6 +6532,9 @@ enum {
+ 	ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
+ 	ALC256_FIXUP_ACER_HEADSET_MIC,
+ 	ALC285_FIXUP_IDEAPAD_S740_COEF,
++	ALC295_FIXUP_ASUS_DACS,
++	ALC295_FIXUP_HP_OMEN,
++	ALC285_FIXUP_HP_SPECTRE_X360,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7687,6 +7737,35 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc294_fixup_gx502_hp,
+ 	},
++	[ALC294_FIXUP_ASUS_GU502_PINS] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x01a11050 }, /* rear HP mic */
++			{ 0x1a, 0x01a11830 }, /* rear external mic */
++			{ 0x21, 0x012110f0 }, /* rear HP out */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC294_FIXUP_ASUS_GU502_VERBS
++	},
++	[ALC294_FIXUP_ASUS_GU502_VERBS] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			/* set 0x15 to HP-OUT ctrl */
++			{ 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
++			/* unmute the 0x15 amp */
++			{ 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
++			/* set 0x1b to HP-OUT */
++			{ 0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC294_FIXUP_ASUS_GU502_HP
++	},
++	[ALC294_FIXUP_ASUS_GU502_HP] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc294_fixup_gu502_hp,
++	},
+ 	[ALC294_FIXUP_ASUS_COEF_1B] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -7983,6 +8062,39 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ 	},
++	[ALC295_FIXUP_ASUS_DACS] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc295_fixup_asus_dacs,
++	},
++	[ALC295_FIXUP_HP_OMEN] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x12, 0xb7a60130 },
++			{ 0x13, 0x40000000 },
++			{ 0x14, 0x411111f0 },
++			{ 0x16, 0x411111f0 },
++			{ 0x17, 0x90170110 },
++			{ 0x18, 0x411111f0 },
++			{ 0x19, 0x02a11030 },
++			{ 0x1a, 0x411111f0 },
++			{ 0x1b, 0x04a19030 },
++			{ 0x1d, 0x40600001 },
++			{ 0x1e, 0x411111f0 },
++			{ 0x21, 0x03211020 },
++			{}
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
++	},
++	[ALC285_FIXUP_HP_SPECTRE_X360] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x14, 0x90170110 }, /* enable top speaker */
++			{}
++		},
++		.chained = true,
++		.chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -8141,7 +8253,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++	SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++	SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
+@@ -8181,6 +8295,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
++	SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
+ 	SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+@@ -8198,6 +8313,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
++	SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+@@ -8254,12 +8370,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x50f2, "Clevo NH50E[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x50f5, "Clevo NH55EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x50f6, "Clevo NH55DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -8277,9 +8400,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x9600, "Clevo N960K[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL5XNU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xc018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
+ 	SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+@@ -8544,6 +8675,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
+ 	{.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
+ 	{.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
++	{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
++	{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index 35903d1a1cbd2..5b124c4ad5725 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -331,6 +331,7 @@ struct ichdev {
+ 	unsigned int ali_slot;			/* ALI DMA slot */
+ 	struct ac97_pcm *pcm;
+ 	int pcm_open_flag;
++	unsigned int prepared:1;
+ 	unsigned int suspended: 1;
+ };
+ 
+@@ -691,6 +692,9 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
+ 	int status, civ, i, step;
+ 	int ack = 0;
+ 
++	if (!ichdev->prepared || ichdev->suspended)
++		return;
++
+ 	spin_lock_irqsave(&chip->reg_lock, flags);
+ 	status = igetbyte(chip, port + ichdev->roff_sr);
+ 	civ = igetbyte(chip, port + ICH_REG_OFF_CIV);
+@@ -881,6 +885,7 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
+ 	if (ichdev->pcm_open_flag) {
+ 		snd_ac97_pcm_close(ichdev->pcm);
+ 		ichdev->pcm_open_flag = 0;
++		ichdev->prepared = 0;
+ 	}
+ 	err = snd_ac97_pcm_open(ichdev->pcm, params_rate(hw_params),
+ 				params_channels(hw_params),
+@@ -902,6 +907,7 @@ static int snd_intel8x0_hw_free(struct snd_pcm_substream *substream)
+ 	if (ichdev->pcm_open_flag) {
+ 		snd_ac97_pcm_close(ichdev->pcm);
+ 		ichdev->pcm_open_flag = 0;
++		ichdev->prepared = 0;
+ 	}
+ 	return 0;
+ }
+@@ -976,6 +982,7 @@ static int snd_intel8x0_pcm_prepare(struct snd_pcm_substream *substream)
+ 			ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
+ 	}
+ 	snd_intel8x0_setup_periods(chip, ichdev);
++	ichdev->prepared = 1;
+ 	return 0;
+ }
+ 
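
The intel8x0 change is a lifecycle guard: the interrupt handler's update path can run at any time on the shared IRQ, including after hw_free or during suspend when the buffer descriptor list is no longer valid, so it now bails out unless the stream has been prepared. The pattern in isolation (names are illustrative):

struct stream_state {
	unsigned int prepared:1;	/* set in prepare, cleared in hw_free */
	unsigned int suspended:1;	/* set across system suspend */
};

static void stream_irq_update(struct stream_state *s)
{
	/*
	 * An IRQ arriving between hw_free/suspend and the next prepare
	 * must not touch ring state that is stale or being torn down.
	 */
	if (!s->prepared || s->suspended)
		return;

	/* safe to read hardware position registers from here on */
}
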
+diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
+index a030dd65eb280..9602929b7de90 100644
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -699,6 +699,10 @@ static int line6_init_cap_control(struct usb_line6 *line6)
+ 		line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
+ 		if (!line6->buffer_message)
+ 			return -ENOMEM;
++
++		ret = line6_init_midi(line6);
++		if (ret < 0)
++			return ret;
+ 	} else {
+ 		ret = line6_hwdep_init(line6);
+ 		if (ret < 0)
+diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
+index cd44cb5f1310c..16e644330c4d6 100644
+--- a/sound/usb/line6/pod.c
++++ b/sound/usb/line6/pod.c
+@@ -376,11 +376,6 @@ static int pod_init(struct usb_line6 *line6,
+ 	if (err < 0)
+ 		return err;
+ 
+-	/* initialize MIDI subsystem: */
+-	err = line6_init_midi(line6);
+-	if (err < 0)
+-		return err;
+-
+ 	/* initialize PCM subsystem: */
+ 	err = line6_init_pcm(line6, &pod_pcm_properties);
+ 	if (err < 0)
+diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
+index ed158f04de80f..c2245aa93b08f 100644
+--- a/sound/usb/line6/variax.c
++++ b/sound/usb/line6/variax.c
+@@ -159,7 +159,6 @@ static int variax_init(struct usb_line6 *line6,
+ 		       const struct usb_device_id *id)
+ {
+ 	struct usb_line6_variax *variax = line6_to_variax(line6);
+-	int err;
+ 
+ 	line6->process_message = line6_variax_process_message;
+ 	line6->disconnect = line6_variax_disconnect;
+@@ -172,11 +171,6 @@ static int variax_init(struct usb_line6 *line6,
+ 	if (variax->buffer_activate == NULL)
+ 		return -ENOMEM;
+ 
+-	/* initialize MIDI subsystem: */
+-	err = line6_init_midi(&variax->line6);
+-	if (err < 0)
+-		return err;
+-
+ 	/* initiate startup procedure: */
+ 	schedule_delayed_work(&line6->startup_work,
+ 			      msecs_to_jiffies(VARIAX_STARTUP_DELAY1));
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index cd46ca7cd28de..fa91290ad89db 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1889,8 +1889,12 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
+ 		ms_ep = find_usb_ms_endpoint_descriptor(hostep);
+ 		if (!ms_ep)
+ 			continue;
++		if (ms_ep->bLength <= sizeof(*ms_ep))
++			continue;
+ 		if (ms_ep->bNumEmbMIDIJack > 0x10)
+ 			continue;
++		if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack)
++			continue;
+ 		if (usb_endpoint_dir_out(ep)) {
+ 			if (endpoints[epidx].out_ep) {
+ 				if (++epidx >= MIDI_MAX_ENDPOINTS) {
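
The two added length checks close out-of-bounds reads on a malformed class-specific MIDIStreaming endpoint descriptor, whose fixed header is followed by bNumEmbMIDIJack one-byte jack IDs. bLength must leave room beyond the fixed header for at least one jack ID, and must also cover the whole array the count claims. Consolidated as a hypothetical validator (ms_ep_sane() is a made-up name; the struct is the kernel's usb_ms_endpoint_descriptor from include/linux/usb/midi.h):

#include <linux/usb/midi.h>

static bool ms_ep_sane(const struct usb_ms_endpoint_descriptor *ms_ep)
{
	/* no room past the fixed fields for any jack IDs */
	if (ms_ep->bLength <= sizeof(*ms_ep))
		return false;
	/* implausible count, as in the pre-existing check */
	if (ms_ep->bNumEmbMIDIJack > 0x10)
		return false;
	/* the claimed jack ID array would run past bLength */
	if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack)
		return false;
	return true;
}
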
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 7c6e83eee71dc..8b8bee3c3dd63 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1511,6 +1511,10 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+ 	case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */
+ 		pioneer_djm_set_format_quirk(subs, 0x0082);
+ 		break;
++	case USB_ID(0x08e4, 0x017f): /* Pioneer DJM-750 */
++	case USB_ID(0x08e4, 0x0163): /* Pioneer DJM-850 */
++		pioneer_djm_set_format_quirk(subs, 0x0086);
++		break;
+ 	}
+ }
+ 
+diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
+index cf69b2fcce59e..dd61118df66ed 100644
+--- a/tools/testing/selftests/exec/Makefile
++++ b/tools/testing/selftests/exec/Makefile
+@@ -28,8 +28,8 @@ $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
+ 	cp $< $@
+ 	chmod -x $@
+ $(OUTPUT)/load_address_4096: load_address.c
+-	$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie $< -o $@
++	$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@
+ $(OUTPUT)/load_address_2097152: load_address.c
+-	$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie $< -o $@
++	$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@
+ $(OUTPUT)/load_address_16777216: load_address.c
+-	$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie $< -o $@
++	$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 98c3b647f54dc..e3d5c77a86121 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -1753,16 +1753,25 @@ TEST_F(TRACE_poke, getpid_runs_normally)
+ # define SYSCALL_RET_SET(_regs, _val)				\
+ 	do {							\
+ 		typeof(_val) _result = (_val);			\
+-		/*						\
+-		 * A syscall error is signaled by CR0 SO bit	\
+-		 * and the code is stored as a positive value.	\
+-		 */						\
+-		if (_result < 0) {				\
+-			SYSCALL_RET(_regs) = -_result;		\
+-			(_regs).ccr |= 0x10000000;		\
+-		} else {					\
++		if ((_regs.trap & 0xfff0) == 0x3000) {		\
++			/*					\
++			 * scv 0 system call uses -ve result	\
++			 * for error, so no need to adjust.	\
++			 */					\
+ 			SYSCALL_RET(_regs) = _result;		\
+-			(_regs).ccr &= ~0x10000000;		\
++		} else {					\
++			/*					\
++			 * A syscall error is signaled by the	\
++			 * CR0 SO bit and the code is stored as	\
++			 * a positive value.			\
++			 */					\
++			if (_result < 0) {			\
++				SYSCALL_RET(_regs) = -_result;	\
++				(_regs).ccr |= 0x10000000;	\
++			} else {				\
++				SYSCALL_RET(_regs) = _result;	\
++				(_regs).ccr &= ~0x10000000;	\
++			}					\
+ 		}						\
+ 	} while (0)
+ # define SYSCALL_RET_SET_ON_PTRACE_EXIT
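
The rewritten macro handles the fact that powerpc now reaches this code through two system-call entry points with different error conventions: the older "sc" entry flags failure via the CR0.SO bit (0x10000000 in ccr) with a positive code in gpr[3], while the "scv 0" entry returns an ordinary negative errno. Which entry fired is recorded in regs->trap, where the scv vectors occupy 0x3000. Read back as a sketch (syscall_ret_value() is illustrative; field names follow powerpc's struct pt_regs):

#include <asm/ptrace.h>

static long syscall_ret_value(const struct pt_regs *regs)
{
	if ((regs->trap & 0xfff0) == 0x3000)	/* scv entry */
		return regs->gpr[3];		/* already -errno on error */

	if (regs->ccr & 0x10000000)		/* sc entry, CR0.SO set */
		return -(long)regs->gpr[3];	/* positive code -> -errno */

	return regs->gpr[3];
}
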


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-05-24 17:26 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-05-24 17:26 UTC (permalink / raw
  To: gentoo-commits

commit:     80a620eb6fbcf00a18a04c8bbe7c44497971c5ff
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon May 24 17:25:12 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon May 24 17:25:12 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=80a620eb

Patch to enable link security restrictions by default.

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                          |  4 ++++
 ...nable-link-security-restrictions-by-default.patch | 20 ++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/0000_README b/0000_README
index e979ed2..528cc11 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
 
+Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
+From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+Desc:   Enable link security restrictions by default.
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1510_fs-enable-link-security-restrictions-by-default.patch b/1510_fs-enable-link-security-restrictions-by-default.patch
new file mode 100644
index 0000000..f0ed144
--- /dev/null
+++ b/1510_fs-enable-link-security-restrictions-by-default.patch
@@ -0,0 +1,20 @@
+From: Ben Hutchings <ben@decadent.org.uk>
+Subject: fs: Enable link security restrictions by default
+Date: Fri, 02 Nov 2012 05:32:06 +0000
+Bug-Debian: https://bugs.debian.org/609455
+Forwarded: not-needed
+This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
+('VFS: don't do protected {sym,hard}links by default').
+--- a/fs/namei.c	2018-09-28 07:56:07.770005006 -0400
++++ b/fs/namei.c	2018-09-28 07:56:43.370349204 -0400
+@@ -885,8 +885,8 @@ static inline void put_link(struct namei
+ 		path_put(&last->link);
+ }
+ 
+-int sysctl_protected_symlinks __read_mostly = 0;
+-int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_symlinks __read_mostly = 1;
++int sysctl_protected_hardlinks __read_mostly = 1;
+ int sysctl_protected_fifos __read_mostly;
+ int sysctl_protected_regular __read_mostly;
+ 


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-05-22 16:50 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-05-22 16:50 UTC (permalink / raw
  To: gentoo-commits

commit:     c0d0d030463194c0fc373cc47aa844c505eed41b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat May 22 16:50:34 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat May 22 16:50:34 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c0d0d030

Linux patch 5.12.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1005_linux-5.12.6.patch | 1722 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1726 insertions(+)

diff --git a/0000_README b/0000_README
index 055c634..e979ed2 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-5.12.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.5
 
+Patch:  1005_linux-5.12.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-5.12.6.patch b/1005_linux-5.12.6.patch
new file mode 100644
index 0000000..d7987ca
--- /dev/null
+++ b/1005_linux-5.12.6.patch
@@ -0,0 +1,1722 @@
+diff --git a/Makefile b/Makefile
+index f60212a0412c8..dd021135838b8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
+index be8050b0c3dfb..70993af22d80c 100644
+--- a/arch/arm/kernel/asm-offsets.c
++++ b/arch/arm/kernel/asm-offsets.c
+@@ -24,6 +24,7 @@
+ #include <asm/vdso_datapage.h>
+ #include <asm/hardware/cache-l2x0.h>
+ #include <linux/kbuild.h>
++#include <linux/arm-smccc.h>
+ #include "signal.h"
+ 
+ /*
+@@ -148,6 +149,8 @@ int main(void)
+   DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
+   DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
+ #endif
++  DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,	offsetof(struct arm_smccc_quirk, id));
++  DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,	offsetof(struct arm_smccc_quirk, state));
+   BLANK();
+   DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
+   DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
+diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
+index 00664c78facab..931df62a78312 100644
+--- a/arch/arm/kernel/smccc-call.S
++++ b/arch/arm/kernel/smccc-call.S
+@@ -3,7 +3,9 @@
+  * Copyright (c) 2015, Linaro Limited
+  */
+ #include <linux/linkage.h>
++#include <linux/arm-smccc.h>
+ 
++#include <asm/asm-offsets.h>
+ #include <asm/opcodes-sec.h>
+ #include <asm/opcodes-virt.h>
+ #include <asm/unwind.h>
+@@ -27,7 +29,14 @@ UNWIND(	.fnstart)
+ UNWIND(	.save	{r4-r7})
+ 	ldm	r12, {r4-r7}
+ 	\instr
+-	pop	{r4-r7}
++	ldr	r4, [sp, #36]
++	cmp	r4, #0
++	beq	1f			// No quirk structure
++	ldr     r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
++	cmp     r5, #ARM_SMCCC_QUIRK_QCOM_A6
++	bne	1f			// No quirk present
++	str	r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
++1:	pop	{r4-r7}
+ 	ldr	r12, [sp, #(4 * 4)]
+ 	stm	r12, {r0-r3}
+ 	bx	lr
+diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
+index 24bd20564be77..43f0a3ebf3909 100644
+--- a/arch/arm/kernel/suspend.c
++++ b/arch/arm/kernel/suspend.c
+@@ -1,4 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
++#include <linux/ftrace.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/mm_types.h>
+@@ -25,6 +26,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ 	if (!idmap_pgd)
+ 		return -EINVAL;
+ 
++	/*
++	 * Function graph tracer state gets inconsistent when the kernel
++	 * calls functions that never return (aka suspend finishers), hence
++	 * disable graph tracing during their execution.
++	 */
++	pause_graph_tracing();
++
+ 	/*
+ 	 * Provide a temporary page table with an identity mapping for
+ 	 * the MMU-enable code, required for resuming.  On successful
+@@ -32,6 +40,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ 	 * back to the correct page tables.
+ 	 */
+ 	ret = __cpu_suspend(arg, fn, __mpidr);
++
++	unpause_graph_tracing();
++
+ 	if (ret == 0) {
+ 		cpu_switch_mm(mm->pgd, mm);
+ 		local_flush_bp_all();
+@@ -45,7 +56,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ {
+ 	u32 __mpidr = cpu_logical_map(smp_processor_id());
+-	return __cpu_suspend(arg, fn, __mpidr);
++	int ret;
++
++	pause_graph_tracing();
++	ret = __cpu_suspend(arg, fn, __mpidr);
++	unpause_graph_tracing();
++
++	return ret;
+ }
+ #define	idmap_pgd	NULL
+ #endif
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index 845002cc2e571..04dad33800418 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -13,9 +13,19 @@
+ #endif
+ #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+ 
++/*
++ * Clang prior to 13 had "mcount" instead of "_mcount":
++ * https://reviews.llvm.org/D98881
++ */
++#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
++#define MCOUNT_NAME _mcount
++#else
++#define MCOUNT_NAME mcount
++#endif
++
+ #define ARCH_SUPPORTS_FTRACE_OPS 1
+ #ifndef __ASSEMBLY__
+-void _mcount(void);
++void MCOUNT_NAME(void);
+ static inline unsigned long ftrace_call_adjust(unsigned long addr)
+ {
+ 	return addr;
+@@ -36,7 +46,7 @@ struct dyn_arch_ftrace {
+  * both auipc and jalr at the same time.
+  */
+ 
+-#define MCOUNT_ADDR		((unsigned long)_mcount)
++#define MCOUNT_ADDR		((unsigned long)MCOUNT_NAME)
+ #define JALR_SIGN_MASK		(0x00000800)
+ #define JALR_OFFSET_MASK	(0x00000fff)
+ #define AUIPC_OFFSET_MASK	(0xfffff000)
+diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
+index 8a5593ff9ff3d..6d462681c9c02 100644
+--- a/arch/riscv/kernel/mcount.S
++++ b/arch/riscv/kernel/mcount.S
+@@ -47,8 +47,8 @@
+ 
+ ENTRY(ftrace_stub)
+ #ifdef CONFIG_DYNAMIC_FTRACE
+-       .global _mcount
+-       .set    _mcount, ftrace_stub
++       .global MCOUNT_NAME
++       .set    MCOUNT_NAME, ftrace_stub
+ #endif
+ 	ret
+ ENDPROC(ftrace_stub)
+@@ -78,7 +78,7 @@ ENDPROC(return_to_handler)
+ #endif
+ 
+ #ifndef CONFIG_DYNAMIC_FTRACE
+-ENTRY(_mcount)
++ENTRY(MCOUNT_NAME)
+ 	la	t4, ftrace_stub
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ 	la	t0, ftrace_graph_return
+@@ -124,6 +124,6 @@ do_trace:
+ 	jalr	t5
+ 	RESTORE_ABI_STATE
+ 	ret
+-ENDPROC(_mcount)
++ENDPROC(MCOUNT_NAME)
+ #endif
+-EXPORT_SYMBOL(_mcount)
++EXPORT_SYMBOL(MCOUNT_NAME)
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index 71a315e73cbe7..ca2b40dfd24b8 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -41,11 +41,10 @@ KASAN_SANITIZE := n
+ $(obj)/vdso.o: $(obj)/vdso.so
+ 
+ # link rule for the .so file, .lds has to be first
+-SYSCFLAGS_vdso.so.dbg = $(c_flags)
+ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+ 	$(call if_changed,vdsold)
+-SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+-	-Wl,--build-id=sha1 -Wl,--hash-style=both
++LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
++	--build-id=sha1 --hash-style=both --eh-frame-hdr
+ 
+ # We also create a special relocatable object that should mirror the symbol
+ # table and layout of the linked DSO. With ld --just-symbols we can then
+@@ -60,13 +59,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ 
+ # actual build commands
+ # The DSO images are built using a special linker script
+-# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
+ # Make sure only to export the intended __vdso_xxx symbol offsets.
+ quiet_cmd_vdsold = VDSOLD  $@
+-      cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
+-                           -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
+-                   $(CROSS_COMPILE)objcopy \
+-                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
++      cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \
++                   $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+                    rm $@.tmp
+ 
+ # Extracts symbol offsets from the VDSO, converting them into an assembly file
+diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
+index 315d368e63adc..1dfb2959c73b8 100644
+--- a/arch/um/Kconfig.debug
++++ b/arch/um/Kconfig.debug
+@@ -17,6 +17,7 @@ config GCOV
+ 	bool "Enable gcov support"
+ 	depends on DEBUG_INFO
+ 	depends on !KCOV
++	depends on !MODULES
+ 	help
+ 	  This option allows developers to retrieve coverage data from a UML
+ 	  session.
+diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
+index 5aa882011e041..e698e0c7dbdca 100644
+--- a/arch/um/kernel/Makefile
++++ b/arch/um/kernel/Makefile
+@@ -21,7 +21,6 @@ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
+ 
+ obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
+ obj-$(CONFIG_GPROF)	+= gprof_syms.o
+-obj-$(CONFIG_GCOV)	+= gmon_syms.o
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+ obj-$(CONFIG_STACKTRACE) += stacktrace.o
+ 
+diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
+index dacbfabf66d8e..2f2a8ce92f1ee 100644
+--- a/arch/um/kernel/dyn.lds.S
++++ b/arch/um/kernel/dyn.lds.S
+@@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH)
+ ENTRY(_start)
+ jiffies = jiffies_64;
+ 
++VERSION {
++  {
++    local: *;
++  };
++}
++
+ SECTIONS
+ {
+   PROVIDE (__executable_start = START);
+diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c
+deleted file mode 100644
+index 9361a8eb9bf1a..0000000000000
+--- a/arch/um/kernel/gmon_syms.c
++++ /dev/null
+@@ -1,16 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+- */
+-
+-#include <linux/module.h>
+-
+-extern void __bb_init_func(void *)  __attribute__((weak));
+-EXPORT_SYMBOL(__bb_init_func);
+-
+-extern void __gcov_init(void *)  __attribute__((weak));
+-EXPORT_SYMBOL(__gcov_init);
+-extern void __gcov_merge_add(void *, unsigned int)  __attribute__((weak));
+-EXPORT_SYMBOL(__gcov_merge_add);
+-extern void __gcov_exit(void)  __attribute__((weak));
+-EXPORT_SYMBOL(__gcov_exit);
+diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
+index 45d957d7004ca..7a8e2b123e29c 100644
+--- a/arch/um/kernel/uml.lds.S
++++ b/arch/um/kernel/uml.lds.S
+@@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH)
+ ENTRY(_start)
+ jiffies = jiffies_64;
+ 
++VERSION {
++  {
++    local: *;
++  };
++}
++
+ SECTIONS
+ {
+   /* This must contain the right address - not quite the default ELF one.*/
+diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
+index 75a0915b0d01d..40bbe56bde325 100644
+--- a/arch/x86/lib/msr-smp.c
++++ b/arch/x86/lib/msr-smp.c
+@@ -252,7 +252,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info)
+ 	rv->err = wrmsr_safe_regs(rv->regs);
+ }
+ 
+-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
++int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
+ {
+ 	int err;
+ 	struct msr_regs_info rv;
+@@ -265,7 +265,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+ }
+ EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
+ 
+-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
++int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
+ {
+ 	int err;
+ 	struct msr_regs_info rv;
+diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
+index c2546bf229fb3..08c28740dc4e5 100644
+--- a/drivers/bus/mhi/core/boot.c
++++ b/drivers/bus/mhi/core/boot.c
+@@ -389,7 +389,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
+ {
+ 	const struct firmware *firmware = NULL;
+-	struct image_info *image_info;
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	const char *fw_name;
+ 	void *buf;
+@@ -491,44 +490,42 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
+ fw_load_ee_pthru:
+ 	/* Transitioning into MHI RESET->READY state */
+ 	ret = mhi_ready_state_transition(mhi_cntrl);
+-
+-	if (!mhi_cntrl->fbc_download)
+-		return;
+-
+ 	if (ret) {
+ 		dev_err(dev, "MHI did not enter READY state\n");
+ 		goto error_ready_state;
+ 	}
+ 
+-	/* Wait for the SBL event */
+-	ret = wait_event_timeout(mhi_cntrl->state_event,
+-				 mhi_cntrl->ee == MHI_EE_SBL ||
+-				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+-				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
++	dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
++	return;
+ 
+-	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+-		dev_err(dev, "MHI did not enter SBL\n");
+-		goto error_ready_state;
++error_ready_state:
++	if (mhi_cntrl->fbc_download) {
++		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
++		mhi_cntrl->fbc_image = NULL;
+ 	}
+ 
+-	/* Start full firmware image download */
+-	image_info = mhi_cntrl->fbc_image;
++error_fw_load:
++	mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
++	wake_up_all(&mhi_cntrl->state_event);
++}
++
++int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
++{
++	struct image_info *image_info = mhi_cntrl->fbc_image;
++	struct device *dev = &mhi_cntrl->mhi_dev->dev;
++	int ret;
++
++	if (!image_info)
++		return -EIO;
++
+ 	ret = mhi_fw_load_bhie(mhi_cntrl,
+ 			       /* Vector table is the last entry */
+ 			       &image_info->mhi_buf[image_info->entries - 1]);
+ 	if (ret) {
+-		dev_err(dev, "MHI did not load image over BHIe, ret: %d\n",
+-			ret);
+-		goto error_fw_load;
++		dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
++		mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
++		wake_up_all(&mhi_cntrl->state_event);
+ 	}
+ 
+-	return;
+-
+-error_ready_state:
+-	mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+-	mhi_cntrl->fbc_image = NULL;
+-
+-error_fw_load:
+-	mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+-	wake_up_all(&mhi_cntrl->state_event);
++	return ret;
+ }
+diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
+index 6f80ec30c0cdc..6f37439e52472 100644
+--- a/drivers/bus/mhi/core/internal.h
++++ b/drivers/bus/mhi/core/internal.h
+@@ -619,6 +619,7 @@ int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
+ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
+ int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ 		 enum mhi_cmd_type cmd);
++int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
+ static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
+ {
+ 	return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
+diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
+index 277704af7eb6f..87d3b73bcaded 100644
+--- a/drivers/bus/mhi/core/pm.c
++++ b/drivers/bus/mhi/core/pm.c
+@@ -759,6 +759,8 @@ void mhi_pm_st_worker(struct work_struct *work)
+ 			 * either SBL or AMSS states
+ 			 */
+ 			mhi_create_devices(mhi_cntrl);
++			if (mhi_cntrl->fbc_download)
++				mhi_download_amss_image(mhi_cntrl);
+ 			break;
+ 		case DEV_ST_TRANSITION_MISSION_MODE:
+ 			mhi_pm_mission_mode_transition(mhi_cntrl);
+diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
+index 08d71dafa0015..58c8cc8fe0e11 100644
+--- a/drivers/dma/dw-edma/dw-edma-core.c
++++ b/drivers/dma/dw-edma/dw-edma-core.c
+@@ -937,22 +937,21 @@ int dw_edma_remove(struct dw_edma_chip *chip)
+ 	/* Power management */
+ 	pm_runtime_disable(dev);
+ 
++	/* Deregister eDMA device */
++	dma_async_device_unregister(&dw->wr_edma);
+ 	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
+ 				 vc.chan.device_node) {
+-		list_del(&chan->vc.chan.device_node);
+ 		tasklet_kill(&chan->vc.task);
++		list_del(&chan->vc.chan.device_node);
+ 	}
+ 
++	dma_async_device_unregister(&dw->rd_edma);
+ 	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
+ 				 vc.chan.device_node) {
+-		list_del(&chan->vc.chan.device_node);
+ 		tasklet_kill(&chan->vc.task);
++		list_del(&chan->vc.chan.device_node);
+ 	}
+ 
+-	/* Deregister eDMA device */
+-	dma_async_device_unregister(&dw->wr_edma);
+-	dma_async_device_unregister(&dw->rd_edma);
+-
+ 	/* Turn debugfs off */
+ 	dw_edma_v0_core_debugfs_off();
+ 
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 1aacd2a5a1fd5..174839f3772f2 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -1438,6 +1438,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ 			.no_edge_events_on_boot = true,
+ 		},
+ 	},
++	{
++		/*
++		 * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC, uses an
++		 * external embedded-controller connected via I2C + an ACPI GPIO
++		 * event handler on INT33FC:02 pin 12, causing spurious wakeups.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
++		},
++		.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++			.ignore_wake = "INT33FC:02@12",
++		},
++	},
+ 	{
+ 		/*
+ 		 * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 71e07ebc8f88a..b63f55ea8758a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -9344,6 +9344,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
+ }
+ #endif
+ 
++static int validate_overlay(struct drm_atomic_state *state)
++{
++	int i;
++	struct drm_plane *plane;
++	struct drm_plane_state *old_plane_state, *new_plane_state;
++	struct drm_plane_state *primary_state, *overlay_state = NULL;
++
++	/* Check if primary plane is contained inside overlay */
++	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
++		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
++			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
++				return 0;
++
++			overlay_state = new_plane_state;
++			continue;
++		}
++	}
++
++	/* check if we're making changes to the overlay plane */
++	if (!overlay_state)
++		return 0;
++
++	/* check if overlay plane is enabled */
++	if (!overlay_state->crtc)
++		return 0;
++
++	/* find the primary plane for the CRTC that the overlay is enabled on */
++	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
++	if (IS_ERR(primary_state))
++		return PTR_ERR(primary_state);
++
++	/* check if primary plane is enabled */
++	if (!primary_state->crtc)
++		return 0;
++
++	/* Perform the bounds check to ensure the overlay plane covers the primary */
++	if (primary_state->crtc_x < overlay_state->crtc_x ||
++	    primary_state->crtc_y < overlay_state->crtc_y ||
++	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
++	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
++		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ /**
+  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+  * @dev: The DRM device
+@@ -9518,6 +9565,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 			goto fail;
+ 	}
+ 
++	ret = validate_overlay(state);
++	if (ret)
++		goto fail;
++
+ 	/* Add new/modified planes */
+ 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ 		ret = dm_update_plane_state(dc, state, plane,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index 6e641f1513d80..fbff3df72e6c6 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -1110,7 +1110,6 @@ static int navi10_force_clk_levels(struct smu_context *smu,
+ 	case SMU_SOCCLK:
+ 	case SMU_MCLK:
+ 	case SMU_UCLK:
+-	case SMU_DCEFCLK:
+ 	case SMU_FCLK:
+ 		/* There is only 2 levels for fine grained DPM */
+ 		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+@@ -1130,6 +1129,10 @@ static int navi10_force_clk_levels(struct smu_context *smu,
+ 		if (ret)
+ 			return size;
+ 		break;
++	case SMU_DCEFCLK:
++		dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
++		break;
++
+ 	default:
+ 		break;
+ 	}
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index af73e1430af53..61438940c26e8 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -1127,7 +1127,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
+ 	case SMU_SOCCLK:
+ 	case SMU_MCLK:
+ 	case SMU_UCLK:
+-	case SMU_DCEFCLK:
+ 	case SMU_FCLK:
+ 		/* There is only 2 levels for fine grained DPM */
+ 		if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
+@@ -1147,6 +1146,9 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
+ 		if (ret)
+ 			goto forec_level_out;
+ 		break;
++	case SMU_DCEFCLK:
++		dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
++		break;
+ 	default:
+ 		break;
+ 	}
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 5a51036325647..97a785aa8839b 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -4488,7 +4488,18 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
+ 	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
+ 
+ 	for (;;) {
+-		u8 esi[DP_DPRX_ESI_LEN] = {};
++		/*
++		 * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
++		 * pass in "esi+10" to drm_dp_channel_eq_ok(), which
++		 * takes a 6-byte array. So we actually need 16 bytes
++		 * here.
++		 *
++		 * Somebody who knows what the limits actually are
++		 * should check this, but for now this is at least
++		 * harmless and avoids a valid compiler warning about
++		 * using more of the array than we have allocated.
++		 */
++		u8 esi[DP_DPRX_ESI_LEN+2] = {};
+ 		bool handled;
+ 		int retry;
+ 
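
For readers skimming the hunk above: the workaround is pure sizing arithmetic. A callee that reads a fixed 6-byte window starting at offset 10 needs a 16-byte buffer, even though the nominal ESI length is 14. A minimal userspace sketch of that arithmetic follows (names are invented; the real window reader is drm_dp_channel_eq_ok() in the DRM core):

#include <stdio.h>

#define ESI_LEN 14	/* stands in for DP_DPRX_ESI_LEN */

/* reads a fixed 6-byte window, like drm_dp_channel_eq_ok() */
static int window_ok(const unsigned char link_status[6])
{
	return link_status[0] | link_status[5];	/* touches all 6 bytes */
}

int main(void)
{
	unsigned char esi[ESI_LEN + 2] = { 0 };	/* 10 + 6 = 16 bytes */

	/* with only ESI_LEN bytes, esi + 10 would leave a 4-byte window
	 * and window_ok() would read 2 bytes past the end of the array */
	printf("%d\n", window_ok(esi + 10));
	return 0;
}
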
+diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
+index 5f7706febcb09..17540bdb1eaf7 100644
+--- a/drivers/input/touchscreen/elants_i2c.c
++++ b/drivers/input/touchscreen/elants_i2c.c
+@@ -38,6 +38,7 @@
+ #include <linux/of.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/regulator/consumer.h>
++#include <linux/uuid.h>
+ #include <asm/unaligned.h>
+ 
+ /* Device, Driver information */
+@@ -1334,6 +1335,40 @@ static void elants_i2c_power_off(void *_data)
+ 	}
+ }
+ 
++#ifdef CONFIG_ACPI
++static const struct acpi_device_id i2c_hid_ids[] = {
++	{"ACPI0C50", 0 },
++	{"PNP0C50", 0 },
++	{ },
++};
++
++static const guid_t i2c_hid_guid =
++	GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
++		  0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
++
++static bool elants_acpi_is_hid_device(struct device *dev)
++{
++	acpi_handle handle = ACPI_HANDLE(dev);
++	union acpi_object *obj;
++
++	if (acpi_match_device_ids(ACPI_COMPANION(dev), i2c_hid_ids))
++		return false;
++
++	obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL, ACPI_TYPE_INTEGER);
++	if (obj) {
++		ACPI_FREE(obj);
++		return true;
++	}
++
++	return false;
++}
++#else
++static bool elants_acpi_is_hid_device(struct device *dev)
++{
++	return false;
++}
++#endif
++
+ static int elants_i2c_probe(struct i2c_client *client,
+ 			    const struct i2c_device_id *id)
+ {
+@@ -1342,9 +1377,14 @@ static int elants_i2c_probe(struct i2c_client *client,
+ 	unsigned long irqflags;
+ 	int error;
+ 
++	/* Don't bind to i2c-hid compatible devices; these are handled by the i2c-hid driver. */
++	if (elants_acpi_is_hid_device(&client->dev)) {
++		dev_warn(&client->dev, "This device appears to be an I2C-HID device, not binding\n");
++		return -ENODEV;
++	}
++
+ 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+-		dev_err(&client->dev,
+-			"%s: i2c check functionality error\n", DEVICE_NAME);
++		dev_err(&client->dev, "I2C check functionality error\n");
+ 		return -ENXIO;
+ 	}
+ 
+diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
+index 8fa2f3b7cfd8b..e8b6c3137420b 100644
+--- a/drivers/input/touchscreen/silead.c
++++ b/drivers/input/touchscreen/silead.c
+@@ -20,6 +20,7 @@
+ #include <linux/input/mt.h>
+ #include <linux/input/touchscreen.h>
+ #include <linux/pm.h>
++#include <linux/pm_runtime.h>
+ #include <linux/irq.h>
+ #include <linux/regulator/consumer.h>
+ 
+@@ -335,10 +336,8 @@ static int silead_ts_get_id(struct i2c_client *client)
+ 
+ 	error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID,
+ 					      sizeof(chip_id), (u8 *)&chip_id);
+-	if (error < 0) {
+-		dev_err(&client->dev, "Chip ID read error %d\n", error);
++	if (error < 0)
+ 		return error;
+-	}
+ 
+ 	data->chip_id = le32_to_cpu(chip_id);
+ 	dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id);
+@@ -351,12 +350,49 @@ static int silead_ts_setup(struct i2c_client *client)
+ 	int error;
+ 	u32 status;
+ 
++	/*
++	 * Some buggy BIOS-es bring up the chip in a stuck state where it
++	 * blocks the I2C bus. The following steps are necessary to
++	 * unstick the chip / bus:
++	 * 1. Turn off the Silead chip.
++	 * 2. Try to do an I2C transfer with the chip; this will fail, in
++	 *    response to which the I2C-bus-driver will call
++	 *    i2c_recover_bus(), which will unstick the I2C-bus. Note that
++	 *    unsticking the I2C bus only works if we first drop the
++	 *    chip off the bus by turning it off.
++	 * 3. Turn the chip back on.
++	 *
++	 * On the x86/ACPI systems where this problem is seen, steps 1 and
++	 * 3 require making ACPI calls and dealing with ACPI Power
++	 * Resources. The workaround below runtime-suspends the chip to
++	 * turn it off, leaving it up to the ACPI subsystem to deal with
++	 * this.
++	 */
++
++	if (device_property_read_bool(&client->dev,
++				      "silead,stuck-controller-bug")) {
++		pm_runtime_set_active(&client->dev);
++		pm_runtime_enable(&client->dev);
++		pm_runtime_allow(&client->dev);
++
++		pm_runtime_suspend(&client->dev);
++
++		dev_warn(&client->dev, FW_BUG "Stuck I2C bus: please ignore the next 'controller timed out' error\n");
++		silead_ts_get_id(client);
++
++		/* The forbid will also resume the device */
++		pm_runtime_forbid(&client->dev);
++		pm_runtime_disable(&client->dev);
++	}
++
+ 	silead_ts_set_power(client, SILEAD_POWER_OFF);
+ 	silead_ts_set_power(client, SILEAD_POWER_ON);
+ 
+ 	error = silead_ts_get_id(client);
+-	if (error)
++	if (error) {
++		dev_err(&client->dev, "Chip ID read error %d\n", error);
+ 		return error;
++	}
+ 
+ 	error = silead_ts_init(client);
+ 	if (error)
+diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
+index 945701bce5536..2e081a58da6c5 100644
+--- a/drivers/misc/kgdbts.c
++++ b/drivers/misc/kgdbts.c
+@@ -95,19 +95,19 @@
+ 
+ #include <asm/sections.h>
+ 
+-#define v1printk(a...) do { \
+-	if (verbose) \
+-		printk(KERN_INFO a); \
+-	} while (0)
+-#define v2printk(a...) do { \
+-	if (verbose > 1) \
+-		printk(KERN_INFO a); \
+-		touch_nmi_watchdog();	\
+-	} while (0)
+-#define eprintk(a...) do { \
+-		printk(KERN_ERR a); \
+-		WARN_ON(1); \
+-	} while (0)
++#define v1printk(a...) do {		\
++	if (verbose)			\
++		printk(KERN_INFO a);	\
++} while (0)
++#define v2printk(a...) do {		\
++	if (verbose > 1)		\
++		printk(KERN_INFO a);	\
++	touch_nmi_watchdog();		\
++} while (0)
++#define eprintk(a...) do {		\
++	printk(KERN_ERR a);		\
++	WARN_ON(1);			\
++} while (0)
+ #define MAX_CONFIG_LEN		40
+ 
+ static struct kgdb_io kgdbts_io_ops;
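
The macro cleanup above changes layout, not behavior: in C, only the single statement after an if is conditional, so touch_nmi_watchdog() in the old v2printk() ran on every call while the indentation suggested otherwise. A small sketch of the pitfall, with invented names (the `a...` named-variadic form is a GNU extension, matching the kernel's usage):

#include <stdio.h>

static int verbose;

static void touch(void) { puts("watchdog touched"); }

#define v2printk_old(a...) do {	\
	if (verbose > 1)	\
		printf(a);	\
		touch();	\
	} while (0)	/* touch() is unconditional despite the indent */

int main(void)
{
	v2printk_old("never printed\n");	/* verbose == 0 ... */
	return 0;				/* ... but touch() still ran */
}
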
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 256fae15e032a..1e5f2edb70cf4 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2563,12 +2563,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
+ 	spin_lock_bh(&eosw_txq->lock);
+ 	if (tc != FW_SCHED_CLS_NONE) {
+ 		if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
+-			goto out_unlock;
++			goto out_free_skb;
+ 
+ 		next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
+ 	} else {
+ 		if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
+-			goto out_unlock;
++			goto out_free_skb;
+ 
+ 		next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
+ 	}
+@@ -2604,17 +2604,19 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
+ 		eosw_txq_flush_pending_skbs(eosw_txq);
+ 
+ 	ret = eosw_txq_enqueue(eosw_txq, skb);
+-	if (ret) {
+-		dev_consume_skb_any(skb);
+-		goto out_unlock;
+-	}
++	if (ret)
++		goto out_free_skb;
+ 
+ 	eosw_txq->state = next_state;
+ 	eosw_txq->flowc_idx = eosw_txq->pidx;
+ 	eosw_txq_advance(eosw_txq, 1);
+ 	ethofld_xmit(dev, eosw_txq);
+ 
+-out_unlock:
++	spin_unlock_bh(&eosw_txq->lock);
++	return 0;
++
++out_free_skb:
++	dev_consume_skb_any(skb);
+ 	spin_unlock_bh(&eosw_txq->lock);
+ 	return ret;
+ }
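
The restructuring above is the common kernel error-exit idiom: every failure path jumps to one label that frees the skb and drops the lock, so the unlock/free pair cannot drift apart as branches are added. A hedged userspace sketch of the pattern (all names invented for the demo):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int send_one(void *buf, int fail)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (fail) {
		ret = -1;
		goto out_free_buf;	/* one label: free + unlock */
	}
	/* on success the buffer is handed off (enqueued), not freed here */
	printf("queued %p\n", buf);
	pthread_mutex_unlock(&lock);
	return 0;

out_free_buf:
	free(buf);
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	return send_one(malloc(16), 1) ? 1 : 0;
}
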
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+index 62aa0e95beb70..a7249e4071f16 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+@@ -222,7 +222,7 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+ 				       u32 channel, int fifosz, u8 qmode)
+ {
+ 	unsigned int rqs = fifosz / 256 - 1;
+-	u32 mtl_rx_op, mtl_rx_int;
++	u32 mtl_rx_op;
+ 
+ 	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+ 
+@@ -283,11 +283,6 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+ 	}
+ 
+ 	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+-
+-	/* Enable MTL RX overflow */
+-	mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
+-	writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
+-	       ioaddr + MTL_CHAN_INT_CTRL(channel));
+ }
+ 
+ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index c6f24abf64328..369d7cde39933 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -4168,7 +4168,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+ 	/* To handle GMAC own interrupts */
+ 	if ((priv->plat->has_gmac) || xmac) {
+ 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
+-		int mtl_status;
+ 
+ 		if (unlikely(status)) {
+ 			/* For LPI we need to save the tx status */
+@@ -4179,17 +4178,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+ 		}
+ 
+ 		for (queue = 0; queue < queues_count; queue++) {
+-			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+-
+-			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
+-								queue);
+-			if (mtl_status != -EINVAL)
+-				status |= mtl_status;
+-
+-			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+-				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+-						       rx_q->rx_tail_addr,
+-						       queue);
++			status = stmmac_host_mtl_irq_status(priv, priv->hw,
++							    queue);
+ 		}
+ 
+ 		/* PCS link status */
+diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
+index 60db38c389606..fd37d4d2983b6 100644
+--- a/drivers/net/wireless/cisco/airo.c
++++ b/drivers/net/wireless/cisco/airo.c
+@@ -3817,6 +3817,68 @@ static inline void set_auth_type(struct airo_info *local, int auth_type)
+ 		local->last_auth = auth_type;
+ }
+ 
++static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int lock)
++{
++	int i, status;
++	/* large variables, so don't inline this function,
++	 * maybe change to kmalloc
++	 */
++	tdsRssiRid rssi_rid;
++	CapabilityRid cap_rid;
++
++	kfree(ai->SSID);
++	ai->SSID = NULL;
++	// general configuration (read/modify/write)
++	status = readConfigRid(ai, lock);
++	if (status != SUCCESS) return ERROR;
++
++	status = readCapabilityRid(ai, &cap_rid, lock);
++	if (status != SUCCESS) return ERROR;
++
++	status = PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock);
++	if (status == SUCCESS) {
++		if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
++			memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
++	}
++	else {
++		kfree(ai->rssi);
++		ai->rssi = NULL;
++		if (cap_rid.softCap & cpu_to_le16(8))
++			ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
++		else
++			airo_print_warn(ai->dev->name, "unknown received signal "
++					"level scale");
++	}
++	ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
++	set_auth_type(ai, AUTH_OPEN);
++	ai->config.modulation = MOD_CCK;
++
++	if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
++	    (cap_rid.extSoftCap & cpu_to_le16(1)) &&
++	    micsetup(ai) == SUCCESS) {
++		ai->config.opmode |= MODE_MIC;
++		set_bit(FLAG_MIC_CAPABLE, &ai->flags);
++	}
++
++	/* Save off the MAC */
++	for (i = 0; i < ETH_ALEN; i++) {
++		mac[i] = ai->config.macAddr[i];
++	}
++
++	/* Check to see if there are any insmod configured
++	   rates to add */
++	if (rates[0]) {
++		memset(ai->config.rates, 0, sizeof(ai->config.rates));
++		for (i = 0; i < 8 && rates[i]; i++) {
++			ai->config.rates[i] = rates[i];
++		}
++	}
++	set_bit (FLAG_COMMIT, &ai->flags);
++
++	return SUCCESS;
++}
++
++
+ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
+ {
+ 	Cmd cmd;
+@@ -3863,58 +3925,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
+ 	if (lock)
+ 		up(&ai->sem);
+ 	if (ai->config.len == 0) {
+-		int i;
+-		tdsRssiRid rssi_rid;
+-		CapabilityRid cap_rid;
+-
+-		kfree(ai->SSID);
+-		ai->SSID = NULL;
+-		// general configuration (read/modify/write)
+-		status = readConfigRid(ai, lock);
+-		if (status != SUCCESS) return ERROR;
+-
+-		status = readCapabilityRid(ai, &cap_rid, lock);
+-		if (status != SUCCESS) return ERROR;
+-
+-		status = PC4500_readrid(ai, RID_RSSI,&rssi_rid, sizeof(rssi_rid), lock);
+-		if (status == SUCCESS) {
+-			if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
+-				memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
+-		}
+-		else {
+-			kfree(ai->rssi);
+-			ai->rssi = NULL;
+-			if (cap_rid.softCap & cpu_to_le16(8))
+-				ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
+-			else
+-				airo_print_warn(ai->dev->name, "unknown received signal "
+-						"level scale");
+-		}
+-		ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
+-		set_auth_type(ai, AUTH_OPEN);
+-		ai->config.modulation = MOD_CCK;
+-
+-		if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
+-		    (cap_rid.extSoftCap & cpu_to_le16(1)) &&
+-		    micsetup(ai) == SUCCESS) {
+-			ai->config.opmode |= MODE_MIC;
+-			set_bit(FLAG_MIC_CAPABLE, &ai->flags);
+-		}
+-
+-		/* Save off the MAC */
+-		for (i = 0; i < ETH_ALEN; i++) {
+-			mac[i] = ai->config.macAddr[i];
+-		}
+-
+-		/* Check to see if there are any insmod configured
+-		   rates to add */
+-		if (rates[0]) {
+-			memset(ai->config.rates, 0, sizeof(ai->config.rates));
+-			for (i = 0; i < 8 && rates[i]; i++) {
+-				ai->config.rates[i] = rates[i];
+-			}
+-		}
+-		set_bit (FLAG_COMMIT, &ai->flags);
++		status = airo_readconfig(ai, mac, lock);
++		if (status != SUCCESS)
++			return ERROR;
+ 	}
+ 
+ 	/* Setup the SSIDs if present */
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index 81224447605b9..5a1ab49908c34 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -307,7 +307,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
+ 	case NVME_LOG_ANA:
+ 		return nvmet_execute_get_log_page_ana(req);
+ 	}
+-	pr_err("unhandled lid %d on qid %d\n",
++	pr_debug("unhandled lid %d on qid %d\n",
+ 	       req->cmd->get_log_page.lid, req->sq->qid);
+ 	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
+ 	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+@@ -659,7 +659,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
+ 		return nvmet_execute_identify_desclist(req);
+ 	}
+ 
+-	pr_err("unhandled identify cns %d on qid %d\n",
++	pr_debug("unhandled identify cns %d on qid %d\n",
+ 	       req->cmd->identify.cns, req->sq->qid);
+ 	req->error_loc = offsetof(struct nvme_identify, cns);
+ 	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+@@ -977,7 +977,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
+ 		return 0;
+ 	}
+ 
+-	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
++	pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
+ 	       req->sq->qid);
+ 	req->error_loc = offsetof(struct nvme_common_command, opcode);
+ 	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 6fa216e52d142..0e94190ca4e88 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -1645,7 +1645,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+ 	if (pcie->ep_state == EP_STATE_ENABLED)
+ 		return;
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ 			ret);
+diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
+index f964fd26f7e0c..ffd84656544f2 100644
+--- a/drivers/pci/controller/pci-thunder-ecam.c
++++ b/drivers/pci/controller/pci-thunder-ecam.c
+@@ -116,7 +116,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
+ 	 * the config space access window.  Since we are working with
+ 	 * the high-order 32 bits, shift everything down by 32 bits.
+ 	 */
+-	node_bits = (cfg->res.start >> 32) & (1 << 12);
++	node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
+ 
+ 	v |= node_bits;
+ 	set_val(v, where, size, val);
+diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
+index 1a3f70ac61fc3..0660b9da204f0 100644
+--- a/drivers/pci/controller/pci-thunder-pem.c
++++ b/drivers/pci/controller/pci-thunder-pem.c
+@@ -12,6 +12,7 @@
+ #include <linux/pci-acpi.h>
+ #include <linux/pci-ecam.h>
+ #include <linux/platform_device.h>
++#include <linux/io-64-nonatomic-lo-hi.h>
+ #include "../pci.h"
+ 
+ #if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+@@ -324,9 +325,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
+ 	 * structure here for the BAR.
+ 	 */
+ 	bar4_start = res_pem->start + 0xf00000;
+-	pem_pci->ea_entry[0] = (u32)bar4_start | 2;
+-	pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
+-	pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
++	pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
++	pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
++	pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
+ 
+ 	cfg->priv = pem_pci;
+ 	return 0;
+@@ -334,9 +335,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
+ 
+ #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+ 
+-#define PEM_RES_BASE		0x87e0c0000000UL
+-#define PEM_NODE_MASK		GENMASK(45, 44)
+-#define PEM_INDX_MASK		GENMASK(26, 24)
++#define PEM_RES_BASE		0x87e0c0000000ULL
++#define PEM_NODE_MASK		GENMASK_ULL(45, 44)
++#define PEM_INDX_MASK		GENMASK_ULL(26, 24)
+ #define PEM_MIN_DOM_IN_NODE	4
+ #define PEM_MAX_DOM_IN_NODE	10
+ 
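
Both thunder hunks above are the same 32-bit portability fix: on an ILP32 build, a UL constant and a plain shift-and-cast truncate 64-bit addresses, while ULL constants, GENMASK_ULL(), and upper_32_bits()/lower_32_bits() keep the arithmetic 64 bits wide. A standalone sketch (the helper macros below mirror the kernel's definitions for illustration only):

#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)((n) >> 32))
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffULL))

int main(void)
{
	uint64_t base = 0x87e0c0000000ULL;	/* wider than 32 bits */

	/* prints hi=0x87e0 lo=0xc0000000; both halves survive on any target */
	printf("hi=0x%x lo=0x%x\n", upper_32_bits(base), lower_32_bits(base));
	return 0;
}
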
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index 3365c93abf0e2..f031302ad4019 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -533,6 +533,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+ 			slot->flags &= ~SLOT_ENABLED;
+ 			continue;
+ 		}
++		pci_dev_put(dev);
+ 	}
+ }
+ 
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index ef7c4661314fa..9684b468267f2 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -624,6 +624,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+ #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
+ int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
+ 			  struct resource *res);
++#else
++static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
++					u16 segment, struct resource *res)
++{
++	return -ENODEV;
++}
+ #endif
+ 
+ int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
+diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
+index 0811562deecca..24be8f550ae00 100644
+--- a/drivers/platform/chrome/cros_ec_typec.c
++++ b/drivers/platform/chrome/cros_ec_typec.c
+@@ -483,6 +483,11 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
+ 		return -ENOTSUPP;
+ 	}
+ 
++	if (!pd_ctrl->dp_mode) {
++		dev_err(typec->dev, "No valid DP mode provided.\n");
++		return -EINVAL;
++	}
++
+ 	/* Status VDO. */
+ 	dp_data.status = DP_STATUS_ENABLED;
+ 	if (port->mux_flags & USB_PD_MUX_HPD_IRQ)
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index bd31feb3d5e18..920cf329268b5 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -11807,13 +11807,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
+ 			   lpfc_ctx_cmd ctx_cmd)
+ {
+ 	struct lpfc_io_buf *lpfc_cmd;
++	IOCB_t *icmd = NULL;
+ 	int rc = 1;
+ 
+ 	if (!iocbq || iocbq->vport != vport)
+ 		return rc;
+ 
+-	if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
+-	    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
++	if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
++	    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
++	      iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
++		return rc;
++
++	icmd = &iocbq->iocb;
++	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
++	    icmd->ulpCommand == CMD_CLOSE_XRI_CN)
+ 		return rc;
+ 
+ 	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index bf73cd5f4b04c..6809c970be038 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -1377,7 +1377,7 @@ static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
+ 	return 1;
+ }
+ 
+-static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
++static bool tcmu_handle_completions(struct tcmu_dev *udev)
+ {
+ 	struct tcmu_mailbox *mb;
+ 	struct tcmu_cmd *cmd;
+@@ -1420,7 +1420,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
+ 			pr_err("cmd_id %u not found, ring is broken\n",
+ 			       entry->hdr.cmd_id);
+ 			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+-			break;
++			return false;
+ 		}
+ 
+ 		tcmu_handle_completion(cmd, entry);
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index 115ced0d93e10..1be692d3cf903 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -1287,11 +1287,10 @@ sl811h_hub_control(
+ 			goto error;
+ 		put_unaligned_le32(sl811->port1, buf);
+ 
+-#ifndef	VERBOSE
+-	if (*(u16*)(buf+2))	/* only if wPortChange is interesting */
+-#endif
+-		dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
+-			sl811->port1);
++		if (__is_defined(VERBOSE) ||
++		    *(u16*)(buf+2)) /* only if wPortChange is interesting */
++			dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
++				sl811->port1);
+ 		break;
+ 	case SetPortFeature:
+ 		if (wIndex != 1 || wLength != 0)
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 09d6f7229db9d..a5a6a7930e5ed 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1684,6 +1684,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 	struct inode *bd_inode = bdev_file_inode(file);
+ 	loff_t size = i_size_read(bd_inode);
+ 	struct blk_plug plug;
++	size_t shorted = 0;
+ 	ssize_t ret;
+ 
+ 	if (bdev_read_only(I_BDEV(bd_inode)))
+@@ -1701,12 +1702,17 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
+ 		return -EOPNOTSUPP;
+ 
+-	iov_iter_truncate(from, size - iocb->ki_pos);
++	size -= iocb->ki_pos;
++	if (iov_iter_count(from) > size) {
++		shorted = iov_iter_count(from) - size;
++		iov_iter_truncate(from, size);
++	}
+ 
+ 	blk_start_plug(&plug);
+ 	ret = __generic_file_write_iter(iocb, from);
+ 	if (ret > 0)
+ 		ret = generic_write_sync(iocb, ret);
++	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
+ 	blk_finish_plug(&plug);
+ 	return ret;
+ }
+@@ -1718,13 +1724,21 @@ ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ 	struct inode *bd_inode = bdev_file_inode(file);
+ 	loff_t size = i_size_read(bd_inode);
+ 	loff_t pos = iocb->ki_pos;
++	size_t shorted = 0;
++	ssize_t ret;
+ 
+ 	if (pos >= size)
+ 		return 0;
+ 
+ 	size -= pos;
+-	iov_iter_truncate(to, size);
+-	return generic_file_read_iter(iocb, to);
++	if (iov_iter_count(to) > size) {
++		shorted = iov_iter_count(to) - size;
++		iov_iter_truncate(to, size);
++	}
++
++	ret = generic_file_read_iter(iocb, to);
++	iov_iter_reexpand(to, iov_iter_count(to) + shorted);
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(blkdev_read_iter);
+ 
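
The pattern in both hunks above is worth calling out: clamp the iterator to the device size, remember how many bytes were shaved off, and re-expand afterwards so the caller sees a consistent iterator even on a short read or write. A plain-C analogue of that shorten/restore dance (iov_iter itself is kernel-internal; the struct below is a stand-in):

#include <stdio.h>
#include <stddef.h>

struct iter { size_t count; };

static size_t clamp_to_size(struct iter *it, size_t size)
{
	size_t shorted = 0;

	if (it->count > size) {
		shorted = it->count - size;
		it->count = size;	/* like iov_iter_truncate() */
	}
	return shorted;
}

int main(void)
{
	struct iter it = { .count = 4096 };
	size_t shorted = clamp_to_size(&it, 1000);

	printf("do I/O for %zu bytes\n", it.count);
	it.count += shorted;		/* like iov_iter_reexpand() */
	printf("restored to %zu\n", it.count);
	return 0;
}
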
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 570731c4d019d..d405ba801492f 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1867,6 +1867,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
+ 	u32 invalidating_gen = ci->i_rdcache_gen;
+ 
+ 	spin_unlock(&ci->i_ceph_lock);
++	ceph_fscache_invalidate(inode);
+ 	invalidate_mapping_pages(&inode->i_data, 0, -1);
+ 	spin_lock(&ci->i_ceph_lock);
+ 
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index baa6368bece59..042bb4a02c0a2 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -129,6 +129,10 @@ static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
+ 
+ 	vino.ino = ino;
+ 	vino.snap = CEPH_NOSNAP;
++
++	if (ceph_vino_is_reserved(vino))
++		return ERR_PTR(-ESTALE);
++
+ 	inode = ceph_find_inode(sb, vino);
+ 	if (!inode) {
+ 		struct ceph_mds_request *req;
+@@ -214,6 +218,10 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
+ 		vino.ino = sfh->ino;
+ 		vino.snap = sfh->snapid;
+ 	}
++
++	if (ceph_vino_is_reserved(vino))
++		return ERR_PTR(-ESTALE);
++
+ 	inode = ceph_find_inode(sb, vino);
+ 	if (inode)
+ 		return d_obtain_alias(inode);
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 156f849f53856..179d2ef69a24a 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -56,6 +56,9 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
+ {
+ 	struct inode *inode;
+ 
++	if (ceph_vino_is_reserved(vino))
++		return ERR_PTR(-EREMOTEIO);
++
+ 	inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
+ 			     ceph_set_ino_cb, &vino);
+ 	if (!inode)
+@@ -87,14 +90,15 @@ struct inode *ceph_get_snapdir(struct inode *parent)
+ 	inode->i_mtime = parent->i_mtime;
+ 	inode->i_ctime = parent->i_ctime;
+ 	inode->i_atime = parent->i_atime;
+-	inode->i_op = &ceph_snapdir_iops;
+-	inode->i_fop = &ceph_snapdir_fops;
+-	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
+ 	ci->i_rbytes = 0;
+ 	ci->i_btime = ceph_inode(parent)->i_btime;
+ 
+-	if (inode->i_state & I_NEW)
++	if (inode->i_state & I_NEW) {
++		inode->i_op = &ceph_snapdir_iops;
++		inode->i_fop = &ceph_snapdir_fops;
++		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
+ 		unlock_new_inode(inode);
++	}
+ 
+ 	return inode;
+ }
+@@ -1863,6 +1867,7 @@ static void ceph_do_invalidate_pages(struct inode *inode)
+ 	orig_gen = ci->i_rdcache_gen;
+ 	spin_unlock(&ci->i_ceph_lock);
+ 
++	ceph_fscache_invalidate(inode);
+ 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
+ 		pr_err("invalidate_pages %p fails\n", inode);
+ 	}
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index d87bd852ed961..298cb0b3d28c0 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -433,6 +433,13 @@ static int ceph_parse_deleg_inos(void **p, void *end,
+ 
+ 		ceph_decode_64_safe(p, end, start, bad);
+ 		ceph_decode_64_safe(p, end, len, bad);
++
++		/* Don't accept a delegation of system inodes */
++		if (start < CEPH_INO_SYSTEM_BASE) {
++			pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
++					start, len);
++			continue;
++		}
+ 		while (len--) {
+ 			int err = xa_insert(&s->s_delegated_inos, ino = start++,
+ 					    DELEGATED_INO_AVAILABLE,
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index c48bb30c8d70e..1d2fe70439bd0 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -529,10 +529,34 @@ static inline int ceph_ino_compare(struct inode *inode, void *data)
+ 		ci->i_vino.snap == pvino->snap;
+ }
+ 
++/*
++ * The MDS reserves a set of inodes for its own usage. These should never
++ * be accessible by clients, and so the MDS has no reason to ever hand these
++ * out. The range is CEPH_MDS_INO_MDSDIR_OFFSET..CEPH_INO_SYSTEM_BASE.
++ *
++ * These come from src/mds/mdstypes.h in the ceph sources.
++ */
++#define CEPH_MAX_MDS		0x100
++#define CEPH_NUM_STRAY		10
++#define CEPH_MDS_INO_MDSDIR_OFFSET	(1 * CEPH_MAX_MDS)
++#define CEPH_INO_SYSTEM_BASE		((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))
++
++static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
++{
++	if (vino.ino < CEPH_INO_SYSTEM_BASE &&
++	    vino.ino >= CEPH_MDS_INO_MDSDIR_OFFSET) {
++		WARN_RATELIMIT(1, "Attempt to access reserved inode number 0x%llx", vino.ino);
++		return true;
++	}
++	return false;
++}
+ 
+ static inline struct inode *ceph_find_inode(struct super_block *sb,
+ 					    struct ceph_vino vino)
+ {
++	if (ceph_vino_is_reserved(vino))
++		return NULL;
++
+ 	/*
+ 	 * NB: The hashval will be run through the fs/inode.c hash function
+ 	 * anyway, so there is no need to squash the inode number down to
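
To make the arithmetic above concrete: with CEPH_MAX_MDS = 0x100 and CEPH_NUM_STRAY = 10, the reserved window is [0x100, 0x1000). A minimal sketch of the range check (constants copied from the hunk; everything else is illustrative userspace C, not the kernel helper):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define CEPH_MAX_MDS			0x100
#define CEPH_NUM_STRAY			10
#define CEPH_MDS_INO_MDSDIR_OFFSET	(1 * CEPH_MAX_MDS)
#define CEPH_INO_SYSTEM_BASE		((6 * CEPH_MAX_MDS) + \
					 (CEPH_MAX_MDS * CEPH_NUM_STRAY))

static bool vino_is_reserved(uint64_t ino)
{
	return ino >= CEPH_MDS_INO_MDSDIR_OFFSET &&
	       ino < CEPH_INO_SYSTEM_BASE;
}

int main(void)
{
	printf("reserved range: [0x%x, 0x%x)\n",
	       CEPH_MDS_INO_MDSDIR_OFFSET, CEPH_INO_SYSTEM_BASE);
	printf("0x200 reserved? %d\n", vino_is_reserved(0x200));	/* 1 */
	return 0;
}
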
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 77456d228f2a1..bb6d862557410 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -186,7 +186,10 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
+ {
+ 	struct inmem_pages *new;
+ 
+-	f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
++	if (PagePrivate(page))
++		set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
++	else
++		f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
+ 
+ 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
+ 
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 7cfeee3eeef7f..ae8bc84e39fb3 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -219,15 +219,16 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
+ 				| NFS_INO_INVALID_SIZE
+ 				| NFS_INO_REVAL_PAGECACHE
+ 				| NFS_INO_INVALID_XATTR);
+-	}
++	} else if (flags & NFS_INO_REVAL_PAGECACHE)
++		flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE;
+ 
+ 	if (!nfs_has_xattr_cache(nfsi))
+ 		flags &= ~NFS_INO_INVALID_XATTR;
++	if (flags & NFS_INO_INVALID_DATA)
++		nfs_fscache_invalidate(inode);
+ 	if (inode->i_mapping->nrpages == 0)
+ 		flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER);
+ 	nfsi->cache_validity |= flags;
+-	if (flags & NFS_INO_INVALID_DATA)
+-		nfs_fscache_invalidate(inode);
+ }
+ EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
+ 
+diff --git a/lib/stackdepot.c b/lib/stackdepot.c
+index 49f67a0c6e5d2..df9179f4f4414 100644
+--- a/lib/stackdepot.c
++++ b/lib/stackdepot.c
+@@ -71,7 +71,7 @@ static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
+ static int depot_index;
+ static int next_slab_inited;
+ static size_t depot_offset;
+-static DEFINE_SPINLOCK(depot_lock);
++static DEFINE_RAW_SPINLOCK(depot_lock);
+ 
+ static bool init_stack_slab(void **prealloc)
+ {
+@@ -305,7 +305,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ 			prealloc = page_address(page);
+ 	}
+ 
+-	spin_lock_irqsave(&depot_lock, flags);
++	raw_spin_lock_irqsave(&depot_lock, flags);
+ 
+ 	found = find_stack(*bucket, entries, nr_entries, hash);
+ 	if (!found) {
+@@ -329,7 +329,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ 		WARN_ON(!init_stack_slab(&prealloc));
+ 	}
+ 
+-	spin_unlock_irqrestore(&depot_lock, flags);
++	raw_spin_unlock_irqrestore(&depot_lock, flags);
+ exit:
+ 	if (prealloc) {
+ 		/* Nobody used this memory, ok to free it. */
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index 0456593aceec1..e4e6e991313e7 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -103,8 +103,9 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
+ 
+ 	rcu_read_lock();
+ 	if (netif_is_bridge_port(dev)) {
+-		p = br_port_get_rcu(dev);
+-		vg = nbp_vlan_group_rcu(p);
++		p = br_port_get_check_rcu(dev);
++		if (p)
++			vg = nbp_vlan_group_rcu(p);
+ 	} else if (dev->priv_flags & IFF_EBRIDGE) {
+ 		br = netdev_priv(dev);
+ 		vg = br_vlan_group_rcu(br);
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index b218e4594009c..6852e9bccf5b8 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -520,6 +520,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+ 	struct ethhdr *ethhdr;
+ 	__be16 proto;
+ 
++	/* Check if skb contains hsr_ethhdr */
++	if (skb->mac_len < sizeof(struct hsr_ethhdr))
++		return -EINVAL;
++
+ 	memset(frame, 0, sizeof(*frame));
+ 	frame->is_supervision = is_supervision_frame(port->hsr, skb);
+ 	frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
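
The guard added above is the standard validate-before-cast rule: never reinterpret a buffer as a larger header than the buffer is known to hold. A self-contained sketch (struct layouts are invented for the demo; the real header is struct hsr_ethhdr):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct eth_hdr { uint8_t dst[6], src[6]; uint16_t proto; };	/* 14 bytes */
struct hsr_hdr { struct eth_hdr eth; uint16_t path_and_size, seq; };

static int parse(const uint8_t *buf, size_t mac_len)
{
	if (mac_len < sizeof(struct hsr_hdr))
		return -1;	/* too short: casting would overread */

	struct hsr_hdr h;
	memcpy(&h, buf, sizeof(h));
	return 0;
}

int main(void)
{
	uint8_t frame[64] = { 0 };

	printf("14-byte header: %d\n", parse(frame, 14));	/* rejected */
	printf("18-byte header: %d\n", parse(frame, 18));	/* accepted */
	return 0;
}
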
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 1baf43aacb2e4..bc224f917bbd5 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -387,7 +387,6 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+ 	if (!(nt->parms.o_flags & TUNNEL_SEQ))
+ 		dev->features |= NETIF_F_LLTX;
+ 
+-	dev_hold(dev);
+ 	ip6gre_tunnel_link(ign, nt);
+ 	return nt;
+ 
+@@ -1496,6 +1495,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
+ 	}
+ 	ip6gre_tnl_init_features(dev);
+ 
++	dev_hold(dev);
+ 	return 0;
+ 
+ cleanup_dst_cache_init:
+@@ -1538,8 +1538,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
+ 	strcpy(tunnel->parms.name, dev->name);
+ 
+ 	tunnel->hlen		= sizeof(struct ipv6hdr) + 4;
+-
+-	dev_hold(dev);
+ }
+ 
+ static struct inet6_protocol ip6gre_protocol __read_mostly = {
+@@ -1889,6 +1887,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
+ 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ 	ip6erspan_tnl_link_config(tunnel, 1);
+ 
++	dev_hold(dev);
+ 	return 0;
+ 
+ cleanup_dst_cache_init:
+@@ -1988,8 +1987,6 @@ static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
+ 	if (tb[IFLA_MTU])
+ 		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+ 
+-	dev_hold(dev);
+-
+ out:
+ 	return err;
+ }
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 42fe7db6bbb37..d42f471b0d655 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -266,7 +266,6 @@ static int ip6_tnl_create2(struct net_device *dev)
+ 
+ 	strcpy(t->parms.name, dev->name);
+ 
+-	dev_hold(dev);
+ 	ip6_tnl_link(ip6n, t);
+ 	return 0;
+ 
+@@ -1882,6 +1881,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
+ 	dev->min_mtu = ETH_MIN_MTU;
+ 	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
+ 
++	dev_hold(dev);
+ 	return 0;
+ 
+ destroy_dst:
+@@ -1925,7 +1925,6 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
+ 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+ 
+ 	t->parms.proto = IPPROTO_IPV6;
+-	dev_hold(dev);
+ 
+ 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
+ 	return 0;
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 932b15b13053d..2d048e21abbba 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -965,7 +965,6 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
+ 	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ 
+ 	t->parms.proto = IPPROTO_IPV6;
+-	dev_hold(dev);
+ 
+ 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
+ 	return 0;
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 9fdccf0718b59..fcc9ba2c80e95 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -218,8 +218,6 @@ static int ipip6_tunnel_create(struct net_device *dev)
+ 
+ 	ipip6_tunnel_clone_6rd(dev, sitn);
+ 
+-	dev_hold(dev);
+-
+ 	ipip6_tunnel_link(sitn, t);
+ 	return 0;
+ 
+@@ -1456,7 +1454,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
+ 		dev->tstats = NULL;
+ 		return err;
+ 	}
+-
++	dev_hold(dev);
+ 	return 0;
+ }
+ 
+@@ -1472,7 +1470,6 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
+ 	iph->ihl		= 5;
+ 	iph->ttl		= 64;
+ 
+-	dev_hold(dev);
+ 	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
+ }
+ 
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 52c759a8543ec..3669661457c15 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -958,7 +958,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
+ 	p = xdr_reserve_space(&sctxt->sc_stream,
+ 			      rpcrdma_fixed_maxsz * sizeof(*p));
+ 	if (!p)
+-		goto err0;
++		goto err1;
+ 
+ 	ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
+ 	if (ret < 0)
+@@ -970,11 +970,11 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
+ 	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;
+ 
+ 	if (svc_rdma_encode_read_list(sctxt) < 0)
+-		goto err0;
++		goto err1;
+ 	if (svc_rdma_encode_write_list(rctxt, sctxt) < 0)
+-		goto err0;
++		goto err1;
+ 	if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
+-		goto err0;
++		goto err1;
+ 
+ 	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
+ 	if (ret < 0)
+diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
+index 867860ea57daa..7b83a1aaec98d 100755
+--- a/scripts/recordmcount.pl
++++ b/scripts/recordmcount.pl
+@@ -392,7 +392,7 @@ if ($arch eq "x86_64") {
+     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+ } elsif ($arch eq "riscv") {
+     $function_regex = "^([0-9a-fA-F]+)\\s+<([^.0-9][0-9a-zA-Z_\\.]+)>:";
+-    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
++    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL(_PLT)?\\s_?mcount\$";
+     $type = ".quad";
+     $alignment = 2;
+ } elsif ($arch eq "nds32") {
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index f5cba7afd1c66..ff0fb2d16d82f 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -1202,11 +1202,17 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
+ 		*index = ch;
+ 		return "Headphone";
+ 	case AUTO_PIN_LINE_OUT:
+-		/* This deals with the case where we have two DACs and
+-		 * one LO, one HP and one Speaker */
+-		if (!ch && cfg->speaker_outs && cfg->hp_outs) {
+-			bool hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
+-			bool spk_lo_shared = !path_has_mixer(codec, spec->speaker_paths[0], ctl_type);
++		/* This deals with the case where one HP, one Speaker, or
++		 * one HP + one Speaker needs to share the DAC with LO
++		 */
++		if (!ch) {
++			bool hp_lo_shared = false, spk_lo_shared = false;
++
++			if (cfg->speaker_outs)
++				spk_lo_shared = !path_has_mixer(codec,
++								spec->speaker_paths[0],	ctl_type);
++			if (cfg->hp_outs)
++				hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
+ 			if (hp_lo_shared && spk_lo_shared)
+ 				return spec->vmaster_mute.hook ? "PCM" : "Master";
+ 			if (hp_lo_shared)


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-05-19 12:25 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-05-19 12:25 UTC (permalink / raw
  To: gentoo-commits

commit:     d3cadfda9c7e1f2c2045f8709a31a571ad9ae675
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 19 12:25:37 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 19 12:25:37 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d3cadfda

Linux patch 5.12.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1004_linux-5.12.5.patch | 15905 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 15909 insertions(+)

diff --git a/0000_README b/0000_README
index 9e34155..055c634 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-5.12.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.4
 
+Patch:  1004_linux-5.12.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-5.12.5.patch b/1004_linux-5.12.5.patch
new file mode 100644
index 0000000..bf752c4
--- /dev/null
+++ b/1004_linux-5.12.5.patch
@@ -0,0 +1,15905 @@
+diff --git a/.gitignore b/.gitignore
+index 3af66272d6f10..127012c1f7170 100644
+--- a/.gitignore
++++ b/.gitignore
+@@ -57,6 +57,7 @@ modules.order
+ /tags
+ /TAGS
+ /linux
++/modules-only.symvers
+ /vmlinux
+ /vmlinux.32
+ /vmlinux.symvers
+diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
+index fe7c4cbfe4ba9..dd1a5ce5896ce 100644
+--- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
++++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
+@@ -193,23 +193,35 @@ required:
+   - interrupts
+   - clocks
+   - power-domains
+-  - resets
+-
+-if:
+-  properties:
+-    compatible:
+-      contains:
+-        enum:
+-          - renesas,vin-r8a7778
+-          - renesas,vin-r8a7779
+-          - renesas,rcar-gen2-vin
+-then:
+-  required:
+-    - port
+-else:
+-  required:
+-    - renesas,id
+-    - ports
++
++allOf:
++  - if:
++      not:
++        properties:
++          compatible:
++            contains:
++              enum:
++                - renesas,vin-r8a7778
++                - renesas,vin-r8a7779
++    then:
++      required:
++        - resets
++
++  - if:
++      properties:
++        compatible:
++          contains:
++            enum:
++              - renesas,vin-r8a7778
++              - renesas,vin-r8a7779
++              - renesas,rcar-gen2-vin
++    then:
++      required:
++        - port
++    else:
++      required:
++        - renesas,id
++        - ports
+ 
+ additionalProperties: false
+ 
+diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+index 4a2bcc0158e2d..8fdfbc763d704 100644
+--- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
++++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+@@ -17,6 +17,7 @@ allOf:
+ properties:
+   compatible:
+     oneOf:
++      - const: renesas,pcie-r8a7779       # R-Car H1
+       - items:
+           - enum:
+               - renesas,pcie-r8a7742      # RZ/G1H
+@@ -74,7 +75,16 @@ required:
+   - clocks
+   - clock-names
+   - power-domains
+-  - resets
++
++if:
++  not:
++    properties:
++      compatible:
++        contains:
++          const: renesas,pcie-r8a7779
++then:
++  required:
++    - resets
+ 
+ unevaluatedProperties: false
+ 
+diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
+index 626447fee0925..7808ec8bc7128 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
+@@ -25,11 +25,13 @@ properties:
+       - qcom,msm8998-qmp-pcie-phy
+       - qcom,msm8998-qmp-ufs-phy
+       - qcom,msm8998-qmp-usb3-phy
++      - qcom,sc7180-qmp-usb3-phy
+       - qcom,sc8180x-qmp-ufs-phy
+       - qcom,sc8180x-qmp-usb3-phy
+       - qcom,sdm845-qhp-pcie-phy
+       - qcom,sdm845-qmp-pcie-phy
+       - qcom,sdm845-qmp-ufs-phy
++      - qcom,sdm845-qmp-usb3-phy
+       - qcom,sdm845-qmp-usb3-uni-phy
+       - qcom,sm8150-qmp-ufs-phy
+       - qcom,sm8150-qmp-usb3-phy
+diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
+index 33974ad10afe4..62c0179d17657 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
+@@ -14,9 +14,7 @@ properties:
+   compatible:
+     enum:
+       - qcom,sc7180-qmp-usb3-dp-phy
+-      - qcom,sc7180-qmp-usb3-phy
+       - qcom,sdm845-qmp-usb3-dp-phy
+-      - qcom,sdm845-qmp-usb3-phy
+   reg:
+     items:
+       - description: Address and length of PHY's USB serdes block.
+diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
+index f54cae9ff7b28..d3f87f2bfdc25 100644
+--- a/Documentation/devicetree/bindings/serial/8250.yaml
++++ b/Documentation/devicetree/bindings/serial/8250.yaml
+@@ -93,11 +93,6 @@ properties:
+               - mediatek,mt7622-btif
+               - mediatek,mt7623-btif
+           - const: mediatek,mtk-btif
+-      - items:
+-          - enum:
+-              - mediatek,mt7622-btif
+-              - mediatek,mt7623-btif
+-          - const: mediatek,mtk-btif
+       - items:
+           - const: mrvl,mmp-uart
+           - const: intel,xscale-uart
+diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+index b33a76eeac4e4..f963204e0b162 100644
+--- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
++++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+@@ -28,14 +28,7 @@ properties:
+       - renesas,r8a77980-thermal # R-Car V3H
+       - renesas,r8a779a0-thermal # R-Car V3U
+ 
+-  reg:
+-    minItems: 2
+-    maxItems: 4
+-    items:
+-      - description: TSC1 registers
+-      - description: TSC2 registers
+-      - description: TSC3 registers
+-      - description: TSC4 registers
++  reg: true
+ 
+   interrupts:
+     items:
+@@ -71,8 +64,25 @@ if:
+           enum:
+             - renesas,r8a779a0-thermal
+ then:
++  properties:
++    reg:
++      minItems: 2
++      maxItems: 3
++      items:
++        - description: TSC1 registers
++        - description: TSC2 registers
++        - description: TSC3 registers
+   required:
+     - interrupts
++else:
++  properties:
++    reg:
++      items:
++        - description: TSC0 registers
++        - description: TSC1 registers
++        - description: TSC2 registers
++        - description: TSC3 registers
++        - description: TSC4 registers
+ 
+ additionalProperties: false
+ 
+@@ -111,3 +121,20 @@ examples:
+                     };
+             };
+     };
++  - |
++    #include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
++    #include <dt-bindings/interrupt-controller/arm-gic.h>
++    #include <dt-bindings/power/r8a779a0-sysc.h>
++
++    tsc_r8a779a0: thermal@e6190000 {
++            compatible = "renesas,r8a779a0-thermal";
++            reg = <0xe6190000 0x200>,
++                  <0xe6198000 0x200>,
++                  <0xe61a0000 0x200>,
++                  <0xe61a8000 0x200>,
++                  <0xe61b0000 0x200>;
++            clocks = <&cpg CPG_MOD 919>;
++            power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
++            resets = <&cpg 919>;
++            #thermal-sensor-cells = <1>;
++    };
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index e361fc95ca293..82e3eee7363b0 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -178,6 +178,7 @@ mktables
+ mktree
+ mkutf8data
+ modpost
++modules-only.symvers
+ modules.builtin
+ modules.builtin.modinfo
+ modules.nsdeps
+diff --git a/Makefile b/Makefile
+index 0b1852621615c..f60212a0412c8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+@@ -1513,7 +1513,7 @@ endif # CONFIG_MODULES
+ # make distclean Remove editor backup files, patch leftover files and the like
+ 
+ # Directories & files removed with 'make clean'
+-CLEAN_FILES += include/ksym vmlinux.symvers \
++CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
+ 	       modules.builtin modules.builtin.modinfo modules.nsdeps \
+ 	       compile_commands.json .thinlto-cache
+ 
+diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
+index ad9b7fe4dba36..4a9d33372fe2b 100644
+--- a/arch/arc/include/asm/page.h
++++ b/arch/arc/include/asm/page.h
+@@ -7,6 +7,18 @@
+ 
+ #include <uapi/asm/page.h>
+ 
++#ifdef CONFIG_ARC_HAS_PAE40
++
++#define MAX_POSSIBLE_PHYSMEM_BITS	40
++#define PAGE_MASK_PHYS			(0xff00000000ull | PAGE_MASK)
++
++#else /* CONFIG_ARC_HAS_PAE40 */
++
++#define MAX_POSSIBLE_PHYSMEM_BITS	32
++#define PAGE_MASK_PHYS			PAGE_MASK
++
++#endif /* CONFIG_ARC_HAS_PAE40 */
++
+ #ifndef __ASSEMBLY__
+ 
+ #define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 163641726a2b9..5878846f00cfe 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -107,8 +107,8 @@
+ #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
+ 
+ /* Set of bits not changed in pte_modify */
+-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
+-
++#define _PAGE_CHG_MASK	(PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
++							   _PAGE_SPECIAL)
+ /* More Abbrevaited helpers */
+ #define PAGE_U_NONE     __pgprot(___DEF)
+ #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
+@@ -132,13 +132,7 @@
+ #define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
+ #define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+ 
+-#ifdef CONFIG_ARC_HAS_PAE40
+-#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+-#define MAX_POSSIBLE_PHYSMEM_BITS 40
+-#else
+-#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
+-#define MAX_POSSIBLE_PHYSMEM_BITS 32
+-#endif
++#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK_PHYS | _PAGE_CACHEABLE)
+ 
+ /**************************************************************************
+  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
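
The PAE40 rework above fixes a silent truncation: masking a 40-bit physical address with the 32-bit PAGE_MASK clears bits 32-39, and PAGE_MASK_PHYS widens the mask to the full physical width. A userspace sketch with the same constants (4 KiB pages; the 32-bit mask is emulated with an explicit AND):

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK	(~(uint64_t)(4096 - 1) & 0xffffffffULL)	/* 32-bit */
#define PAGE_MASK_PHYS	(0xff00000000ULL | PAGE_MASK)		/* 40-bit */

int main(void)
{
	uint64_t paddr = 0x1234567890ULL;	/* needs bits above 32 */

	printf("PAGE_MASK:      0x%llx\n",	/* high bits lost */
	       (unsigned long long)(paddr & PAGE_MASK));
	printf("PAGE_MASK_PHYS: 0x%llx\n",	/* high bits preserved */
	       (unsigned long long)(paddr & PAGE_MASK_PHYS));
	return 0;
}
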
+diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
+index 2a97e2718a219..2a4ad619abfba 100644
+--- a/arch/arc/include/uapi/asm/page.h
++++ b/arch/arc/include/uapi/asm/page.h
+@@ -33,5 +33,4 @@
+ 
+ #define PAGE_MASK	(~(PAGE_SIZE-1))
+ 
+-
+ #endif /* _UAPI__ASM_ARC_PAGE_H */
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index 1743506081da6..2cb8dfe866b66 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -177,7 +177,7 @@ tracesys:
+ 
+ 	; Do the Sys Call as we normally would.
+ 	; Validate the Sys Call number
+-	cmp     r8,  NR_syscalls
++	cmp     r8,  NR_syscalls - 1
+ 	mov.hi  r0, -ENOSYS
+ 	bhi     tracesys_exit
+ 
+@@ -255,7 +255,7 @@ ENTRY(EV_Trap)
+ 	;============ Normal syscall case
+ 
+ 	; syscall num shd not exceed the total system calls avail
+-	cmp     r8,  NR_syscalls
++	cmp     r8,  NR_syscalls - 1
+ 	mov.hi  r0, -ENOSYS
+ 	bhi     .Lret_from_system_call
+ 
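
The entry.S change above fixes a textbook off-by-one: valid syscall numbers run 0 .. NR_syscalls-1, and an unsigned "higher than NR_syscalls" check lets NR_syscalls itself index one slot past the table. A C sketch of the same bound (names invented):

#include <stdio.h>

#define NR_SYSCALLS 3
static const char *table[NR_SYSCALLS] = { "read", "write", "open" };

static const char *lookup_buggy(unsigned int nr)
{
	if (nr > NR_SYSCALLS)		/* nr == NR_SYSCALLS slips through */
		return "ENOSYS";
	return table[nr];		/* out-of-bounds read for nr == 3 */
}

static const char *lookup_fixed(unsigned int nr)
{
	if (nr > NR_SYSCALLS - 1)	/* mirrors "cmp r8, NR_syscalls - 1" */
		return "ENOSYS";
	return table[nr];
}

int main(void)
{
	(void)lookup_buggy;	/* kept only for comparison; never called at 3 */
	printf("%s\n", lookup_fixed(2));		/* open */
	printf("%s\n", lookup_fixed(NR_SYSCALLS));	/* ENOSYS */
	return 0;
}
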
+diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
+index ce07e697916c8..1bcc6985b9a0e 100644
+--- a/arch/arc/mm/init.c
++++ b/arch/arc/mm/init.c
+@@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
+ 	min_high_pfn = PFN_DOWN(high_mem_start);
+ 	max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+ 
+-	max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
++	/*
++	 * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
++	 * For HIGHMEM without PAE max_high_pfn should be less than
++	 * min_low_pfn to guarantee that these two regions don't overlap.
++	 * For PAE case highmem is greater than lowmem, so it is natural
++	 * to use max_high_pfn.
++	 *
++	 * In both cases, holes should be handled by pfn_valid().
++	 */
++	max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
+ 
+ 	high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
+ 
+diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
+index fac4adc902044..95c649fbc95af 100644
+--- a/arch/arc/mm/ioremap.c
++++ b/arch/arc/mm/ioremap.c
+@@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
+ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+ 			   unsigned long flags)
+ {
++	unsigned int off;
+ 	unsigned long vaddr;
+ 	struct vm_struct *area;
+-	phys_addr_t off, end;
++	phys_addr_t end;
+ 	pgprot_t prot = __pgprot(flags);
+ 
+ 	/* Don't allow wraparound, zero size */
+@@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+ 
+ 	/* Mappings have to be page-aligned */
+ 	off = paddr & ~PAGE_MASK;
+-	paddr &= PAGE_MASK;
++	paddr &= PAGE_MASK_PHYS;
+ 	size = PAGE_ALIGN(end + 1) - paddr;
+ 
+ 	/*
+diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
+index 9bb3c24f36770..9c7c682472896 100644
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
+ 		      pte_t *ptep)
+ {
+ 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+-	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
++	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
+ 	struct page *page = pfn_to_page(pte_pfn(*ptep));
+ 
+ 	create_tlb(vma, vaddr, ptep);
+diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
+index 3bf90d9e33353..a294a02f2d232 100644
+--- a/arch/arm/boot/dts/dra7-l4.dtsi
++++ b/arch/arm/boot/dts/dra7-l4.dtsi
+@@ -1168,7 +1168,7 @@
+ 			};
+ 		};
+ 
+-		target-module@34000 {			/* 0x48034000, ap 7 46.0 */
++		timer3_target: target-module@34000 {	/* 0x48034000, ap 7 46.0 */
+ 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ 			reg = <0x34000 0x4>,
+ 			      <0x34010 0x4>;
+@@ -1195,7 +1195,7 @@
+ 			};
+ 		};
+ 
+-		target-module@36000 {			/* 0x48036000, ap 9 4e.0 */
++		timer4_target: target-module@36000 {	/* 0x48036000, ap 9 4e.0 */
+ 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ 			reg = <0x36000 0x4>,
+ 			      <0x36010 0x4>;
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index ce1194744f840..53d68786a61f2 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -46,6 +46,7 @@
+ 
+ 	timer {
+ 		compatible = "arm,armv7-timer";
++		status = "disabled";	/* See ARM architected timer wrap erratum i940 */
+ 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+@@ -1241,3 +1242,22 @@
+ 		assigned-clock-parents = <&sys_32k_ck>;
+ 	};
+ };
++
++/* Local timers, see ARM architected timer wrap erratum i940 */
++&timer3_target {
++	ti,no-reset-on-init;
++	ti,no-idle;
++	timer@0 {
++		assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
++		assigned-clock-parents = <&timer_sys_clk_div>;
++	};
++};
++
++&timer4_target {
++	ti,no-reset-on-init;
++	ti,no-idle;
++	timer@0 {
++		assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
++		assigned-clock-parents = <&timer_sys_clk_div>;
++	};
++};
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 08660ae9dcbce..b1423fb130ea4 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
+ 			info->trigger = addr;
+ 			pr_debug("breakpoint fired: address = 0x%x\n", addr);
+ 			perf_bp_event(bp, regs);
+-			if (!bp->overflow_handler)
++			if (is_default_overflow_handler(bp))
+ 				enable_single_step(bp, addr);
+ 			goto unlock;
+ 		}
+diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
+index fa284a7260d68..e202e8aa69413 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
+@@ -12,6 +12,14 @@
+ 	model = "Renesas Falcon CPU board";
+ 	compatible = "renesas,falcon-cpu", "renesas,r8a779a0";
+ 
++	aliases {
++		serial0 = &scif0;
++	};
++
++	chosen {
++		stdout-path = "serial0:115200n8";
++	};
++
+ 	memory@48000000 {
+ 		device_type = "memory";
+ 		/* first 128MB is reserved for secure area. */
+diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
+index 5617b81dd7dc3..273857ae38f3d 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
++++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
+@@ -14,11 +14,6 @@
+ 
+ 	aliases {
+ 		ethernet0 = &avb0;
+-		serial0 = &scif0;
+-	};
+-
+-	chosen {
+-		stdout-path = "serial0:115200n8";
+ 	};
+ };
+ 
+diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
+index 1c26d7baa67f8..cfdde3a568059 100644
+--- a/arch/arm64/include/asm/daifflags.h
++++ b/arch/arm64/include/asm/daifflags.h
+@@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
+ 	if (interrupts_enabled(regs))
+ 		trace_hardirqs_on();
+ 
++	if (system_uses_irq_prio_masking())
++		gic_write_pmr(regs->pmr_save);
++
+ 	/*
+ 	 * We can't use local_daif_restore(regs->pstate) here as
+ 	 * system_has_prio_mask_debugging() won't restore the I bit if it can
+diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
+index 9d35884504737..117412bae9155 100644
+--- a/arch/arm64/kernel/entry-common.c
++++ b/arch/arm64/kernel/entry-common.c
+@@ -226,14 +226,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
+ {
+ 	unsigned long far = read_sysreg(far_el1);
+ 
+-	/*
+-	 * The CPU masked interrupts, and we are leaving them masked during
+-	 * do_debug_exception(). Update PMR as if we had called
+-	 * local_daif_mask().
+-	 */
+-	if (system_uses_irq_prio_masking())
+-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+-
+ 	arm64_enter_el1_dbg(regs);
+ 	if (!cortex_a76_erratum_1463225_debug_handler(regs))
+ 		do_debug_exception(far, esr, regs);
+@@ -398,9 +390,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
+ 	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
+ 	unsigned long far = read_sysreg(far_el1);
+ 
+-	if (system_uses_irq_prio_masking())
+-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+-
+ 	enter_from_user_mode();
+ 	do_debug_exception(far, esr, regs);
+ 	local_daif_restore(DAIF_PROCCTX_NOIRQ);
+@@ -408,9 +397,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
+ 
+ static void noinstr el0_svc(struct pt_regs *regs)
+ {
+-	if (system_uses_irq_prio_masking())
+-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+-
+ 	enter_from_user_mode();
+ 	cortex_a76_erratum_1463225_svc_handler();
+ 	do_el0_svc(regs);
+@@ -486,9 +472,6 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
+ 
+ static void noinstr el0_svc_compat(struct pt_regs *regs)
+ {
+-	if (system_uses_irq_prio_masking())
+-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+-
+ 	enter_from_user_mode();
+ 	cortex_a76_erratum_1463225_svc_handler();
+ 	do_el0_svc_compat(regs);
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 6acfc5e6b5e05..e03fba3ae2a0d 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -263,16 +263,16 @@ alternative_else_nop_endif
+ 	stp	lr, x21, [sp, #S_LR]
+ 
+ 	/*
+-	 * For exceptions from EL0, terminate the callchain here.
++	 * For exceptions from EL0, create a terminal frame record.
+ 	 * For exceptions from EL1, create a synthetic frame record so the
+ 	 * interrupted code shows up in the backtrace.
+ 	 */
+ 	.if \el == 0
+-	mov	x29, xzr
++	stp	xzr, xzr, [sp, #S_STACKFRAME]
+ 	.else
+ 	stp	x29, x22, [sp, #S_STACKFRAME]
+-	add	x29, sp, #S_STACKFRAME
+ 	.endif
++	add	x29, sp, #S_STACKFRAME
+ 
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ alternative_if_not ARM64_HAS_PAN
+@@ -292,6 +292,8 @@ alternative_else_nop_endif
+ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ 	mrs_s	x20, SYS_ICC_PMR_EL1
+ 	str	x20, [sp, #S_PMR_SAVE]
++	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
++	msr_s	SYS_ICC_PMR_EL1, x20
+ alternative_else_nop_endif
+ 
+ 	/* Re-enable tag checking (TCO set on exception entry) */
+@@ -493,8 +495,8 @@ tsk	.req	x28		// current thread_info
+ /*
+  * Interrupt handling.
+  */
+-	.macro	irq_handler
+-	ldr_l	x1, handle_arch_irq
++	.macro	irq_handler, handler:req
++	ldr_l	x1, \handler
+ 	mov	x0, sp
+ 	irq_stack_entry
+ 	blr	x1
+@@ -524,13 +526,41 @@ alternative_endif
+ #endif
+ 	.endm
+ 
+-	.macro	gic_prio_irq_setup, pmr:req, tmp:req
+-#ifdef CONFIG_ARM64_PSEUDO_NMI
+-	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+-	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
+-	msr_s	SYS_ICC_PMR_EL1, \tmp
+-	alternative_else_nop_endif
++	.macro el1_interrupt_handler, handler:req
++	enable_da_f
++
++	mov	x0, sp
++	bl	enter_el1_irq_or_nmi
++
++	irq_handler	\handler
++
++#ifdef CONFIG_PREEMPTION
++	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
++alternative_if ARM64_HAS_IRQ_PRIO_MASKING
++	/*
++	 * DA_F were cleared at start of handling. If anything is set in DAIF,
++	 * we come back from an NMI, so skip preemption
++	 */
++	mrs	x0, daif
++	orr	x24, x24, x0
++alternative_else_nop_endif
++	cbnz	x24, 1f				// preempt count != 0 || NMI return path
++	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
++1:
+ #endif
++
++	mov	x0, sp
++	bl	exit_el1_irq_or_nmi
++	.endm
++
++	.macro el0_interrupt_handler, handler:req
++	user_exit_irqoff
++	enable_da_f
++
++	tbz	x22, #55, 1f
++	bl	do_el0_irq_bp_hardening
++1:
++	irq_handler	\handler
+ 	.endm
+ 
+ 	.text
+@@ -662,32 +692,7 @@ SYM_CODE_END(el1_sync)
+ 	.align	6
+ SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
+ 	kernel_entry 1
+-	gic_prio_irq_setup pmr=x20, tmp=x1
+-	enable_da_f
+-
+-	mov	x0, sp
+-	bl	enter_el1_irq_or_nmi
+-
+-	irq_handler
+-
+-#ifdef CONFIG_PREEMPTION
+-	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
+-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+-	/*
+-	 * DA_F were cleared at start of handling. If anything is set in DAIF,
+-	 * we come back from an NMI, so skip preemption
+-	 */
+-	mrs	x0, daif
+-	orr	x24, x24, x0
+-alternative_else_nop_endif
+-	cbnz	x24, 1f				// preempt count != 0 || NMI return path
+-	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
+-1:
+-#endif
+-
+-	mov	x0, sp
+-	bl	exit_el1_irq_or_nmi
+-
++	el1_interrupt_handler handle_arch_irq
+ 	kernel_exit 1
+ SYM_CODE_END(el1_irq)
+ 
+@@ -727,22 +732,13 @@ SYM_CODE_END(el0_error_compat)
+ SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
+ 	kernel_entry 0
+ el0_irq_naked:
+-	gic_prio_irq_setup pmr=x20, tmp=x0
+-	user_exit_irqoff
+-	enable_da_f
+-
+-	tbz	x22, #55, 1f
+-	bl	do_el0_irq_bp_hardening
+-1:
+-	irq_handler
+-
++	el0_interrupt_handler handle_arch_irq
+ 	b	ret_to_user
+ SYM_CODE_END(el0_irq)
+ 
+ SYM_CODE_START_LOCAL(el1_error)
+ 	kernel_entry 1
+ 	mrs	x1, esr_el1
+-	gic_prio_kentry_setup tmp=x2
+ 	enable_dbg
+ 	mov	x0, sp
+ 	bl	do_serror
+@@ -753,7 +749,6 @@ SYM_CODE_START_LOCAL(el0_error)
+ 	kernel_entry 0
+ el0_error_naked:
+ 	mrs	x25, esr_el1
+-	gic_prio_kentry_setup tmp=x2
+ 	user_exit_irqoff
+ 	enable_dbg
+ 	mov	x0, sp
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index d55bdfb7789c1..7032a5f9e624c 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -44,10 +44,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+ 	unsigned long fp = frame->fp;
+ 	struct stack_info info;
+ 
+-	/* Terminal record; nothing to unwind */
+-	if (!fp)
+-		return -ENOENT;
+-
+ 	if (fp & 0xf)
+ 		return -EINVAL;
+ 
+@@ -108,6 +104,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+ 
+ 	frame->pc = ptrauth_strip_insn_pac(frame->pc);
+ 
++	/*
++	 * This is a terminal record, so we have finished unwinding.
++	 */
++	if (!frame->fp && !frame->pc)
++		return -ENOENT;
++
+ 	return 0;
+ }
+ NOKPROBE_SYMBOL(unwind_frame);
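
This unwinder change replaces the up-front fp == 0 bail-out with a check on the record just loaded: a frame whose saved fp and pc are both zero is the terminal record that kernel_entry now writes for EL0 exceptions, and only then does unwinding report -ENOENT. A hedged userspace sketch of that loop shape, walking a synthetic frame chain (the struct layout and values are invented for illustration):

    #include <errno.h>
    #include <stdio.h>

    struct frame { unsigned long fp, pc; };

    /* Unwind one step: load the caller's record from *fp, then report
     * -ENOENT once the freshly loaded record is the all-zero terminal one. */
    static int unwind_frame(struct frame *f)
    {
        const struct frame *rec;

        if (f->fp & 0xf)                /* records stay 16-byte aligned */
            return -EINVAL;
        rec = (const struct frame *)f->fp;

        f->fp = rec->fp;
        f->pc = rec->pc;

        if (!f->fp && !f->pc)           /* terminal record: done unwinding */
            return -ENOENT;
        return 0;
    }

    int main(void)
    {
        static struct frame term __attribute__((aligned(16))) = { 0, 0 };
        static struct frame mid  __attribute__((aligned(16)));
        struct frame cur = { (unsigned long)&mid, 0xabcd };

        mid.fp = (unsigned long)&term;
        mid.pc = 0x1234;

        do
            printf("pc=%#lx\n", cur.pc); /* both real frames are reported */
        while (!unwind_frame(&cur));
        return 0;
    }
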
+diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
+index ac485163a4a76..6d44c028d1c9e 100644
+--- a/arch/arm64/mm/flush.c
++++ b/arch/arm64/mm/flush.c
+@@ -55,8 +55,10 @@ void __sync_icache_dcache(pte_t pte)
+ {
+ 	struct page *page = pte_page(pte);
+ 
+-	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
++	if (!test_bit(PG_dcache_clean, &page->flags)) {
+ 		sync_icache_aliases(page_address(page), page_size(page));
++		set_bit(PG_dcache_clean, &page->flags);
++	}
+ }
+ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
+ 
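
The __sync_icache_dcache() change is an ordering fix: with test_and_set_bit(), PG_dcache_clean became visible before sync_icache_aliases() ran, so a concurrent observer could see the page marked clean while the I-cache was still stale. Setting the bit only after the sync closes that window, at the cost of occasionally letting two CPUs perform the (idempotent) sync. A rough C11-atomics analogue of publish-after-work, with release/acquire standing in for the kernel's barriers:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool clean;       /* analogue of PG_dcache_clean */
    static int caches_agree;        /* analogue of I-cache matching D-cache */

    static void sync_caches(void)
    {
        caches_agree = 1;           /* the expensive flush work */
    }

    static void make_clean(void)
    {
        /* Test first, publish last: any thread that later observes
         * clean == true (acquire) is guaranteed to see caches_agree == 1. */
        if (!atomic_load_explicit(&clean, memory_order_acquire)) {
            sync_caches();
            atomic_store_explicit(&clean, true, memory_order_release);
        }
    }

    int main(void)
    {
        make_clean();
        printf("clean=%d agree=%d\n", atomic_load(&clean), caches_agree);
        return 0;
    }
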
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index c967bfd30d2b5..b183216a591cc 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -444,6 +444,18 @@ SYM_FUNC_START(__cpu_setup)
+ 	mov	x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
+ 	msr_s	SYS_GCR_EL1, x10
+ 
++	/*
++	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
++	 * RGSR_EL1.SEED must be non-zero for IRG to produce
++	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
++	 * must initialize it.
++	 */
++	mrs	x10, CNTVCT_EL0
++	ands	x10, x10, #SYS_RGSR_EL1_SEED_MASK
++	csinc	x10, x10, xzr, ne
++	lsl	x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
++	msr_s	SYS_RGSR_EL1, x10
++
+ 	/* clear any pending tag check faults in TFSR*_EL1 */
+ 	msr_s	SYS_TFSR_EL1, xzr
+ 	msr_s	SYS_TFSRE0_EL1, xzr
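
The __cpu_setup addition seeds RGSR_EL1 from CNTVCT_EL0 and uses csinc so a masked value of zero is bumped to one, since IRG needs a non-zero seed. The same derivation in C (the mask and shift below are hypothetical placeholders for the real SYS_RGSR_EL1_SEED_* constants):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define SEED_MASK  0xffffu  /* hypothetical RGSR_EL1.SEED field width */
    #define SEED_SHIFT 8        /* hypothetical field offset */

    static uint64_t derive_seed(uint64_t counter)
    {
        uint64_t seed = counter & SEED_MASK;

        if (seed == 0)          /* csinc: a masked value of 0 becomes 1 */
            seed = 1;
        return seed << SEED_SHIFT;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)derive_seed((uint64_t)time(NULL)));
        printf("%#llx\n", (unsigned long long)derive_seed(0)); /* never zero */
        return 0;
    }
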
+diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
+index 5a29652e6defc..7271b9c5fc760 100644
+--- a/arch/ia64/include/asm/module.h
++++ b/arch/ia64/include/asm/module.h
+@@ -14,16 +14,20 @@
+ struct elf64_shdr;			/* forward declration */
+ 
+ struct mod_arch_specific {
++	/* Used only at module load time. */
+ 	struct elf64_shdr *core_plt;	/* core PLT section */
+ 	struct elf64_shdr *init_plt;	/* init PLT section */
+ 	struct elf64_shdr *got;		/* global offset table */
+ 	struct elf64_shdr *opd;		/* official procedure descriptors */
+ 	struct elf64_shdr *unwind;	/* unwind-table section */
+ 	unsigned long gp;		/* global-pointer for module */
++	unsigned int next_got_entry;	/* index of next available got entry */
+ 
++	/* Used at module run and cleanup time. */
+ 	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
+ 	void *init_unw_table;		/* init unwind-table cookie returned by unwinder */
+-	unsigned int next_got_entry;	/* index of next available got entry */
++	void *opd_addr;			/* symbolize uses .opd to get to actual function */
++	unsigned long opd_size;
+ };
+ 
+ #define ARCH_SHF_SMALL	SHF_IA_64_SHORT
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 00a496cb346f6..2cba53c1da82e 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
+ int
+ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
+ {
++	struct mod_arch_specific *mas = &mod->arch;
++
+ 	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
+-	if (mod->arch.unwind)
++	if (mas->unwind)
+ 		register_unwind_table(mod);
++
++	/*
++	 * ".opd" was already relocated to the final destination. Store
++	 * its address for use in symbolizer.
++	 */
++	mas->opd_addr = (void *)mas->opd->sh_addr;
++	mas->opd_size = mas->opd->sh_size;
++
++	/*
++	 * Module relocation was already done at this point. Section
++	 * headers are about to be deleted. Wipe out load-time context.
++	 */
++	mas->core_plt = NULL;
++	mas->init_plt = NULL;
++	mas->got = NULL;
++	mas->opd = NULL;
++	mas->unwind = NULL;
++	mas->gp = 0;
++	mas->next_got_entry = 0;
++
+ 	return 0;
+ }
+ 
+@@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
+ 
+ void *dereference_module_function_descriptor(struct module *mod, void *ptr)
+ {
+-	Elf64_Shdr *opd = mod->arch.opd;
++	struct mod_arch_specific *mas = &mod->arch;
+ 
+-	if (ptr < (void *)opd->sh_addr ||
+-			ptr >= (void *)(opd->sh_addr + opd->sh_size))
++	if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
+ 		return ptr;
+ 
+ 	return dereference_function_descriptor(ptr);
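
The ia64 change caches the .opd address and size in module_finalize() because the ELF section headers it previously consulted are freed once loading completes; the descriptor lookup then only needs the cached range. A small sketch of that cached range test (the types and the fake descriptor are invented for illustration):

    #include <stdio.h>

    struct mod_arch { void *opd_addr; unsigned long opd_size; };

    /* If ptr lands inside the cached .opd range it is a function
     * descriptor; its first word is the real entry point. */
    static void *deref_descriptor(const struct mod_arch *mas, void *ptr)
    {
        char *p = ptr, *base = mas->opd_addr;

        if (p < base || p >= base + mas->opd_size)
            return ptr;
        return *(void **)ptr;
    }

    int main(void)
    {
        void *entry = (void *)0x4000;           /* fake code address */
        void *opd[2] = { entry, 0 };            /* fake descriptor */
        struct mod_arch mas = { opd, sizeof(opd) };

        printf("%p %p\n",
               deref_descriptor(&mas, opd),     /* descriptor -> 0x4000 */
               deref_descriptor(&mas, entry));  /* out of range, unchanged */
        return 0;
    }
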
+diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h
+index dc5ea57364408..ceece76fc971a 100644
+--- a/arch/mips/include/asm/div64.h
++++ b/arch/mips/include/asm/div64.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2000, 2004  Maciej W. Rozycki
++ * Copyright (C) 2000, 2004, 2021  Maciej W. Rozycki
+  * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
+  *
+  * This file is subject to the terms and conditions of the GNU General Public
+@@ -9,25 +9,18 @@
+ #ifndef __ASM_DIV64_H
+ #define __ASM_DIV64_H
+ 
+-#include <asm-generic/div64.h>
+-
+-#if BITS_PER_LONG == 64
++#include <asm/bitsperlong.h>
+ 
+-#include <linux/types.h>
++#if BITS_PER_LONG == 32
+ 
+ /*
+  * No traps on overflows for any of these...
+  */
+ 
+-#define __div64_32(n, base)						\
+-({									\
++#define do_div64_32(res, high, low, base) ({				\
+ 	unsigned long __cf, __tmp, __tmp2, __i;				\
+ 	unsigned long __quot32, __mod32;				\
+-	unsigned long __high, __low;					\
+-	unsigned long long __n;						\
+ 									\
+-	__high = *__n >> 32;						\
+-	__low = __n;							\
+ 	__asm__(							\
+ 	"	.set	push					\n"	\
+ 	"	.set	noat					\n"	\
+@@ -51,18 +44,48 @@
+ 	"	subu	%0, %0, %z6				\n"	\
+ 	"	addiu	%2, %2, 1				\n"	\
+ 	"3:							\n"	\
+-	"	bnez	%4, 0b\n\t"					\
+-	"	 srl	%5, %1, 0x1f\n\t"				\
++	"	bnez	%4, 0b					\n"	\
++	"	 srl	%5, %1, 0x1f				\n"	\
+ 	"	.set	pop"						\
+ 	: "=&r" (__mod32), "=&r" (__tmp),				\
+ 	  "=&r" (__quot32), "=&r" (__cf),				\
+ 	  "=&r" (__i), "=&r" (__tmp2)					\
+-	: "Jr" (base), "0" (__high), "1" (__low));			\
++	: "Jr" (base), "0" (high), "1" (low));				\
+ 									\
+-	(__n) = __quot32;						\
++	(res) = __quot32;						\
+ 	__mod32;							\
+ })
+ 
+-#endif /* BITS_PER_LONG == 64 */
++#define __div64_32(n, base) ({						\
++	unsigned long __upper, __low, __high, __radix;			\
++	unsigned long long __quot;					\
++	unsigned long long __div;					\
++	unsigned long __mod;						\
++									\
++	__div = (*n);							\
++	__radix = (base);						\
++									\
++	__high = __div >> 32;						\
++	__low = __div;							\
++									\
++	if (__high < __radix) {						\
++		__upper = __high;					\
++		__high = 0;						\
++	} else {							\
++		__upper = __high % __radix;				\
++		__high /= __radix;					\
++	}								\
++									\
++	__mod = do_div64_32(__low, __upper, __low, __radix);		\
++									\
++	__quot = __high;						\
++	__quot = __quot << 32 | __low;					\
++	(*n) = __quot;							\
++	__mod;								\
++})
++
++#endif /* BITS_PER_LONG == 32 */
++
++#include <asm-generic/div64.h>
+ 
+ #endif /* __ASM_DIV64_H */
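
The rewritten __div64_32() decomposes the 64-by-32 division so each step's quotient fits in 32 bits: if the high word is already below the divisor it feeds directly into the (upper:low)/base step, otherwise the high word is divided first and its remainder becomes the upper half. Because upper < base, the second quotient is provably below 2^32. A portable C version of the decomposition, checked against native 64-bit division (the native divide stands in for the hand-written asm helper):

    #include <stdint.h>
    #include <stdio.h>

    /* Divide *n by base in two 32-bit-safe steps; return the remainder. */
    static uint32_t div64_32(uint64_t *n, uint32_t base)
    {
        uint32_t high = *n >> 32, low = (uint32_t)*n, upper;
        uint64_t step;

        if (high < base) {          /* single-step case */
            upper = high;
            high = 0;
        } else {                    /* divide the high word first */
            upper = high % base;
            high /= base;
        }

        /* upper < base, so this quotient is guaranteed to fit in 32 bits. */
        step = ((uint64_t)upper << 32) | low;
        *n = ((uint64_t)high << 32) | (uint32_t)(step / base);
        return (uint32_t)(step % base);
    }

    int main(void)
    {
        uint64_t n = 0xdeadbeefcafebabeULL, m = n;
        uint32_t r = div64_32(&n, 1000000007u);

        printf("q=%llu r=%u ok=%d\n", (unsigned long long)n, r,
               n == m / 1000000007u && r == m % 1000000007u);
        return 0;
    }
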
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index b71892064f273..0ef240adefb59 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1752,7 +1752,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 			set_isa(c, MIPS_CPU_ISA_M64R2);
+ 			break;
+ 		}
+-		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ 		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
+ 				MIPS_ASE_LOONGSON_EXT2);
+ 		break;
+@@ -1782,7 +1781,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 		 * register, we correct it here.
+ 		 */
+ 		c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+-		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ 		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ 			MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ 		c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
+@@ -1793,7 +1791,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 		set_elf_platform(cpu, "loongson3a");
+ 		set_isa(c, MIPS_CPU_ISA_M64R2);
+ 		decode_cpucfg(c);
+-		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ 		break;
+ 	default:
+ 		panic("Unknown Loongson Processor ID!");
+diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
+index e8d09a841373b..31ed5356590a2 100644
+--- a/arch/powerpc/include/asm/interrupt.h
++++ b/arch/powerpc/include/asm/interrupt.h
+@@ -138,6 +138,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
+ 	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
+ 	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ 
++	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
++				regs->nip < (unsigned long)__end_interrupts) {
++		// Kernel code running below __end_interrupts is
++		// implicitly soft-masked.
++		regs->softe = IRQS_ALL_DISABLED;
++	}
++
+ 	/* Don't do any per-CPU operations until interrupt state is fixed */
+ #endif
+ 	/* Allow DEC and PMI to be traced when they are soft-NMI */
+diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
+index 5d4706c145727..cf8ca08295bff 100644
+--- a/arch/powerpc/kernel/head_32.h
++++ b/arch/powerpc/kernel/head_32.h
+@@ -261,11 +261,7 @@ label:
+ 	lis	r1, emergency_ctx@ha
+ #endif
+ 	lwz	r1, emergency_ctx@l(r1)
+-	cmpwi	cr1, r1, 0
+-	bne	cr1, 1f
+-	lis	r1, init_thread_union@ha
+-	addi	r1, r1, init_thread_union@l
+-1:	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
++	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ 	EXCEPTION_PROLOG_2
+ 	SAVE_NVGPRS(r11)
+ 	addi	r3, r1, STACK_FRAME_OVERHEAD
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index c00214a4355c8..4023f91defa68 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -1096,7 +1096,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
+ 
+ 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
+ 	for (i = 0; i < tbl->nr_pools; i++)
+-		spin_lock(&tbl->pools[i].lock);
++		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
+ 
+ 	iommu_table_release_pages(tbl);
+ 
+@@ -1124,7 +1124,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
+ 
+ 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
+ 	for (i = 0; i < tbl->nr_pools; i++)
+-		spin_lock(&tbl->pools[i].lock);
++		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
+ 
+ 	memset(tbl->it_map, 0, sz);
+ 
+diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
+index 8ba49a6bf5159..d7c1f92152af6 100644
+--- a/arch/powerpc/kernel/setup_32.c
++++ b/arch/powerpc/kernel/setup_32.c
+@@ -164,7 +164,7 @@ void __init irqstack_early_init(void)
+ }
+ 
+ #ifdef CONFIG_VMAP_STACK
+-void *emergency_ctx[NR_CPUS] __ro_after_init;
++void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
+ 
+ void __init emergency_stack_init(void)
+ {
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 5c7ce1d506312..c2473e20f5f5b 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -1546,6 +1546,9 @@ void start_secondary(void *unused)
+ 
+ 	vdso_getcpu_init();
+ #endif
++	set_numa_node(numa_cpu_lookup_table[cpu]);
++	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
++
+ 	/* Update topology CPU masks */
+ 	add_cpu_to_masks(cpu);
+ 
+@@ -1564,9 +1567,6 @@ void start_secondary(void *unused)
+ 			shared_caches = true;
+ 	}
+ 
+-	set_numa_node(numa_cpu_lookup_table[cpu]);
+-	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
+-
+ 	smp_wmb();
+ 	notify_cpu_starting(cpu);
+ 	set_cpu_online(cpu, true);
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 1fd31b4b0e139..0aefa6a4a259b 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -14,6 +14,7 @@
+ #include <linux/string.h>
+ #include <linux/init.h>
+ #include <linux/sched/mm.h>
++#include <linux/stop_machine.h>
+ #include <asm/cputable.h>
+ #include <asm/code-patching.h>
+ #include <asm/page.h>
+@@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
+ 		                                           : "unknown");
+ }
+ 
++static int __do_stf_barrier_fixups(void *data)
++{
++	enum stf_barrier_type *types = data;
++
++	do_stf_entry_barrier_fixups(*types);
++	do_stf_exit_barrier_fixups(*types);
++
++	return 0;
++}
+ 
+ void do_stf_barrier_fixups(enum stf_barrier_type types)
+ {
+-	do_stf_entry_barrier_fixups(types);
+-	do_stf_exit_barrier_fixups(types);
++	/*
++	 * The call to the fallback entry flush, and the fallback/sync-ori exit
++	 * flush can not be safely patched in/out while other CPUs are executing
++	 * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
++	 * spin in the stop machine core with interrupts hard disabled.
++	 */
++	stop_machine(__do_stf_barrier_fixups, &types, NULL);
+ }
+ 
+ void do_uaccess_flush_fixups(enum l1d_flush_type types)
+@@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
+ 						: "unknown");
+ }
+ 
+-void do_entry_flush_fixups(enum l1d_flush_type types)
++static int __do_entry_flush_fixups(void *data)
+ {
++	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
+ 	unsigned int instrs[3], *dest;
+ 	long *start, *end;
+ 	int i;
+@@ -354,6 +370,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
+ 							: "ori type" :
+ 		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
+ 						: "unknown");
++
++	return 0;
++}
++
++void do_entry_flush_fixups(enum l1d_flush_type types)
++{
++	/*
++	 * The call to the fallback flush can not be safely patched in/out while
++	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
++	 * CPU while all other CPUs spin in the stop machine core with interrupts
++	 * hard disabled.
++	 */
++	stop_machine(__do_entry_flush_fixups, &types, NULL);
+ }
+ 
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
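
Routing both fixups through stop_machine() means the patching function runs on a single CPU while every other CPU spins with interrupts hard-disabled, so no CPU can execute the flush sequences mid-patch. As a loose userspace analogue of the rendezvous (it models the serialization only, not the interrupt masking), all threads park on a barrier while exactly one mutates shared state; build with -pthread:

    #include <pthread.h>
    #include <stdio.h>

    #define NTHREADS 4

    static pthread_barrier_t enter, leave;
    static int patched;             /* stands in for live kernel text */

    static void *cpu(void *arg)
    {
        long id = (long)arg;

        pthread_barrier_wait(&enter);   /* all park, as under stop_machine */
        if (id == 0)
            patched = 42;               /* exactly one "CPU" patches */
        pthread_barrier_wait(&leave);   /* all resume with the new state */
        printf("thread %ld sees %d\n", id, patched);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NTHREADS];
        long i;

        pthread_barrier_init(&enter, NULL, NTHREADS);
        pthread_barrier_init(&leave, NULL, NTHREADS);
        for (i = 0; i < NTHREADS; i++)
            pthread_create(&t[i], NULL, cpu, (void *)i);
        for (i = 0; i < NTHREADS; i++)
            pthread_join(t[i], NULL);
        return 0;
    }
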
+diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
+index 7719995323c3f..12de1906e97bc 100644
+--- a/arch/powerpc/mm/book3s64/hash_utils.c
++++ b/arch/powerpc/mm/book3s64/hash_utils.c
+@@ -338,7 +338,7 @@ repeat:
+ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
+ 		      int psize, int ssize)
+ {
+-	unsigned long vaddr;
++	unsigned long vaddr, time_limit;
+ 	unsigned int step, shift;
+ 	int rc;
+ 	int ret = 0;
+@@ -351,8 +351,19 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
+ 
+ 	/* Unmap the full range specificied */
+ 	vaddr = ALIGN_DOWN(vstart, step);
++	time_limit = jiffies + HZ;
++
+ 	for (;vaddr < vend; vaddr += step) {
+ 		rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
++
++		/*
++		 * For large number of mappings introduce a cond_resched()
++		 * to prevent softlockup warnings.
++		 */
++		if (time_after(jiffies, time_limit)) {
++			cond_resched();
++			time_limit = jiffies + HZ;
++		}
+ 		if (rc == -ENOENT) {
+ 			ret = -ENOENT;
+ 			continue;
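
The htab_remove_mapping() hunk paces a potentially huge teardown loop: once a second of jiffies has elapsed it calls cond_resched() and restarts the window, which keeps the soft-lockup watchdog quiet without yielding on every iteration. The same pattern in userspace terms, with clock_gettime() standing in for jiffies and sched_yield() for cond_resched() (the 50 ms window is shortened from the kernel's one-second HZ just for the demo):

    #include <sched.h>
    #include <stdio.h>
    #include <time.h>

    static double now(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
    }

    int main(void)
    {
        double limit = now() + 0.05;    /* analogue of jiffies + HZ */
        volatile unsigned long acc = 0;
        long i, yields = 0;

        for (i = 0; i < 200000000L; i++) {
            acc += i;                   /* one unit of teardown work */
            if ((i & 0xfffff) == 0 && now() > limit) {
                sched_yield();          /* cond_resched() stand-in */
                yields++;
                limit = now() + 0.05;   /* restart the window */
            }
        }
        printf("yielded %ld times\n", yields);
        return 0;
    }
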
+diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
+index 019669eb21d27..4ab7c3ef5826c 100644
+--- a/arch/powerpc/platforms/powernv/memtrace.c
++++ b/arch/powerpc/platforms/powernv/memtrace.c
+@@ -88,8 +88,8 @@ static void memtrace_clear_range(unsigned long start_pfn,
+ 	 * Before we go ahead and use this range as cache inhibited range
+ 	 * flush the cache.
+ 	 */
+-	flush_dcache_range_chunked(PFN_PHYS(start_pfn),
+-				   PFN_PHYS(start_pfn + nr_pages),
++	flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
++				   (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
+ 				   FLUSH_CHUNK_SIZE);
+ }
+ 
+diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+index 12cbffd3c2e32..325f3b220f360 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+@@ -47,9 +47,6 @@ static void rtas_stop_self(void)
+ 
+ 	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
+ 
+-	printk("cpu %u (hwid %u) Ready to die...\n",
+-	       smp_processor_id(), hard_smp_processor_id());
+-
+ 	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
+ 
+ 	panic("Alas, I survived.\n");
+diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
+index 5cacb632eb37a..31b657c377353 100644
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -1341,17 +1341,14 @@ static int xive_prepare_cpu(unsigned int cpu)
+ 
+ 	xc = per_cpu(xive_cpu, cpu);
+ 	if (!xc) {
+-		struct device_node *np;
+-
+ 		xc = kzalloc_node(sizeof(struct xive_cpu),
+ 				  GFP_KERNEL, cpu_to_node(cpu));
+ 		if (!xc)
+ 			return -ENOMEM;
+-		np = of_get_cpu_node(cpu, NULL);
+-		if (np)
+-			xc->chip_id = of_get_ibm_chip_id(np);
+-		of_node_put(np);
+ 		xc->hw_ipi = XIVE_BAD_IRQ;
++		xc->chip_id = XIVE_INVALID_CHIP_ID;
++		if (xive_ops->prepare_cpu)
++			xive_ops->prepare_cpu(cpu, xc);
+ 
+ 		per_cpu(xive_cpu, cpu) = xc;
+ 	}
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index 05a800a3104ed..57e3f15404354 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -380,6 +380,11 @@ static void xive_native_update_pending(struct xive_cpu *xc)
+ 	}
+ }
+ 
++static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
++{
++	xc->chip_id = cpu_to_chip_id(cpu);
++}
++
+ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
+ {
+ 	s64 rc;
+@@ -462,6 +467,7 @@ static const struct xive_ops xive_native_ops = {
+ 	.match			= xive_native_match,
+ 	.shutdown		= xive_native_shutdown,
+ 	.update_pending		= xive_native_update_pending,
++	.prepare_cpu		= xive_native_prepare_cpu,
+ 	.setup_cpu		= xive_native_setup_cpu,
+ 	.teardown_cpu		= xive_native_teardown_cpu,
+ 	.sync_source		= xive_native_sync_source,
+diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
+index 9cf57c722faa3..6478be19b4d36 100644
+--- a/arch/powerpc/sysdev/xive/xive-internal.h
++++ b/arch/powerpc/sysdev/xive/xive-internal.h
+@@ -46,6 +46,7 @@ struct xive_ops {
+ 				  u32 *sw_irq);
+ 	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
+ 	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
++	void	(*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
+ 	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
+ 	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
+ 	bool	(*match)(struct device_node *np);
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index 4515a10c5d222..d9522fc35ca5a 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -227,7 +227,7 @@ config ARCH_RV64I
+ 	bool "RV64I"
+ 	select 64BIT
+ 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+-	select HAVE_DYNAMIC_FTRACE if MMU
++	select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
+ 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ 	select HAVE_FTRACE_MCOUNT_RECORD
+ 	select HAVE_FUNCTION_GRAPH_TRACER
+diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
+index ea028d9e0d242..d44567490d911 100644
+--- a/arch/riscv/kernel/smp.c
++++ b/arch/riscv/kernel/smp.c
+@@ -54,7 +54,7 @@ int riscv_hartid_to_cpuid(int hartid)
+ 			return i;
+ 
+ 	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
+-	return i;
++	return -ENOENT;
+ }
+ 
+ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
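
The riscv_hartid_to_cpuid() fix matters because falling out of the loop left i == NR_CPUS, and returning that looked like a plausible cpu id to callers; a negative errno is unambiguous. A minimal sketch of the corrected lookup (the mapping table is hypothetical):

    #include <errno.h>
    #include <stdio.h>

    #define NR_CPUS 4
    static const int cpuid_to_hartid[NR_CPUS] = { 3, 1, 0, 2 };

    static int hartid_to_cpuid(int hartid)
    {
        int i;

        for (i = 0; i < NR_CPUS; i++)
            if (cpuid_to_hartid[i] == hartid)
                return i;
        /* Returning i here would hand back NR_CPUS, which callers could
         * mistake for a valid cpu id; a negative errno is unambiguous. */
        return -ENOENT;
    }

    int main(void)
    {
        printf("%d %d\n", hartid_to_cpuid(2), hartid_to_cpuid(9));
        return 0;
    }
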
+diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
+index f5beecdac6938..e76b221570999 100644
+--- a/arch/sh/kernel/traps.c
++++ b/arch/sh/kernel/traps.c
+@@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
+ 
+ BUILD_TRAP_HANDLER(nmi)
+ {
+-	unsigned int cpu = smp_processor_id();
+ 	TRAP_HANDLER_DECL;
+ 
+ 	arch_ftrace_nmi_enter();
+diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
+index 5eb3bdf36a419..06b0789d61b91 100644
+--- a/arch/x86/include/asm/idtentry.h
++++ b/arch/x86/include/asm/idtentry.h
+@@ -588,6 +588,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC,	xenpv_exc_machine_check);
+ #endif
+ 
+ /* NMI */
++
++#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
++/*
++ * Special NOIST entry point for VMX which invokes this on the kernel
++ * stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI
++ * 'executing' marker.
++ *
++ * On 32bit this just uses the regular NMI entry point because 32-bit does
++ * not have ISTs.
++ */
++DECLARE_IDTENTRY(X86_TRAP_NMI,		exc_nmi_noist);
++#else
++#define asm_exc_nmi_noist		asm_exc_nmi
++#endif
++
+ DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,	exc_nmi);
+ #ifdef CONFIG_XEN_PV
+ DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,	xenpv_exc_nmi);
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 3768819693e5c..eec2dcca2f390 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1753,6 +1753,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
+ 		    unsigned long icr, int op_64_bit);
+ 
+ void kvm_define_user_return_msr(unsigned index, u32 msr);
++int kvm_probe_user_return_msr(u32 msr);
+ int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
+ 
+ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index f1b9ed5efaa90..908bcaea13617 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -804,8 +804,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
+ 
+ #ifdef CONFIG_CPU_SUP_AMD
+ extern u32 amd_get_nodes_per_socket(void);
++extern u32 amd_get_highest_perf(void);
+ #else
+ static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
++static inline u32 amd_get_highest_perf(void)		{ return 0; }
+ #endif
+ 
+ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 347a956f71ca0..eedb2b320946f 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1170,3 +1170,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
+ 		break;
+ 	}
+ }
++
++u32 amd_get_highest_perf(void)
++{
++	struct cpuinfo_x86 *c = &boot_cpu_data;
++
++	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
++			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
++		return 166;
++
++	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
++			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
++		return 166;
++
++	return 255;
++}
++EXPORT_SYMBOL_GPL(amd_get_highest_perf);
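
amd_get_highest_perf() substitutes a corrected ceiling (166 on the listed model ranges, 255 elsewhere) for the inflated CPPC highest_perf, and smpboot's amd_set_max_freq_ratio() below divides it by nominal_perf to build the frequency-invariance ratio. A sketch of that downstream computation (the nominal_perf value here is a made-up CPPC reading):

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10     /* kernel fixed point: 1024 == 1.0 */

    int main(void)
    {
        /* 166 is the corrected ceiling the patch returns for the affected
         * models; 120 is an invented nominal_perf for illustration. */
        uint64_t highest_perf = 166, nominal_perf = 120;
        uint64_t ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;

        printf("max/nominal ratio = %llu/1024 (~%.2fx)\n",
               (unsigned long long)ratio, ratio / 1024.0);
        return 0;
    }
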
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index bf250a339655f..2ef961cf4cfc5 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -524,6 +524,16 @@ nmi_restart:
+ 		mds_user_clear_cpu_buffers();
+ }
+ 
++#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
++DEFINE_IDTENTRY_RAW(exc_nmi_noist)
++{
++	exc_nmi(regs);
++}
++#endif
++#if IS_MODULE(CONFIG_KVM_INTEL)
++EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
++#endif
++
+ void stop_nmi(void)
+ {
+ 	ignore_nmis++;
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 6b08d1eb173fd..363b36bbd791a 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -2046,7 +2046,7 @@ static bool amd_set_max_freq_ratio(void)
+ 		return false;
+ 	}
+ 
+-	highest_perf = perf_caps.highest_perf;
++	highest_perf = amd_get_highest_perf();
+ 	nominal_perf = perf_caps.nominal_perf;
+ 
+ 	if (!highest_perf || !nominal_perf) {
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 6bd2f8b830e49..62f795352c024 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -589,7 +589,8 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
+ 	case 7:
+ 		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ 		entry->eax = 0;
+-		entry->ecx = F(RDPID);
++		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
++			entry->ecx = F(RDPID);
+ 		++array->nent;
+ 	default:
+ 		break;
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index abd9a4db11a88..8fc71e70857d0 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4502,7 +4502,7 @@ static const struct opcode group8[] = {
+  * from the register case of group9.
+  */
+ static const struct gprefix pfx_0f_c7_7 = {
+-	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
++	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
+ };
+ 
+ 
+diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
+index 0d359115429ad..f016838faedd6 100644
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -468,6 +468,7 @@ enum x86_intercept {
+ 	x86_intercept_clgi,
+ 	x86_intercept_skinit,
+ 	x86_intercept_rdtscp,
++	x86_intercept_rdpid,
+ 	x86_intercept_icebp,
+ 	x86_intercept_wbinvd,
+ 	x86_intercept_monitor,
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 49a839d0567a5..fa023f3feb25d 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1913,8 +1913,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+ 	if (!apic->lapic_timer.hv_timer_in_use)
+ 		goto out;
+ 	WARN_ON(rcuwait_active(&vcpu->wait));
+-	cancel_hv_timer(apic);
+ 	apic_timer_expired(apic, false);
++	cancel_hv_timer(apic);
+ 
+ 	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
+ 		advance_periodic_target_expiration(apic);
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 019130011d0fc..dbc6214d69def 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -1668,7 +1668,7 @@ vmgexit_err:
+ 	return -EINVAL;
+ }
+ 
+-static void pre_sev_es_run(struct vcpu_svm *svm)
++void sev_es_unmap_ghcb(struct vcpu_svm *svm)
+ {
+ 	if (!svm->ghcb)
+ 		return;
+@@ -1704,9 +1704,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
+ 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+ 	int asid = sev_get_asid(svm->vcpu.kvm);
+ 
+-	/* Perform any SEV-ES pre-run actions */
+-	pre_sev_es_run(svm);
+-
+ 	/* Assign the asid allocated with this SEV guest */
+ 	svm->asid = asid;
+ 
+@@ -2106,5 +2103,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+ 	 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
+ 	 * non-zero value.
+ 	 */
++	if (!svm->ghcb)
++		return;
++
+ 	ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+ }
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 309725151313d..48ee3deab64b1 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1416,6 +1416,9 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
+ 	struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+ 	unsigned int i;
+ 
++	if (sev_es_guest(vcpu->kvm))
++		sev_es_unmap_ghcb(svm);
++
+ 	if (svm->guest_state_loaded)
+ 		return;
+ 
+@@ -2738,7 +2741,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
+ 			return 1;
+ 		if (!msr_info->host_initiated &&
+-		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+ 			return 1;
+ 		msr_info->data = svm->tsc_aux;
+ 		break;
+@@ -2811,7 +2815,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+-	if (!sev_es_guest(svm->vcpu.kvm) || !err)
++	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
+ 		return kvm_complete_insn_gp(&svm->vcpu, err);
+ 
+ 	ghcb_set_sw_exit_info_1(svm->ghcb, 1);
+@@ -2949,7 +2953,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 			return 1;
+ 
+ 		if (!msr->host_initiated &&
+-		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+ 			return 1;
+ 
+ 		/*
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index 39e071fdab0ca..98da0b91f273b 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -571,6 +571,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
+ void sev_es_create_vcpu(struct vcpu_svm *svm);
+ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
+ void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
++void sev_es_unmap_ghcb(struct vcpu_svm *svm);
+ 
+ /* vmenter.S */
+ 
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 1727057c53130..4ba2a43e188b5 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3100,15 +3100,8 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
+ 			nested_vmx_handle_enlightened_vmptrld(vcpu, false);
+ 
+ 		if (evmptrld_status == EVMPTRLD_VMFAIL ||
+-		    evmptrld_status == EVMPTRLD_ERROR) {
+-			pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
+-					     __func__);
+-			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+-			vcpu->run->internal.suberror =
+-				KVM_INTERNAL_ERROR_EMULATION;
+-			vcpu->run->internal.ndata = 0;
++		    evmptrld_status == EVMPTRLD_ERROR)
+ 			return false;
+-		}
+ 	}
+ 
+ 	return true;
+@@ -3196,8 +3189,16 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+ 
+ static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+ {
+-	if (!nested_get_evmcs_page(vcpu))
++	if (!nested_get_evmcs_page(vcpu)) {
++		pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
++				     __func__);
++		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
++		vcpu->run->internal.suberror =
++			KVM_INTERNAL_ERROR_EMULATION;
++		vcpu->run->internal.ndata = 0;
++
+ 		return false;
++	}
+ 
+ 	if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+ 		return false;
+@@ -4424,7 +4425,15 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ 	/* trying to cancel vmlaunch/vmresume is a bug */
+ 	WARN_ON_ONCE(vmx->nested.nested_run_pending);
+ 
+-	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
++	if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
++		/*
++		 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
++		 * Enlightened VMCS after migration and we still need to
++		 * do that when something is forcing L2->L1 exit prior to
++		 * the first L2 run.
++		 */
++		(void)nested_get_evmcs_page(vcpu);
++	}
+ 
+ 	/* Service the TLB flush request for L2 before switching to L1. */
+ 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index f705e0d9f1618..f68ed9a1abcc9 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -36,6 +36,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/desc.h>
+ #include <asm/fpu/internal.h>
++#include <asm/idtentry.h>
+ #include <asm/io.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/kexec.h>
+@@ -1733,7 +1734,8 @@ static void setup_msrs(struct vcpu_vmx *vmx)
+ 	if (update_transition_efer(vmx))
+ 		vmx_setup_uret_msr(vmx, MSR_EFER);
+ 
+-	if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
++	if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)  ||
++	    guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID))
+ 		vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
+ 
+ 	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
+@@ -1932,7 +1934,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		break;
+ 	case MSR_TSC_AUX:
+ 		if (!msr_info->host_initiated &&
+-		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+ 			return 1;
+ 		goto find_uret_msr;
+ 	case MSR_IA32_DEBUGCTLMSR:
+@@ -2229,7 +2232,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		break;
+ 	case MSR_TSC_AUX:
+ 		if (!msr_info->host_initiated &&
+-		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+ 			return 1;
+ 		/* Check reserved bit, higher 32 bits should be zero */
+ 		if ((data >> 32) != 0)
+@@ -4301,7 +4305,23 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
+ 						  xsaves_enabled, false);
+ 	}
+ 
+-	vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
++	/*
++	 * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
++	 * feature is exposed to the guest.  This creates a virtualization hole
++	 * if both are supported in hardware but only one is exposed to the
++	 * guest, but letting the guest execute RDTSCP or RDPID when either one
++	 * is advertised is preferable to emulating the advertised instruction
++	 * in KVM on #UD, and obviously better than incorrectly injecting #UD.
++	 */
++	if (cpu_has_vmx_rdtscp()) {
++		bool rdpid_or_rdtscp_enabled =
++			guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
++			guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
++
++		vmx_adjust_secondary_exec_control(vmx, &exec_control,
++						  SECONDARY_EXEC_ENABLE_RDTSCP,
++						  rdpid_or_rdtscp_enabled, false);
++	}
+ 	vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
+ 
+ 	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
+@@ -6394,18 +6414,17 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
+ 
+ void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
+ 
+-static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
++static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
++					unsigned long entry)
+ {
+-	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
+-	gate_desc *desc = (gate_desc *)host_idt_base + vector;
+-
+ 	kvm_before_interrupt(vcpu);
+-	vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
++	vmx_do_interrupt_nmi_irqoff(entry);
+ 	kvm_after_interrupt(vcpu);
+ }
+ 
+ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
+ {
++	const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
+ 	u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
+ 
+ 	/* if exit due to PF check for async PF */
+@@ -6416,18 +6435,20 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
+ 		kvm_machine_check();
+ 	/* We need to handle NMIs before interrupts are enabled */
+ 	else if (is_nmi(intr_info))
+-		handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
++		handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
+ }
+ 
+ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
+ {
+ 	u32 intr_info = vmx_get_intr_info(vcpu);
++	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
++	gate_desc *desc = (gate_desc *)host_idt_base + vector;
+ 
+ 	if (WARN_ONCE(!is_external_intr(intr_info),
+ 	    "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
+ 		return;
+ 
+-	handle_interrupt_nmi_irqoff(vcpu, intr_info);
++	handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
+ }
+ 
+ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+@@ -6893,12 +6914,9 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
+ 		u32 index = vmx_uret_msrs_list[i];
+-		u32 data_low, data_high;
+ 		int j = vmx->nr_uret_msrs;
+ 
+-		if (rdmsr_safe(index, &data_low, &data_high) < 0)
+-			continue;
+-		if (wrmsr_safe(index, data_low, data_high) < 0)
++		if (kvm_probe_user_return_msr(index))
+ 			continue;
+ 
+ 		vmx->guest_uret_msrs[j].slot = i;
+@@ -7331,9 +7349,11 @@ static __init void vmx_set_cpu_caps(void)
+ 	if (!cpu_has_vmx_xsaves())
+ 		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
+ 
+-	/* CPUID 0x80000001 */
+-	if (!cpu_has_vmx_rdtscp())
++	/* CPUID 0x80000001 and 0x7 (RDPID) */
++	if (!cpu_has_vmx_rdtscp()) {
+ 		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
++		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
++	}
+ 
+ 	if (cpu_has_vmx_waitpkg())
+ 		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
+@@ -7389,8 +7409,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ 	/*
+ 	 * RDPID causes #UD if disabled through secondary execution controls.
+ 	 * Because it is marked as EmulateOnUD, we need to intercept it here.
++	 * Note, RDPID is hidden behind ENABLE_RDTSCP.
+ 	 */
+-	case x86_intercept_rdtscp:
++	case x86_intercept_rdpid:
+ 		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
+ 			exception->vector = UD_VECTOR;
+ 			exception->error_code_valid = false;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3406ff421c1a3..87311d39f9145 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -335,6 +335,22 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
+ 	}
+ }
+ 
++int kvm_probe_user_return_msr(u32 msr)
++{
++	u64 val;
++	int ret;
++
++	preempt_disable();
++	ret = rdmsrl_safe(msr, &val);
++	if (ret)
++		goto out;
++	ret = wrmsrl_safe(msr, val);
++out:
++	preempt_enable();
++	return ret;
++}
++EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
++
+ void kvm_define_user_return_msr(unsigned slot, u32 msr)
+ {
+ 	BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
+@@ -5864,7 +5880,8 @@ static void kvm_init_msr_list(void)
+ 				continue;
+ 			break;
+ 		case MSR_TSC_AUX:
+-			if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
++			if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
++			    !kvm_cpu_cap_has(X86_FEATURE_RDPID))
+ 				continue;
+ 			break;
+ 		case MSR_IA32_UMWAIT_CONTROL:
+@@ -7964,6 +7981,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
+ 
+ static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
+ 
++/*
++ * Indirection to move queue_work() out of the tk_core.seq write held
++ * region to prevent possible deadlocks against time accessors which
++ * are invoked with work related locks held.
++ */
++static void pvclock_irq_work_fn(struct irq_work *w)
++{
++	queue_work(system_long_wq, &pvclock_gtod_work);
++}
++
++static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
++
+ /*
+  * Notification about pvclock gtod data update.
+  */
+@@ -7975,13 +8004,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
+ 
+ 	update_pvclock_gtod(tk);
+ 
+-	/* disable master clock if host does not trust, or does not
+-	 * use, TSC based clocksource.
++	/*
++	 * Disable master clock if host does not trust, or does not use,
++	 * TSC based clocksource. Delegate queue_work() to irq_work as
++	 * this is invoked with tk_core.seq write held.
+ 	 */
+ 	if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
+ 	    atomic_read(&kvm_guest_has_master_clock) != 0)
+-		queue_work(system_long_wq, &pvclock_gtod_work);
+-
++		irq_work_queue(&pvclock_irq_work);
+ 	return 0;
+ }
+ 
+@@ -8096,6 +8126,8 @@ void kvm_arch_exit(void)
+ 	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
+ #ifdef CONFIG_X86_64
+ 	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
++	irq_work_sync(&pvclock_irq_work);
++	cancel_work_sync(&pvclock_gtod_work);
+ #endif
+ 	kvm_x86_ops.hardware_enable = NULL;
+ 	kvm_mmu_module_exit();
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 20ba5db0f61cd..bc319931d2b36 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2263,10 +2263,9 @@ static void bfq_remove_request(struct request_queue *q,
+ 
+ }
+ 
+-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
++static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
+ 		unsigned int nr_segs)
+ {
+-	struct request_queue *q = hctx->queue;
+ 	struct bfq_data *bfqd = q->elevator->elevator_data;
+ 	struct request *free = NULL;
+ 	/*
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 98d656bdb42b7..4fbc875f7cb29 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1073,7 +1073,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
+ 
+ 	lockdep_assert_held(&ioc->lock);
+ 
+-	inuse = clamp_t(u32, inuse, 1, active);
++	/*
++	 * For an active leaf node, its inuse shouldn't be zero or exceed
++	 * @active. An active internal node's inuse is solely determined by the
++	 * inuse to active ratio of its children regardless of @inuse.
++	 */
++	if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
++		inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
++					   iocg->child_active_sum);
++	} else {
++		inuse = clamp_t(u32, inuse, 1, active);
++	}
+ 
+ 	iocg->last_inuse = iocg->inuse;
+ 	if (save)
+@@ -1090,7 +1100,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
+ 		/* update the level sums */
+ 		parent->child_active_sum += (s32)(active - child->active);
+ 		parent->child_inuse_sum += (s32)(inuse - child->inuse);
+-		/* apply the udpates */
++		/* apply the updates */
+ 		child->active = active;
+ 		child->inuse = inuse;
+ 
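
The __propagate_weights() fix distinguishes node types: an internal node (off the active list but with active children) now derives inuse from its children's inuse-to-active ratio using a round-up division, instead of being clamped against a stale @inuse. A sketch of that computation, with DIV64_U64_ROUND_UP() approximated portably (the weight values are invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Round-up division, in the spirit of DIV64_U64_ROUND_UP(). */
    static uint64_t div_round_up(uint64_t n, uint64_t d)
    {
        return (n + d - 1) / d;
    }

    int main(void)
    {
        /* Invented weights: children are 300 parts active but claim only
         * 100 inuse, so a parent with active=1000 derives inuse=334. */
        uint64_t active = 1000, child_inuse_sum = 100, child_active_sum = 300;
        uint64_t inuse = div_round_up(active * child_inuse_sum,
                                      child_active_sum);

        printf("inuse = %llu\n", (unsigned long long)inuse);
        return 0;
    }
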
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index e1e997af89a0d..fdeb9773b55cf 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -348,14 +348,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+ 		unsigned int nr_segs)
+ {
+ 	struct elevator_queue *e = q->elevator;
+-	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
++	struct blk_mq_ctx *ctx;
++	struct blk_mq_hw_ctx *hctx;
+ 	bool ret = false;
+ 	enum hctx_type type;
+ 
+ 	if (e && e->type->ops.bio_merge)
+-		return e->type->ops.bio_merge(hctx, bio, nr_segs);
++		return e->type->ops.bio_merge(q, bio, nr_segs);
+ 
++	ctx = blk_mq_get_ctx(q);
++	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+ 	type = hctx->type;
+ 	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
+ 	    list_empty_careful(&ctx->rq_lists[type]))
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index d4d7c1caa4396..0e120547ccb72 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2216,8 +2216,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
+ 		/* Bypass scheduler for flush requests */
+ 		blk_insert_flush(rq);
+ 		blk_mq_run_hw_queue(data.hctx, true);
+-	} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
+-				!blk_queue_nonrot(q))) {
++	} else if (plug && (q->nr_hw_queues == 1 ||
++		   blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
++		   q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
+ 		/*
+ 		 * Use plugging if we have a ->commit_rqs() hook as well, as
+ 		 * we know the driver uses bd->last in a smart fashion.
+@@ -3269,10 +3270,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
+ /* tags can _not_ be used after returning from blk_mq_exit_queue */
+ void blk_mq_exit_queue(struct request_queue *q)
+ {
+-	struct blk_mq_tag_set	*set = q->tag_set;
++	struct blk_mq_tag_set *set = q->tag_set;
+ 
+-	blk_mq_del_queue_tag_set(q);
++	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
+ 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
++	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
++	blk_mq_del_queue_tag_set(q);
+ }
+ 
+ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
+index 33d34d69cade2..79b69d7046d66 100644
+--- a/block/kyber-iosched.c
++++ b/block/kyber-iosched.c
+@@ -560,11 +560,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+ 	}
+ }
+ 
+-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
++static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
+ 		unsigned int nr_segs)
+ {
++	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
++	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+ 	struct kyber_hctx_data *khd = hctx->sched_data;
+-	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
+ 	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
+ 	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
+ 	struct list_head *rq_list = &kcq->rq_list[sched_domain];
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index f3631a2874667..3aabcd2a7893c 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
+ 	return ELEVATOR_NO_MERGE;
+ }
+ 
+-static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
++static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
+ 		unsigned int nr_segs)
+ {
+-	struct request_queue *q = hctx->queue;
+ 	struct deadline_data *dd = q->elevator->elevator_data;
+ 	struct request *free = NULL;
+ 	bool ret;
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index 096153761ebc3..58876248b1921 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -1310,6 +1310,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
+ 		{"PNP0C0B", }, /* Generic ACPI fan */
+ 		{"INT3404", }, /* Fan */
+ 		{"INTC1044", }, /* Fan for Tiger Lake generation */
++		{"INTC1048", }, /* Fan for Alder Lake generation */
+ 		{}
+ 	};
+ 	struct acpi_device *adev = ACPI_COMPANION(dev);
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 6efe7edd7b1ea..345777bf7af9c 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -701,6 +701,7 @@ int acpi_device_add(struct acpi_device *device,
+ 
+ 		result = acpi_device_set_name(device, acpi_device_bus_id);
+ 		if (result) {
++			kfree_const(acpi_device_bus_id->bus_id);
+ 			kfree(acpi_device_bus_id);
+ 			goto err_unlock;
+ 		}
+diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
+index 5b32df5d33adc..6e9c5ade4c2ea 100644
+--- a/drivers/ata/ahci_brcm.c
++++ b/drivers/ata/ahci_brcm.c
+@@ -86,7 +86,8 @@ struct brcm_ahci_priv {
+ 	u32 port_mask;
+ 	u32 quirks;
+ 	enum brcm_ahci_version version;
+-	struct reset_control *rcdev;
++	struct reset_control *rcdev_rescal;
++	struct reset_control *rcdev_ahci;
+ };
+ 
+ static inline u32 brcm_sata_readreg(void __iomem *addr)
+@@ -352,8 +353,8 @@ static int brcm_ahci_suspend(struct device *dev)
+ 	else
+ 		ret = 0;
+ 
+-	if (priv->version != BRCM_SATA_BCM7216)
+-		reset_control_assert(priv->rcdev);
++	reset_control_assert(priv->rcdev_ahci);
++	reset_control_rearm(priv->rcdev_rescal);
+ 
+ 	return ret;
+ }
+@@ -365,10 +366,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
+ 	struct brcm_ahci_priv *priv = hpriv->plat_data;
+ 	int ret = 0;
+ 
+-	if (priv->version == BRCM_SATA_BCM7216)
+-		ret = reset_control_reset(priv->rcdev);
+-	else
+-		ret = reset_control_deassert(priv->rcdev);
++	ret = reset_control_deassert(priv->rcdev_ahci);
++	if (ret)
++		return ret;
++	ret = reset_control_reset(priv->rcdev_rescal);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -434,7 +435,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ {
+ 	const struct of_device_id *of_id;
+ 	struct device *dev = &pdev->dev;
+-	const char *reset_name = NULL;
+ 	struct brcm_ahci_priv *priv;
+ 	struct ahci_host_priv *hpriv;
+ 	struct resource *res;
+@@ -456,15 +456,15 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->top_ctrl))
+ 		return PTR_ERR(priv->top_ctrl);
+ 
+-	/* Reset is optional depending on platform and named differently */
+-	if (priv->version == BRCM_SATA_BCM7216)
+-		reset_name = "rescal";
+-	else
+-		reset_name = "ahci";
+-
+-	priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
+-	if (IS_ERR(priv->rcdev))
+-		return PTR_ERR(priv->rcdev);
++	if (priv->version == BRCM_SATA_BCM7216) {
++		priv->rcdev_rescal = devm_reset_control_get_optional_shared(
++			&pdev->dev, "rescal");
++		if (IS_ERR(priv->rcdev_rescal))
++			return PTR_ERR(priv->rcdev_rescal);
++	}
++	priv->rcdev_ahci = devm_reset_control_get_optional(&pdev->dev, "ahci");
++	if (IS_ERR(priv->rcdev_ahci))
++		return PTR_ERR(priv->rcdev_ahci);
+ 
+ 	hpriv = ahci_platform_get_resources(pdev, 0);
+ 	if (IS_ERR(hpriv))
+@@ -485,10 +485,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ 		break;
+ 	}
+ 
+-	if (priv->version == BRCM_SATA_BCM7216)
+-		ret = reset_control_reset(priv->rcdev);
+-	else
+-		ret = reset_control_deassert(priv->rcdev);
++	ret = reset_control_reset(priv->rcdev_rescal);
++	if (ret)
++		return ret;
++	ret = reset_control_deassert(priv->rcdev_ahci);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -539,8 +539,8 @@ out_disable_regulators:
+ out_disable_clks:
+ 	ahci_platform_disable_clks(hpriv);
+ out_reset:
+-	if (priv->version != BRCM_SATA_BCM7216)
+-		reset_control_assert(priv->rcdev);
++	reset_control_assert(priv->rcdev_ahci);
++	reset_control_rearm(priv->rcdev_rescal);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index fe1dad68aee4d..ae011f2bc537d 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
+ 	dev->power.request_pending = false;
+ 	dev->power.request = RPM_REQ_NONE;
+ 	dev->power.deferred_resume = false;
++	dev->power.needs_force_resume = 0;
+ 	INIT_WORK(&dev->power.work, pm_runtime_work);
+ 
+ 	dev->power.timer_expires = 0;
+@@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
+ 	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
+ 	 * function will be called again for it in the meantime.
+ 	 */
+-	if (pm_runtime_need_not_resume(dev))
++	if (pm_runtime_need_not_resume(dev)) {
+ 		pm_runtime_set_suspended(dev);
+-	else
++	} else {
+ 		__update_runtime_status(dev, RPM_SUSPENDED);
++		dev->power.needs_force_resume = 1;
++	}
+ 
+ 	return 0;
+ 
+@@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
+ 	int (*callback)(struct device *);
+ 	int ret = 0;
+ 
+-	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
++	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+ 		goto out;
+ 
+ 	/*
+@@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
+ 
+ 	pm_runtime_mark_last_busy(dev);
+ out:
++	dev->power.needs_force_resume = 0;
+ 	pm_runtime_enable(dev);
+ 	return ret;
+ }
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 4ff71b579cfcc..974da561b8e5e 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1980,7 +1980,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
+ 	 * config ref and try to destroy the workqueue from inside the work
+ 	 * queue.
+ 	 */
+-	flush_workqueue(nbd->recv_workq);
++	if (nbd->recv_workq)
++		flush_workqueue(nbd->recv_workq);
+ 	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
+ 			       &nbd->config->runtime_flags))
+ 		nbd_config_put(nbd);
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index 45a4700766524..5ab7319ff2ead 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -693,7 +693,11 @@ static void remap_devs(struct rnbd_clt_session *sess)
+ 		return;
+ 	}
+ 
+-	rtrs_clt_query(sess->rtrs, &attrs);
++	err = rtrs_clt_query(sess->rtrs, &attrs);
++	if (err) {
++		pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
++		return;
++	}
+ 	mutex_lock(&sess->lock);
+ 	sess->max_io_size = attrs.max_io_size;
+ 
+@@ -1234,7 +1238,11 @@ find_and_get_or_create_sess(const char *sessname,
+ 		err = PTR_ERR(sess->rtrs);
+ 		goto wake_up_and_put;
+ 	}
+-	rtrs_clt_query(sess->rtrs, &attrs);
++
++	err = rtrs_clt_query(sess->rtrs, &attrs);
++	if (err)
++		goto close_rtrs;
++
+ 	sess->max_io_size = attrs.max_io_size;
+ 	sess->queue_depth = attrs.queue_depth;
+ 
+diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
+index 537d499dad3b0..73d9808405310 100644
+--- a/drivers/block/rnbd/rnbd-clt.h
++++ b/drivers/block/rnbd/rnbd-clt.h
+@@ -87,7 +87,7 @@ struct rnbd_clt_session {
+ 	DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
+ 	int	__percpu	*cpu_rr; /* per-cpu var for CPU round-robin */
+ 	atomic_t		busy;
+-	int			queue_depth;
++	size_t			queue_depth;
+ 	u32			max_io_size;
+ 	struct blk_mq_tag_set	tag_set;
+ 	struct mutex		lock; /* protects state and devs_list */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 5cbfbd948f676..4a901508e48e7 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -399,7 +399,9 @@ static const struct usb_device_id blacklist_table[] = {
+ 
+ 	/* MediaTek Bluetooth devices */
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
+-	  .driver_info = BTUSB_MEDIATEK },
++	  .driver_info = BTUSB_MEDIATEK |
++			 BTUSB_WIDEBAND_SPEECH |
++			 BTUSB_VALID_LE_STATES },
+ 
+ 	/* Additional MediaTek MT7615E Bluetooth devices */
+ 	{ USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index eff1f12d981ab..c84d239512197 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
+ 
+ 	if (nr_commands !=
+ 	    be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
++		rc = -EFAULT;
+ 		tpm_buf_destroy(&buf);
+ 		goto out;
+ 	}
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index a2e0395cbe618..55b9d3965ae1b 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
+ 	cap_t cap;
+ 	int ret;
+ 
+-	/* TPM 2.0 */
+-	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+-		return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+-
+-	/* TPM 1.2 */
+ 	ret = request_locality(chip, 0);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
++	if (chip->flags & TPM_CHIP_FLAG_TPM2)
++		ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
++	else
++		ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+ 
+ 	release_locality(chip, 0);
+ 
+@@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* TPM 1.2 requires self-test on resume. This function actually returns
++	/*
++	 * TPM 1.2 requires self-test on resume. This function actually returns
+ 	 * an error code but for unknown reason it isn't handled.
+ 	 */
+-	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
++	if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
++		ret = request_locality(chip, 0);
++		if (ret < 0)
++			return ret;
++
+ 		tpm1_do_selftest(chip);
+ 
++		release_locality(chip, 0);
++	}
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(tpm_tis_resume);
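
Note on the two tpm_tis hunks above: they converge on one rule. Every command issued through the TIS core, TPM 1.2 and 2.0 alike, must run inside a request_locality()/release_locality() bracket; tpm_tis_gen_interrupt() used to skip the bracket for TPM 2.0, and the resume-time self-test skipped it entirely. The bracket itself:

ret = request_locality(chip, 0);
if (ret < 0)
	return ret;

/* ... issue the TPM command(s) ... */

release_locality(chip, 0);
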
+diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
+index 87ee1bad9a9a8..4a5d2a914bd66 100644
+--- a/drivers/clk/samsung/clk-exynos7.c
++++ b/drivers/clk/samsung/clk-exynos7.c
+@@ -537,8 +537,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
+ 	GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
+ 		ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
+ 		CLK_IS_CRITICAL, 0),
++	/*
++	 * This clock is required for the CMU_FSYS1 registers access, keep it
++	 * enabled permanently until proper runtime PM support is added.
++	 */
+ 	GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
+-		ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
++		ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
++		CLK_IS_CRITICAL, 0),
+ 
+ 	GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
+ 		"dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,
+diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
+index 3fae9ebb58b83..b6f97960d8ee0 100644
+--- a/drivers/clocksource/timer-ti-dm-systimer.c
++++ b/drivers/clocksource/timer-ti-dm-systimer.c
+@@ -2,6 +2,7 @@
+ #include <linux/clk.h>
+ #include <linux/clocksource.h>
+ #include <linux/clockchips.h>
++#include <linux/cpuhotplug.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
+ 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
+ }
+ 
+-static int __init dmtimer_clockevent_init(struct device_node *np)
++static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
++					     struct device_node *np,
++					     unsigned int features,
++					     const struct cpumask *cpumask,
++					     const char *name,
++					     int rating)
+ {
+-	struct dmtimer_clockevent *clkevt;
+ 	struct clock_event_device *dev;
+ 	struct dmtimer_systimer *t;
+ 	int error;
+ 
+-	clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+-	if (!clkevt)
+-		return -ENOMEM;
+-
+ 	t = &clkevt->t;
+ 	dev = &clkevt->dev;
+ 
+@@ -548,25 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
+ 	 * We mostly use cpuidle_coupled with ARM local timers for runtime,
+ 	 * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
+ 	 */
+-	dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+-	dev->rating = 300;
++	dev->features = features;
++	dev->rating = rating;
+ 	dev->set_next_event = dmtimer_set_next_event;
+ 	dev->set_state_shutdown = dmtimer_clockevent_shutdown;
+ 	dev->set_state_periodic = dmtimer_set_periodic;
+ 	dev->set_state_oneshot = dmtimer_clockevent_shutdown;
+ 	dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
+ 	dev->tick_resume = dmtimer_clockevent_shutdown;
+-	dev->cpumask = cpu_possible_mask;
++	dev->cpumask = cpumask;
+ 
+ 	dev->irq = irq_of_parse_and_map(np, 0);
+-	if (!dev->irq) {
+-		error = -ENXIO;
+-		goto err_out_free;
+-	}
++	if (!dev->irq)
++		return -ENXIO;
+ 
+ 	error = dmtimer_systimer_setup(np, &clkevt->t);
+ 	if (error)
+-		goto err_out_free;
++		return error;
+ 
+ 	clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
+ 
+@@ -578,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
+ 	writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
+ 
+ 	error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
+-			    IRQF_TIMER, "clockevent", clkevt);
++			    IRQF_TIMER, name, clkevt);
+ 	if (error)
+ 		goto err_out_unmap;
+ 
+ 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
+ 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
+ 
+-	pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
+-		of_find_property(np, "ti,timer-alwon", NULL) ?
++	pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
++		name, of_find_property(np, "ti,timer-alwon", NULL) ?
+ 		"always-on " : "", t->rate, np->parent);
+ 
+-	clockevents_config_and_register(dev, t->rate,
+-					3, /* Timer internal resynch latency */
++	return 0;
++
++err_out_unmap:
++	iounmap(t->base);
++
++	return error;
++}
++
++static int __init dmtimer_clockevent_init(struct device_node *np)
++{
++	struct dmtimer_clockevent *clkevt;
++	int error;
++
++	clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
++	if (!clkevt)
++		return -ENOMEM;
++
++	error = dmtimer_clkevt_init_common(clkevt, np,
++					   CLOCK_EVT_FEAT_PERIODIC |
++					   CLOCK_EVT_FEAT_ONESHOT,
++					   cpu_possible_mask, "clockevent",
++					   300);
++	if (error)
++		goto err_out_free;
++
++	clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
++					3, /* Timer internal resync latency */
+ 					0xffffffff);
+ 
+ 	if (of_machine_is_compatible("ti,am33xx") ||
+ 	    of_machine_is_compatible("ti,am43")) {
+-		dev->suspend = omap_clockevent_idle;
+-		dev->resume = omap_clockevent_unidle;
++		clkevt->dev.suspend = omap_clockevent_idle;
++		clkevt->dev.resume = omap_clockevent_unidle;
+ 	}
+ 
+ 	return 0;
+ 
+-err_out_unmap:
+-	iounmap(t->base);
+-
+ err_out_free:
+ 	kfree(clkevt);
+ 
+ 	return error;
+ }
+ 
++/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
++static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
++
++static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
++{
++	struct dmtimer_clockevent *clkevt;
++	int error;
++
++	if (!cpu_possible(cpu))
++		return -EINVAL;
++
++	if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
++	    !of_property_read_bool(np->parent, "ti,no-idle"))
++		pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
++
++	clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
++
++	error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
++					   cpumask_of(cpu), "percpu-dmtimer",
++					   500);
++	if (error)
++		return error;
++
++	return 0;
++}
++
++/* See TRM for timer internal resynch latency */
++static int omap_dmtimer_starting_cpu(unsigned int cpu)
++{
++	struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
++	struct clock_event_device *dev = &clkevt->dev;
++	struct dmtimer_systimer *t = &clkevt->t;
++
++	clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
++	irq_force_affinity(dev->irq, cpumask_of(cpu));
++
++	return 0;
++}
++
++static int __init dmtimer_percpu_timer_startup(void)
++{
++	struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
++	struct dmtimer_systimer *t = &clkevt->t;
++
++	if (t->sysc) {
++		cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
++				  "clockevents/omap/gptimer:starting",
++				  omap_dmtimer_starting_cpu, NULL);
++	}
++
++	return 0;
++}
++subsys_initcall(dmtimer_percpu_timer_startup);
++
++static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
++{
++	struct device_node *arm_timer;
++
++	arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
++	if (of_device_is_available(arm_timer)) {
++		pr_warn_once("ARM architected timer wrap issue i940 detected\n");
++		return 0;
++	}
++
++	if (pa == 0x48034000)		/* dra7 dmtimer3 */
++		return dmtimer_percpu_timer_init(np, 0);
++	else if (pa == 0x48036000)	/* dra7 dmtimer4 */
++		return dmtimer_percpu_timer_init(np, 1);
++
++	return 0;
++}
++
+ /* Clocksource */
+ static struct dmtimer_clocksource *
+ to_dmtimer_clocksource(struct clocksource *cs)
+@@ -743,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
+ 	if (clockevent == pa)
+ 		return dmtimer_clockevent_init(np);
+ 
++	if (of_machine_is_compatible("ti,dra7"))
++		return dmtimer_percpu_quirk_init(np, pa);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index d1bbc16fba4b4..7e7450453714d 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -646,7 +646,11 @@ static u64 get_max_boost_ratio(unsigned int cpu)
+ 		return 0;
+ 	}
+ 
+-	highest_perf = perf_caps.highest_perf;
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
++		highest_perf = amd_get_highest_perf();
++	else
++		highest_perf = perf_caps.highest_perf;
++
+ 	nominal_perf = perf_caps.nominal_perf;
+ 
+ 	if (!highest_perf || !nominal_perf) {
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 5175ae3cac44b..34196c107de64 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -3054,6 +3054,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
+ 	{}
+ };
+ 
++static bool intel_pstate_hwp_is_enabled(void)
++{
++	u64 value;
++
++	rdmsrl(MSR_PM_ENABLE, value);
++	return !!(value & 0x1);
++}
++
+ static int __init intel_pstate_init(void)
+ {
+ 	const struct x86_cpu_id *id;
+@@ -3072,8 +3080,12 @@ static int __init intel_pstate_init(void)
+ 		 * Avoid enabling HWP for processors without EPP support,
+ 		 * because that means incomplete HWP implementation which is a
+ 		 * corner case and supporting it is generally problematic.
++		 *
++		 * If HWP is enabled already, though, there is no choice but to
++		 * deal with it.
+ 		 */
+-		if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
++		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
++		    intel_pstate_hwp_is_enabled()) {
+ 			hwp_active++;
+ 			hwp_mode_bdw = id->driver_data;
+ 			intel_pstate.attr = hwp_cpufreq_attrs;
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index 8fd43c1acac13..3e0d1d6922bab 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -990,7 +990,7 @@ int sev_dev_init(struct psp_device *psp)
+ 	if (!sev->vdata) {
+ 		ret = -ENODEV;
+ 		dev_err(dev, "sev: missing driver data\n");
+-		goto e_err;
++		goto e_sev;
+ 	}
+ 
+ 	psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
+@@ -1005,6 +1005,8 @@ int sev_dev_init(struct psp_device *psp)
+ 
+ e_irq:
+ 	psp_clear_sev_irq_handler(psp);
++e_sev:
++	devm_kfree(dev, sev);
+ e_err:
+ 	psp->sev_data = NULL;
+ 
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 0db9b82ed8cf5..1d8a3876b7452 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -39,15 +39,15 @@ struct idxd_user_context {
+ 	struct iommu_sva *sva;
+ };
+ 
+-enum idxd_cdev_cleanup {
+-	CDEV_NORMAL = 0,
+-	CDEV_FAILED,
+-};
+-
+ static void idxd_cdev_dev_release(struct device *dev)
+ {
+-	dev_dbg(dev, "releasing cdev device\n");
+-	kfree(dev);
++	struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
++	struct idxd_cdev_context *cdev_ctx;
++	struct idxd_wq *wq = idxd_cdev->wq;
++
++	cdev_ctx = &ictx[wq->idxd->type];
++	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
++	kfree(idxd_cdev);
+ }
+ 
+ static struct device_type idxd_cdev_device_type = {
+@@ -62,14 +62,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
+ 	return container_of(cdev, struct idxd_cdev, cdev);
+ }
+ 
+-static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
+-{
+-	return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
+-}
+-
+ static inline struct idxd_wq *inode_wq(struct inode *inode)
+ {
+-	return idxd_cdev_wq(inode_idxd_cdev(inode));
++	struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);
++
++	return idxd_cdev->wq;
+ }
+ 
+ static int idxd_cdev_open(struct inode *inode, struct file *filp)
+@@ -220,11 +217,10 @@ static __poll_t idxd_cdev_poll(struct file *filp,
+ 	struct idxd_user_context *ctx = filp->private_data;
+ 	struct idxd_wq *wq = ctx->wq;
+ 	struct idxd_device *idxd = wq->idxd;
+-	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ 	unsigned long flags;
+ 	__poll_t out = 0;
+ 
+-	poll_wait(filp, &idxd_cdev->err_queue, wait);
++	poll_wait(filp, &wq->err_queue, wait);
+ 	spin_lock_irqsave(&idxd->dev_lock, flags);
+ 	if (idxd->sw_err.valid)
+ 		out = EPOLLIN | EPOLLRDNORM;
+@@ -246,98 +242,67 @@ int idxd_cdev_get_major(struct idxd_device *idxd)
+ 	return MAJOR(ictx[idxd->type].devt);
+ }
+ 
+-static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
++int idxd_wq_add_cdev(struct idxd_wq *wq)
+ {
+ 	struct idxd_device *idxd = wq->idxd;
+-	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+-	struct idxd_cdev_context *cdev_ctx;
++	struct idxd_cdev *idxd_cdev;
++	struct cdev *cdev;
+ 	struct device *dev;
+-	int minor, rc;
++	struct idxd_cdev_context *cdev_ctx;
++	int rc, minor;
+ 
+-	idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
+-	if (!idxd_cdev->dev)
++	idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
++	if (!idxd_cdev)
+ 		return -ENOMEM;
+ 
+-	dev = idxd_cdev->dev;
+-	dev->parent = &idxd->pdev->dev;
+-	dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
+-		     idxd->id, wq->id);
+-	dev->bus = idxd_get_bus_type(idxd);
+-
++	idxd_cdev->wq = wq;
++	cdev = &idxd_cdev->cdev;
++	dev = &idxd_cdev->dev;
+ 	cdev_ctx = &ictx[wq->idxd->type];
+ 	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+ 	if (minor < 0) {
+-		rc = minor;
+-		kfree(dev);
+-		goto ida_err;
+-	}
+-
+-	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
+-	dev->type = &idxd_cdev_device_type;
+-	rc = device_register(dev);
+-	if (rc < 0) {
+-		dev_err(&idxd->pdev->dev, "device register failed\n");
+-		goto dev_reg_err;
++		kfree(idxd_cdev);
++		return minor;
+ 	}
+ 	idxd_cdev->minor = minor;
+ 
+-	return 0;
+-
+- dev_reg_err:
+-	ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+-	put_device(dev);
+- ida_err:
+-	idxd_cdev->dev = NULL;
+-	return rc;
+-}
+-
+-static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
+-				 enum idxd_cdev_cleanup cdev_state)
+-{
+-	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+-	struct idxd_cdev_context *cdev_ctx;
+-
+-	cdev_ctx = &ictx[wq->idxd->type];
+-	if (cdev_state == CDEV_NORMAL)
+-		cdev_del(&idxd_cdev->cdev);
+-	device_unregister(idxd_cdev->dev);
+-	/*
+-	 * The device_type->release() will be called on the device and free
+-	 * the allocated struct device. We can just forget it.
+-	 */
+-	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+-	idxd_cdev->dev = NULL;
+-	idxd_cdev->minor = -1;
+-}
+-
+-int idxd_wq_add_cdev(struct idxd_wq *wq)
+-{
+-	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+-	struct cdev *cdev = &idxd_cdev->cdev;
+-	struct device *dev;
+-	int rc;
++	device_initialize(dev);
++	dev->parent = &wq->conf_dev;
++	dev->bus = idxd_get_bus_type(idxd);
++	dev->type = &idxd_cdev_device_type;
++	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
+ 
+-	rc = idxd_wq_cdev_dev_setup(wq);
++	rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
++			  idxd->id, wq->id);
+ 	if (rc < 0)
+-		return rc;
++		goto err;
+ 
+-	dev = idxd_cdev->dev;
++	wq->idxd_cdev = idxd_cdev;
+ 	cdev_init(cdev, &idxd_cdev_fops);
+-	cdev_set_parent(cdev, &dev->kobj);
+-	rc = cdev_add(cdev, dev->devt, 1);
++	rc = cdev_device_add(cdev, dev);
+ 	if (rc) {
+ 		dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
+-		idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
+-		return rc;
++		goto err;
+ 	}
+ 
+-	init_waitqueue_head(&idxd_cdev->err_queue);
+ 	return 0;
++
++ err:
++	put_device(dev);
++	wq->idxd_cdev = NULL;
++	return rc;
+ }
+ 
+ void idxd_wq_del_cdev(struct idxd_wq *wq)
+ {
+-	idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
++	struct idxd_cdev *idxd_cdev;
++	struct idxd_cdev_context *cdev_ctx;
++
++	cdev_ctx = &ictx[wq->idxd->type];
++	idxd_cdev = wq->idxd_cdev;
++	wq->idxd_cdev = NULL;
++	cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
++	put_device(&idxd_cdev->dev);
+ }
+ 
+ int idxd_cdev_register(void)
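
Note on the cdev.c rewrite above: it is an instance of the standard driver-model lifetime pattern. The wrapper embeds a refcounted struct device, every failure path after device_initialize() goes through put_device() rather than kfree(), and the device_type ->release() callback becomes the single place that frees the allocation (here it also returns the minor number). cdev_device_add()/cdev_device_del() then register and tear down the cdev and the device in lockstep. The skeleton, stripped of idxd specifics:

struct wrapper {
	struct cdev cdev;
	struct device dev;		/* embedded, refcounted */
};

static void wrapper_release(struct device *dev)
{
	kfree(container_of(dev, struct wrapper, dev));	/* sole free point */
}

int add_one(void)
{
	struct wrapper *w = kzalloc(sizeof(*w), GFP_KERNEL);
	int rc;

	if (!w)
		return -ENOMEM;
	device_initialize(&w->dev);	/* refcount is live from here on */
	/* set w->dev.type (.release = wrapper_release), name and devt,
	 * then cdev_init(&w->cdev, &fops) */
	rc = cdev_device_add(&w->cdev, &w->dev);
	if (rc)
		put_device(&w->dev);	/* never kfree() directly */
	return rc;
}
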
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 31c819544a229..4fef57717049e 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+ /* Interrupt control bits */
+ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
+ {
+-	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
++	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
+ 
+ 	pci_msi_mask_irq(data);
+ }
+@@ -36,7 +36,7 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
+ 
+ void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
+ {
+-	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
++	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
+ 
+ 	pci_msi_unmask_irq(data);
+ }
+@@ -186,8 +186,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
+ 		desc->id = i;
+ 		desc->wq = wq;
+ 		desc->cpu = -1;
+-		dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
+-		desc->txd.tx_submit = idxd_dma_tx_submit;
+ 	}
+ 
+ 	return 0;
+@@ -451,7 +449,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+ 
+ 	if (idxd_device_is_halted(idxd)) {
+ 		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+-		*status = IDXD_CMDSTS_HW_ERR;
++		if (status)
++			*status = IDXD_CMDSTS_HW_ERR;
+ 		return;
+ 	}
+ 
+@@ -521,7 +520,7 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+ 	lockdep_assert_held(&idxd->dev_lock);
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		if (wq->state == IDXD_WQ_ENABLED) {
+ 			idxd_wq_disable_cleanup(wq);
+@@ -660,7 +659,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
+ 		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
+ 
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		struct idxd_group *group = &idxd->groups[i];
++		struct idxd_group *group = idxd->groups[i];
+ 
+ 		idxd_group_config_write(group);
+ 	}
+@@ -739,7 +738,7 @@ static int idxd_wqs_config_write(struct idxd_device *idxd)
+ 	int i, rc;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		rc = idxd_wq_config_write(wq);
+ 		if (rc < 0)
+@@ -755,7 +754,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
+ 
+ 	/* TC-A 0 and TC-B 1 should be defaults */
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		struct idxd_group *group = &idxd->groups[i];
++		struct idxd_group *group = idxd->groups[i];
+ 
+ 		if (group->tc_a == -1)
+ 			group->tc_a = group->grpcfg.flags.tc_a = 0;
+@@ -782,12 +781,12 @@ static int idxd_engines_setup(struct idxd_device *idxd)
+ 	struct idxd_group *group;
+ 
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		group = &idxd->groups[i];
++		group = idxd->groups[i];
+ 		group->grpcfg.engines = 0;
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_engines; i++) {
+-		eng = &idxd->engines[i];
++		eng = idxd->engines[i];
+ 		group = eng->group;
+ 
+ 		if (!group)
+@@ -811,13 +810,13 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
+ 	struct device *dev = &idxd->pdev->dev;
+ 
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		group = &idxd->groups[i];
++		group = idxd->groups[i];
+ 		for (j = 0; j < 4; j++)
+ 			group->grpcfg.wqs[j] = 0;
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		wq = &idxd->wqs[i];
++		wq = idxd->wqs[i];
+ 		group = wq->group;
+ 
+ 		if (!wq->group)
+diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
+index a15e50126434e..77439b6450448 100644
+--- a/drivers/dma/idxd/dma.c
++++ b/drivers/dma/idxd/dma.c
+@@ -14,7 +14,10 @@
+ 
+ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
+ {
+-	return container_of(c, struct idxd_wq, dma_chan);
++	struct idxd_dma_chan *idxd_chan;
++
++	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
++	return idxd_chan->wq;
+ }
+ 
+ void idxd_dma_complete_txd(struct idxd_desc *desc,
+@@ -135,7 +138,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
+ {
+ }
+ 
+-dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
++static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+ {
+ 	struct dma_chan *c = tx->chan;
+ 	struct idxd_wq *wq = to_idxd_wq(c);
+@@ -156,14 +159,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+ 
+ static void idxd_dma_release(struct dma_device *device)
+ {
++	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);
++
++	kfree(idxd_dma);
+ }
+ 
+ int idxd_register_dma_device(struct idxd_device *idxd)
+ {
+-	struct dma_device *dma = &idxd->dma_dev;
++	struct idxd_dma_dev *idxd_dma;
++	struct dma_device *dma;
++	struct device *dev = &idxd->pdev->dev;
++	int rc;
+ 
++	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
++	if (!idxd_dma)
++		return -ENOMEM;
++
++	dma = &idxd_dma->dma;
+ 	INIT_LIST_HEAD(&dma->channels);
+-	dma->dev = &idxd->pdev->dev;
++	dma->dev = dev;
+ 
+ 	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ 	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
+@@ -179,35 +193,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
+ 	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
+ 	dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+ 
+-	return dma_async_device_register(&idxd->dma_dev);
++	rc = dma_async_device_register(dma);
++	if (rc < 0) {
++		kfree(idxd_dma);
++		return rc;
++	}
++
++	idxd_dma->idxd = idxd;
++	/*
++	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
++	 * as long as there are outstanding channels.
++	 */
++	idxd->idxd_dma = idxd_dma;
++	return 0;
+ }
+ 
+ void idxd_unregister_dma_device(struct idxd_device *idxd)
+ {
+-	dma_async_device_unregister(&idxd->dma_dev);
++	dma_async_device_unregister(&idxd->idxd_dma->dma);
+ }
+ 
+ int idxd_register_dma_channel(struct idxd_wq *wq)
+ {
+ 	struct idxd_device *idxd = wq->idxd;
+-	struct dma_device *dma = &idxd->dma_dev;
+-	struct dma_chan *chan = &wq->dma_chan;
+-	int rc;
++	struct dma_device *dma = &idxd->idxd_dma->dma;
++	struct device *dev = &idxd->pdev->dev;
++	struct idxd_dma_chan *idxd_chan;
++	struct dma_chan *chan;
++	int rc, i;
++
++	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
++	if (!idxd_chan)
++		return -ENOMEM;
+ 
+-	memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
++	chan = &idxd_chan->chan;
+ 	chan->device = dma;
+ 	list_add_tail(&chan->device_node, &dma->channels);
++
++	for (i = 0; i < wq->num_descs; i++) {
++		struct idxd_desc *desc = wq->descs[i];
++
++		dma_async_tx_descriptor_init(&desc->txd, chan);
++		desc->txd.tx_submit = idxd_dma_tx_submit;
++	}
++
+ 	rc = dma_async_device_channel_register(dma, chan);
+-	if (rc < 0)
++	if (rc < 0) {
++		kfree(idxd_chan);
+ 		return rc;
++	}
++
++	wq->idxd_chan = idxd_chan;
++	idxd_chan->wq = wq;
++	get_device(&wq->conf_dev);
+ 
+ 	return 0;
+ }
+ 
+ void idxd_unregister_dma_channel(struct idxd_wq *wq)
+ {
+-	struct dma_chan *chan = &wq->dma_chan;
++	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
++	struct dma_chan *chan = &idxd_chan->chan;
++	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;
+ 
+-	dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
++	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
+ 	list_del(&chan->device_node);
++	kfree(wq->idxd_chan);
++	wq->idxd_chan = NULL;
++	put_device(&wq->conf_dev);
+ }
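
Note on the dma.c hunks above: struct dma_chan is no longer embedded in struct idxd_wq; an idxd_dma_chan wrapper is allocated instead and the wq recovered via container_of(), and the same wrapper trick gives idxd_dma_dev a ->device_release() that can simply kfree() its container. The get_device(&wq->conf_dev)/put_device() pair pins the wq for as long as its channel stays registered. The core of the wrapper:

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};

static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	return container_of(c, struct idxd_dma_chan, chan)->wq;
}
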
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index 76014c14f4732..89daf746d1215 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -8,12 +8,16 @@
+ #include <linux/percpu-rwsem.h>
+ #include <linux/wait.h>
+ #include <linux/cdev.h>
++#include <linux/idr.h>
+ #include "registers.h"
+ 
+ #define IDXD_DRIVER_VERSION	"1.00"
+ 
+ extern struct kmem_cache *idxd_desc_pool;
+ 
++struct idxd_device;
++struct idxd_wq;
++
+ #define IDXD_REG_TIMEOUT	50
+ #define IDXD_DRAIN_TIMEOUT	5000
+ 
+@@ -33,6 +37,7 @@ struct idxd_device_driver {
+ struct idxd_irq_entry {
+ 	struct idxd_device *idxd;
+ 	int id;
++	int vector;
+ 	struct llist_head pending_llist;
+ 	struct list_head work_list;
+ 	/*
+@@ -75,10 +80,10 @@ enum idxd_wq_type {
+ };
+ 
+ struct idxd_cdev {
++	struct idxd_wq *wq;
+ 	struct cdev cdev;
+-	struct device *dev;
++	struct device dev;
+ 	int minor;
+-	struct wait_queue_head err_queue;
+ };
+ 
+ #define IDXD_ALLOCATED_BATCH_SIZE	128U
+@@ -96,10 +101,16 @@ enum idxd_complete_type {
+ 	IDXD_COMPLETE_DEV_FAIL,
+ };
+ 
++struct idxd_dma_chan {
++	struct dma_chan chan;
++	struct idxd_wq *wq;
++};
++
+ struct idxd_wq {
+ 	void __iomem *portal;
+ 	struct device conf_dev;
+-	struct idxd_cdev idxd_cdev;
++	struct idxd_cdev *idxd_cdev;
++	struct wait_queue_head err_queue;
+ 	struct idxd_device *idxd;
+ 	int id;
+ 	enum idxd_wq_type type;
+@@ -125,7 +136,7 @@ struct idxd_wq {
+ 	int compls_size;
+ 	struct idxd_desc **descs;
+ 	struct sbitmap_queue sbq;
+-	struct dma_chan dma_chan;
++	struct idxd_dma_chan *idxd_chan;
+ 	char name[WQ_NAME_SIZE + 1];
+ 	u64 max_xfer_bytes;
+ 	u32 max_batch_size;
+@@ -162,6 +173,11 @@ enum idxd_device_flag {
+ 	IDXD_FLAG_PASID_ENABLED,
+ };
+ 
++struct idxd_dma_dev {
++	struct idxd_device *idxd;
++	struct dma_device dma;
++};
++
+ struct idxd_device {
+ 	enum idxd_type type;
+ 	struct device conf_dev;
+@@ -178,9 +194,9 @@ struct idxd_device {
+ 
+ 	spinlock_t dev_lock;	/* spinlock for device */
+ 	struct completion *cmd_done;
+-	struct idxd_group *groups;
+-	struct idxd_wq *wqs;
+-	struct idxd_engine *engines;
++	struct idxd_group **groups;
++	struct idxd_wq **wqs;
++	struct idxd_engine **engines;
+ 
+ 	struct iommu_sva *sva;
+ 	unsigned int pasid;
+@@ -206,11 +222,10 @@ struct idxd_device {
+ 
+ 	union sw_err_reg sw_err;
+ 	wait_queue_head_t cmd_waitq;
+-	struct msix_entry *msix_entries;
+ 	int num_wq_irqs;
+ 	struct idxd_irq_entry *irq_entries;
+ 
+-	struct dma_device dma_dev;
++	struct idxd_dma_dev *idxd_dma;
+ 	struct workqueue_struct *wq;
+ 	struct work_struct work;
+ };
+@@ -242,6 +257,43 @@ extern struct bus_type dsa_bus_type;
+ extern struct bus_type iax_bus_type;
+ 
+ extern bool support_enqcmd;
++extern struct device_type dsa_device_type;
++extern struct device_type iax_device_type;
++extern struct device_type idxd_wq_device_type;
++extern struct device_type idxd_engine_device_type;
++extern struct device_type idxd_group_device_type;
++
++static inline bool is_dsa_dev(struct device *dev)
++{
++	return dev->type == &dsa_device_type;
++}
++
++static inline bool is_iax_dev(struct device *dev)
++{
++	return dev->type == &iax_device_type;
++}
++
++static inline bool is_idxd_dev(struct device *dev)
++{
++	return is_dsa_dev(dev) || is_iax_dev(dev);
++}
++
++static inline bool is_idxd_wq_dev(struct device *dev)
++{
++	return dev->type == &idxd_wq_device_type;
++}
++
++static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
++{
++	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
++		return true;
++	return false;
++}
++
++static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
++{
++	return wq->type == IDXD_WQT_USER;
++}
+ 
+ static inline bool wq_dedicated(struct idxd_wq *wq)
+ {
+@@ -279,18 +331,6 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
+ 	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
+ }
+ 
+-static inline void idxd_set_type(struct idxd_device *idxd)
+-{
+-	struct pci_dev *pdev = idxd->pdev;
+-
+-	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
+-		idxd->type = IDXD_TYPE_DSA;
+-	else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
+-		idxd->type = IDXD_TYPE_IAX;
+-	else
+-		idxd->type = IDXD_TYPE_UNKNOWN;
+-}
+-
+ static inline void idxd_wq_get(struct idxd_wq *wq)
+ {
+ 	wq->client_count++;
+@@ -306,14 +346,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
+ 	return wq->client_count;
+ };
+ 
++struct ida *idxd_ida(struct idxd_device *idxd);
+ const char *idxd_get_dev_name(struct idxd_device *idxd);
+ int idxd_register_bus_type(void);
+ void idxd_unregister_bus_type(void);
+-int idxd_setup_sysfs(struct idxd_device *idxd);
+-void idxd_cleanup_sysfs(struct idxd_device *idxd);
++int idxd_register_devices(struct idxd_device *idxd);
++void idxd_unregister_devices(struct idxd_device *idxd);
+ int idxd_register_driver(void);
+ void idxd_unregister_driver(void);
+ struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
++struct device_type *idxd_get_device_type(struct idxd_device *idxd);
+ 
+ /* device interrupt control */
+ void idxd_msix_perm_setup(struct idxd_device *idxd);
+@@ -363,7 +405,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq);
+ void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
+ void idxd_dma_complete_txd(struct idxd_desc *desc,
+ 			   enum idxd_complete_type comp_type);
+-dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+ 
+ /* cdev */
+ int idxd_cdev_register(void);
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 6584b0ec07d54..07cf7977a0450 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -34,8 +34,7 @@ MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
+ 
+ bool support_enqcmd;
+ 
+-static struct idr idxd_idrs[IDXD_TYPE_MAX];
+-static DEFINE_MUTEX(idxd_idr_lock);
++static struct ida idxd_idas[IDXD_TYPE_MAX];
+ 
+ static struct pci_device_id idxd_pci_tbl[] = {
+ 	/* DSA ver 1.0 platforms */
+@@ -52,6 +51,11 @@ static char *idxd_name[] = {
+ 	"iax"
+ };
+ 
++struct ida *idxd_ida(struct idxd_device *idxd)
++{
++	return &idxd_idas[idxd->type];
++}
++
+ const char *idxd_get_dev_name(struct idxd_device *idxd)
+ {
+ 	return idxd_name[idxd->type];
+@@ -61,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
+ {
+ 	struct pci_dev *pdev = idxd->pdev;
+ 	struct device *dev = &pdev->dev;
+-	struct msix_entry *msix;
+ 	struct idxd_irq_entry *irq_entry;
+ 	int i, msixcnt;
+ 	int rc = 0;
+@@ -69,23 +72,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
+ 	msixcnt = pci_msix_vec_count(pdev);
+ 	if (msixcnt < 0) {
+ 		dev_err(dev, "Not MSI-X interrupt capable.\n");
+-		goto err_no_irq;
+-	}
+-
+-	idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
+-			msixcnt, GFP_KERNEL);
+-	if (!idxd->msix_entries) {
+-		rc = -ENOMEM;
+-		goto err_no_irq;
++		return -ENOSPC;
+ 	}
+ 
+-	for (i = 0; i < msixcnt; i++)
+-		idxd->msix_entries[i].entry = i;
+-
+-	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
+-	if (rc) {
+-		dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
+-		goto err_no_irq;
++	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
++	if (rc != msixcnt) {
++		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
++		return -ENOSPC;
+ 	}
+ 	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
+ 
+@@ -93,119 +86,236 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
+ 	 * We implement 1 completion list per MSI-X entry except for
+ 	 * entry 0, which is for errors and others.
+ 	 */
+-	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
+-					 sizeof(struct idxd_irq_entry),
+-					 GFP_KERNEL);
++	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
++					 GFP_KERNEL, dev_to_node(dev));
+ 	if (!idxd->irq_entries) {
+ 		rc = -ENOMEM;
+-		goto err_no_irq;
++		goto err_irq_entries;
+ 	}
+ 
+ 	for (i = 0; i < msixcnt; i++) {
+ 		idxd->irq_entries[i].id = i;
+ 		idxd->irq_entries[i].idxd = idxd;
++		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
+ 		spin_lock_init(&idxd->irq_entries[i].list_lock);
+ 	}
+ 
+-	msix = &idxd->msix_entries[0];
+ 	irq_entry = &idxd->irq_entries[0];
+-	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
+-				       idxd_misc_thread, 0, "idxd-misc",
+-				       irq_entry);
++	rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler, idxd_misc_thread,
++				  0, "idxd-misc", irq_entry);
+ 	if (rc < 0) {
+ 		dev_err(dev, "Failed to allocate misc interrupt.\n");
+-		goto err_no_irq;
++		goto err_misc_irq;
+ 	}
+ 
+-	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
+-		msix->vector);
++	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
+ 
+ 	/* first MSI-X entry is not for wq interrupts */
+ 	idxd->num_wq_irqs = msixcnt - 1;
+ 
+ 	for (i = 1; i < msixcnt; i++) {
+-		msix = &idxd->msix_entries[i];
+ 		irq_entry = &idxd->irq_entries[i];
+ 
+ 		init_llist_head(&idxd->irq_entries[i].pending_llist);
+ 		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
+-		rc = devm_request_threaded_irq(dev, msix->vector,
+-					       idxd_irq_handler,
+-					       idxd_wq_thread, 0,
+-					       "idxd-portal", irq_entry);
++		rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
++					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
+ 		if (rc < 0) {
+-			dev_err(dev, "Failed to allocate irq %d.\n",
+-				msix->vector);
+-			goto err_no_irq;
++			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
++			goto err_wq_irqs;
+ 		}
+-		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
+-			i, msix->vector);
++		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
+ 	}
+ 
+ 	idxd_unmask_error_interrupts(idxd);
+ 	idxd_msix_perm_setup(idxd);
+ 	return 0;
+ 
+- err_no_irq:
++ err_wq_irqs:
++	while (--i >= 0) {
++		irq_entry = &idxd->irq_entries[i];
++		free_irq(irq_entry->vector, irq_entry);
++	}
++ err_misc_irq:
+ 	/* Disable error interrupt generation */
+ 	idxd_mask_error_interrupts(idxd);
+-	pci_disable_msix(pdev);
++ err_irq_entries:
++	pci_free_irq_vectors(pdev);
+ 	dev_err(dev, "No usable interrupts\n");
+ 	return rc;
+ }
+ 
+-static int idxd_setup_internals(struct idxd_device *idxd)
++static int idxd_setup_wqs(struct idxd_device *idxd)
+ {
+ 	struct device *dev = &idxd->pdev->dev;
+-	int i;
+-
+-	init_waitqueue_head(&idxd->cmd_waitq);
+-	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
+-				    sizeof(struct idxd_group), GFP_KERNEL);
+-	if (!idxd->groups)
+-		return -ENOMEM;
+-
+-	for (i = 0; i < idxd->max_groups; i++) {
+-		idxd->groups[i].idxd = idxd;
+-		idxd->groups[i].id = i;
+-		idxd->groups[i].tc_a = -1;
+-		idxd->groups[i].tc_b = -1;
+-	}
++	struct idxd_wq *wq;
++	int i, rc;
+ 
+-	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
+-				 GFP_KERNEL);
++	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
++				 GFP_KERNEL, dev_to_node(dev));
+ 	if (!idxd->wqs)
+ 		return -ENOMEM;
+ 
+-	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
+-				     sizeof(struct idxd_engine), GFP_KERNEL);
+-	if (!idxd->engines)
+-		return -ENOMEM;
+-
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
++		if (!wq) {
++			rc = -ENOMEM;
++			goto err;
++		}
+ 
+ 		wq->id = i;
+ 		wq->idxd = idxd;
++		device_initialize(&wq->conf_dev);
++		wq->conf_dev.parent = &idxd->conf_dev;
++		wq->conf_dev.bus = idxd_get_bus_type(idxd);
++		wq->conf_dev.type = &idxd_wq_device_type;
++		rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
++		if (rc < 0) {
++			put_device(&wq->conf_dev);
++			goto err;
++		}
++
+ 		mutex_init(&wq->wq_lock);
+-		wq->idxd_cdev.minor = -1;
++		init_waitqueue_head(&wq->err_queue);
+ 		wq->max_xfer_bytes = idxd->max_xfer_bytes;
+ 		wq->max_batch_size = idxd->max_batch_size;
+-		wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
+-		if (!wq->wqcfg)
+-			return -ENOMEM;
++		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
++		if (!wq->wqcfg) {
++			put_device(&wq->conf_dev);
++			rc = -ENOMEM;
++			goto err;
++		}
++		idxd->wqs[i] = wq;
+ 	}
+ 
++	return 0;
++
++ err:
++	while (--i >= 0)
++		put_device(&idxd->wqs[i]->conf_dev);
++	return rc;
++}
++
++static int idxd_setup_engines(struct idxd_device *idxd)
++{
++	struct idxd_engine *engine;
++	struct device *dev = &idxd->pdev->dev;
++	int i, rc;
++
++	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
++				     GFP_KERNEL, dev_to_node(dev));
++	if (!idxd->engines)
++		return -ENOMEM;
++
+ 	for (i = 0; i < idxd->max_engines; i++) {
+-		idxd->engines[i].idxd = idxd;
+-		idxd->engines[i].id = i;
++		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
++		if (!engine) {
++			rc = -ENOMEM;
++			goto err;
++		}
++
++		engine->id = i;
++		engine->idxd = idxd;
++		device_initialize(&engine->conf_dev);
++		engine->conf_dev.parent = &idxd->conf_dev;
++		engine->conf_dev.type = &idxd_engine_device_type;
++		rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
++		if (rc < 0) {
++			put_device(&engine->conf_dev);
++			goto err;
++		}
++
++		idxd->engines[i] = engine;
+ 	}
+ 
+-	idxd->wq = create_workqueue(dev_name(dev));
+-	if (!idxd->wq)
++	return 0;
++
++ err:
++	while (--i >= 0)
++		put_device(&idxd->engines[i]->conf_dev);
++	return rc;
++}
++
++static int idxd_setup_groups(struct idxd_device *idxd)
++{
++	struct device *dev = &idxd->pdev->dev;
++	struct idxd_group *group;
++	int i, rc;
++
++	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
++				    GFP_KERNEL, dev_to_node(dev));
++	if (!idxd->groups)
+ 		return -ENOMEM;
+ 
++	for (i = 0; i < idxd->max_groups; i++) {
++		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
++		if (!group) {
++			rc = -ENOMEM;
++			goto err;
++		}
++
++		group->id = i;
++		group->idxd = idxd;
++		device_initialize(&group->conf_dev);
++		group->conf_dev.parent = &idxd->conf_dev;
++		group->conf_dev.bus = idxd_get_bus_type(idxd);
++		group->conf_dev.type = &idxd_group_device_type;
++		rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
++		if (rc < 0) {
++			put_device(&group->conf_dev);
++			goto err;
++		}
++
++		idxd->groups[i] = group;
++		group->tc_a = -1;
++		group->tc_b = -1;
++	}
++
++	return 0;
++
++ err:
++	while (--i >= 0)
++		put_device(&idxd->groups[i]->conf_dev);
++	return rc;
++}
++
++static int idxd_setup_internals(struct idxd_device *idxd)
++{
++	struct device *dev = &idxd->pdev->dev;
++	int rc, i;
++
++	init_waitqueue_head(&idxd->cmd_waitq);
++
++	rc = idxd_setup_wqs(idxd);
++	if (rc < 0)
++		return rc;
++
++	rc = idxd_setup_engines(idxd);
++	if (rc < 0)
++		goto err_engine;
++
++	rc = idxd_setup_groups(idxd);
++	if (rc < 0)
++		goto err_group;
++
++	idxd->wq = create_workqueue(dev_name(dev));
++	if (!idxd->wq) {
++		rc = -ENOMEM;
++		goto err_wkq_create;
++	}
++
+ 	return 0;
++
++ err_wkq_create:
++	for (i = 0; i < idxd->max_groups; i++)
++		put_device(&idxd->groups[i]->conf_dev);
++ err_group:
++	for (i = 0; i < idxd->max_engines; i++)
++		put_device(&idxd->engines[i]->conf_dev);
++ err_engine:
++	for (i = 0; i < idxd->max_wqs; i++)
++		put_device(&idxd->wqs[i]->conf_dev);
++	return rc;
+ }
+ 
+ static void idxd_read_table_offsets(struct idxd_device *idxd)
+@@ -275,16 +385,44 @@ static void idxd_read_caps(struct idxd_device *idxd)
+ 	}
+ }
+ 
++static inline void idxd_set_type(struct idxd_device *idxd)
++{
++	struct pci_dev *pdev = idxd->pdev;
++
++	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
++		idxd->type = IDXD_TYPE_DSA;
++	else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
++		idxd->type = IDXD_TYPE_IAX;
++	else
++		idxd->type = IDXD_TYPE_UNKNOWN;
++}
++
+ static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct idxd_device *idxd;
++	int rc;
+ 
+-	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
++	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
+ 	if (!idxd)
+ 		return NULL;
+ 
+ 	idxd->pdev = pdev;
++	idxd_set_type(idxd);
++	idxd->id = ida_alloc(idxd_ida(idxd), GFP_KERNEL);
++	if (idxd->id < 0)
++		return NULL;
++
++	device_initialize(&idxd->conf_dev);
++	idxd->conf_dev.parent = dev;
++	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
++	idxd->conf_dev.type = idxd_get_device_type(idxd);
++	rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd_get_dev_name(idxd), idxd->id);
++	if (rc < 0) {
++		put_device(&idxd->conf_dev);
++		return NULL;
++	}
++
+ 	spin_lock_init(&idxd->dev_lock);
+ 
+ 	return idxd;
+@@ -352,31 +490,20 @@ static int idxd_probe(struct idxd_device *idxd)
+ 
+ 	rc = idxd_setup_internals(idxd);
+ 	if (rc)
+-		goto err_setup;
++		goto err;
+ 
+ 	rc = idxd_setup_interrupts(idxd);
+ 	if (rc)
+-		goto err_setup;
++		goto err;
+ 
+ 	dev_dbg(dev, "IDXD interrupt setup complete.\n");
+ 
+-	mutex_lock(&idxd_idr_lock);
+-	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
+-	mutex_unlock(&idxd_idr_lock);
+-	if (idxd->id < 0) {
+-		rc = -ENOMEM;
+-		goto err_idr_fail;
+-	}
+-
+ 	idxd->major = idxd_cdev_get_major(idxd);
+ 
+ 	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
+ 	return 0;
+ 
+- err_idr_fail:
+-	idxd_mask_error_interrupts(idxd);
+-	idxd_mask_msix_vectors(idxd);
+- err_setup:
++ err:
+ 	if (device_pasid_enabled(idxd))
+ 		idxd_disable_system_pasid(idxd);
+ 	return rc;
+@@ -396,34 +523,37 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	struct idxd_device *idxd;
+ 	int rc;
+ 
+-	rc = pcim_enable_device(pdev);
++	rc = pci_enable_device(pdev);
+ 	if (rc)
+ 		return rc;
+ 
+ 	dev_dbg(dev, "Alloc IDXD context\n");
+ 	idxd = idxd_alloc(pdev);
+-	if (!idxd)
+-		return -ENOMEM;
++	if (!idxd) {
++		rc = -ENOMEM;
++		goto err_idxd_alloc;
++	}
+ 
+ 	dev_dbg(dev, "Mapping BARs\n");
+-	idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
+-	if (!idxd->reg_base)
+-		return -ENOMEM;
++	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
++	if (!idxd->reg_base) {
++		rc = -ENOMEM;
++		goto err_iomap;
++	}
+ 
+ 	dev_dbg(dev, "Set DMA masks\n");
+ 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ 	if (rc)
+ 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ 	if (rc)
+-		return rc;
++		goto err;
+ 
+ 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ 	if (rc)
+ 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ 	if (rc)
+-		return rc;
++		goto err;
+ 
+-	idxd_set_type(idxd);
+ 
+ 	idxd_type_init(idxd);
+ 
+@@ -435,13 +565,13 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	rc = idxd_probe(idxd);
+ 	if (rc) {
+ 		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+-		return -ENODEV;
++		goto err;
+ 	}
+ 
+-	rc = idxd_setup_sysfs(idxd);
++	rc = idxd_register_devices(idxd);
+ 	if (rc) {
+ 		dev_err(dev, "IDXD sysfs setup failed\n");
+-		return -ENODEV;
++		goto err;
+ 	}
+ 
+ 	idxd->state = IDXD_DEV_CONF_READY;
+@@ -450,6 +580,14 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		 idxd->hw.version);
+ 
+ 	return 0;
++
++ err:
++	pci_iounmap(pdev, idxd->reg_base);
++ err_iomap:
++	put_device(&idxd->conf_dev);
++ err_idxd_alloc:
++	pci_disable_device(pdev);
++	return rc;
+ }
+ 
+ static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
+@@ -495,7 +633,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
+ 
+ 	for (i = 0; i < msixcnt; i++) {
+ 		irq_entry = &idxd->irq_entries[i];
+-		synchronize_irq(idxd->msix_entries[i].vector);
++		synchronize_irq(irq_entry->vector);
++		free_irq(irq_entry->vector, irq_entry);
+ 		if (i == 0)
+ 			continue;
+ 		idxd_flush_pending_llist(irq_entry);
+@@ -503,6 +642,9 @@ static void idxd_shutdown(struct pci_dev *pdev)
+ 	}
+ 
+ 	idxd_msix_perm_clear(idxd);
++	pci_free_irq_vectors(pdev);
++	pci_iounmap(pdev, idxd->reg_base);
++	pci_disable_device(pdev);
+ 	destroy_workqueue(idxd->wq);
+ }
+ 
+@@ -511,13 +653,10 @@ static void idxd_remove(struct pci_dev *pdev)
+ 	struct idxd_device *idxd = pci_get_drvdata(pdev);
+ 
+ 	dev_dbg(&pdev->dev, "%s called\n", __func__);
+-	idxd_cleanup_sysfs(idxd);
+ 	idxd_shutdown(pdev);
+ 	if (device_pasid_enabled(idxd))
+ 		idxd_disable_system_pasid(idxd);
+-	mutex_lock(&idxd_idr_lock);
+-	idr_remove(&idxd_idrs[idxd->type], idxd->id);
+-	mutex_unlock(&idxd_idr_lock);
++	idxd_unregister_devices(idxd);
+ }
+ 
+ static struct pci_driver idxd_pci_driver = {
+@@ -547,7 +686,7 @@ static int __init idxd_init_module(void)
+ 		support_enqcmd = true;
+ 
+ 	for (i = 0; i < IDXD_TYPE_MAX; i++)
+-		idr_init(&idxd_idrs[i]);
++		ida_init(&idxd_idas[i]);
+ 
+ 	err = idxd_register_bus_type();
+ 	if (err < 0)
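
Note on the init.c hunks above: the open-coded struct msix_entry array is dropped in favour of the managed MSI-X API. pci_alloc_irq_vectors() allocates the vectors, pci_irq_vector() maps an index to a Linux IRQ number, and pci_free_irq_vectors() tears everything down. The idiom, as used above:

int nvec = pci_msix_vec_count(pdev);

rc = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);	/* exactly nvec or fail */
if (rc != nvec)
	return -ENOSPC;

for (i = 0; i < nvec; i++)
	irq_entries[i].vector = pci_irq_vector(pdev, i);	/* index -> irq number */

/* ... request_threaded_irq() on each ->vector; on teardown,
 * free_irq() them all, then pci_free_irq_vectors(pdev) */
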
+diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
+index f1463fc581125..fc0781e3f36d4 100644
+--- a/drivers/dma/idxd/irq.c
++++ b/drivers/dma/idxd/irq.c
+@@ -45,7 +45,7 @@ static void idxd_device_reinit(struct work_struct *work)
+ 		goto out;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		if (wq->state == IDXD_WQ_ENABLED) {
+ 			rc = idxd_wq_enable(wq);
+@@ -130,18 +130,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
+ 
+ 		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+ 			int id = idxd->sw_err.wq_idx;
+-			struct idxd_wq *wq = &idxd->wqs[id];
++			struct idxd_wq *wq = idxd->wqs[id];
+ 
+ 			if (wq->type == IDXD_WQT_USER)
+-				wake_up_interruptible(&wq->idxd_cdev.err_queue);
++				wake_up_interruptible(&wq->err_queue);
+ 		} else {
+ 			int i;
+ 
+ 			for (i = 0; i < idxd->max_wqs; i++) {
+-				struct idxd_wq *wq = &idxd->wqs[i];
++				struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 				if (wq->type == IDXD_WQT_USER)
+-					wake_up_interruptible(&wq->idxd_cdev.err_queue);
++					wake_up_interruptible(&wq->err_queue);
+ 			}
+ 		}
+ 
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 18bf4d1489890..9586b55abce56 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -16,69 +16,6 @@ static char *idxd_wq_type_names[] = {
+ 	[IDXD_WQT_USER]		= "user",
+ };
+ 
+-static void idxd_conf_device_release(struct device *dev)
+-{
+-	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
+-}
+-
+-static struct device_type idxd_group_device_type = {
+-	.name = "group",
+-	.release = idxd_conf_device_release,
+-};
+-
+-static struct device_type idxd_wq_device_type = {
+-	.name = "wq",
+-	.release = idxd_conf_device_release,
+-};
+-
+-static struct device_type idxd_engine_device_type = {
+-	.name = "engine",
+-	.release = idxd_conf_device_release,
+-};
+-
+-static struct device_type dsa_device_type = {
+-	.name = "dsa",
+-	.release = idxd_conf_device_release,
+-};
+-
+-static struct device_type iax_device_type = {
+-	.name = "iax",
+-	.release = idxd_conf_device_release,
+-};
+-
+-static inline bool is_dsa_dev(struct device *dev)
+-{
+-	return dev ? dev->type == &dsa_device_type : false;
+-}
+-
+-static inline bool is_iax_dev(struct device *dev)
+-{
+-	return dev ? dev->type == &iax_device_type : false;
+-}
+-
+-static inline bool is_idxd_dev(struct device *dev)
+-{
+-	return is_dsa_dev(dev) || is_iax_dev(dev);
+-}
+-
+-static inline bool is_idxd_wq_dev(struct device *dev)
+-{
+-	return dev ? dev->type == &idxd_wq_device_type : false;
+-}
+-
+-static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+-{
+-	if (wq->type == IDXD_WQT_KERNEL &&
+-	    strcmp(wq->name, "dmaengine") == 0)
+-		return true;
+-	return false;
+-}
+-
+-static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+-{
+-	return wq->type == IDXD_WQT_USER;
+-}
+-
+ static int idxd_config_bus_match(struct device *dev,
+ 				 struct device_driver *drv)
+ {
+@@ -322,7 +259,7 @@ static int idxd_config_bus_remove(struct device *dev)
+ 		dev_dbg(dev, "%s removing dev %s\n", __func__,
+ 			dev_name(&idxd->conf_dev));
+ 		for (i = 0; i < idxd->max_wqs; i++) {
+-			struct idxd_wq *wq = &idxd->wqs[i];
++			struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 			if (wq->state == IDXD_WQ_DISABLED)
+ 				continue;
+@@ -334,7 +271,7 @@ static int idxd_config_bus_remove(struct device *dev)
+ 		idxd_unregister_dma_device(idxd);
+ 		rc = idxd_device_disable(idxd);
+ 		for (i = 0; i < idxd->max_wqs; i++) {
+-			struct idxd_wq *wq = &idxd->wqs[i];
++			struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 			mutex_lock(&wq->wq_lock);
+ 			idxd_wq_disable_cleanup(wq);
+@@ -405,7 +342,7 @@ struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
+ 	return idxd_bus_types[idxd->type];
+ }
+ 
+-static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
++struct device_type *idxd_get_device_type(struct idxd_device *idxd)
+ {
+ 	if (idxd->type == IDXD_TYPE_DSA)
+ 		return &dsa_device_type;
+@@ -488,7 +425,7 @@ static ssize_t engine_group_id_store(struct device *dev,
+ 
+ 	if (prevg)
+ 		prevg->num_engines--;
+-	engine->group = &idxd->groups[id];
++	engine->group = idxd->groups[id];
+ 	engine->group->num_engines++;
+ 
+ 	return count;
+@@ -512,6 +449,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
+ 	NULL,
+ };
+ 
++static void idxd_conf_engine_release(struct device *dev)
++{
++	struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
++
++	kfree(engine);
++}
++
++struct device_type idxd_engine_device_type = {
++	.name = "engine",
++	.release = idxd_conf_engine_release,
++	.groups = idxd_engine_attribute_groups,
++};
++
+ /* Group attributes */
+ 
+ static void idxd_set_free_tokens(struct idxd_device *idxd)
+@@ -519,7 +469,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd)
+ 	int i, tokens;
+ 
+ 	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+-		struct idxd_group *g = &idxd->groups[i];
++		struct idxd_group *g = idxd->groups[i];
+ 
+ 		tokens += g->tokens_reserved;
+ 	}
+@@ -674,7 +624,7 @@ static ssize_t group_engines_show(struct device *dev,
+ 	struct idxd_device *idxd = group->idxd;
+ 
+ 	for (i = 0; i < idxd->max_engines; i++) {
+-		struct idxd_engine *engine = &idxd->engines[i];
++		struct idxd_engine *engine = idxd->engines[i];
+ 
+ 		if (!engine->group)
+ 			continue;
+@@ -703,7 +653,7 @@ static ssize_t group_work_queues_show(struct device *dev,
+ 	struct idxd_device *idxd = group->idxd;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		if (!wq->group)
+ 			continue;
+@@ -824,6 +774,19 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
+ 	NULL,
+ };
+ 
++static void idxd_conf_group_release(struct device *dev)
++{
++	struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
++
++	kfree(group);
++}
++
++struct device_type idxd_group_device_type = {
++	.name = "group",
++	.release = idxd_conf_group_release,
++	.groups = idxd_group_attribute_groups,
++};
++
+ /* IDXD work queue attribs */
+ static ssize_t wq_clients_show(struct device *dev,
+ 			       struct device_attribute *attr, char *buf)
+@@ -896,7 +859,7 @@ static ssize_t wq_group_id_store(struct device *dev,
+ 		return count;
+ 	}
+ 
+-	group = &idxd->groups[id];
++	group = idxd->groups[id];
+ 	prevg = wq->group;
+ 
+ 	if (prevg)
+@@ -960,7 +923,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd)
+ 	int wq_size = 0;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		wq_size += wq->size;
+ 	}
+@@ -1206,8 +1169,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
+ 				  struct device_attribute *attr, char *buf)
+ {
+ 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
++	int minor = -1;
+ 
+-	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
++	mutex_lock(&wq->wq_lock);
++	if (wq->idxd_cdev)
++		minor = wq->idxd_cdev->minor;
++	mutex_unlock(&wq->wq_lock);
++
++	if (minor == -1)
++		return -ENXIO;
++	return sysfs_emit(buf, "%d\n", minor);
+ }
+ 
+ static struct device_attribute dev_attr_wq_cdev_minor =
+@@ -1356,6 +1327,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
+ 	NULL,
+ };
+ 
++static void idxd_conf_wq_release(struct device *dev)
++{
++	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
++
++	kfree(wq->wqcfg);
++	kfree(wq);
++}
++
++struct device_type idxd_wq_device_type = {
++	.name = "wq",
++	.release = idxd_conf_wq_release,
++	.groups = idxd_wq_attribute_groups,
++};
++
+ /* IDXD device attribs */
+ static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ 			    char *buf)
+@@ -1486,7 +1471,7 @@ static ssize_t clients_show(struct device *dev,
+ 
+ 	spin_lock_irqsave(&idxd->dev_lock, flags);
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		count += wq->client_count;
+ 	}
+@@ -1644,183 +1629,160 @@ static const struct attribute_group *idxd_attribute_groups[] = {
+ 	NULL,
+ };
+ 
+-static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
++static void idxd_conf_device_release(struct device *dev)
+ {
+-	struct device *dev = &idxd->pdev->dev;
+-	int i, rc;
++	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
++
++	kfree(idxd->groups);
++	kfree(idxd->wqs);
++	kfree(idxd->engines);
++	kfree(idxd->irq_entries);
++	ida_free(idxd_ida(idxd), idxd->id);
++	kfree(idxd);
++}
++
++struct device_type dsa_device_type = {
++	.name = "dsa",
++	.release = idxd_conf_device_release,
++	.groups = idxd_attribute_groups,
++};
++
++struct device_type iax_device_type = {
++	.name = "iax",
++	.release = idxd_conf_device_release,
++	.groups = idxd_attribute_groups,
++};
++
++static int idxd_register_engine_devices(struct idxd_device *idxd)
++{
++	int i, j, rc;
+ 
+ 	for (i = 0; i < idxd->max_engines; i++) {
+-		struct idxd_engine *engine = &idxd->engines[i];
+-
+-		engine->conf_dev.parent = &idxd->conf_dev;
+-		dev_set_name(&engine->conf_dev, "engine%d.%d",
+-			     idxd->id, engine->id);
+-		engine->conf_dev.bus = idxd_get_bus_type(idxd);
+-		engine->conf_dev.groups = idxd_engine_attribute_groups;
+-		engine->conf_dev.type = &idxd_engine_device_type;
+-		dev_dbg(dev, "Engine device register: %s\n",
+-			dev_name(&engine->conf_dev));
+-		rc = device_register(&engine->conf_dev);
+-		if (rc < 0) {
+-			put_device(&engine->conf_dev);
++		struct idxd_engine *engine = idxd->engines[i];
++
++		rc = device_add(&engine->conf_dev);
++		if (rc < 0)
+ 			goto cleanup;
+-		}
+ 	}
+ 
+ 	return 0;
+ 
+ cleanup:
+-	while (i--) {
+-		struct idxd_engine *engine = &idxd->engines[i];
++	j = i - 1;
++	for (; i < idxd->max_engines; i++)
++		put_device(&idxd->engines[i]->conf_dev);
+ 
+-		device_unregister(&engine->conf_dev);
+-	}
++	while (j--)
++		device_unregister(&idxd->engines[j]->conf_dev);
+ 	return rc;
+ }
+ 
+-static int idxd_setup_group_sysfs(struct idxd_device *idxd)
++static int idxd_register_group_devices(struct idxd_device *idxd)
+ {
+-	struct device *dev = &idxd->pdev->dev;
+-	int i, rc;
++	int i, j, rc;
+ 
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		struct idxd_group *group = &idxd->groups[i];
+-
+-		group->conf_dev.parent = &idxd->conf_dev;
+-		dev_set_name(&group->conf_dev, "group%d.%d",
+-			     idxd->id, group->id);
+-		group->conf_dev.bus = idxd_get_bus_type(idxd);
+-		group->conf_dev.groups = idxd_group_attribute_groups;
+-		group->conf_dev.type = &idxd_group_device_type;
+-		dev_dbg(dev, "Group device register: %s\n",
+-			dev_name(&group->conf_dev));
+-		rc = device_register(&group->conf_dev);
+-		if (rc < 0) {
+-			put_device(&group->conf_dev);
++		struct idxd_group *group = idxd->groups[i];
++
++		rc = device_add(&group->conf_dev);
++		if (rc < 0)
+ 			goto cleanup;
+-		}
+ 	}
+ 
+ 	return 0;
+ 
+ cleanup:
+-	while (i--) {
+-		struct idxd_group *group = &idxd->groups[i];
++	j = i - 1;
++	for (; i < idxd->max_groups; i++)
++		put_device(&idxd->groups[i]->conf_dev);
+ 
+-		device_unregister(&group->conf_dev);
+-	}
++	while (j--)
++		device_unregister(&idxd->groups[j]->conf_dev);
+ 	return rc;
+ }
+ 
+-static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
++static int idxd_register_wq_devices(struct idxd_device *idxd)
+ {
+-	struct device *dev = &idxd->pdev->dev;
+-	int i, rc;
++	int i, rc, j;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
+-
+-		wq->conf_dev.parent = &idxd->conf_dev;
+-		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+-		wq->conf_dev.bus = idxd_get_bus_type(idxd);
+-		wq->conf_dev.groups = idxd_wq_attribute_groups;
+-		wq->conf_dev.type = &idxd_wq_device_type;
+-		dev_dbg(dev, "WQ device register: %s\n",
+-			dev_name(&wq->conf_dev));
+-		rc = device_register(&wq->conf_dev);
+-		if (rc < 0) {
+-			put_device(&wq->conf_dev);
++		struct idxd_wq *wq = idxd->wqs[i];
++
++		rc = device_add(&wq->conf_dev);
++		if (rc < 0)
+ 			goto cleanup;
+-		}
+ 	}
+ 
+ 	return 0;
+ 
+ cleanup:
+-	while (i--) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++	j = i - 1;
++	for (; i < idxd->max_wqs; i++)
++		put_device(&idxd->wqs[i]->conf_dev);
+ 
+-		device_unregister(&wq->conf_dev);
+-	}
++	while (j--)
++		device_unregister(&idxd->wqs[j]->conf_dev);
+ 	return rc;
+ }
+ 
+-static int idxd_setup_device_sysfs(struct idxd_device *idxd)
++int idxd_register_devices(struct idxd_device *idxd)
+ {
+ 	struct device *dev = &idxd->pdev->dev;
+-	int rc;
+-	char devname[IDXD_NAME_SIZE];
+-
+-	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
+-	idxd->conf_dev.parent = dev;
+-	dev_set_name(&idxd->conf_dev, "%s", devname);
+-	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
+-	idxd->conf_dev.groups = idxd_attribute_groups;
+-	idxd->conf_dev.type = idxd_get_device_type(idxd);
+-
+-	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
+-	rc = device_register(&idxd->conf_dev);
+-	if (rc < 0) {
+-		put_device(&idxd->conf_dev);
+-		return rc;
+-	}
++	int rc, i;
+ 
+-	return 0;
+-}
+-
+-int idxd_setup_sysfs(struct idxd_device *idxd)
+-{
+-	struct device *dev = &idxd->pdev->dev;
+-	int rc;
+-
+-	rc = idxd_setup_device_sysfs(idxd);
+-	if (rc < 0) {
+-		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
++	rc = device_add(&idxd->conf_dev);
++	if (rc < 0)
+ 		return rc;
+-	}
+ 
+-	rc = idxd_setup_wq_sysfs(idxd);
++	rc = idxd_register_wq_devices(idxd);
+ 	if (rc < 0) {
+-		/* unregister conf dev */
+-		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
+-		return rc;
++		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
++		goto err_wq;
+ 	}
+ 
+-	rc = idxd_setup_group_sysfs(idxd);
++	rc = idxd_register_engine_devices(idxd);
+ 	if (rc < 0) {
+-		/* unregister conf dev */
+-		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
+-		return rc;
++		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
++		goto err_engine;
+ 	}
+ 
+-	rc = idxd_setup_engine_sysfs(idxd);
++	rc = idxd_register_group_devices(idxd);
+ 	if (rc < 0) {
+-		/* unregister conf dev */
+-		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
+-		return rc;
++		dev_dbg(dev, "Group device registering failed: %d\n", rc);
++		goto err_group;
+ 	}
+ 
+ 	return 0;
++
++ err_group:
++	for (i = 0; i < idxd->max_engines; i++)
++		device_unregister(&idxd->engines[i]->conf_dev);
++ err_engine:
++	for (i = 0; i < idxd->max_wqs; i++)
++		device_unregister(&idxd->wqs[i]->conf_dev);
++ err_wq:
++	device_del(&idxd->conf_dev);
++	return rc;
+ }
+ 
+-void idxd_cleanup_sysfs(struct idxd_device *idxd)
++void idxd_unregister_devices(struct idxd_device *idxd)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		device_unregister(&wq->conf_dev);
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_engines; i++) {
+-		struct idxd_engine *engine = &idxd->engines[i];
++		struct idxd_engine *engine = idxd->engines[i];
+ 
+ 		device_unregister(&engine->conf_dev);
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		struct idxd_group *group = &idxd->groups[i];
++		struct idxd_group *group = idxd->groups[i];
+ 
+ 		device_unregister(&group->conf_dev);
+ 	}
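
The idxd rework above is an instance of the standard driver-model lifetime rule: an object that embeds a struct device must be freed from the device's release() callback rather than by a bare kfree() in the caller, and a failed device_add() is undone with put_device() so release() still runs. A minimal sketch of the pattern, assuming hypothetical names (my_obj, my_obj_add) that are not part of the patch:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_obj {
    	struct device conf_dev;	/* embedded; owns the object's lifetime */
    	int id;
    };

    static void my_obj_release(struct device *dev)
    {
    	struct my_obj *obj = container_of(dev, struct my_obj, conf_dev);

    	kfree(obj);	/* runs only once the last reference is dropped */
    }

    static const struct device_type my_obj_type = {
    	.name    = "my_obj",
    	.release = my_obj_release,
    };

    static int my_obj_add(struct my_obj *obj, struct device *parent)
    {
    	int rc;

    	device_initialize(&obj->conf_dev);
    	obj->conf_dev.parent = parent;
    	obj->conf_dev.type = &my_obj_type;
    	dev_set_name(&obj->conf_dev, "my_obj%d", obj->id);

    	rc = device_add(&obj->conf_dev);
    	if (rc)
    		put_device(&obj->conf_dev);	/* frees obj via release() */
    	return rc;
    }
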
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 7645223ea0ef2..97c11aa47ad0b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -77,6 +77,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ 		}
+ 
+ 		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
++		/* flush the cache before committing the IB */
++		ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
+ 
+ 		if (!vm)
+ 			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+index 0cdbfcd475ec9..71a15f68514b0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+@@ -644,6 +644,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
+ 
+ 	/* File created at /sys/class/drm/card0/device/hdcp_srm*/
+ 	hdcp_work[0].attr = data_attr;
++	sysfs_bin_attr_init(&hdcp_work[0].attr);
+ 
+ 	if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
+ 		DRM_WARN("Failed to create device file hdcp_srm");
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index c0b827d162684..4781279024a96 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2546,6 +2546,10 @@ static void commit_planes_for_stream(struct dc *dc,
+ 						plane_state->triplebuffer_flips = true;
+ 				}
+ 			}
++			if (update_type == UPDATE_TYPE_FULL) {
++				/* force vsync flip when reconfiguring pipes to prevent underflow */
++				plane_state->flip_immediate = false;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+index bec7059f6d5d1..a1318c31bcfa5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012-17 Advanced Micro Devices, Inc.
++ * Copyright 2012-2021 Advanced Micro Devices, Inc.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the "Software"),
+@@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
+ 	else
+ 		Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
+ 	*/
+-	if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+-		+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+-		value = 1;
+-	} else
+-		value = 0;
++	if (pipe_dest->htotal != 0) {
++		if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
++			+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
++			value = 1;
++		} else
++			value = 0;
++	}
++
+ 	REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index 904ce9b88088e..afbe8856468a0 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -791,6 +791,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
+ 			   TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ 			hdcp->connection.is_hdcp2_revoked = 1;
+ 			status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
++		} else {
++			status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+ 		}
+ 	}
+ 	mutex_unlock(&psp->hdcp_context.mutex);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 775d89b6c3fc4..5a51036325647 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1174,44 +1174,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
+ 	return -EINVAL;
+ }
+ 
+-/* Optimize link config in order: max bpp, min lanes, min clock */
+-static int
+-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+-				  struct intel_crtc_state *pipe_config,
+-				  const struct link_config_limits *limits)
+-{
+-	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+-	int bpp, clock, lane_count;
+-	int mode_rate, link_clock, link_avail;
+-
+-	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+-		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
+-
+-		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+-						   output_bpp);
+-
+-		for (lane_count = limits->min_lane_count;
+-		     lane_count <= limits->max_lane_count;
+-		     lane_count <<= 1) {
+-			for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+-				link_clock = intel_dp->common_rates[clock];
+-				link_avail = intel_dp_max_data_rate(link_clock,
+-								    lane_count);
+-
+-				if (mode_rate <= link_avail) {
+-					pipe_config->lane_count = lane_count;
+-					pipe_config->pipe_bpp = bpp;
+-					pipe_config->port_clock = link_clock;
+-
+-					return 0;
+-				}
+-			}
+-		}
+-	}
+-
+-	return -EINVAL;
+-}
+-
+ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
+ {
+ 	int i, num_bpc;
+@@ -1461,22 +1423,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
+ 	    intel_dp_can_bigjoiner(intel_dp))
+ 		pipe_config->bigjoiner = true;
+ 
+-	if (intel_dp_is_edp(intel_dp))
+-		/*
+-		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+-		 * section A.1: "It is recommended that the minimum number of
+-		 * lanes be used, using the minimum link rate allowed for that
+-		 * lane configuration."
+-		 *
+-		 * Note that we fall back to the max clock and lane count for eDP
+-		 * panels that fail with the fast optimal settings (see
+-		 * intel_dp->use_max_params), in which case the fast vs. wide
+-		 * choice doesn't matter.
+-		 */
+-		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
+-	else
+-		/* Optimize for slow and wide. */
+-		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
++	/*
++	 * Optimize for slow and wide for everything, because some eDP 1.3
++	 * and 1.4 panels don't work well with fast and narrow.
++	 */
++	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+ 
+ 	/* enable compression if the mode doesn't fit available BW */
+ 	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
+diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
+index f455040fa989c..7cbc81da80b72 100644
+--- a/drivers/gpu/drm/i915/display/intel_overlay.c
++++ b/drivers/gpu/drm/i915/display/intel_overlay.c
+@@ -383,7 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
+ 		i830_overlay_clock_gating(dev_priv, true);
+ }
+ 
+-static void
++__i915_active_call static void
+ intel_overlay_last_flip_retire(struct i915_active *active)
+ {
+ 	struct intel_overlay *overlay =
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index ec28a6cde49bd..0b2434e29d002 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
+ 	struct i915_ggtt_view view;
+ 
+ 	if (i915_gem_object_is_tiled(obj))
+-		chunk = roundup(chunk, tile_row_pages(obj));
++		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
+ 
+ 	view.type = I915_GGTT_VIEW_PARTIAL;
+ 	view.partial.offset = rounddown(page_offset, chunk);
+diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+index 755522ced60de..3ae16945bd436 100644
+--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
++++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+@@ -630,7 +630,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
+ 
+ 		err = pin_pt_dma(vm, pde->pt.base);
+ 		if (err) {
+-			i915_gem_object_put(pde->pt.base);
+ 			free_pd(vm, pde);
+ 			return err;
+ 		}
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+index 67de2b1895989..4b09490c20c0a 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+@@ -670,8 +670,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
+ 		 * banks of memory are paired and unswizzled on the
+ 		 * uneven portion, so leave that as unknown.
+ 		 */
+-		if (intel_uncore_read(uncore, C0DRB3) ==
+-		    intel_uncore_read(uncore, C1DRB3)) {
++		if (intel_uncore_read16(uncore, C0DRB3) ==
++		    intel_uncore_read16(uncore, C1DRB3)) {
+ 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ 			swizzle_y = I915_BIT_6_SWIZZLE_9;
+ 		}
+diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
+index 3bc616cc1ad23..ea660e541c909 100644
+--- a/drivers/gpu/drm/i915/i915_active.c
++++ b/drivers/gpu/drm/i915/i915_active.c
+@@ -1156,7 +1156,8 @@ static int auto_active(struct i915_active *ref)
+ 	return 0;
+ }
+ 
+-static void auto_retire(struct i915_active *ref)
++__i915_active_call static void
++auto_retire(struct i915_active *ref)
+ {
+ 	i915_active_put(ref);
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index d553f62f4eeb8..b4d8e1b01ee4f 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1153,10 +1153,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
+ {
+ 	struct device_node *phandle;
+ 
+-	a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
+-	if (IS_ERR(a6xx_gpu->llc_mmio))
+-		return;
+-
+ 	/*
+ 	 * There is a different programming path for targets with an mmu500
+ 	 * attached, so detect if that is the case
+@@ -1166,6 +1162,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
+ 		of_device_is_compatible(phandle, "arm,mmu-500"));
+ 	of_node_put(phandle);
+ 
++	if (a6xx_gpu->have_mmu500)
++		a6xx_gpu->llc_mmio = NULL;
++	else
++		a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
++
+ 	a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
+ 	a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
+ 
+diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
+index 82a8673ab8daf..d7e4a39a904e2 100644
+--- a/drivers/gpu/drm/msm/dp/dp_audio.c
++++ b/drivers/gpu/drm/msm/dp/dp_audio.c
+@@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
+ 	dp_audio_setup_acr(audio);
+ 	dp_audio_safe_to_exit_level(audio);
+ 	dp_audio_enable(audio, true);
++	dp_display_signal_audio_start(dp_display);
+ 	dp_display->audio_enabled = true;
+ 
+ end:
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 5a39da6e1eaf2..1784e119269b7 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
+ 	return 0;
+ }
+ 
++void dp_display_signal_audio_start(struct msm_dp *dp_display)
++{
++	struct dp_display_private *dp;
++
++	dp = container_of(dp_display, struct dp_display_private, dp_display);
++
++	reinit_completion(&dp->audio_comp);
++}
++
+ void dp_display_signal_audio_complete(struct msm_dp *dp_display)
+ {
+ 	struct dp_display_private *dp;
+@@ -586,10 +595,8 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
+ 	mutex_lock(&dp->event_mutex);
+ 
+ 	state = dp->hpd_state;
+-	if (state == ST_CONNECT_PENDING) {
+-		dp_display_enable(dp, 0);
++	if (state == ST_CONNECT_PENDING)
+ 		dp->hpd_state = ST_CONNECTED;
+-	}
+ 
+ 	mutex_unlock(&dp->event_mutex);
+ 
+@@ -651,7 +658,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ 	dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+ 
+ 	/* signal the disconnect event early to ensure proper teardown */
+-	reinit_completion(&dp->audio_comp);
+ 	dp_display_handle_plugged_change(g_dp_display, false);
+ 
+ 	dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
+@@ -669,10 +675,8 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
+ 	mutex_lock(&dp->event_mutex);
+ 
+ 	state =  dp->hpd_state;
+-	if (state == ST_DISCONNECT_PENDING) {
+-		dp_display_disable(dp, 0);
++	if (state == ST_DISCONNECT_PENDING)
+ 		dp->hpd_state = ST_DISCONNECTED;
+-	}
+ 
+ 	mutex_unlock(&dp->event_mutex);
+ 
+@@ -898,7 +902,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
+ 	/* wait only if audio was enabled */
+ 	if (dp_display->audio_enabled) {
+ 		/* signal the disconnect event */
+-		reinit_completion(&dp->audio_comp);
+ 		dp_display_handle_plugged_change(dp_display, false);
+ 		if (!wait_for_completion_timeout(&dp->audio_comp,
+ 				HZ * 5))
+@@ -1272,7 +1275,12 @@ static int dp_pm_resume(struct device *dev)
+ 
+ 	status = dp_catalog_link_is_connected(dp->catalog);
+ 
+-	if (status)
++	/*
++	 * Cannot declare the display connected unless the
++	 * HDMI cable is plugged in and the sink_count of
++	 * the dongle becomes 1
++	 */
++	if (status && dp->link->sink_count)
+ 		dp->dp_display.is_connected = true;
+ 	else
+ 		dp->dp_display.is_connected = false;
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
+index 6092ba1ed85ed..5173c89eedf7e 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.h
++++ b/drivers/gpu/drm/msm/dp/dp_display.h
+@@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
+ int dp_display_request_irq(struct msm_dp *dp_display);
+ bool dp_display_check_video_test(struct msm_dp *dp_display);
+ int dp_display_get_test_bpp(struct msm_dp *dp_display);
++void dp_display_signal_audio_start(struct msm_dp *dp_display);
+ void dp_display_signal_audio_complete(struct msm_dp *dp_display);
+ 
+ #endif /* _DP_DISPLAY_H_ */
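
The reinit_completion() movement in the hunks above follows the usual completion-handshake ordering: the waiter must re-arm the completion before the event that completes it can possibly fire, otherwise a stale completion from a previous cycle satisfies the wait immediately. A hedged sketch of the idiom; audio_ctx and the five-second timeout are illustrative assumptions, not driver code:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    struct audio_ctx {
    	struct completion shutdown_comp;	/* init_completion() at probe assumed */
    };

    /* start path: arm the completion before teardown can be signalled */
    static void audio_start(struct audio_ctx *ctx)
    {
    	reinit_completion(&ctx->shutdown_comp);
    	/* ... enable the audio stream ... */
    }

    /* teardown path: wait for the consumer to acknowledge the shutdown */
    static void audio_stop(struct audio_ctx *ctx)
    {
    	/* ... signal the shutdown event to the consumer ... */
    	if (!wait_for_completion_timeout(&ctx->shutdown_comp, 5 * HZ))
    		pr_warn("audio shutdown not acknowledged\n");
    }

    /* consumer side, called once the stream has actually stopped */
    static void audio_stopped(struct audio_ctx *ctx)
    {
    	complete_all(&ctx->shutdown_comp);
    }
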
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 3effc8c71494c..ea44423376c4d 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -1558,6 +1558,7 @@ struct radeon_dpm {
+ 	void                    *priv;
+ 	u32			new_active_crtcs;
+ 	int			new_active_crtc_count;
++	int			high_pixelclock_count;
+ 	u32			current_active_crtcs;
+ 	int			current_active_crtc_count;
+ 	bool single_display;
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 42301b4e56f50..28c4413f4dc8d 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -2120,11 +2120,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+ 		return state_index;
+ 	/* last mode is usually default, array is low to high */
+ 	for (i = 0; i < num_modes; i++) {
+-		rdev->pm.power_state[state_index].clock_info =
+-			kcalloc(1, sizeof(struct radeon_pm_clock_info),
+-				GFP_KERNEL);
++		/* avoid memory leaks from invalid modes or unknown frev. */
++		if (!rdev->pm.power_state[state_index].clock_info) {
++			rdev->pm.power_state[state_index].clock_info =
++				kzalloc(sizeof(struct radeon_pm_clock_info),
++					GFP_KERNEL);
++		}
+ 		if (!rdev->pm.power_state[state_index].clock_info)
+-			return state_index;
++			goto out;
+ 		rdev->pm.power_state[state_index].num_clock_modes = 1;
+ 		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ 		switch (frev) {
+@@ -2243,17 +2246,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+ 			break;
+ 		}
+ 	}
++out:
++	/* free any unused clock_info allocation. */
++	if (state_index && state_index < num_modes) {
++		kfree(rdev->pm.power_state[state_index].clock_info);
++		rdev->pm.power_state[state_index].clock_info = NULL;
++	}
++
+ 	/* last mode is usually default */
+-	if (rdev->pm.default_power_state_index == -1) {
++	if (state_index && rdev->pm.default_power_state_index == -1) {
+ 		rdev->pm.power_state[state_index - 1].type =
+ 			POWER_STATE_TYPE_DEFAULT;
+ 		rdev->pm.default_power_state_index = state_index - 1;
+ 		rdev->pm.power_state[state_index - 1].default_clock_mode =
+ 			&rdev->pm.power_state[state_index - 1].clock_info[0];
+-		rdev->pm.power_state[state_index].flags &=
++		rdev->pm.power_state[state_index - 1].flags &=
+ 			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+-		rdev->pm.power_state[state_index].misc = 0;
+-		rdev->pm.power_state[state_index].misc2 = 0;
++		rdev->pm.power_state[state_index - 1].misc = 0;
++		rdev->pm.power_state[state_index - 1].misc2 = 0;
+ 	}
+ 	return state_index;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 1995dad59dd09..2db4a8b1542d3 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1775,6 +1775,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
+ 	struct drm_device *ddev = rdev->ddev;
+ 	struct drm_crtc *crtc;
+ 	struct radeon_crtc *radeon_crtc;
++	struct radeon_connector *radeon_connector;
+ 
+ 	if (!rdev->pm.dpm_enabled)
+ 		return;
+@@ -1784,6 +1785,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
+ 	/* update active crtc counts */
+ 	rdev->pm.dpm.new_active_crtcs = 0;
+ 	rdev->pm.dpm.new_active_crtc_count = 0;
++	rdev->pm.dpm.high_pixelclock_count = 0;
+ 	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ 		list_for_each_entry(crtc,
+ 				    &ddev->mode_config.crtc_list, head) {
+@@ -1791,6 +1793,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
+ 			if (crtc->enabled) {
+ 				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
+ 				rdev->pm.dpm.new_active_crtc_count++;
++				if (!radeon_crtc->connector)
++					continue;
++
++				radeon_connector = to_radeon_connector(radeon_crtc->connector);
++				if (radeon_connector->pixelclock_for_modeset > 297000)
++					rdev->pm.dpm.high_pixelclock_count++;
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 91bfc4762767b..43b63705d0737 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2979,6 +2979,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 		    (rdev->pdev->device == 0x6605)) {
+ 			max_sclk = 75000;
+ 		}
++
++		if (rdev->pm.dpm.high_pixelclock_count > 1)
++			disable_sclk_switching = true;
+ 	}
+ 
+ 	if (rps->vce_active) {
+diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
+index 4382105bf1420..2a4bed0ab226b 100644
+--- a/drivers/hwmon/ltc2992.c
++++ b/drivers/hwmon/ltc2992.c
+@@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
+ 
+ 	fwnode_for_each_available_child_node(fwnode, child) {
+ 		ret = fwnode_property_read_u32(child, "reg", &addr);
+-		if (ret < 0)
++		if (ret < 0) {
++			fwnode_handle_put(child);
+ 			return ret;
++		}
+ 
+-		if (addr > 1)
++		if (addr > 1) {
++			fwnode_handle_put(child);
+ 			return -EINVAL;
++		}
+ 
+ 		ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
+ 		if (!ret)
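
The two added fwnode_handle_put() calls above reflect that fwnode_for_each_available_child_node() holds a reference on child for each iteration and only drops it when advancing to the next node, so every early return or break out of the loop must drop it by hand. A minimal sketch of the idiom; parse_children is a hypothetical name:

    #include <linux/property.h>

    static int parse_children(struct fwnode_handle *fwnode)
    {
    	struct fwnode_handle *child;
    	u32 reg;
    	int ret;

    	fwnode_for_each_available_child_node(fwnode, child) {
    		ret = fwnode_property_read_u32(child, "reg", &reg);
    		if (ret) {
    			/* the iterator took a reference; drop it on early exit */
    			fwnode_handle_put(child);
    			return ret;
    		}
    		/* ... use reg ... */
    	}
    	return 0;
    }
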
+diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
+index 7a5e539b567bf..580e63d7daa00 100644
+--- a/drivers/hwmon/occ/common.c
++++ b/drivers/hwmon/occ/common.c
+@@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ)
+ 		return rc;
+ 
+ 	/* limit the maximum rate of polling the OCC */
+-	if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
++	if (time_after(jiffies, occ->next_update)) {
+ 		rc = occ_poll(occ);
+-		occ->last_update = jiffies;
++		occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
+ 	} else {
+ 		rc = occ->last_error;
+ 	}
+@@ -1164,6 +1164,7 @@ int occ_setup(struct occ *occ, const char *name)
+ 		return rc;
+ 	}
+ 
++	occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
+ 	occ_parse_poll_response(occ);
+ 
+ 	rc = occ_setup_sensor_attrs(occ);
+diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
+index 67e6968b8978e..e6df719770e81 100644
+--- a/drivers/hwmon/occ/common.h
++++ b/drivers/hwmon/occ/common.h
+@@ -99,7 +99,7 @@ struct occ {
+ 	u8 poll_cmd_data;		/* to perform OCC poll command */
+ 	int (*send_cmd)(struct occ *occ, u8 *cmd);
+ 
+-	unsigned long last_update;
++	unsigned long next_update;
+ 	struct mutex lock;		/* lock OCC access */
+ 
+ 	struct device *hwmon;
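
Storing the time of the next allowed poll instead of the last poll lets occ_setup() seed the deadline up front and keeps the hot path a single time_after() check. A rough sketch of the scheme under assumed names (poller, POLL_INTERVAL):

    #include <linux/jiffies.h>
    #include <linux/types.h>

    #define POLL_INTERVAL	msecs_to_jiffies(100)	/* assumed rate limit */

    struct poller {
    	unsigned long next_update;	/* jiffies value of the next allowed poll */
    };

    static void poller_init(struct poller *p)
    {
    	p->next_update = jiffies + POLL_INTERVAL;	/* seeded once at setup */
    }

    /* returns true when the hardware should really be polled */
    static bool poll_due(struct poller *p)
    {
    	if (!time_after(jiffies, p->next_update))
    		return false;	/* serve cached data instead */

    	p->next_update = jiffies + POLL_INTERVAL;
    	return true;
    }
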
+diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
+index 3629b7885aca9..c594f45319fc5 100644
+--- a/drivers/hwtracing/coresight/coresight-platform.c
++++ b/drivers/hwtracing/coresight/coresight-platform.c
+@@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
+ 	struct of_endpoint endpoint;
+ 	int in = 0, out = 0;
+ 
++	/*
++	 * Avoid warnings in of_graph_get_next_endpoint()
++	 * if the device doesn't have any graph connections
++	 */
++	if (!of_graph_is_present(node))
++		return;
+ 	do {
+ 		ep = of_graph_get_next_endpoint(node, ep);
+ 		if (!ep)
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 4acee6f9e5a30..99d446763530e 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -73,6 +73,7 @@
+  * Comet Lake-V (PCH)		0xa3a3	32	hard	yes	yes	yes
+  * Alder Lake-S (PCH)		0x7aa3	32	hard	yes	yes	yes
+  * Alder Lake-P (PCH)		0x51a3	32	hard	yes	yes	yes
++ * Alder Lake-M (PCH)		0x54a3	32	hard	yes	yes	yes
+  *
+  * Features supported by this driver:
+  * Software PEC				no
+@@ -230,6 +231,7 @@
+ #define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS		0x4b23
+ #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS		0x4da3
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS		0x51a3
++#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS		0x54a3
+ #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS		0x5ad4
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS		0x7aa3
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS		0x8c22
+@@ -1087,6 +1089,7 @@ static const struct pci_device_id i801_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS) },
+ 	{ 0, }
+ };
+ 
+@@ -1771,6 +1774,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
+ 	case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
+ 	case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
++	case PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS:
+ 		priv->features |= FEATURE_BLOCK_PROC;
+ 		priv->features |= FEATURE_I2C_BLOCK_READ;
+ 		priv->features |= FEATURE_IRQ;
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index dc9c4b4cc25a1..dc5ca71906dbc 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -801,7 +801,7 @@ static int i2c_imx_reg_slave(struct i2c_client *client)
+ 	i2c_imx->last_slave_event = I2C_SLAVE_STOP;
+ 
+ 	/* Resume */
+-	ret = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
++	ret = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
+ 	if (ret < 0) {
+ 		dev_err(&i2c_imx->adapter.dev, "failed to resume i2c controller");
+ 		return ret;
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index 86f70c7513192..bf25acba2ed53 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -564,7 +564,7 @@ static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
+ 
+ static int mtk_i2c_max_step_cnt(unsigned int target_speed)
+ {
+-	if (target_speed > I2C_MAX_FAST_MODE_FREQ)
++	if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
+ 		return MAX_HS_STEP_CNT_DIV;
+ 	else
+ 		return MAX_STEP_CNT_DIV;
+@@ -635,7 +635,7 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
+ 	if (sda_min > sda_max)
+ 		return -3;
+ 
+-	if (check_speed > I2C_MAX_FAST_MODE_FREQ) {
++	if (check_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
+ 		if (i2c->dev_comp->ltiming_adjust) {
+ 			i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE |
+ 				(sample_cnt << 12) | (high_cnt << 8);
+@@ -850,7 +850,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
+ 
+ 	control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
+ 			~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
+-	if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
++	if ((i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) || (left_num >= 1))
+ 		control_reg |= I2C_CONTROL_RS;
+ 
+ 	if (i2c->op == I2C_MASTER_WRRD)
+@@ -1067,7 +1067,8 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
+ 		}
+ 	}
+ 
+-	if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
++	if (i2c->auto_restart && num >= 2 &&
++		i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
+ 		/* ignore the first restart irq after the master code,
+ 		 * otherwise the first transfer will be discarded.
+ 		 */
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index 6ceb11cc4be18..6ef38a8ee95cb 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 				   sizeof(rdwr_arg)))
+ 			return -EFAULT;
+ 
+-		/* Put an arbitrary limit on the number of messages that can
+-		 * be sent at once */
++		if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
++			return -EINVAL;
++
++		/*
++		 * Put an arbitrary limit on the number of messages that can
++		 * be sent at once
++		 */
+ 		if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ 			return -EINVAL;
+ 
+diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
+index 2e0c62c391550..8acf277b8b258 100644
+--- a/drivers/iio/accel/Kconfig
++++ b/drivers/iio/accel/Kconfig
+@@ -211,7 +211,6 @@ config DMARD10
+ config HID_SENSOR_ACCEL_3D
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID Accelerometers 3D"
+diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
+index 24d4925673363..2a3dd3b907bee 100644
+--- a/drivers/iio/common/hid-sensors/Kconfig
++++ b/drivers/iio/common/hid-sensors/Kconfig
+@@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
+ 	tristate "Common module (trigger) for all HID Sensor IIO drivers"
+ 	depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
+ 	select IIO_TRIGGER
++	select IIO_TRIGGERED_BUFFER
+ 	help
+ 	  Say yes here to build trigger support for HID sensors.
+ 	  Triggers will be sent if all requested attributes were read.
+diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
+index 5824f2edf9758..20b5ac7ab66af 100644
+--- a/drivers/iio/gyro/Kconfig
++++ b/drivers/iio/gyro/Kconfig
+@@ -111,7 +111,6 @@ config FXAS21002C_SPI
+ config HID_SENSOR_GYRO_3D
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID Gyroscope 3D"
+diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
+index ac90be03332af..f17a935195352 100644
+--- a/drivers/iio/gyro/mpu3050-core.c
++++ b/drivers/iio/gyro/mpu3050-core.c
+@@ -272,7 +272,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		switch (chan->type) {
+ 		case IIO_TEMP:
+-			/* The temperature scaling is (x+23000)/280 Celsius */
++			/*
++			 * The temperature scaling is (x+23000)/280 Celsius
++			 * for the "best fit straight line" temperature range
++			 * of -30C..85C.  The 23000 includes the room
++			 * temperature offset of +35C, 280 is the precision
++			 * scale, and x is the 16-bit signed integer reported
++			 * by the hardware.
++			 *
++			 * The temperature value itself represents the
++			 * temperature of the sensor die.
++			 */
+ 			*val = 23000;
+ 			return IIO_VAL_INT;
+ 		default:
+@@ -329,7 +338,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
+ 				goto out_read_raw_unlock;
+ 			}
+ 
+-			*val = be16_to_cpu(raw_val);
++			*val = (s16)be16_to_cpu(raw_val);
+ 			ret = IIO_VAL_INT;
+ 
+ 			goto out_read_raw_unlock;
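
Taken together, the two mpu3050 hunks say the raw register value is a signed 16-bit quantity and the reported temperature is (raw + 23000) / 280 degrees Celsius. A small runnable illustration of that arithmetic in plain user-space C (not driver code); the -13200 reference point is the assumed datasheet zero at +35 C:

    #include <stdint.h>
    #include <stdio.h>

    /* milli-degrees Celsius from a raw sample, per (x + 23000) / 280 */
    static long mpu3050_temp_mdegc(int16_t raw)
    {
    	return ((long)raw + 23000) * 1000 / 280;
    }

    int main(void)
    {
    	printf("%ld\n", mpu3050_temp_mdegc(-13200));	/* 35000, i.e. 35.0 C */
    	printf("%ld\n", mpu3050_temp_mdegc(0));		/* 82142, ~82.1 C */
    	return 0;
    }
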
+diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
+index 6549fcf6db698..2de5494e7c225 100644
+--- a/drivers/iio/humidity/Kconfig
++++ b/drivers/iio/humidity/Kconfig
+@@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY
+ 	tristate "HID Environmental humidity sensor"
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	help
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index 7db761afa578c..36f3a900878db 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1734,7 +1734,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	if (!indio_dev->info)
+ 		goto out_unlock;
+ 
+-	ret = -EINVAL;
+ 	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
+ 		ret = h->ioctl(indio_dev, filp, cmd, arg);
+ 		if (ret != IIO_IOCTL_UNHANDLED)
+@@ -1742,7 +1741,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	}
+ 
+ 	if (ret == IIO_IOCTL_UNHANDLED)
+-		ret = -EINVAL;
++		ret = -ENODEV;
+ 
+ out_unlock:
+ 	mutex_unlock(&indio_dev->info_exist_lock);
+@@ -1864,9 +1863,6 @@ EXPORT_SYMBOL(__iio_device_register);
+  **/
+ void iio_device_unregister(struct iio_dev *indio_dev)
+ {
+-	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+-	struct iio_ioctl_handler *h, *t;
+-
+ 	cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
+ 
+ 	mutex_lock(&indio_dev->info_exist_lock);
+@@ -1877,9 +1873,6 @@ void iio_device_unregister(struct iio_dev *indio_dev)
+ 
+ 	indio_dev->info = NULL;
+ 
+-	list_for_each_entry_safe(h, t, &iio_dev_opaque->ioctl_handlers, entry)
+-		list_del(&h->entry);
+-
+ 	iio_device_wakeup_eventset(indio_dev);
+ 	iio_buffer_wakeup_poll(indio_dev);
+ 
+diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
+index 33ad4dd0b5c7b..917f9becf9c75 100644
+--- a/drivers/iio/light/Kconfig
++++ b/drivers/iio/light/Kconfig
+@@ -256,7 +256,6 @@ config ISL29125
+ config HID_SENSOR_ALS
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID ALS"
+@@ -270,7 +269,6 @@ config HID_SENSOR_ALS
+ config HID_SENSOR_PROX
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID PROX"
+diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
+index 7ba7aa59437c3..040d8429a6e00 100644
+--- a/drivers/iio/light/gp2ap002.c
++++ b/drivers/iio/light/gp2ap002.c
+@@ -583,7 +583,7 @@ static int gp2ap002_probe(struct i2c_client *client,
+ 					"gp2ap002", indio_dev);
+ 	if (ret) {
+ 		dev_err(dev, "unable to request IRQ\n");
+-		goto out_disable_vio;
++		goto out_put_pm;
+ 	}
+ 	gp2ap002->irq = client->irq;
+ 
+@@ -613,8 +613,9 @@ static int gp2ap002_probe(struct i2c_client *client,
+ 
+ 	return 0;
+ 
+-out_disable_pm:
++out_put_pm:
+ 	pm_runtime_put_noidle(dev);
++out_disable_pm:
+ 	pm_runtime_disable(dev);
+ out_disable_vio:
+ 	regulator_disable(gp2ap002->vio);
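
The gp2ap002 fix is the classic probe() unwind rule: error labels undo acquisitions in exactly the reverse order they were made, and a step that failed must jump past its own cleanup. A generic hedged sketch with placeholder steps (step_a/undo_a and friends are stubs, not functions from the driver):

    /* stubs standing in for real acquire/release helpers */
    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return 0; }
    static void undo_a(void) { }
    static void undo_b(void) { }

    static int example_probe(void)
    {
    	int ret;

    	ret = step_a();			/* e.g. enable a regulator */
    	if (ret)
    		return ret;

    	ret = step_b();			/* e.g. take a runtime-PM reference */
    	if (ret)
    		goto err_undo_a;	/* skip undo_b(): step_b never succeeded */

    	ret = step_c();			/* e.g. request an IRQ */
    	if (ret)
    		goto err_undo_b;

    	return 0;

    err_undo_b:
    	undo_b();
    err_undo_a:
    	undo_a();
    	return ret;
    }
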
+diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
+index 0f787bfc88fc4..c9d8f07a6fcdd 100644
+--- a/drivers/iio/light/tsl2583.c
++++ b/drivers/iio/light/tsl2583.c
+@@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
+ 		return lux_val;
+ 	}
+ 
++	/* Avoid division by zero of lux_value later on */
++	if (lux_val == 0) {
++		dev_err(&chip->client->dev,
++			"%s: lux_val of 0 will produce out of range trim_value\n",
++			__func__);
++		return -ENODATA;
++	}
++
+ 	gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
+ 			* chip->als_settings.als_gain_trim) / lux_val);
+ 	if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
+diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
+index 5d4ffd66032e9..74ad5701c6c29 100644
+--- a/drivers/iio/magnetometer/Kconfig
++++ b/drivers/iio/magnetometer/Kconfig
+@@ -95,7 +95,6 @@ config MAG3110
+ config HID_SENSOR_MAGNETOMETER_3D
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID Magnetometer 3D"
+diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
+index a505583cc2fda..396cbbb867f4c 100644
+--- a/drivers/iio/orientation/Kconfig
++++ b/drivers/iio/orientation/Kconfig
+@@ -9,7 +9,6 @@ menu "Inclinometer sensors"
+ config HID_SENSOR_INCLINOMETER_3D
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID Inclinometer 3D"
+@@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D
+ config HID_SENSOR_DEVICE_ROTATION
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID Device Rotation"
+diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
+index 689b978db4f95..fc0d3cfca4186 100644
+--- a/drivers/iio/pressure/Kconfig
++++ b/drivers/iio/pressure/Kconfig
+@@ -79,7 +79,6 @@ config DPS310
+ config HID_SENSOR_PRESS
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID PRESS"
+diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+index c685f10b5ae48..cc206bfa09c78 100644
+--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
++++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+@@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
+ 	ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
+ 	if (ret < 0) {
+ 		dev_err(&client->dev, "cannot send start measurement command");
++		pm_runtime_put_noidle(&client->dev);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
+index f1f2a1499c9e2..4df60082c1fa8 100644
+--- a/drivers/iio/temperature/Kconfig
++++ b/drivers/iio/temperature/Kconfig
+@@ -45,7 +45,6 @@ config HID_SENSOR_TEMP
+ 	tristate "HID Environmental temperature sensor"
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	help
+diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
+index f650cac9d424c..d30c23b6527aa 100644
+--- a/drivers/infiniband/hw/hfi1/ipoib.h
++++ b/drivers/infiniband/hw/hfi1/ipoib.h
+@@ -52,8 +52,9 @@ union hfi1_ipoib_flow {
+  * @producer_lock: producer sync lock
+  * @consumer_lock: consumer sync lock
+  */
++struct ipoib_txreq;
+ struct hfi1_ipoib_circ_buf {
+-	void **items;
++	struct ipoib_txreq **items;
+ 	unsigned long head;
+ 	unsigned long tail;
+ 	unsigned long max_items;
+diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+index edd4eeac8dd1d..cdc26ee3cf52d 100644
+--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
++++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+@@ -702,14 +702,14 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
+ 
+ 	priv->tx_napis = kcalloc_node(dev->num_tx_queues,
+ 				      sizeof(struct napi_struct),
+-				      GFP_ATOMIC,
++				      GFP_KERNEL,
+ 				      priv->dd->node);
+ 	if (!priv->tx_napis)
+ 		goto free_txreq_cache;
+ 
+ 	priv->txqs = kcalloc_node(dev->num_tx_queues,
+ 				  sizeof(struct hfi1_ipoib_txq),
+-				  GFP_ATOMIC,
++				  GFP_KERNEL,
+ 				  priv->dd->node);
+ 	if (!priv->txqs)
+ 		goto free_tx_napis;
+@@ -741,9 +741,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
+ 					     priv->dd->node);
+ 
+ 		txq->tx_ring.items =
+-			vzalloc_node(array_size(tx_ring_size,
+-						sizeof(struct ipoib_txreq)),
+-				     priv->dd->node);
++			kcalloc_node(tx_ring_size,
++				     sizeof(struct ipoib_txreq *),
++				     GFP_KERNEL, priv->dd->node);
+ 		if (!txq->tx_ring.items)
+ 			goto free_txqs;
+ 
+@@ -764,7 +764,7 @@ free_txqs:
+ 		struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ 
+ 		netif_napi_del(txq->napi);
+-		vfree(txq->tx_ring.items);
++		kfree(txq->tx_ring.items);
+ 	}
+ 
+ 	kfree(priv->txqs);
+@@ -817,7 +817,7 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
+ 		hfi1_ipoib_drain_tx_list(txq);
+ 		netif_napi_del(txq->napi);
+ 		(void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+-		vfree(txq->tx_ring.items);
++		kfree(txq->tx_ring.items);
+ 	}
+ 
+ 	kfree(priv->txqs);
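
The hfi1 ring fix highlights a sizing pitfall: once tx_ring.items became an array of pointers, each slot is sizeof(struct ipoib_txreq *), not sizeof(struct ipoib_txreq). Writing the allocation as sizeof(*ptr) makes that class of mistake impossible. A brief sketch under assumed names (ring, item):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct item;			/* opaque element type */

    struct ring {
    	struct item **slots;	/* array of pointers, filled in later */
    	unsigned long max_items;
    };

    static int ring_init(struct ring *r, unsigned long n, int node)
    {
    	/* sizeof(*r->slots) tracks the slot type even if it changes */
    	r->slots = kcalloc_node(n, sizeof(*r->slots), GFP_KERNEL, node);
    	if (!r->slots)
    		return -ENOMEM;

    	r->max_items = n;
    	return 0;
    }

    static void ring_free(struct ring *r)
    {
    	kfree(r->slots);
    	r->slots = NULL;
    }
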
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index f7e31018cd0b9..df7b19ff0a9ed 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -12,7 +12,6 @@
+ #include <linux/acpi.h>
+ #include <linux/list.h>
+ #include <linux/bitmap.h>
+-#include <linux/delay.h>
+ #include <linux/slab.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/interrupt.h>
+@@ -257,8 +256,6 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
+ static int amd_iommu_enable_interrupts(void);
+ static int __init iommu_go_to_state(enum iommu_init_state state);
+ static void init_device_table_dma(void);
+-static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+-				u8 fxn, u64 *value, bool is_write);
+ 
+ static bool amd_iommu_pre_enabled = true;
+ 
+@@ -1717,53 +1714,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
+ 	return 0;
+ }
+ 
+-static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
++static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+ {
+-	int retry;
++	u64 val;
+ 	struct pci_dev *pdev = iommu->dev;
+-	u64 val = 0xabcd, val2 = 0, save_reg, save_src;
+ 
+ 	if (!iommu_feature(iommu, FEATURE_PC))
+ 		return;
+ 
+ 	amd_iommu_pc_present = true;
+ 
+-	/* save the value to restore, if writable */
+-	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
+-	    iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
+-		goto pc_false;
+-
+-	/*
+-	 * Disable power gating by programing the performance counter
+-	 * source to 20 (i.e. counts the reads and writes from/to IOMMU
+-	 * Reserved Register [MMIO Offset 1FF8h] that are ignored.),
+-	 * which never get incremented during this init phase.
+-	 * (Note: The event is also deprecated.)
+-	 */
+-	val = 20;
+-	if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
+-		goto pc_false;
+-
+-	/* Check if the performance counters can be written to */
+-	val = 0xabcd;
+-	for (retry = 5; retry; retry--) {
+-		if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
+-		    iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
+-		    val2)
+-			break;
+-
+-		/* Wait about 20 msec for power gating to disable and retry. */
+-		msleep(20);
+-	}
+-
+-	/* restore */
+-	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
+-	    iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
+-		goto pc_false;
+-
+-	if (val != val2)
+-		goto pc_false;
+-
+ 	pci_info(pdev, "IOMMU performance counters supported\n");
+ 
+ 	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
+@@ -1771,11 +1731,6 @@ static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
+ 	iommu->max_counters = (u8) ((val >> 7) & 0xf);
+ 
+ 	return;
+-
+-pc_false:
+-	pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+-	amd_iommu_pc_present = false;
+-	return;
+ }
+ 
+ static ssize_t amd_iommu_show_cap(struct device *dev,
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 8594b4a830437..941ba5484731e 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -2305,6 +2305,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+ {
+ 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ 
++	if (!gather->pgsize)
++		return;
++
+ 	arm_smmu_tlb_inv_range_domain(gather->start,
+ 				      gather->end - gather->start + 1,
+ 				      gather->pgsize, true, smmu_domain);
+diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
+index 265b53476a80f..6dedc58c47b3e 100644
+--- a/drivers/leds/blink/Kconfig
++++ b/drivers/leds/blink/Kconfig
+@@ -9,6 +9,7 @@ if LEDS_BLINK
+ 
+ config LEDS_BLINK_LGM
+ 	tristate "LED support for Intel LGM SoC series"
++	depends on GPIOLIB
+ 	depends on LEDS_CLASS
+ 	depends on MFD_SYSCON
+ 	depends on OF
+diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
+index 6a64fe410987e..c3508109263ec 100644
+--- a/drivers/net/can/dev/skb.c
++++ b/drivers/net/can/dev/skb.c
+@@ -151,7 +151,11 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+ 
+-	BUG_ON(idx >= priv->echo_skb_max);
++	if (idx >= priv->echo_skb_max) {
++		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
++			   __func__, idx, priv->echo_skb_max);
++		return;
++	}
+ 
+ 	if (priv->echo_skb[idx]) {
+ 		dev_kfree_skb_any(priv->echo_skb[idx]);
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 0c8d36bc668c8..f71127229caf7 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1455,6 +1455,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
+ 	int i;
+ 	int putidx;
+ 
++	cdev->tx_skb = NULL;
++
+ 	/* Generate ID field for TX buffer Element */
+ 	/* Common to all supported M_CAN versions */
+ 	if (cf->can_id & CAN_EFF_FLAG) {
+@@ -1571,7 +1573,6 @@ static void m_can_tx_work_queue(struct work_struct *ws)
+ 						   tx_work);
+ 
+ 	m_can_tx_handler(cdev);
+-	cdev->tx_skb = NULL;
+ }
+ 
+ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index a57da43680d8f..bd7d0251be104 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -956,8 +956,6 @@ static int mcp251x_stop(struct net_device *net)
+ 
+ 	priv->force_quit = 1;
+ 	free_irq(spi->irq, priv);
+-	destroy_workqueue(priv->wq);
+-	priv->wq = NULL;
+ 
+ 	mutex_lock(&priv->mcp_lock);
+ 
+@@ -1224,24 +1222,15 @@ static int mcp251x_open(struct net_device *net)
+ 		goto out_close;
+ 	}
+ 
+-	priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+-				   0);
+-	if (!priv->wq) {
+-		ret = -ENOMEM;
+-		goto out_clean;
+-	}
+-	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
+-	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
+-
+ 	ret = mcp251x_hw_wake(spi);
+ 	if (ret)
+-		goto out_free_wq;
++		goto out_free_irq;
+ 	ret = mcp251x_setup(net, spi);
+ 	if (ret)
+-		goto out_free_wq;
++		goto out_free_irq;
+ 	ret = mcp251x_set_normal_mode(spi);
+ 	if (ret)
+-		goto out_free_wq;
++		goto out_free_irq;
+ 
+ 	can_led_event(net, CAN_LED_EVENT_OPEN);
+ 
+@@ -1250,9 +1239,7 @@ static int mcp251x_open(struct net_device *net)
+ 
+ 	return 0;
+ 
+-out_free_wq:
+-	destroy_workqueue(priv->wq);
+-out_clean:
++out_free_irq:
+ 	free_irq(spi->irq, priv);
+ 	mcp251x_hw_sleep(spi);
+ out_close:
+@@ -1373,6 +1360,15 @@ static int mcp251x_can_probe(struct spi_device *spi)
+ 	if (ret)
+ 		goto out_clk;
+ 
++	priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
++				   0);
++	if (!priv->wq) {
++		ret = -ENOMEM;
++		goto out_clk;
++	}
++	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
++	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
++
+ 	priv->spi = spi;
+ 	mutex_init(&priv->mcp_lock);
+ 
+@@ -1417,6 +1413,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
+ 	return 0;
+ 
+ error_probe:
++	destroy_workqueue(priv->wq);
++	priv->wq = NULL;
+ 	mcp251x_power_enable(priv->power, 0);
+ 
+ out_clk:
+@@ -1438,6 +1436,9 @@ static int mcp251x_can_remove(struct spi_device *spi)
+ 
+ 	mcp251x_power_enable(priv->power, 0);
+ 
++	destroy_workqueue(priv->wq);
++	priv->wq = NULL;
++
+ 	clk_disable_unprepare(priv->clk);
+ 
+ 	free_candev(net);
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index 799e9d5d34813..4a742aa5c417b 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -2856,8 +2856,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
+ 
+ 	clk = devm_clk_get(&spi->dev, NULL);
+ 	if (IS_ERR(clk))
+-		dev_err_probe(&spi->dev, PTR_ERR(clk),
+-			      "Failed to get Oscillator (clock)!\n");
++		return dev_err_probe(&spi->dev, PTR_ERR(clk),
++				     "Failed to get Oscillator (clock)!\n");
+ 	freq = clk_get_rate(clk);
+ 
+ 	/* Sanity check */
+@@ -2957,10 +2957,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
+ 
+ 	err = mcp251xfd_register(priv);
+ 	if (err)
+-		goto out_free_candev;
++		goto out_can_rx_offload_del;
+ 
+ 	return 0;
+ 
++ out_can_rx_offload_del:
++	can_rx_offload_del(&priv->offload);
+  out_free_candev:
+ 	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 73239d3eaca15..cf4249d59383c 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -122,7 +122,10 @@ enum board_idx {
+ 	NETXTREME_E_VF,
+ 	NETXTREME_C_VF,
+ 	NETXTREME_S_VF,
++	NETXTREME_C_VF_HV,
++	NETXTREME_E_VF_HV,
+ 	NETXTREME_E_P5_VF,
++	NETXTREME_E_P5_VF_HV,
+ };
+ 
+ /* indexed by enum above */
+@@ -170,7 +173,10 @@ static const struct {
+ 	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+ 	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
+ 	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
++	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
++	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
+ 	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
++	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
+ };
+ 
+ static const struct pci_device_id bnxt_pci_tbl[] = {
+@@ -222,15 +228,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
+ 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
+ #ifdef CONFIG_BNXT_SRIOV
+ 	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
++	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
++	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
++	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
++	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
++	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
++	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
++	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
++	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
++	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
++	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
+ 	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
+ #endif
+ 	{ 0 }
+@@ -265,7 +281,8 @@ static struct workqueue_struct *bnxt_pf_wq;
+ static bool bnxt_vf_pciid(enum board_idx idx)
+ {
+ 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
+-		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
++		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
++		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
+ }
+ 
+ #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
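+
The bnxt hunk grows the PCI ID table with Hyper-V VF device IDs and teaches bnxt_vf_pciid() about the new board indices. The matching mechanism itself is a linear scan of a sentinel-terminated table whose per-entry driver_data selects board-specific behaviour; roughly (toy IDs, not Broadcom's real ones):

#include <stdio.h>

enum board_idx { BOARD_PF, BOARD_VF, BOARD_VF_HV };

struct dev_id {
	unsigned short device;
	enum board_idx idx;
};

static const struct dev_id tbl[] = {
	{ 0x1606, BOARD_VF },
	{ 0x1607, BOARD_VF_HV },
	{ 0x16c1, BOARD_VF },
	{ 0 }                        /* sentinel, like the { 0 } terminator above */
};

static int is_vf(enum board_idx idx)
{
	return idx == BOARD_VF || idx == BOARD_VF_HV;
}

int main(void)
{
	unsigned short probe_id = 0x1607;
	const struct dev_id *d;

	for (d = tbl; d->device; d++)
		if (d->device == probe_id)
			printf("device %04x: vf=%d\n", d->device, is_vf(d->idx));
	return 0;
}
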
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index f04ec53544ae5..b1443ff439de9 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -768,7 +768,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
+ 	return err;
+ }
+ 
+-static inline void enic_queue_wq_skb(struct enic *enic,
++static inline int enic_queue_wq_skb(struct enic *enic,
+ 	struct vnic_wq *wq, struct sk_buff *skb)
+ {
+ 	unsigned int mss = skb_shinfo(skb)->gso_size;
+@@ -814,6 +814,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
+ 		wq->to_use = buf->next;
+ 		dev_kfree_skb(skb);
+ 	}
++	return err;
+ }
+ 
+ /* netif_tx_lock held, process context with BHs disabled, or BH */
+@@ -857,7 +858,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
+ 		return NETDEV_TX_BUSY;
+ 	}
+ 
+-	enic_queue_wq_skb(enic, wq, skb);
++	if (enic_queue_wq_skb(enic, wq, skb))
++		goto error;
+ 
+ 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
+ 		netif_tx_stop_queue(txq);
+@@ -865,6 +867,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
+ 	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
+ 		vnic_wq_doorbell(wq);
+ 
++error:
+ 	spin_unlock(&enic->wq_lock[txq_map]);
+ 
+ 	return NETDEV_TX_OK;
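+
The enic change turns enic_queue_wq_skb() from void into int so that a posting failure skips the queue-stop and doorbell bookkeeping instead of ringing the doorbell for a descriptor that was never queued. The shape of that fix, reduced to a sketch (names invented):

#include <stdio.h>

static int queue_skb(int fail)
{
	if (fail)
		return -1;       /* was silently swallowed before the fix */
	return 0;
}

static int xmit(int fail)
{
	if (queue_skb(fail))
		goto error;      /* skip the doorbell on failure */

	printf("ring doorbell\n");
error:
	/* unlock etc. happens on both paths */
	return 0;                /* NETDEV_TX_OK either way; skb was consumed */
}

int main(void)
{
	xmit(0);
	xmit(1);
	return 0;
}
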
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 3db882322b2bd..70aea9c274fed 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2048,6 +2048,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
+ 	fep->link = 0;
+ 	fep->full_duplex = 0;
+ 
++	phy_dev->mac_managed_pm = 1;
++
+ 	phy_attached_info(phy_dev);
+ 
+ 	return 0;
+@@ -3864,6 +3866,7 @@ static int __maybe_unused fec_resume(struct device *dev)
+ 		netif_device_attach(ndev);
+ 		netif_tx_unlock_bh(ndev);
+ 		napi_enable(&fep->napi);
++		phy_init_hw(ndev->phydev);
+ 		phy_start(ndev->phydev);
+ 	}
+ 	rtnl_unlock();
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 65752f363f431..0f70158c2551d 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -576,8 +576,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
+ 	if (h->ae_algo->ops->set_timer_task)
+ 		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
+ 
+-	netif_tx_stop_all_queues(netdev);
+ 	netif_carrier_off(netdev);
++	netif_tx_disable(netdev);
+ 
+ 	hns3_nic_net_down(netdev);
+ 
+@@ -823,7 +823,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
+  * and it is udp packet, which has a dest port as the IANA assigned.
+  * the hardware is expected to do the checksum offload, but the
+  * hardware will not do the checksum offload when udp dest port is
+- * 4789 or 6081.
++ * 4789, 4790 or 6081.
+  */
+ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
+ {
+@@ -841,7 +841,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
+ 
+ 	if (!(!skb->encapsulation &&
+ 	      (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
+-	      l4.udp->dest == htons(GENEVE_UDP_PORT))))
++	      l4.udp->dest == htons(GENEVE_UDP_PORT) ||
++	      l4.udp->dest == htons(4790))))
+ 		return false;
+ 
+ 	skb_checksum_help(skb);
+@@ -1277,23 +1278,21 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ }
+ 
+ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+-				   u8 max_non_tso_bd_num)
++				   u8 max_non_tso_bd_num, unsigned int bd_num,
++				   unsigned int recursion_level)
+ {
++#define HNS3_MAX_RECURSION_LEVEL	24
++
+ 	struct sk_buff *frag_skb;
+-	unsigned int bd_num = 0;
+ 
+ 	/* If the total len is within the max bd limit */
+-	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
++	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
++		   !skb_has_frag_list(skb) &&
+ 		   skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
+ 		return skb_shinfo(skb)->nr_frags + 1U;
+ 
+-	/* The below case will always be linearized, return
+-	 * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
+-	 */
+-	if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
+-		     (!skb_is_gso(skb) && skb->len >
+-		      HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
+-		return HNS3_MAX_TSO_BD_NUM + 1U;
++	if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
++		return UINT_MAX;
+ 
+ 	bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
+ 
+@@ -1301,7 +1300,8 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ 		return bd_num;
+ 
+ 	skb_walk_frags(skb, frag_skb) {
+-		bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
++		bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
++					bd_num, recursion_level + 1);
+ 		if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ 			return bd_num;
+ 	}
+@@ -1361,6 +1361,43 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
+ 		size[i] = skb_frag_size(&shinfo->frags[i]);
+ }
+ 
++static int hns3_skb_linearize(struct hns3_enet_ring *ring,
++			      struct sk_buff *skb,
++			      u8 max_non_tso_bd_num,
++			      unsigned int bd_num)
++{
++	/* 'bd_num == UINT_MAX' means the skb's fraglist has a
++	 * recursion level over HNS3_MAX_RECURSION_LEVEL.
++	 */
++	if (bd_num == UINT_MAX) {
++		u64_stats_update_begin(&ring->syncp);
++		ring->stats.over_max_recursion++;
++		u64_stats_update_end(&ring->syncp);
++		return -ENOMEM;
++	}
++
++	/* The skb->len has exceeded the hw limitation, linearization
++	 * will not help.
++	 */
++	if (skb->len > HNS3_MAX_TSO_SIZE ||
++	    (!skb_is_gso(skb) && skb->len >
++	     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
++		u64_stats_update_begin(&ring->syncp);
++		ring->stats.hw_limitation++;
++		u64_stats_update_end(&ring->syncp);
++		return -ENOMEM;
++	}
++
++	if (__skb_linearize(skb)) {
++		u64_stats_update_begin(&ring->syncp);
++		ring->stats.sw_err_cnt++;
++		u64_stats_update_end(&ring->syncp);
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
+ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ 				  struct net_device *netdev,
+ 				  struct sk_buff *skb)
+@@ -1370,7 +1407,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ 	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
+ 	unsigned int bd_num;
+ 
+-	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
++	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
+ 	if (unlikely(bd_num > max_non_tso_bd_num)) {
+ 		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
+ 		    !hns3_skb_need_linearized(skb, bd_size, bd_num,
+@@ -1379,16 +1416,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ 			goto out;
+ 		}
+ 
+-		if (__skb_linearize(skb))
++		if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
++				       bd_num))
+ 			return -ENOMEM;
+ 
+ 		bd_num = hns3_tx_bd_count(skb->len);
+-		if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
+-		    (!skb_is_gso(skb) &&
+-		     bd_num > max_non_tso_bd_num)) {
+-			trace_hns3_over_max_bd(skb);
+-			return -ENOMEM;
+-		}
+ 
+ 		u64_stats_update_begin(&ring->syncp);
+ 		ring->stats.tx_copy++;
+@@ -1412,6 +1444,10 @@ out:
+ 		return bd_num;
+ 	}
+ 
++	u64_stats_update_begin(&ring->syncp);
++	ring->stats.tx_busy++;
++	u64_stats_update_end(&ring->syncp);
++
+ 	return -EBUSY;
+ }
+ 
+@@ -1459,6 +1495,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
+ 				 struct sk_buff *skb, enum hns_desc_type type)
+ {
+ 	unsigned int size = skb_headlen(skb);
++	struct sk_buff *frag_skb;
+ 	int i, ret, bd_num = 0;
+ 
+ 	if (size) {
+@@ -1483,6 +1520,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
+ 		bd_num += ret;
+ 	}
+ 
++	skb_walk_frags(skb, frag_skb) {
++		ret = hns3_fill_skb_to_desc(ring, frag_skb,
++					    DESC_TYPE_FRAGLIST_SKB);
++		if (unlikely(ret < 0))
++			return ret;
++
++		bd_num += ret;
++	}
++
+ 	return bd_num;
+ }
+ 
+@@ -1513,8 +1559,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
+ 	struct netdev_queue *dev_queue;
+ 	int pre_ntu, next_to_use_head;
+-	struct sk_buff *frag_skb;
+-	int bd_num = 0;
+ 	bool doorbell;
+ 	int ret;
+ 
+@@ -1530,15 +1574,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
+ 	if (unlikely(ret <= 0)) {
+ 		if (ret == -EBUSY) {
+-			u64_stats_update_begin(&ring->syncp);
+-			ring->stats.tx_busy++;
+-			u64_stats_update_end(&ring->syncp);
+ 			hns3_tx_doorbell(ring, 0, true);
+ 			return NETDEV_TX_BUSY;
+-		} else if (ret == -ENOMEM) {
+-			u64_stats_update_begin(&ring->syncp);
+-			ring->stats.sw_err_cnt++;
+-			u64_stats_update_end(&ring->syncp);
+ 		}
+ 
+ 		hns3_rl_err(netdev, "xmit error: %d!\n", ret);
+@@ -1551,21 +1588,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	if (unlikely(ret < 0))
+ 		goto fill_err;
+ 
++	/* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
++	 * zero, which is unlikely, and 'ret > 0' is the number of tx
++	 * descriptors that need to be notified to the hw.
++	 */
+ 	ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+-	if (unlikely(ret < 0))
++	if (unlikely(ret <= 0))
+ 		goto fill_err;
+ 
+-	bd_num += ret;
+-
+-	skb_walk_frags(skb, frag_skb) {
+-		ret = hns3_fill_skb_to_desc(ring, frag_skb,
+-					    DESC_TYPE_FRAGLIST_SKB);
+-		if (unlikely(ret < 0))
+-			goto fill_err;
+-
+-		bd_num += ret;
+-	}
+-
+ 	pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
+ 					(ring->desc_num - 1);
+ 	ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
+@@ -1576,7 +1606,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
+ 	doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
+ 					  netdev_xmit_more());
+-	hns3_tx_doorbell(ring, bd_num, doorbell);
++	hns3_tx_doorbell(ring, ret, doorbell);
+ 
+ 	return NETDEV_TX_OK;
+ 
+@@ -1748,11 +1778,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
+ 			tx_drop += ring->stats.tx_l4_proto_err;
+ 			tx_drop += ring->stats.tx_l2l3l4_err;
+ 			tx_drop += ring->stats.tx_tso_err;
++			tx_drop += ring->stats.over_max_recursion;
++			tx_drop += ring->stats.hw_limitation;
+ 			tx_errors += ring->stats.sw_err_cnt;
+ 			tx_errors += ring->stats.tx_vlan_err;
+ 			tx_errors += ring->stats.tx_l4_proto_err;
+ 			tx_errors += ring->stats.tx_l2l3l4_err;
+ 			tx_errors += ring->stats.tx_tso_err;
++			tx_errors += ring->stats.over_max_recursion;
++			tx_errors += ring->stats.hw_limitation;
+ 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ 
+ 		/* fetch the rx stats */
+@@ -4555,6 +4589,11 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
+ 	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
+ 	int ret = 0;
+ 
++	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
++		netdev_err(kinfo->netdev, "device is not initialized yet\n");
++		return -EFAULT;
++	}
++
+ 	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+ 
+ 	if (netif_running(kinfo->netdev)) {
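+
The hns3 rework above makes hns3_tx_bd_num() recurse into skb fraglists but caps the depth at HNS3_MAX_RECURSION_LEVEL, returning UINT_MAX as an "over limit" sentinel that the caller turns into an over_max_recursion drop. The guard pattern in isolation (toy list structure, same illustrative limit):

#include <stdio.h>
#include <limits.h>

#define MAX_LEVEL 24             /* mirrors HNS3_MAX_RECURSION_LEVEL */

struct frag {
	struct frag *next;       /* loosely models nested fraglists */
};

static unsigned int count_bds(const struct frag *f, unsigned int level)
{
	if (level >= MAX_LEVEL)
		return UINT_MAX; /* sentinel: caller drops the packet */
	if (!f)
		return 0;

	unsigned int sub = count_bds(f->next, level + 1);

	if (sub == UINT_MAX)
		return UINT_MAX; /* propagate the sentinel unchanged */
	return 1 + sub;          /* one descriptor for this fragment */
}

int main(void)
{
	struct frag a[30] = { 0 };

	for (int i = 0; i < 29; i++)
		a[i].next = &a[i + 1];

	/* 30 levels deep: exceeds the limit, so UINT_MAX comes back */
	printf("bds=%u (UINT_MAX means over limit)\n", count_bds(&a[0], 0));
	return 0;
}
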
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+index d069b04ee5873..e44224e233150 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -376,6 +376,8 @@ struct ring_stats {
+ 			u64 tx_l4_proto_err;
+ 			u64 tx_l2l3l4_err;
+ 			u64 tx_tso_err;
++			u64 over_max_recursion;
++			u64 hw_limitation;
+ 		};
+ 		struct {
+ 			u64 rx_pkts;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index adcec4ea7cb91..d20f2e2460178 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -44,6 +44,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
+ 	HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
+ 	HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
+ 	HNS3_TQP_STAT("tso_err", tx_tso_err),
++	HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
++	HNS3_TQP_STAT("hw_limitation", hw_limitation),
+ };
+ 
+ #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+index 0ca7f1b984bfb..78d3eb142df83 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+@@ -753,8 +753,9 @@ static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
+ 
+ 	/* configure IGU,EGU error interrupts */
+ 	hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
++	desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
+ 	if (en)
+-		desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
++		desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
+ 
+ 	desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+index 608fe26fc3fed..d647f3c841345 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+@@ -32,7 +32,8 @@
+ #define HCLGE_TQP_ECC_ERR_INT_EN_MASK	0x0FFF
+ #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK	0x0F000000
+ #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN	0x0F000000
+-#define HCLGE_IGU_ERR_INT_EN	0x0000066F
++#define HCLGE_IGU_ERR_INT_EN	0x0000000F
++#define HCLGE_IGU_ERR_INT_TYPE	0x00000660
+ #define HCLGE_IGU_ERR_INT_EN_MASK	0x000F
+ #define HCLGE_IGU_TNL_ERR_INT_EN    0x0002AABF
+ #define HCLGE_IGU_TNL_ERR_INT_EN_MASK  0x003F
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index b0dbe6dcaa7b5..7a560d0e19b9c 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -11379,7 +11379,6 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
+ #define REG_SEPARATOR_LINE	1
+ #define REG_NUM_REMAIN_MASK	3
+-#define BD_LIST_MAX_NUM		30
+ 
+ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
+ {
+@@ -11473,15 +11472,19 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+ {
+ 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ 	int data_len_per_desc, bd_num, i;
+-	int bd_num_list[BD_LIST_MAX_NUM];
++	int *bd_num_list;
+ 	u32 data_len;
+ 	int ret;
+ 
++	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
++	if (!bd_num_list)
++		return -ENOMEM;
++
+ 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ 	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"Get dfx reg bd num fail, status is %d.\n", ret);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
+@@ -11492,6 +11495,8 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+ 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+ 	}
+ 
++out:
++	kfree(bd_num_list);
+ 	return ret;
+ }
+ 
+@@ -11499,16 +11504,20 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+ {
+ 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ 	int bd_num, bd_num_max, buf_len, i;
+-	int bd_num_list[BD_LIST_MAX_NUM];
+ 	struct hclge_desc *desc_src;
++	int *bd_num_list;
+ 	u32 *reg = data;
+ 	int ret;
+ 
++	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
++	if (!bd_num_list)
++		return -ENOMEM;
++
+ 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ 	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"Get dfx reg bd num fail, status is %d.\n", ret);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	bd_num_max = bd_num_list[0];
+@@ -11517,8 +11526,10 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+ 
+ 	buf_len = sizeof(*desc_src) * bd_num_max;
+ 	desc_src = kzalloc(buf_len, GFP_KERNEL);
+-	if (!desc_src)
+-		return -ENOMEM;
++	if (!desc_src) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
+ 	for (i = 0; i < dfx_reg_type_num; i++) {
+ 		bd_num = bd_num_list[i];
+@@ -11534,6 +11545,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+ 	}
+ 
+ 	kfree(desc_src);
++out:
++	kfree(bd_num_list);
+ 	return ret;
+ }
+ 
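+
Both hclge_get_dfx_reg_len() and hclge_get_dfx_reg() above drop the fixed BD_LIST_MAX_NUM stack array in favour of a heap allocation sized from ARRAY_SIZE(hclge_dfx_bd_offset_list), with a single out: label freeing it on every path. The same shape in plain C (illustrative sizes):

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int offset_list[] = { 0, 4, 8, 12 };

static int get_reg_len(int *len)
{
	size_t n = ARRAY_SIZE(offset_list);
	int *bd_num_list = calloc(n, sizeof(int)); /* was a fixed stack array */
	int ret = 0;

	if (!bd_num_list)
		return -1;                         /* stands in for -ENOMEM */

	/* ... fill bd_num_list, possibly failing and setting ret ... */
	*len = (int)n;

	free(bd_num_list);                         /* single exit frees on all paths */
	return ret;
}

int main(void)
{
	int len = 0;

	printf("ret=%d len=%d\n", get_reg_len(&len), len);
	return 0;
}
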
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+index 51a36e74f0881..c3bb16b1f0600 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -535,7 +535,7 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
+ 	unsigned long advertising;
+ 	unsigned long supported;
+ 	unsigned long send_data;
+-	u8 msg_data[10];
++	u8 msg_data[10] = {};
+ 	u8 dest_vfid;
+ 
+ 	advertising = hdev->hw.mac.advertising[0];
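+
In hclge_get_link_mode() the mailbox payload now starts life as `u8 msg_data[10] = {};`, so any bytes the function never explicitly fills are zero rather than leftover stack contents. A tiny demonstration of the difference (using the portable `{ 0 }` spelling of the kernel's `{}` initializer):

#include <stdio.h>

int main(void)
{
	unsigned char uninit[10];          /* indeterminate contents */
	unsigned char zeroed[10] = { 0 };  /* every byte is 0 */

	(void)uninit;                      /* reading it would be undefined */
	for (int i = 0; i < 10; i++)
		printf("%d", zeroed[i]);   /* prints ten zeros */
	printf("\n");
	return 0;
}
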
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+index e898207025406..c194bba187d6c 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+@@ -255,6 +255,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
+ 	if (!phydev)
+ 		return;
+ 
++	phy_loopback(phydev, false);
++
+ 	phy_start(phydev);
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 15f93b3550990..5069f690cf0b8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -1142,7 +1142,6 @@ static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
+ 	return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
+ }
+ 
+-void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable);
+ #ifdef CONFIG_I40E_DCB
+ void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ 			   struct i40e_dcbx_config *old_cfg,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+index ce626eace692a..140b677f114db 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+@@ -1566,8 +1566,10 @@ enum i40e_aq_phy_type {
+ 	I40E_PHY_TYPE_25GBASE_LR		= 0x22,
+ 	I40E_PHY_TYPE_25GBASE_AOC		= 0x23,
+ 	I40E_PHY_TYPE_25GBASE_ACC		= 0x24,
+-	I40E_PHY_TYPE_2_5GBASE_T		= 0x30,
+-	I40E_PHY_TYPE_5GBASE_T			= 0x31,
++	I40E_PHY_TYPE_2_5GBASE_T		= 0x26,
++	I40E_PHY_TYPE_5GBASE_T			= 0x27,
++	I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS	= 0x30,
++	I40E_PHY_TYPE_5GBASE_T_LINK_STATUS	= 0x31,
+ 	I40E_PHY_TYPE_MAX,
+ 	I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP	= 0xFD,
+ 	I40E_PHY_TYPE_EMPTY			= 0xFE,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
+index a2dba32383f63..32f3facbed1a5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
+@@ -375,6 +375,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
+ 				clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ 					  &cdev->state);
+ 				i40e_client_del_instance(pf);
++				return;
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index ec19e18305ecf..ce35e064cf607 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -1154,8 +1154,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+ 		break;
+ 	case I40E_PHY_TYPE_100BASE_TX:
+ 	case I40E_PHY_TYPE_1000BASE_T:
+-	case I40E_PHY_TYPE_2_5GBASE_T:
+-	case I40E_PHY_TYPE_5GBASE_T:
++	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
++	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
+ 	case I40E_PHY_TYPE_10GBASE_T:
+ 		media = I40E_MEDIA_TYPE_BASET;
+ 		break;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 0e92668012e36..93dd58fda272f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -841,8 +841,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
+ 							     10000baseT_Full);
+ 		break;
+ 	case I40E_PHY_TYPE_10GBASE_T:
+-	case I40E_PHY_TYPE_5GBASE_T:
+-	case I40E_PHY_TYPE_2_5GBASE_T:
++	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
++	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
+ 	case I40E_PHY_TYPE_1000BASE_T:
+ 	case I40E_PHY_TYPE_100BASE_TX:
+ 		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+@@ -1409,7 +1409,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ 
+ 		memset(&config, 0, sizeof(config));
+ 		config.phy_type = abilities.phy_type;
+-		config.abilities = abilities.abilities;
++		config.abilities = abilities.abilities |
++				   I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ 		config.phy_type_ext = abilities.phy_type_ext;
+ 		config.link_speed = abilities.link_speed;
+ 		config.eee_capability = abilities.eee_capability;
+@@ -5287,7 +5288,6 @@ flags_complete:
+ 			i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL);
+ 			i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
+ 		} else {
+-			i40e_set_lldp_forwarding(pf, false);
+ 			status = i40e_aq_start_lldp(&pf->hw, false, NULL);
+ 			if (status) {
+ 				adq_err = pf->hw.aq.asq_last_status;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 527023ee4c076..ac4b44fc19f17 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -6878,40 +6878,6 @@ out:
+ }
+ #endif /* CONFIG_I40E_DCB */
+ 
+-/**
+- * i40e_set_lldp_forwarding - set forwarding of lldp frames
+- * @pf: PF being configured
+- * @enable: if forwarding to OS shall be enabled
+- *
+- * Toggle forwarding of lldp frames behavior,
+- * When passing DCB control from firmware to software
+- * lldp frames must be forwarded to the software based
+- * lldp agent.
+- */
+-void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable)
+-{
+-	if (pf->lan_vsi == I40E_NO_VSI)
+-		return;
+-
+-	if (!pf->vsi[pf->lan_vsi])
+-		return;
+-
+-	/* No need to check the outcome, commands may fail
+-	 * if desired value is already set
+-	 */
+-	i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
+-					      I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX |
+-					      I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
+-					      pf->vsi[pf->lan_vsi]->seid, 0,
+-					      enable, NULL, NULL);
+-
+-	i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
+-					      I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX |
+-					      I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
+-					      pf->vsi[pf->lan_vsi]->seid, 0,
+-					      enable, NULL, NULL);
+-}
+-
+ /**
+  * i40e_print_link_message - print link up or down
+  * @vsi: the VSI for which link needs a message
+@@ -10735,10 +10701,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	 */
+ 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+ 						       pf->main_vsi_seid);
+-#ifdef CONFIG_I40E_DCB
+-	if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
+-		i40e_set_lldp_forwarding(pf, true);
+-#endif /* CONFIG_I40E_DCB */
+ 
+ 	/* restart the VSIs that were rebuilt and running before the reset */
+ 	i40e_pf_unquiesce_all_vsi(pf);
+@@ -15753,10 +15715,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	 */
+ 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+ 						       pf->main_vsi_seid);
+-#ifdef CONFIG_I40E_DCB
+-	if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
+-		i40e_set_lldp_forwarding(pf, true);
+-#endif /* CONFIG_I40E_DCB */
+ 
+ 	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
+ 		(pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 06b4271219b14..70b515049540f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1961,10 +1961,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
+ 				 union i40e_rx_desc *rx_desc)
+ 
+ {
+-	/* XDP packets use error pointer so abort at this point */
+-	if (IS_ERR(skb))
+-		return true;
+-
+ 	/* ERR_MASK will only have valid bits if EOP set, and
+ 	 * what we are doing here is actually checking
+ 	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+@@ -2534,7 +2530,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+ 		}
+ 
+ 		/* exit if we failed to retrieve a buffer */
+-		if (!skb) {
++		if (!xdp_res && !skb) {
+ 			rx_ring->rx_stats.alloc_buff_failed++;
+ 			rx_buffer->pagecnt_bias++;
+ 			break;
+@@ -2547,7 +2543,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+ 		if (i40e_is_non_eop(rx_ring, rx_desc))
+ 			continue;
+ 
+-		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
++		if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
+ 			skb = NULL;
+ 			continue;
+ 		}
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
+index 5c10faaca790e..c81109a63e90c 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
+@@ -239,11 +239,8 @@ struct i40e_phy_info {
+ #define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
+ 					     I40E_PHY_TYPE_OFFSET)
+ /* Offset for 2.5G/5G PHY Types value to bit number conversion */
+-#define I40E_PHY_TYPE_OFFSET2 (-10)
+-#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
+-					     I40E_PHY_TYPE_OFFSET2)
+-#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
+-					     I40E_PHY_TYPE_OFFSET2)
++#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T)
++#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T)
+ #define I40E_HW_CAP_MAX_GPIO			30
+ /* Capabilities of a PF or a VF or the whole device */
+ struct i40e_hw_capabilities {
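+
The i40e changes renumber I40E_PHY_TYPE_2_5GBASE_T/5GBASE_T to their real admin-queue values (0x26/0x27), keep 0x30/0x31 as the *_LINK_STATUS variants, and so let the capability macros drop the ad-hoc -10 offset and map the enum straight to a bit with BIT_ULL(). The enum-to-bit idiom on its own (toy capability word):

#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

enum phy_type {
	PHY_2_5GBASE_T = 0x26,
	PHY_5GBASE_T   = 0x27,
};

int main(void)
{
	unsigned long long caps = 0;

	caps |= BIT_ULL(PHY_2_5GBASE_T);  /* no offset fixup needed anymore */

	printf("2.5G supported: %d\n", !!(caps & BIT_ULL(PHY_2_5GBASE_T)));
	printf("5G supported:   %d\n", !!(caps & BIT_ULL(PHY_5GBASE_T)));
	return 0;
}
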
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index dc5b3c06d1e01..ebd08543791bd 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -3899,8 +3899,6 @@ static void iavf_remove(struct pci_dev *pdev)
+ 
+ 	iounmap(hw->hw_addr);
+ 	pci_release_regions(pdev);
+-	iavf_free_all_tx_resources(adapter);
+-	iavf_free_all_rx_resources(adapter);
+ 	iavf_free_queues(adapter);
+ 	kfree(adapter->vf_res);
+ 	spin_lock_bh(&adapter->mac_vlan_list_lock);
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index d13c7fc8fb0a2..195d122c9cb22 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -2818,38 +2818,46 @@ int ice_vsi_release(struct ice_vsi *vsi)
+ }
+ 
+ /**
+- * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
++ * ice_vsi_rebuild_update_coalesce_intrl - set interrupt rate limit for a q_vector
+  * @q_vector: pointer to q_vector which is being updated
+- * @coalesce: pointer to array of struct with stored coalesce
++ * @stored_intrl_setting: original INTRL setting
+  *
+  * Set coalesce param in q_vector and update these parameters in HW.
+  */
+ static void
+-ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
+-				struct ice_coalesce_stored *coalesce)
++ice_vsi_rebuild_update_coalesce_intrl(struct ice_q_vector *q_vector,
++				      u16 stored_intrl_setting)
+ {
+-	struct ice_ring_container *rx_rc = &q_vector->rx;
+-	struct ice_ring_container *tx_rc = &q_vector->tx;
+ 	struct ice_hw *hw = &q_vector->vsi->back->hw;
+ 
+-	tx_rc->itr_setting = coalesce->itr_tx;
+-	rx_rc->itr_setting = coalesce->itr_rx;
+-
+-	/* dynamic ITR values will be updated during Tx/Rx */
+-	if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
+-		wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
+-		     ITR_REG_ALIGN(tx_rc->itr_setting) >>
+-		     ICE_ITR_GRAN_S);
+-	if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
+-		wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
+-		     ITR_REG_ALIGN(rx_rc->itr_setting) >>
+-		     ICE_ITR_GRAN_S);
+-
+-	q_vector->intrl = coalesce->intrl;
++	q_vector->intrl = stored_intrl_setting;
+ 	wr32(hw, GLINT_RATE(q_vector->reg_idx),
+ 	     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+ }
+ 
++/**
++ * ice_vsi_rebuild_update_coalesce_itr - set coalesce for a q_vector
++ * @q_vector: pointer to q_vector which is being updated
++ * @rc: pointer to ring container
++ * @stored_itr_setting: original ITR setting
++ *
++ * Set coalesce param in q_vector and update these parameters in HW.
++ */
++static void
++ice_vsi_rebuild_update_coalesce_itr(struct ice_q_vector *q_vector,
++				    struct ice_ring_container *rc,
++				    u16 stored_itr_setting)
++{
++	struct ice_hw *hw = &q_vector->vsi->back->hw;
++
++	rc->itr_setting = stored_itr_setting;
++
++	/* dynamic ITR values will be updated during Tx/Rx */
++	if (!ITR_IS_DYNAMIC(rc->itr_setting))
++		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
++		     ITR_REG_ALIGN(rc->itr_setting) >> ICE_ITR_GRAN_S);
++}
++
+ /**
+  * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
+  * @vsi: VSI connected with q_vectors
+@@ -2869,6 +2877,11 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
+ 		coalesce[i].itr_tx = q_vector->tx.itr_setting;
+ 		coalesce[i].itr_rx = q_vector->rx.itr_setting;
+ 		coalesce[i].intrl = q_vector->intrl;
++
++		if (i < vsi->num_txq)
++			coalesce[i].tx_valid = true;
++		if (i < vsi->num_rxq)
++			coalesce[i].rx_valid = true;
+ 	}
+ 
+ 	return vsi->num_q_vectors;
+@@ -2893,17 +2906,59 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
+ 	if ((size && !coalesce) || !vsi)
+ 		return;
+ 
+-	for (i = 0; i < size && i < vsi->num_q_vectors; i++)
+-		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
+-						&coalesce[i]);
+-
+-	/* number of q_vectors increased, so assume coalesce settings were
+-	 * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
+-	 * the previous settings from q_vector 0 for all of the new q_vectors
++	/* There are a couple of cases that have to be handled here:
++	 *   1. The case where the number of queue vectors stays the same, but
++	 *      the number of Tx or Rx rings changes (the first for loop)
++	 *   2. The case where the number of queue vectors increased (the
++	 *      second for loop)
+ 	 */
+-	for (; i < vsi->num_q_vectors; i++)
+-		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
+-						&coalesce[0]);
++	for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
++		/* There are 2 cases to handle here and they are the same for
++		 * both Tx and Rx:
++		 *   if the entry was valid previously (coalesce[i].[tr]x_valid
++		 *   is set) and the loop variable is less than the number of
++		 *   rings allocated, then write the previous values
++		 *
++		 *   if the entry was not valid previously, but the loop
++		 *   variable is less than the number of rings allocated (this
++		 *   means the number of rings increased from previously), then
++		 *   write out the values in the first element
++		 */
++		if (i < vsi->alloc_rxq && coalesce[i].rx_valid)
++			ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++							    &vsi->q_vectors[i]->rx,
++							    coalesce[i].itr_rx);
++		else if (i < vsi->alloc_rxq)
++			ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++							    &vsi->q_vectors[i]->rx,
++							    coalesce[0].itr_rx);
++
++		if (i < vsi->alloc_txq && coalesce[i].tx_valid)
++			ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++							    &vsi->q_vectors[i]->tx,
++							    coalesce[i].itr_tx);
++		else if (i < vsi->alloc_txq)
++			ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++							    &vsi->q_vectors[i]->tx,
++							    coalesce[0].itr_tx);
++
++		ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
++						      coalesce[i].intrl);
++	}
++
++	/* the number of queue vectors increased so write whatever is in
++	 * the first element
++	 */
++	for (; i < vsi->num_q_vectors; i++) {
++		ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++						    &vsi->q_vectors[i]->tx,
++						    coalesce[0].itr_tx);
++		ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++						    &vsi->q_vectors[i]->rx,
++						    coalesce[0].itr_rx);
++		ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
++						      coalesce[0].intrl);
++	}
+ }
+ 
+ /**
+@@ -2932,9 +2987,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
+ 
+ 	coalesce = kcalloc(vsi->num_q_vectors,
+ 			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
+-	if (coalesce)
+-		prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
+-								  coalesce);
++	if (!coalesce)
++		return -ENOMEM;
++
++	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
++
+ 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ 	ice_vsi_free_q_vectors(vsi);
+ 
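+
The ice rebuild path now records per-vector tx_valid/rx_valid flags when saving coalesce settings, and on restore uses the saved value when the entry was valid, falling back to element 0 when the ring count grew. A compact model of that restore decision (field names only approximate the driver's):

#include <stdio.h>

struct saved {
	unsigned short itr;
	unsigned char valid;
};

static unsigned short restore_one(const struct saved *s, int i, int saved_n)
{
	if (i < saved_n && s[i].valid)
		return s[i].itr;   /* entry existed before the rebuild */
	return s[0].itr;           /* ring count grew: inherit vector 0 */
}

int main(void)
{
	struct saved s[2] = { { 100, 1 }, { 0, 0 } };

	for (int i = 0; i < 4; i++)  /* four vectors after the rebuild */
		printf("vec %d -> itr %u\n", i, restore_one(s, i, 2));
	return 0;
}
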
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index 5dab77504fa5b..672a7ff0ee364 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -351,6 +351,8 @@ struct ice_coalesce_stored {
+ 	u16 itr_tx;
+ 	u16 itr_rx;
+ 	u8 intrl;
++	u8 tx_valid;
++	u8 rx_valid;
+ };
+ 
+ /* iterator for handling rings in ring container */
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 01d3ee4b58292..bcd5e7ae8482f 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1319,7 +1319,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 		skb->protocol = eth_type_trans(skb, netdev);
+ 
+ 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+-		    RX_DMA_VID(trxd.rxd3))
++		    (trxd.rxd2 & RX_DMA_VTAG))
+ 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ 					       RX_DMA_VID(trxd.rxd3));
+ 		skb_record_rx_queue(skb, 0);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index fd3cec8f06bae..c47272100615d 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -296,6 +296,7 @@
+ #define RX_DMA_LSO		BIT(30)
+ #define RX_DMA_PLEN0(_x)	(((_x) & 0x3fff) << 16)
+ #define RX_DMA_GET_PLEN0(_x)	(((_x) >> 16) & 0x3fff)
++#define RX_DMA_VTAG		BIT(15)
+ 
+ /* QDMA descriptor rxd3 */
+ #define RX_DMA_VID(_x)		((_x) & 0xfff)
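+
The mtk_eth_soc fix only extracts a VLAN ID from rxd3 when the descriptor's RX_DMA_VTAG bit in rxd2 says a tag is actually present; RX_DMA_VID() alone just masks 12 bits and can be non-zero for garbage too. The guard, in isolation (descriptor words faked):

#include <stdio.h>

#define BIT(n)         (1u << (n))
#define RX_DMA_VTAG    BIT(15)        /* "VLAN tag present" flag in rxd2 */
#define RX_DMA_VID(x)  ((x) & 0xfff)  /* VID field in rxd3 */

int main(void)
{
	unsigned int rxd2 = RX_DMA_VTAG;  /* pretend hardware set the flag */
	unsigned int rxd3 = 0x064;        /* VID 100 */

	if (rxd2 & RX_DMA_VTAG)           /* the fix: test the flag, not the VID */
		printf("vid=%u\n", RX_DMA_VID(rxd3));
	return 0;
}
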
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index bdbffe484fce4..d2efe24559555 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -576,7 +576,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
+ 
+ 	pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ 	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+-	prefetchw(wqe->data);
++	net_prefetchw(wqe->data);
+ 
+ 	*session = (struct mlx5e_tx_mpwqe) {
+ 		.wqe = wqe,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+index bf3250e0e59ca..749585fe6fc96 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+@@ -352,6 +352,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
+ 	plat_dat->bsp_priv = gmac;
+ 	plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ 	plat_dat->multicast_filter_bins = 0;
++	plat_dat->tx_fifo_size = 8192;
++	plat_dat->rx_fifo_size = 8192;
+ 
+ 	err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ 	if (err)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index 29f765a246a05..aaf37598cbd3c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -638,6 +638,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
+ 	value &= ~GMAC_PACKET_FILTER_PCF;
+ 	value &= ~GMAC_PACKET_FILTER_PM;
+ 	value &= ~GMAC_PACKET_FILTER_PR;
++	value &= ~GMAC_PACKET_FILTER_RA;
+ 	if (dev->flags & IFF_PROMISC) {
+ 		/* VLAN Tag Filter Fail Packets Queuing */
+ 		if (hw->vlan_fail_q_en) {
+diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
+index 390d3403386aa..144892060718e 100644
+--- a/drivers/net/ipa/gsi.c
++++ b/drivers/net/ipa/gsi.c
+@@ -211,8 +211,8 @@ static void gsi_irq_setup(struct gsi *gsi)
+ 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ 
+ 	/* The inter-EE registers are in the non-adjusted address range */
+-	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+-	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
++	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
++	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);
+ 
+ 	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ }
+diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
+index 1622d8cf8dea4..48ef04afab79f 100644
+--- a/drivers/net/ipa/gsi_reg.h
++++ b/drivers/net/ipa/gsi_reg.h
+@@ -53,15 +53,15 @@
+ #define GSI_EE_REG_ADJUST			0x0000d000	/* IPA v4.5+ */
+ 
+ /* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
+-#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
+-			GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
+-#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
+-			(0x0000c018 + 0x1000 * (ee))
+-
+-#define GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET \
+-			GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
+-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
+-			(0x0000c01c + 0x1000 * (ee))
++#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
++			GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
++#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
++			(0x0000c020 + 0x1000 * (ee))
++
++#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
++			GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
++#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
++			(0x0000c024 + 0x1000 * (ee))
+ 
+ /* All other register offsets are relative to gsi->virt */
+ #define GSI_CH_C_CNTXT_0_OFFSET(ch) \
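+
The gsi_reg.h change retargets the two inter-EE writes from the raw IRQ status registers (0xc018/0xc01c) to the IRQ mask registers (0xc020/0xc024), keeping the base-plus-stride macro shape. That per-EE offset scheme as plain C (offsets as in the hunk; the 0x1000 stride models one register bank per execution environment):

#include <stdio.h>

#define GSI_EE_AP 0
#define INTER_EE_SRC_CH_IRQ_MSK_OFFSET(ee)    (0x0000c020 + 0x1000 * (ee))
#define INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET(ee) (0x0000c024 + 0x1000 * (ee))

int main(void)
{
	printf("CH mask @ 0x%05x\n", INTER_EE_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP));
	printf("EV mask @ 0x%05x\n", INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP));
	return 0;
}
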
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index cc38e326405a6..af2e1759b5231 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -273,6 +273,9 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
+ {
+ 	struct phy_device *phydev = to_phy_device(dev);
+ 
++	if (phydev->mac_managed_pm)
++		return 0;
++
+ 	/* We must stop the state machine manually, otherwise it stops out of
+ 	 * control, possibly with the phydev->lock held. Upon resume, netdev
+ 	 * may call phy routines that try to grab the same lock, and that may
+@@ -294,6 +297,9 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
+ 	struct phy_device *phydev = to_phy_device(dev);
+ 	int ret;
+ 
++	if (phydev->mac_managed_pm)
++		return 0;
++
+ 	if (!phydev->suspended_by_mdio_bus)
+ 		goto no_resume;
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
+index cccfd3bd4d27e..ca5cda890d58e 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.c
++++ b/drivers/net/wireless/ath/ath11k/wmi.c
+@@ -5417,31 +5417,6 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ 	return 0;
+ }
+ 
+-static int
+-ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf,
+-			 u32 len, const struct wmi_pdev_temperature_event *ev)
+-{
+-	const void **tb;
+-	int ret;
+-
+-	tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+-	if (IS_ERR(tb)) {
+-		ret = PTR_ERR(tb);
+-		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+-		return ret;
+-	}
+-
+-	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+-	if (!ev) {
+-		ath11k_warn(ab, "failed to fetch pdev temp ev");
+-		kfree(tb);
+-		return -EPROTO;
+-	}
+-
+-	kfree(tb);
+-	return 0;
+-}
+-
+ size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
+ {
+ 	struct ath11k_fw_stats_vdev *i;
+@@ -6849,23 +6824,37 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ 				  struct sk_buff *skb)
+ {
+ 	struct ath11k *ar;
+-	struct wmi_pdev_temperature_event ev = {0};
++	const void **tb;
++	const struct wmi_pdev_temperature_event *ev;
++	int ret;
++
++	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
++		return;
++	}
+ 
+-	if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+-		ath11k_warn(ab, "failed to extract pdev temperature event");
++	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
++	if (!ev) {
++		ath11k_warn(ab, "failed to fetch pdev temp ev");
++		kfree(tb);
+ 		return;
+ 	}
+ 
+ 	ath11k_dbg(ab, ATH11K_DBG_WMI,
+-		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
++		   "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
+ 
+-	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id);
++	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ 	if (!ar) {
+-		ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
++		ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
++		kfree(tb);
+ 		return;
+ 	}
+ 
+-	ath11k_thermal_event_temperature(ar, ev.temp);
++	ath11k_thermal_event_temperature(ar, ev->temp);
++
++	kfree(tb);
+ }
+ 
+ static void ath11k_fils_discovery_event(struct ath11k_base *ab,
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+index 60e0db4a5e201..9236f91068261 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+@@ -2,7 +2,7 @@
+ /*
+  * Copyright (C) 2015 Intel Mobile Communications GmbH
+  * Copyright (C) 2016-2017 Intel Deutschland GmbH
+- * Copyright (C) 2019-2020 Intel Corporation
++ * Copyright (C) 2019-2021 Intel Corporation
+  */
+ #include <linux/kernel.h>
+ #include <linux/bsearch.h>
+@@ -21,7 +21,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+ 				  const struct iwl_cfg_trans_params *cfg_trans)
+ {
+ 	struct iwl_trans *trans;
+-	int txcmd_size, txcmd_align;
+ #ifdef CONFIG_LOCKDEP
+ 	static struct lock_class_key __key;
+ #endif
+@@ -31,10 +30,40 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+ 		return NULL;
+ 
+ 	trans->trans_cfg = cfg_trans;
+-	if (!cfg_trans->gen2) {
++
++#ifdef CONFIG_LOCKDEP
++	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
++			 &__key, 0);
++#endif
++
++	trans->dev = dev;
++	trans->ops = ops;
++	trans->num_rx_queues = 1;
++
++	WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
++
++	if (trans->trans_cfg->use_tfh) {
++		trans->txqs.tfd.addr_size = 64;
++		trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
++		trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
++	} else {
++		trans->txqs.tfd.addr_size = 36;
++		trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
++		trans->txqs.tfd.size = sizeof(struct iwl_tfd);
++	}
++	trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
++
++	return trans;
++}
++
++int iwl_trans_init(struct iwl_trans *trans)
++{
++	int txcmd_size, txcmd_align;
++
++	if (!trans->trans_cfg->gen2) {
+ 		txcmd_size = sizeof(struct iwl_tx_cmd);
+ 		txcmd_align = sizeof(void *);
+-	} else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
++	} else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ 		txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ 		txcmd_align = 64;
+ 	} else {
+@@ -46,17 +75,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+ 	txcmd_size += 36; /* biggest possible 802.11 header */
+ 
+ 	/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+-	if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+-		return ERR_PTR(-EINVAL);
+-
+-#ifdef CONFIG_LOCKDEP
+-	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+-			 &__key, 0);
+-#endif
+-
+-	trans->dev = dev;
+-	trans->ops = ops;
+-	trans->num_rx_queues = 1;
++	if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
++		return -EINVAL;
+ 
+ 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ 		trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+@@ -68,23 +88,16 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+ 	 * allocate here.
+ 	 */
+ 	if (trans->trans_cfg->gen2) {
+-		trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev,
++		trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev,
+ 						       trans->txqs.bc_tbl_size,
+ 						       256, 0);
+ 		if (!trans->txqs.bc_pool)
+-			return NULL;
++			return -ENOMEM;
+ 	}
+ 
+-	if (trans->trans_cfg->use_tfh) {
+-		trans->txqs.tfd.addr_size = 64;
+-		trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
+-		trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
+-	} else {
+-		trans->txqs.tfd.addr_size = 36;
+-		trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
+-		trans->txqs.tfd.size = sizeof(struct iwl_tfd);
+-	}
+-	trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
++	/* Some things must not change even if the config does */
++	WARN_ON(trans->txqs.tfd.addr_size !=
++		(trans->trans_cfg->use_tfh ? 64 : 36));
+ 
+ 	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
+ 		 "iwl_cmd_pool:%s", dev_name(trans->dev));
+@@ -93,35 +106,35 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+ 				  txcmd_size, txcmd_align,
+ 				  SLAB_HWCACHE_ALIGN, NULL);
+ 	if (!trans->dev_cmd_pool)
+-		return NULL;
+-
+-	WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
++		return -ENOMEM;
+ 
+ 	trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
+ 	if (!trans->txqs.tso_hdr_page) {
+ 		kmem_cache_destroy(trans->dev_cmd_pool);
+-		return NULL;
++		return -ENOMEM;
+ 	}
+ 
+ 	/* Initialize the wait queue for commands */
+ 	init_waitqueue_head(&trans->wait_command_queue);
+ 
+-	return trans;
++	return 0;
+ }
+ 
+ void iwl_trans_free(struct iwl_trans *trans)
+ {
+ 	int i;
+ 
+-	for_each_possible_cpu(i) {
+-		struct iwl_tso_hdr_page *p =
+-			per_cpu_ptr(trans->txqs.tso_hdr_page, i);
++	if (trans->txqs.tso_hdr_page) {
++		for_each_possible_cpu(i) {
++			struct iwl_tso_hdr_page *p =
++				per_cpu_ptr(trans->txqs.tso_hdr_page, i);
+ 
+-		if (p->page)
+-			__free_page(p->page);
+-	}
++			if (p && p->page)
++				__free_page(p->page);
++		}
+ 
+-	free_percpu(trans->txqs.tso_hdr_page);
++		free_percpu(trans->txqs.tso_hdr_page);
++	}
+ 
+ 	kmem_cache_destroy(trans->dev_cmd_pool);
+ }
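+
The iwl-trans.c refactor splits construction in two: iwl_trans_alloc() now only fills in fields that cannot fail, and the new iwl_trans_init() does the fallible allocations and returns an errno, so the PCI probe path can create the trans, adjust its configuration, and only then commit to the expensive setup. A bare-bones version of the two-phase pattern (types invented):

#include <stdlib.h>

struct trans {
	int addr_size;       /* cheap, infallible setup in alloc() */
	void *cmd_pool;      /* fallible setup deferred to init() */
};

static struct trans *trans_alloc(void)
{
	struct trans *t = calloc(1, sizeof(*t));

	if (t)
		t->addr_size = 36;  /* no failure paths in this phase */
	return t;
}

static int trans_init(struct trans *t)
{
	t->cmd_pool = malloc(64);   /* may fail, so the caller can unwind */
	return t->cmd_pool ? 0 : -1;
}

int main(void)
{
	struct trans *t = trans_alloc();

	if (t && !trans_init(t)) {
		/* ... use t ... */
		free(t->cmd_pool);
	}
	free(t);
	return 0;
}
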
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+index 4a5822c1be136..3e0df6fbb6424 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+@@ -1438,6 +1438,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+ 			  struct device *dev,
+ 			  const struct iwl_trans_ops *ops,
+ 			  const struct iwl_cfg_trans_params *cfg_trans);
++int iwl_trans_init(struct iwl_trans *trans);
+ void iwl_trans_free(struct iwl_trans *trans);
+ 
+ /*****************************************************
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 558a0b2ef0fc8..66faf7914bd8c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -17,10 +17,20 @@
+ #include "iwl-prph.h"
+ #include "internal.h"
+ 
++#define TRANS_CFG_MARKER BIT(0)
++#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg),	\
++							 struct _struct)
++extern int _invalid_type;
++#define _TRANS_CFG_MARKER(cfg)						\
++	(__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params),	\
++			       TRANS_CFG_MARKER,			\
++	 __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type)))
++#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg))
++
+ #define IWL_PCI_DEVICE(dev, subdev, cfg) \
+ 	.vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
+ 	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+-	.driver_data = (kernel_ulong_t)&(cfg)
++	.driver_data = _ASSIGN_CFG(cfg)
+ 
+ /* Hardware specific file defines the PCI IDs table for that hardware module */
+ static const struct pci_device_id iwl_hw_card_ids[] = {
+@@ -1075,19 +1085,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+ 
+ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+-	const struct iwl_cfg_trans_params *trans =
+-		(struct iwl_cfg_trans_params *)(ent->driver_data);
++	const struct iwl_cfg_trans_params *trans;
+ 	const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
+ 	struct iwl_trans *iwl_trans;
+ 	struct iwl_trans_pcie *trans_pcie;
+ 	int i, ret;
++	const struct iwl_cfg *cfg;
++
++	trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
++
+ 	/*
+ 	 * This is needed for backwards compatibility with the old
+ 	 * tables, so we don't need to change all the config structs
+ 	 * at the same time.  The cfg is used to compare with the old
+ 	 * full cfg structs.
+ 	 */
+-	const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
++	cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
+ 
+ 	/* make sure trans is the first element in iwl_cfg */
+ 	BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
+@@ -1202,11 +1215,19 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ #endif
+ 	/*
+-	 * If we didn't set the cfg yet, assume the trans is actually
+-	 * a full cfg from the old tables.
++	 * If we didn't set the cfg yet, the PCI ID table entry should have
++	 * been a full config - if yes, use it, otherwise fail.
+ 	 */
+-	if (!iwl_trans->cfg)
++	if (!iwl_trans->cfg) {
++		if (ent->driver_data & TRANS_CFG_MARKER) {
++			pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
++			       pdev->device, pdev->subsystem_device,
++			       iwl_trans->hw_rev, iwl_trans->hw_rf_id);
++			ret = -EINVAL;
++			goto out_free_trans;
++		}
+ 		iwl_trans->cfg = cfg;
++	}
+ 
+ 	/* if we don't have a name yet, copy name from the old cfg */
+ 	if (!iwl_trans->name)
+@@ -1222,6 +1243,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
+ 	}
+ 
++	ret = iwl_trans_init(iwl_trans);
++	if (ret)
++		goto out_free_trans;
++
+ 	pci_set_drvdata(pdev, iwl_trans);
+ 	iwl_trans->drv = iwl_drv_start(iwl_trans);
+ 
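+
The drv.c hunk encodes which struct a PCI table entry carries by setting bit 0 of driver_data (TRANS_CFG_MARKER) when it points at an iwl_cfg_trans_params, relying on the structs being more than byte-aligned, and masking the bit off before dereferencing. The tagging trick by itself:

#include <stdio.h>
#include <stdint.h>

#define TRANS_CFG_MARKER 0x1UL       /* bit 0 is free on aligned pointers */

struct cfg { int x; };

int main(void)
{
	static const struct cfg c = { 42 };
	uintptr_t driver_data = (uintptr_t)&c | TRANS_CFG_MARKER;

	int is_trans_cfg = driver_data & TRANS_CFG_MARKER;
	const struct cfg *p =
		(const struct cfg *)(driver_data & ~TRANS_CFG_MARKER);

	printf("marker=%d x=%d\n", is_trans_cfg, p->x);
	return 0;
}
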
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index 94ffc1ae484dc..af9412bd697ee 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+  * Copyright (C) 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ #include "iwl-trans.h"
+ #include "iwl-prph.h"
+@@ -143,7 +143,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+ 	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ 		IWL_DEBUG_INFO(trans,
+ 			       "DEVICE_ENABLED bit was set and is now cleared\n");
+-		iwl_txq_gen2_tx_stop(trans);
++		iwl_txq_gen2_tx_free(trans);
+ 		iwl_pcie_rx_stop(trans);
+ 	}
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+index 833f43d1ca7a0..810dcb3df242c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+@@ -13,30 +13,6 @@
+ #include "iwl-scd.h"
+ #include <linux/dmapool.h>
+ 
+-/*
+- * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
+- */
+-void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
+-{
+-	int txq_id;
+-
+-	/*
+-	 * This function can be called before the op_mode disabled the
+-	 * queues. This happens when we have an rfkill interrupt.
+-	 * Since we stop Tx altogether - mark the queues as stopped.
+-	 */
+-	memset(trans->txqs.queue_stopped, 0,
+-	       sizeof(trans->txqs.queue_stopped));
+-	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+-
+-	/* Unmap DMA from host system and free skb's */
+-	for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
+-		if (!trans->txqs.txq[txq_id])
+-			continue;
+-		iwl_txq_gen2_unmap(trans, txq_id);
+-	}
+-}
+-
+ /*
+  * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
+  */
+@@ -1189,6 +1165,12 @@ static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
+ 		goto error_free_resp;
+ 	}
+ 
++	if (WARN_ONCE(trans->txqs.txq[qid],
++		      "queue %d already allocated\n", qid)) {
++		ret = -EIO;
++		goto error_free_resp;
++	}
++
+ 	txq->id = qid;
+ 	trans->txqs.txq[qid] = txq;
+ 	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+index af1dbdf5617a0..20efc62acf133 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright (C) 2020 Intel Corporation
++ * Copyright (C) 2020-2021 Intel Corporation
+  */
+ #ifndef __iwl_trans_queue_tx_h__
+ #define __iwl_trans_queue_tx_h__
+@@ -123,7 +123,6 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
+ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
+ void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+-void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
+ void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
+ int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+ 		 bool cmd_queue);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 8bf45497cfca1..36a430f09f64c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -222,6 +222,7 @@ struct mt76_wcid {
+ 
+ 	u16 idx;
+ 	u8 hw_key_idx;
++	u8 hw_key_idx2;
+ 
+ 	u8 sta:1;
+ 	u8 ext_phy:1;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+index 2eab23898c778..6dbaaf95ee385 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+@@ -86,6 +86,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
+ 	switch (val) {
+ 	case 0x7615:
+ 	case 0x7622:
++	case 0x7663:
+ 		return 0;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index d73841480544a..8dccb589b756d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1037,7 +1037,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
+ static int
+ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 			   struct ieee80211_key_conf *key,
+-			   enum mt7615_cipher_type cipher,
++			   enum mt7615_cipher_type cipher, u16 cipher_mask,
+ 			   enum set_key_cmd cmd)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
+@@ -1054,22 +1054,22 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 			memcpy(data + 16, key->key + 24, 8);
+ 			memcpy(data + 24, key->key + 16, 8);
+ 		} else {
+-			if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
+-				memmove(data + 16, data, 16);
+-			if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
++			if (cipher_mask == BIT(cipher))
+ 				memcpy(data, key->key, key->keylen);
+-			else if (cipher == MT_CIPHER_BIP_CMAC_128)
++			else if (cipher != MT_CIPHER_BIP_CMAC_128)
++				memcpy(data, key->key, 16);
++			if (cipher == MT_CIPHER_BIP_CMAC_128)
+ 				memcpy(data + 16, key->key, 16);
+ 		}
+ 	} else {
+-		if (wcid->cipher & ~BIT(cipher)) {
+-			if (cipher != MT_CIPHER_BIP_CMAC_128)
+-				memmove(data, data + 16, 16);
++		if (cipher == MT_CIPHER_BIP_CMAC_128)
+ 			memset(data + 16, 0, 16);
+-		} else {
++		else if (cipher_mask)
++			memset(data, 0, 16);
++		if (!cipher_mask)
+ 			memset(data, 0, sizeof(data));
+-		}
+ 	}
++
+ 	mt76_wr_copy(dev, addr, data, sizeof(data));
+ 
+ 	return 0;
+@@ -1077,7 +1077,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ static int
+ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-			  enum mt7615_cipher_type cipher,
++			  enum mt7615_cipher_type cipher, u16 cipher_mask,
+ 			  int keyidx, enum set_key_cmd cmd)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
+@@ -1087,20 +1087,23 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ 	w0 = mt76_rr(dev, addr);
+ 	w1 = mt76_rr(dev, addr + 4);
+-	if (cmd == SET_KEY) {
+-		w0 |= MT_WTBL_W0_RX_KEY_VALID |
+-		      FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
+-				 cipher == MT_CIPHER_BIP_CMAC_128);
+-		if (cipher != MT_CIPHER_BIP_CMAC_128 ||
+-		    !wcid->cipher)
+-			w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
+-	}  else {
+-		if (!(wcid->cipher & ~BIT(cipher)))
+-			w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
+-				MT_WTBL_W0_KEY_IDX);
+-		if (cipher == MT_CIPHER_BIP_CMAC_128)
+-			w0 &= ~MT_WTBL_W0_RX_IK_VALID;
++
++	if (cipher_mask)
++		w0 |= MT_WTBL_W0_RX_KEY_VALID;
++	else
++		w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
++	if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
++		w0 |= MT_WTBL_W0_RX_IK_VALID;
++	else
++		w0 &= ~MT_WTBL_W0_RX_IK_VALID;
++
++	if (cmd == SET_KEY &&
++	    (cipher != MT_CIPHER_BIP_CMAC_128 ||
++	     cipher_mask == BIT(cipher))) {
++		w0 &= ~MT_WTBL_W0_KEY_IDX;
++		w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
+ 	}
++
+ 	mt76_wr(dev, MT_WTBL_RICR0, w0);
+ 	mt76_wr(dev, MT_WTBL_RICR1, w1);
+ 
+@@ -1113,24 +1116,25 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ static void
+ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-			      enum mt7615_cipher_type cipher,
++			      enum mt7615_cipher_type cipher, u16 cipher_mask,
+ 			      enum set_key_cmd cmd)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
+ 
+-	if (cmd == SET_KEY) {
+-		if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
+-			mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
+-				 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
+-	} else {
+-		if (cipher != MT_CIPHER_BIP_CMAC_128 &&
+-		    wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
+-			mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
+-				 FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
+-					    MT_CIPHER_BIP_CMAC_128));
+-		else if (!(wcid->cipher & ~BIT(cipher)))
+-			mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
++	if (!cipher_mask) {
++		mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
++		return;
+ 	}
++
++	if (cmd != SET_KEY)
++		return;
++
++	if (cipher == MT_CIPHER_BIP_CMAC_128 &&
++	    cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
++		return;
++
++	mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
++		 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
+ }
+ 
+ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+@@ -1139,25 +1143,30 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 			      enum set_key_cmd cmd)
+ {
+ 	enum mt7615_cipher_type cipher;
++	u16 cipher_mask = wcid->cipher;
+ 	int err;
+ 
+ 	cipher = mt7615_mac_get_cipher(key->cipher);
+ 	if (cipher == MT_CIPHER_NONE)
+ 		return -EOPNOTSUPP;
+ 
+-	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
+-	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
++	if (cmd == SET_KEY)
++		cipher_mask |= BIT(cipher);
++	else
++		cipher_mask &= ~BIT(cipher);
++
++	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
++	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
++					 cmd);
+ 	if (err < 0)
+ 		return err;
+ 
+-	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd);
++	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
++					key->keyidx, cmd);
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (cmd == SET_KEY)
+-		wcid->cipher |= BIT(cipher);
+-	else
+-		wcid->cipher &= ~BIT(cipher);
++	wcid->cipher = cipher_mask;
+ 
+ 	return 0;
+ }
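The mt7615 WTBL rework above replaces the old incremental reads of wcid->cipher (whose meaning depended on whether the update had already happened) with a cipher_mask computed once up front and passed to every helper. A sketch of the derivation, using the driver's BIT()-per-cipher encoding:

    u16 cipher_mask = wcid->cipher;      /* ciphers currently installed */

    if (cmd == SET_KEY)
        cipher_mask |= BIT(cipher);      /* key being added */
    else
        cipher_mask &= ~BIT(cipher);     /* key being removed */

    /* helpers can now ask unambiguous questions, e.g.: */
    bool only_key = (cipher_mask == BIT(cipher));  /* sole remaining key? */
    bool no_keys  = !cipher_mask;                  /* wcid fully keyless? */

wcid->cipher itself is only committed at the end, once all three helpers have succeeded.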
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 6107e827b3836..d334491667a4e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -334,7 +334,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
+ 				  &mvif->sta;
+ 	struct mt76_wcid *wcid = &msta->wcid;
+-	int idx = key->keyidx, err;
++	int idx = key->keyidx, err = 0;
++	u8 *wcid_keyidx = &wcid->hw_key_idx;
+ 
+ 	/* The hardware does not support per-STA RX GTK, fallback
+ 	 * to software mode for these.
+@@ -349,6 +350,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	/* fall back to sw encryption for unsupported ciphers */
+ 	switch (key->cipher) {
+ 	case WLAN_CIPHER_SUITE_AES_CMAC:
++		wcid_keyidx = &wcid->hw_key_idx2;
+ 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ 		break;
+ 	case WLAN_CIPHER_SUITE_TKIP:
+@@ -366,12 +368,13 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 
+ 	mt7615_mutex_acquire(dev);
+ 
+-	if (cmd == SET_KEY) {
+-		key->hw_key_idx = wcid->idx;
+-		wcid->hw_key_idx = idx;
+-	} else if (idx == wcid->hw_key_idx) {
+-		wcid->hw_key_idx = -1;
+-	}
++	if (cmd == SET_KEY)
++		*wcid_keyidx = idx;
++	else if (idx == *wcid_keyidx)
++		*wcid_keyidx = -1;
++	else
++		goto out;
++
+ 	mt76_wcid_key_setup(&dev->mt76, wcid,
+ 			    cmd == SET_KEY ? key : NULL);
+ 
+@@ -380,6 +383,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	else
+ 		err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+ 
++out:
+ 	mt7615_mutex_release(dev);
+ 
+ 	return err;
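The main.c change above (mirrored for mt7915 and mt7921 later in this patch) introduces a wcid_keyidx pointer so that BIP/CMAC (IGTK) keys track their own hw_key_idx2 slot, and turns a delete of a key index the wcid never owned into an early exit rather than a teardown. The selection logic in isolation, as a sketch:

    u8 *wcid_keyidx = &wcid->hw_key_idx;        /* default key slot */

    if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
        wcid_keyidx = &wcid->hw_key_idx2;       /* IGTK gets its own slot */

    if (cmd == SET_KEY)
        *wcid_keyidx = idx;
    else if (idx == *wcid_keyidx)
        *wcid_keyidx = -1;                      /* forget the active index */
    else
        goto out;                               /* unrelated key: no-op */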
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index 631596fc2f362..198e9025b6818 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -291,12 +291,20 @@ static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
+ 	u32 addr;
+ 	int err;
+ 
+-	addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
++	if (is_mt7663(mdev)) {
++		/* Clear firmware own via N9 eint */
++		mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
++		mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
++
++		addr = MT_CONN_HIF_ON_LPCTL;
++	} else {
++		addr = MT_CFG_LPCR_HOST;
++	}
++
+ 	mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+ 
+ 	mt7622_trigger_hif_int(dev, true);
+ 
+-	addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ 	err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
+ 
+ 	mt7622_trigger_hif_int(dev, false);
+@@ -1040,6 +1048,9 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
+ 
+ 	wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ 						  WTBL_SET, sta_wtbl, &skb);
++	if (IS_ERR(wtbl_hdr))
++		return PTR_ERR(wtbl_hdr);
++
+ 	mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx,
+ 				    sta_wtbl, wtbl_hdr);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 76a61e8b7fb96..cefd33b74a875 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -833,6 +833,9 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
+ 	wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid,
+ 						  WTBL_RESET_AND_SET,
+ 						  sta_wtbl, &skb);
++	if (IS_ERR(wtbl_hdr))
++		return PTR_ERR(wtbl_hdr);
++
+ 	if (enable) {
+ 		mt76_connac_mcu_wtbl_generic_tlv(dev, skb, vif, sta, sta_wtbl,
+ 						 wtbl_hdr);
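This hunk, the matching one in mt7615/mcu.c above, and the three in mt7915/mcu.c below all add the same missing check: mt76_connac_mcu_alloc_wtbl_req() reports failure through an error-encoded pointer, not NULL, so the result must be filtered before use. The canonical kernel pattern:

    wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET,
                                              sta_wtbl, &skb);
    if (IS_ERR(wtbl_hdr))
        return PTR_ERR(wtbl_hdr);   /* decode the errno from the pointer */

Without it, the error pointer would be dereferenced when the TLVs are appended.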
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+index ab671e21f8827..02db5d66735dd 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+@@ -447,6 +447,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ 		return -EOPNOTSUPP;
+ 
++	/* MT76x0 GTK offloading does not work with more than one VIF */
++	if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
++		return -EOPNOTSUPP;
++
+ 	msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
+ 	wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+index 660398ac53c24..738ecf8f4fa2f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+@@ -124,7 +124,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
+ 				   struct ieee80211_channel *chan,
+ 				   u8 chain_idx)
+ {
+-	int index;
++	int index, target_power;
+ 	bool tssi_on;
+ 
+ 	if (chain_idx > 3)
+@@ -133,15 +133,22 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
+ 	tssi_on = mt7915_tssi_enabled(dev, chan->band);
+ 
+ 	if (chan->band == NL80211_BAND_2GHZ) {
+-		index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on;
++		index = MT_EE_TX0_POWER_2G + chain_idx * 3;
++		target_power = mt7915_eeprom_read(dev, index);
++
++		if (!tssi_on)
++			target_power += mt7915_eeprom_read(dev, index + 1);
+ 	} else {
+-		int group = tssi_on ?
+-			    mt7915_get_channel_group(chan->hw_value) : 8;
++		int group = mt7915_get_channel_group(chan->hw_value);
++
++		index = MT_EE_TX0_POWER_5G + chain_idx * 12;
++		target_power = mt7915_eeprom_read(dev, index + group);
+ 
+-		index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group;
++		if (!tssi_on)
++			target_power += mt7915_eeprom_read(dev, index + 8);
+ 	}
+ 
+-	return mt7915_eeprom_read(dev, index);
++	return target_power;
+ }
+ 
+ static const u8 sku_cck_delta_map[] = {
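The target-power rework above switches from encoding the TSSI choice into a single fused EEPROM index to reading a base value and conditionally adding a delta byte: the channel group always selects the 5 GHz base, and the non-TSSI delta (at +1 for 2 GHz, +8 for 5 GHz) is added on top when TSSI is disabled. The 2 GHz arm as a sketch:

    index = MT_EE_TX0_POWER_2G + chain_idx * 3;
    target_power = mt7915_eeprom_read(dev, index);          /* base */
    if (!tssi_on)
        target_power += mt7915_eeprom_read(dev, index + 1); /* delta */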
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index 894016fdcf070..c7d4268d860af 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -4,6 +4,7 @@
+ #include <linux/etherdevice.h>
+ #include "mt7915.h"
+ #include "mac.h"
++#include "mcu.h"
+ #include "eeprom.h"
+ 
+ #define CCK_RATE(_idx, _rate) {						\
+@@ -283,9 +284,50 @@ static void mt7915_init_work(struct work_struct *work)
+ 	mt7915_register_ext_phy(dev);
+ }
+ 
++static void mt7915_wfsys_reset(struct mt7915_dev *dev)
++{
++	u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
++	u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
++
++#define MT_MCU_DUMMY_RANDOM	GENMASK(15, 0)
++#define MT_MCU_DUMMY_DEFAULT	GENMASK(31, 16)
++
++	mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
++
++	/* change to software control */
++	val |= MT_TOP_PWR_SW_RST;
++	mt76_wr(dev, MT_TOP_PWR_CTRL, val);
++
++	/* reset wfsys */
++	val &= ~MT_TOP_PWR_SW_RST;
++	mt76_wr(dev, MT_TOP_PWR_CTRL, val);
++
++	/* release wfsys then mcu re-executes romcode */
++	val |= MT_TOP_PWR_SW_RST;
++	mt76_wr(dev, MT_TOP_PWR_CTRL, val);
++
++	/* switch to hw control */
++	val &= ~MT_TOP_PWR_SW_RST;
++	val |= MT_TOP_PWR_HW_CTRL;
++	mt76_wr(dev, MT_TOP_PWR_CTRL, val);
++
++	/* check whether mcu resets to default */
++	if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
++			    MT_MCU_DUMMY_DEFAULT, 1000)) {
++		dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
++		return;
++	}
++
++	/* wfsys reset won't clear host registers */
++	mt76_clear(dev, reg, MT_TOP_MISC_FW_STATE);
++
++	msleep(100);
++}
++
+ static int mt7915_init_hardware(struct mt7915_dev *dev)
+ {
+ 	int ret, idx;
++	u32 val;
+ 
+ 	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+ 
+@@ -295,6 +337,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
+ 
+ 	dev->dbdc_support = !!(mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5));
+ 
++	val = mt76_rr(dev, mt7915_reg_map_l1(dev, MT_TOP_MISC));
++
++	/* If MCU was already running, it is likely in a bad state */
++	if (FIELD_GET(MT_TOP_MISC_FW_STATE, val) > FW_STATE_FW_DOWNLOAD)
++		mt7915_wfsys_reset(dev);
++
+ 	ret = mt7915_dma_init(dev);
+ 	if (ret)
+ 		return ret;
+@@ -308,8 +356,14 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
+ 	mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+ 
+ 	ret = mt7915_mcu_init(dev);
+-	if (ret)
+-		return ret;
++	if (ret) {
++		/* Reset and try again */
++		mt7915_wfsys_reset(dev);
++
++		ret = mt7915_mcu_init(dev);
++		if (ret)
++			return ret;
++	}
+ 
+ 	ret = mt7915_eeprom_init(dev);
+ 	if (ret < 0)
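Firmware bring-up on mt7915 gains two recovery points: if the MCU is found past the FW_DOWNLOAD state at probe it is presumed stale and the whole WiFi subsystem is reset via the new mt7915_wfsys_reset(), and a failed mt7915_mcu_init() earns exactly one reset-and-retry before the probe is abandoned. The retry shape, in brief:

    ret = mt7915_mcu_init(dev);
    if (ret) {
        mt7915_wfsys_reset(dev);     /* drop the MCU back to ROM state */
        ret = mt7915_mcu_init(dev);  /* a single retry */
        if (ret)
            return ret;
    }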
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 98f4b49642a8c..bf032d943f744 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -317,7 +317,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
+ 				  &mvif->sta;
+ 	struct mt76_wcid *wcid = &msta->wcid;
++	u8 *wcid_keyidx = &wcid->hw_key_idx;
+ 	int idx = key->keyidx;
++	int err = 0;
+ 
+ 	/* The hardware does not support per-STA RX GTK, fallback
+ 	 * to software mode for these.
+@@ -332,6 +334,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	/* fall back to sw encryption for unsupported ciphers */
+ 	switch (key->cipher) {
+ 	case WLAN_CIPHER_SUITE_AES_CMAC:
++		wcid_keyidx = &wcid->hw_key_idx2;
+ 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ 		break;
+ 	case WLAN_CIPHER_SUITE_TKIP:
+@@ -347,16 +350,24 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	if (cmd == SET_KEY) {
+-		key->hw_key_idx = wcid->idx;
+-		wcid->hw_key_idx = idx;
+-	} else if (idx == wcid->hw_key_idx) {
+-		wcid->hw_key_idx = -1;
+-	}
++	mutex_lock(&dev->mt76.mutex);
++
++	if (cmd == SET_KEY)
++		*wcid_keyidx = idx;
++	else if (idx == *wcid_keyidx)
++		*wcid_keyidx = -1;
++	else
++		goto out;
++
+ 	mt76_wcid_key_setup(&dev->mt76, wcid,
+ 			    cmd == SET_KEY ? key : NULL);
+ 
+-	return mt7915_mcu_add_key(dev, vif, msta, key, cmd);
++	err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
++
++out:
++	mutex_unlock(&dev->mt76.mutex);
++
++	return err;
+ }
+ 
+ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index 443cb09ae7cbd..f069a5a03e145 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -1198,6 +1198,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
+ 
+ 	wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ 					     &skb);
++	if (IS_ERR(wtbl_hdr))
++		return PTR_ERR(wtbl_hdr);
++
+ 	mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
+ 
+ 	ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+@@ -1714,6 +1717,9 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
+ 		return -ENOMEM;
+ 
+ 	wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
++	if (IS_ERR(wtbl_hdr))
++		return PTR_ERR(wtbl_hdr);
++
+ 	mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
+ 
+ 	return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
+@@ -1738,6 +1744,9 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ 
+ 	wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ 					     &skb);
++	if (IS_ERR(wtbl_hdr))
++		return PTR_ERR(wtbl_hdr);
++
+ 	mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ 
+ 	return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+@@ -2263,6 +2272,9 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ 
+ 	wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
+ 					     sta_wtbl, &skb);
++	if (IS_ERR(wtbl_hdr))
++		return PTR_ERR(wtbl_hdr);
++
+ 	if (enable) {
+ 		mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
+ 		mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
+@@ -2752,21 +2764,8 @@ out:
+ 
+ static int mt7915_load_firmware(struct mt7915_dev *dev)
+ {
++	u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
+ 	int ret;
+-	u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
+-
+-	val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD);
+-
+-	if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) {
+-		/* restart firmware once */
+-		__mt76_mcu_restart(&dev->mt76);
+-		if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+-				    val, 1000)) {
+-			dev_err(dev->mt76.dev,
+-				"Firmware is not ready for download\n");
+-			return -EIO;
+-		}
+-	}
+ 
+ 	ret = mt7915_load_patch(dev);
+ 	if (ret)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+index ed0c9a24bb53d..dfb8880657bf6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+@@ -4,6 +4,11 @@
+ #ifndef __MT7915_REGS_H
+ #define __MT7915_REGS_H
+ 
++/* MCU WFDMA0 */
++#define MT_MCU_WFDMA0_BASE		0x2000
++#define MT_MCU_WFDMA0(ofs)		(MT_MCU_WFDMA0_BASE + (ofs))
++#define MT_MCU_WFDMA0_DUMMY_CR		MT_MCU_WFDMA0(0x120)
++
+ /* MCU WFDMA1 */
+ #define MT_MCU_WFDMA1_BASE		0x3000
+ #define MT_MCU_WFDMA1(ofs)		(MT_MCU_WFDMA1_BASE + (ofs))
+@@ -396,6 +401,14 @@
+ #define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1	BIT(1)
+ #define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO	BIT(2)
+ 
++#define MT_TOP_RGU_BASE				0xf0000
++#define MT_TOP_PWR_CTRL				(MT_TOP_RGU_BASE + (0x0))
++#define MT_TOP_PWR_KEY				(0x5746 << 16)
++#define MT_TOP_PWR_SW_RST			BIT(0)
++#define MT_TOP_PWR_SW_PWR_ON			GENMASK(3, 2)
++#define MT_TOP_PWR_HW_CTRL			BIT(4)
++#define MT_TOP_PWR_PWR_ON			BIT(7)
++
+ #define MT_INFRA_CFG_BASE		0xf1000
+ #define MT_INFRA(ofs)			(MT_INFRA_CFG_BASE + (ofs))
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index cd9fd0e24e3e6..ada943c7a9500 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -413,7 +413,8 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv :
+ 				  &mvif->sta;
+ 	struct mt76_wcid *wcid = &msta->wcid;
+-	int idx = key->keyidx;
++	u8 *wcid_keyidx = &wcid->hw_key_idx;
++	int idx = key->keyidx, err = 0;
+ 
+ 	/* The hardware does not support per-STA RX GTK, fallback
+ 	 * to software mode for these.
+@@ -429,6 +430,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	switch (key->cipher) {
+ 	case WLAN_CIPHER_SUITE_AES_CMAC:
+ 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
++		wcid_keyidx = &wcid->hw_key_idx2;
+ 		break;
+ 	case WLAN_CIPHER_SUITE_TKIP:
+ 	case WLAN_CIPHER_SUITE_CCMP:
+@@ -443,16 +445,23 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	if (cmd == SET_KEY) {
+-		key->hw_key_idx = wcid->idx;
+-		wcid->hw_key_idx = idx;
+-	} else if (idx == wcid->hw_key_idx) {
+-		wcid->hw_key_idx = -1;
+-	}
++	mt7921_mutex_acquire(dev);
++
++	if (cmd == SET_KEY)
++		*wcid_keyidx = idx;
++	else if (idx == *wcid_keyidx)
++		*wcid_keyidx = -1;
++	else
++		goto out;
++
+ 	mt76_wcid_key_setup(&dev->mt76, wcid,
+ 			    cmd == SET_KEY ? key : NULL);
+ 
+-	return mt7921_mcu_add_key(dev, vif, msta, key, cmd);
++	err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
++out:
++	mt7921_mutex_release(dev);
++
++	return err;
+ }
+ 
+ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
+index 1b205e7d97a81..37f40039e4caf 100644
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
+@@ -575,7 +575,6 @@ static int wilc_mac_open(struct net_device *ndev)
+ {
+ 	struct wilc_vif *vif = netdev_priv(ndev);
+ 	struct wilc *wl = vif->wilc;
+-	unsigned char mac_add[ETH_ALEN] = {0};
+ 	int ret = 0;
+ 	struct mgmt_frame_regs mgmt_regs = {};
+ 
+@@ -598,9 +597,12 @@ static int wilc_mac_open(struct net_device *ndev)
+ 
+ 	wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
+ 				vif->idx);
+-	wilc_get_mac_address(vif, mac_add);
+-	netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
+-	ether_addr_copy(ndev->dev_addr, mac_add);
++
++	if (is_valid_ether_addr(ndev->dev_addr))
++		wilc_set_mac_address(vif, ndev->dev_addr);
++	else
++		wilc_get_mac_address(vif, ndev->dev_addr);
++	netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
+ 
+ 	if (!is_valid_ether_addr(ndev->dev_addr)) {
+ 		netdev_err(ndev, "Wrong MAC address\n");
+@@ -639,7 +641,14 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
+ 	int srcu_idx;
+ 
+ 	if (!is_valid_ether_addr(addr->sa_data))
+-		return -EINVAL;
++		return -EADDRNOTAVAIL;
++
++	if (!vif->mac_opened) {
++		eth_commit_mac_addr_change(dev, p);
++		return 0;
++	}
++
++	/* Verify the MAC address is not already in use */
+ 
+ 	srcu_idx = srcu_read_lock(&wilc->srcu);
+ 	list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
+@@ -647,7 +656,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
+ 		if (ether_addr_equal(addr->sa_data, mac_addr)) {
+ 			if (vif != tmp_vif) {
+ 				srcu_read_unlock(&wilc->srcu, srcu_idx);
+-				return -EINVAL;
++				return -EADDRNOTAVAIL;
+ 			}
+ 			srcu_read_unlock(&wilc->srcu, srcu_idx);
+ 			return 0;
+@@ -659,9 +668,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
+ 	if (result)
+ 		return result;
+ 
+-	ether_addr_copy(vif->bssid, addr->sa_data);
+-	ether_addr_copy(vif->ndev->dev_addr, addr->sa_data);
+-
++	eth_commit_mac_addr_change(dev, p);
+ 	return result;
+ }
+ 
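wilc1000's MAC-change path now distinguishes an unusable address (-EADDRNOTAVAIL) from other errors, commits the change through the stock eth_commit_mac_addr_change() helper instead of hand-copying into dev_addr (the copy into vif->bssid is dropped entirely), and skips the duplicate scan when the interface has not been opened yet. The early part of the flow, as a sketch:

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    if (!vif->mac_opened) {
        eth_commit_mac_addr_change(dev, p);  /* no firmware to tell yet */
        return 0;
    }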
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
+index c775c177933b2..8dc80574d08d9 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
+@@ -570,8 +570,10 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
+ 		return 0;
+ 
+ 	if (ev->ssid_len) {
+-		memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len);
+-		auth.ssid.ssid_len = ev->ssid_len;
++		int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN);
++
++		memcpy(auth.ssid.ssid, ev->ssid, len);
++		auth.ssid.ssid_len = len;
+ 	}
+ 
+ 	auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
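The qtnfmac event carries an SSID length straight from the wire, so it is now clamped to the destination buffer before the copy; a firmware-supplied ev->ssid_len larger than IEEE80211_MAX_SSID_LEN can no longer overrun auth.ssid.ssid. The guard pattern as used here:

    int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN);

    memcpy(auth.ssid.ssid, ev->ssid, len);
    auth.ssid.ssid_len = len;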
+diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
+index 35afea91fd299..92b9cf1f95252 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.h
++++ b/drivers/net/wireless/realtek/rtw88/main.h
+@@ -1166,6 +1166,7 @@ struct rtw_chip_info {
+ 	bool en_dis_dpd;
+ 	u16 dpd_ratemask;
+ 	u8 iqk_threshold;
++	u8 lck_threshold;
+ 	const struct rtw_pwr_track_tbl *pwr_track_tbl;
+ 
+ 	u8 bfer_su_max_num;
+@@ -1534,6 +1535,7 @@ struct rtw_dm_info {
+ 	u32 rrsr_mask_min;
+ 	u8 thermal_avg[RTW_RF_PATH_MAX];
+ 	u8 thermal_meter_k;
++	u8 thermal_meter_lck;
+ 	s8 delta_power_index[RTW_RF_PATH_MAX];
+ 	s8 delta_power_index_last[RTW_RF_PATH_MAX];
+ 	u8 default_ofdm_index;
+diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
+index 0b3da5bef7036..21e77fcfa4d53 100644
+--- a/drivers/net/wireless/realtek/rtw88/phy.c
++++ b/drivers/net/wireless/realtek/rtw88/phy.c
+@@ -2220,6 +2220,20 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
+ }
+ EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx);
+ 
++bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev)
++{
++	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
++	u8 delta_lck;
++
++	delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck);
++	if (delta_lck >= rtwdev->chip->lck_threshold) {
++		dm_info->thermal_meter_lck = dm_info->thermal_avg[0];
++		return true;
++	}
++	return false;
++}
++EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck);
++
+ bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
+ {
+ 	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
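rtw_phy_pwrtrack_need_lck() mirrors the existing IQK trigger just below it: fire when thermal drift since the last calibration exceeds the chip's lck_threshold (set to 8 for rtw8822c later in this patch), and latch the new thermal baseline when it does. The rtw8822c power-tracking path consumes it as:

    if (rtw_phy_pwrtrack_need_lck(rtwdev))
        rtw8822c_do_lck(rtwdev);    /* re-run the synthesizer LC calibration */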
+diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
+index a4fcfb8785504..a0742a69446de 100644
+--- a/drivers/net/wireless/realtek/rtw88/phy.h
++++ b/drivers/net/wireless/realtek/rtw88/phy.h
+@@ -55,6 +55,7 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path);
+ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
+ 			       struct rtw_swing_table *swing_table,
+ 			       u8 tbl_path, u8 therm_path, u8 delta);
++bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev);
+ bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
+ void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
+ 				struct rtw_swing_table *swing_table);
+diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
+index ea518aa78552e..819af34dac34b 100644
+--- a/drivers/net/wireless/realtek/rtw88/reg.h
++++ b/drivers/net/wireless/realtek/rtw88/reg.h
+@@ -652,8 +652,13 @@
+ #define RF_TXATANK	0x64
+ #define RF_TRXIQ	0x66
+ #define RF_RXIQGEN	0x8d
++#define RF_SYN_PFD	0xb0
+ #define RF_XTALX2	0xb8
++#define RF_SYN_CTRL	0xbb
+ #define RF_MALSEL	0xbe
++#define RF_SYN_AAC	0xc9
++#define RF_AAC_CTRL	0xca
++#define RF_FAST_LCK	0xcc
+ #define RF_RCKD		0xde
+ #define RF_TXADBG	0xde
+ #define RF_LUTDBG	0xdf
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+index dd560c28abb2f..448922cb2e63d 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+@@ -1126,6 +1126,7 @@ static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
+ 
+ 	dm_info->pwr_trk_triggered = false;
+ 	dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
++	dm_info->thermal_meter_lck = rtwdev->efuse.thermal_meter_k;
+ }
+ 
+ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
+@@ -2108,6 +2109,26 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
+ 	rtw_write32_set(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN);
+ }
+ 
++static void rtw8822c_do_lck(struct rtw_dev *rtwdev)
++{
++	u32 val;
++
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_CTRL, RFREG_MASK, 0x80010);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0FA);
++	fsleep(1);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_AAC_CTRL, RFREG_MASK, 0x80000);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_AAC, RFREG_MASK, 0x80001);
++	read_poll_timeout(rtw_read_rf, val, val != 0x1, 1000, 100000,
++			  true, rtwdev, RF_PATH_A, RF_AAC_CTRL, 0x1000);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0F8);
++	rtw_write_rf(rtwdev, RF_PATH_B, RF_SYN_CTRL, RFREG_MASK, 0x80010);
++
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x4f000);
++	fsleep(1);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
++}
++
+ static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
+ {
+ 	struct rtw_iqk_para para = {0};
+@@ -3538,11 +3559,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+ 
+ 	rtw_phy_config_swing_table(rtwdev, &swing_table);
+ 
++	if (rtw_phy_pwrtrack_need_lck(rtwdev))
++		rtw8822c_do_lck(rtwdev);
++
+ 	for (i = 0; i < rtwdev->hal.rf_path_num; i++)
+ 		rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
+ 
+-	if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+-		rtw8822c_do_iqk(rtwdev);
+ }
+ 
+ static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+@@ -4351,6 +4373,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
+ 	.dpd_ratemask = DIS_DPD_RATEALL,
+ 	.pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl,
+ 	.iqk_threshold = 8,
++	.lck_threshold = 8,
+ 	.bfer_su_max_num = 2,
+ 	.bfer_mu_max_num = 1,
+ 	.rx_ldpc = true,
+diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
+index e98e04ee9a2c0..59b7b93c59636 100644
+--- a/drivers/net/wireless/wl3501.h
++++ b/drivers/net/wireless/wl3501.h
+@@ -379,16 +379,7 @@ struct wl3501_get_confirm {
+ 	u8	mib_value[100];
+ };
+ 
+-struct wl3501_join_req {
+-	u16			    next_blk;
+-	u8			    sig_id;
+-	u8			    reserved;
+-	struct iw_mgmt_data_rset    operational_rset;
+-	u16			    reserved2;
+-	u16			    timeout;
+-	u16			    probe_delay;
+-	u8			    timestamp[8];
+-	u8			    local_time[8];
++struct wl3501_req {
+ 	u16			    beacon_period;
+ 	u16			    dtim_period;
+ 	u16			    cap_info;
+@@ -401,6 +392,19 @@ struct wl3501_join_req {
+ 	struct iw_mgmt_data_rset    bss_basic_rset;
+ };
+ 
++struct wl3501_join_req {
++	u16			    next_blk;
++	u8			    sig_id;
++	u8			    reserved;
++	struct iw_mgmt_data_rset    operational_rset;
++	u16			    reserved2;
++	u16			    timeout;
++	u16			    probe_delay;
++	u8			    timestamp[8];
++	u8			    local_time[8];
++	struct wl3501_req	    req;
++};
++
+ struct wl3501_join_confirm {
+ 	u16	next_blk;
+ 	u8	sig_id;
+@@ -443,16 +447,7 @@ struct wl3501_scan_confirm {
+ 	u16			    status;
+ 	char			    timestamp[8];
+ 	char			    localtime[8];
+-	u16			    beacon_period;
+-	u16			    dtim_period;
+-	u16			    cap_info;
+-	u8			    bss_type;
+-	u8			    bssid[ETH_ALEN];
+-	struct iw_mgmt_essid_pset   ssid;
+-	struct iw_mgmt_ds_pset	    ds_pset;
+-	struct iw_mgmt_cf_pset	    cf_pset;
+-	struct iw_mgmt_ibss_pset    ibss_pset;
+-	struct iw_mgmt_data_rset    bss_basic_rset;
++	struct wl3501_req	    req;
+ 	u8			    rssi;
+ };
+ 
+@@ -471,8 +466,10 @@ struct wl3501_md_req {
+ 	u16	size;
+ 	u8	pri;
+ 	u8	service_class;
+-	u8	daddr[ETH_ALEN];
+-	u8	saddr[ETH_ALEN];
++	struct {
++		u8	daddr[ETH_ALEN];
++		u8	saddr[ETH_ALEN];
++	} addr;
+ };
+ 
+ struct wl3501_md_ind {
+@@ -484,8 +481,10 @@ struct wl3501_md_ind {
+ 	u8	reception;
+ 	u8	pri;
+ 	u8	service_class;
+-	u8	daddr[ETH_ALEN];
+-	u8	saddr[ETH_ALEN];
++	struct {
++		u8	daddr[ETH_ALEN];
++		u8	saddr[ETH_ALEN];
++	} addr;
+ };
+ 
+ struct wl3501_md_confirm {
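The header refactor above is the core of the wl3501 fix: the fields that scan confirm, join request and the cached bss_set entries used to exchange via hard-coded byte counts are pulled into shared structs (wl3501_req, plus an addr pair for the MD signals), so every copy in wl3501_cs.c below becomes sizeof()-bounded. Before/after in miniature:

    /* before: offset-and-length guesswork across struct boundaries */
    memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);

    /* after: the compiler knows the extent being copied */
    memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));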
+diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
+index 8ca5789c7b378..672f5d5f3f2c7 100644
+--- a/drivers/net/wireless/wl3501_cs.c
++++ b/drivers/net/wireless/wl3501_cs.c
+@@ -469,6 +469,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
+ 	struct wl3501_md_req sig = {
+ 		.sig_id = WL3501_SIG_MD_REQ,
+ 	};
++	size_t sig_addr_len = sizeof(sig.addr);
+ 	u8 *pdata = (char *)data;
+ 	int rc = -EIO;
+ 
+@@ -484,9 +485,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
+ 			goto out;
+ 		}
+ 		rc = 0;
+-		memcpy(&sig.daddr[0], pdata, 12);
+-		pktlen = len - 12;
+-		pdata += 12;
++		memcpy(&sig.addr, pdata, sig_addr_len);
++		pktlen = len - sig_addr_len;
++		pdata += sig_addr_len;
+ 		sig.data = bf;
+ 		if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
+ 			u8 addr4[ETH_ALEN] = {
+@@ -589,7 +590,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
+ 	struct wl3501_join_req sig = {
+ 		.sig_id		  = WL3501_SIG_JOIN_REQ,
+ 		.timeout	  = 10,
+-		.ds_pset = {
++		.req.ds_pset = {
+ 			.el = {
+ 				.id  = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
+ 				.len = 1,
+@@ -598,7 +599,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
+ 		},
+ 	};
+ 
+-	memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
++	memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
+ 	return wl3501_esbq_exec(this, &sig, sizeof(sig));
+ }
+ 
+@@ -666,35 +667,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
+ 	if (sig.status == WL3501_STATUS_SUCCESS) {
+ 		pr_debug("success");
+ 		if ((this->net_type == IW_MODE_INFRA &&
+-		     (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
++		     (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
+ 		    (this->net_type == IW_MODE_ADHOC &&
+-		     (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
++		     (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
+ 		    this->net_type == IW_MODE_AUTO) {
+ 			if (!this->essid.el.len)
+ 				matchflag = 1;
+ 			else if (this->essid.el.len == 3 &&
+ 				 !memcmp(this->essid.essid, "ANY", 3))
+ 				matchflag = 1;
+-			else if (this->essid.el.len != sig.ssid.el.len)
++			else if (this->essid.el.len != sig.req.ssid.el.len)
+ 				matchflag = 0;
+-			else if (memcmp(this->essid.essid, sig.ssid.essid,
++			else if (memcmp(this->essid.essid, sig.req.ssid.essid,
+ 					this->essid.el.len))
+ 				matchflag = 0;
+ 			else
+ 				matchflag = 1;
+ 			if (matchflag) {
+ 				for (i = 0; i < this->bss_cnt; i++) {
+-					if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
++					if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
++								       sig.req.bssid)) {
+ 						matchflag = 0;
+ 						break;
+ 					}
+ 				}
+ 			}
+ 			if (matchflag && (i < 20)) {
+-				memcpy(&this->bss_set[i].beacon_period,
+-				       &sig.beacon_period, 73);
++				memcpy(&this->bss_set[i].req,
++				       &sig.req, sizeof(sig.req));
+ 				this->bss_cnt++;
+ 				this->rssi = sig.rssi;
++				this->bss_set[i].rssi = sig.rssi;
+ 			}
+ 		}
+ 	} else if (sig.status == WL3501_STATUS_TIMEOUT) {
+@@ -886,19 +889,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
+ 			if (this->join_sta_bss < this->bss_cnt) {
+ 				const int i = this->join_sta_bss;
+ 				memcpy(this->bssid,
+-				       this->bss_set[i].bssid, ETH_ALEN);
+-				this->chan = this->bss_set[i].ds_pset.chan;
++				       this->bss_set[i].req.bssid, ETH_ALEN);
++				this->chan = this->bss_set[i].req.ds_pset.chan;
+ 				iw_copy_mgmt_info_element(&this->keep_essid.el,
+-						     &this->bss_set[i].ssid.el);
++						     &this->bss_set[i].req.ssid.el);
+ 				wl3501_mgmt_auth(this);
+ 			}
+ 		} else {
+ 			const int i = this->join_sta_bss;
+ 
+-			memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
+-			this->chan = this->bss_set[i].ds_pset.chan;
++			memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
++			this->chan = this->bss_set[i].req.ds_pset.chan;
+ 			iw_copy_mgmt_info_element(&this->keep_essid.el,
+-						  &this->bss_set[i].ssid.el);
++						  &this->bss_set[i].req.ssid.el);
+ 			wl3501_online(dev);
+ 		}
+ 	} else {
+@@ -980,7 +983,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
+ 	} else {
+ 		skb->dev = dev;
+ 		skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
+-		skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
++		skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
++					sizeof(sig.addr));
+ 		wl3501_receive(this, skb->data, pkt_len);
+ 		skb_put(skb, pkt_len);
+ 		skb->protocol	= eth_type_trans(skb, dev);
+@@ -1571,30 +1575,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
+ 	for (i = 0; i < this->bss_cnt; ++i) {
+ 		iwe.cmd			= SIOCGIWAP;
+ 		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+-		memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
++		memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
+ 		current_ev = iwe_stream_add_event(info, current_ev,
+ 						  extra + IW_SCAN_MAX_DATA,
+ 						  &iwe, IW_EV_ADDR_LEN);
+ 		iwe.cmd		  = SIOCGIWESSID;
+ 		iwe.u.data.flags  = 1;
+-		iwe.u.data.length = this->bss_set[i].ssid.el.len;
++		iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
+ 		current_ev = iwe_stream_add_point(info, current_ev,
+ 						  extra + IW_SCAN_MAX_DATA,
+ 						  &iwe,
+-						  this->bss_set[i].ssid.essid);
++						  this->bss_set[i].req.ssid.essid);
+ 		iwe.cmd	   = SIOCGIWMODE;
+-		iwe.u.mode = this->bss_set[i].bss_type;
++		iwe.u.mode = this->bss_set[i].req.bss_type;
+ 		current_ev = iwe_stream_add_event(info, current_ev,
+ 						  extra + IW_SCAN_MAX_DATA,
+ 						  &iwe, IW_EV_UINT_LEN);
+ 		iwe.cmd = SIOCGIWFREQ;
+-		iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
++		iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
+ 		iwe.u.freq.e = 0;
+ 		current_ev = iwe_stream_add_event(info, current_ev,
+ 						  extra + IW_SCAN_MAX_DATA,
+ 						  &iwe, IW_EV_FREQ_LEN);
+ 		iwe.cmd = SIOCGIWENCODE;
+-		if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
++		if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
+ 			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ 		else
+ 			iwe.u.data.flags = IW_ENCODE_DISABLED;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 0896e21642beb..d5d7e0cdd78d8 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2681,7 +2681,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
+ 
+ 	if (ctrl->ps_max_latency_us != latency) {
+ 		ctrl->ps_max_latency_us = latency;
+-		nvme_configure_apst(ctrl);
++		if (ctrl->state == NVME_CTRL_LIVE)
++			nvme_configure_apst(ctrl);
+ 	}
+ }
+ 
+diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
+index 9a8b3726a37c4..429263ca9b978 100644
+--- a/drivers/nvme/target/io-cmd-bdev.c
++++ b/drivers/nvme/target/io-cmd-bdev.c
+@@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
+ 
+ 	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+ 
+-	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
++	if (nvmet_use_inline_bvec(req)) {
+ 		bio = &req->b.inline_bio;
+ 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ 	} else {
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 4b84edb49f22c..5aad34b106dc2 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -614,4 +614,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+ 	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+ }
+ 
++static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
++{
++	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
++	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
++}
++
+ #endif /* _NVMET_H */
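nvmet_use_inline_bvec() centralizes a decision that used to be made on transfer length alone; a request whose sg_cnt exceeds NVMET_MAX_INLINE_BIOVEC would otherwise hand bio_init() more vectors than the inline array holds. The bdev and passthru call sites below now share it, in this shape:

    if (nvmet_use_inline_bvec(req)) {
        bio = &req->b.inline_bio;    /* preallocated, cannot fail */
        bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
    } else {
        /* fall back to a heap-allocated bio sized for req->sg_cnt */
    }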
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index 2798944899b73..39b1473f7204e 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
+ 	if (req->sg_cnt > BIO_MAX_VECS)
+ 		return -EINVAL;
+ 
+-	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
++	if (nvmet_use_inline_bvec(req)) {
+ 		bio = &req->p.inline_bio;
+ 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ 	} else {
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 6c1f3ab7649c7..7d607f435e366 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+ 	struct nvmet_rdma_rsp *rsp =
+ 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+-	struct nvmet_rdma_queue *queue = cq->cq_context;
++	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ 
+ 	nvmet_rdma_release_rsp(rsp);
+ 
+@@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+ 	struct nvmet_rdma_rsp *rsp =
+ 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
+-	struct nvmet_rdma_queue *queue = cq->cq_context;
++	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ 	u16 status;
+ 
+diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
+index e330e6811f0b6..08bc788d9422c 100644
+--- a/drivers/pci/controller/pcie-brcmstb.c
++++ b/drivers/pci/controller/pcie-brcmstb.c
+@@ -1148,6 +1148,7 @@ static int brcm_pcie_suspend(struct device *dev)
+ 
+ 	brcm_pcie_turn_off(pcie);
+ 	ret = brcm_phy_stop(pcie);
++	reset_control_rearm(pcie->rescal);
+ 	clk_disable_unprepare(pcie->clk);
+ 
+ 	return ret;
+@@ -1163,9 +1164,13 @@ static int brcm_pcie_resume(struct device *dev)
+ 	base = pcie->base;
+ 	clk_prepare_enable(pcie->clk);
+ 
++	ret = reset_control_reset(pcie->rescal);
++	if (ret)
++		goto err_disable_clk;
++
+ 	ret = brcm_phy_start(pcie);
+ 	if (ret)
+-		goto err;
++		goto err_reset;
+ 
+ 	/* Take bridge out of reset so we can access the SERDES reg */
+ 	pcie->bridge_sw_init_set(pcie, 0);
+@@ -1180,14 +1185,16 @@ static int brcm_pcie_resume(struct device *dev)
+ 
+ 	ret = brcm_pcie_setup(pcie);
+ 	if (ret)
+-		goto err;
++		goto err_reset;
+ 
+ 	if (pcie->msi)
+ 		brcm_msi_set_regs(pcie->msi);
+ 
+ 	return 0;
+ 
+-err:
++err_reset:
++	reset_control_rearm(pcie->rescal);
++err_disable_clk:
+ 	clk_disable_unprepare(pcie->clk);
+ 	return ret;
+ }
+@@ -1197,7 +1204,7 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
+ 	brcm_msi_remove(pcie);
+ 	brcm_pcie_turn_off(pcie);
+ 	brcm_phy_stop(pcie);
+-	reset_control_assert(pcie->rescal);
++	reset_control_rearm(pcie->rescal);
+ 	clk_disable_unprepare(pcie->clk);
+ }
+ 
+@@ -1278,13 +1285,13 @@ static int brcm_pcie_probe(struct platform_device *pdev)
+ 		return PTR_ERR(pcie->perst_reset);
+ 	}
+ 
+-	ret = reset_control_deassert(pcie->rescal);
++	ret = reset_control_reset(pcie->rescal);
+ 	if (ret)
+ 		dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+ 
+ 	ret = brcm_phy_start(pcie);
+ 	if (ret) {
+-		reset_control_assert(pcie->rescal);
++		reset_control_rearm(pcie->rescal);
+ 		clk_disable_unprepare(pcie->clk);
+ 		return ret;
+ 	}
+@@ -1296,6 +1303,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
+ 	pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
+ 	if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ 		dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
++		ret = -ENODEV;
+ 		goto fail;
+ 	}
+ 
+diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
+index 908475d27e0e7..eede4e8f3f75a 100644
+--- a/drivers/pci/controller/pcie-iproc-msi.c
++++ b/drivers/pci/controller/pcie-iproc-msi.c
+@@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
+ 				    NULL, NULL);
+ 	}
+ 
+-	return hwirq;
++	return 0;
+ }
+ 
+ static void iproc_msi_irq_domain_free(struct irq_domain *domain,
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index c0ac4e9cbe72f..f9760e73d568c 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -833,15 +833,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
+ 		return -EINVAL;
+ 
+ 	epc_features = pci_epc_get_features(epc, epf->func_no);
+-	if (epc_features) {
+-		linkup_notifier = epc_features->linkup_notifier;
+-		core_init_notifier = epc_features->core_init_notifier;
+-		test_reg_bar = pci_epc_get_first_free_bar(epc_features);
+-		if (test_reg_bar < 0)
+-			return -EINVAL;
+-		pci_epf_configure_bar(epf, epc_features);
++	if (!epc_features) {
++		dev_err(&epf->dev, "epc_features not implemented\n");
++		return -EOPNOTSUPP;
+ 	}
+ 
++	linkup_notifier = epc_features->linkup_notifier;
++	core_init_notifier = epc_features->core_init_notifier;
++	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
++	if (test_reg_bar < 0)
++		return -EINVAL;
++	pci_epf_configure_bar(epf, epc_features);
++
+ 	epf_test->test_reg_bar = test_reg_bar;
+ 	epf_test->epc_features = epc_features;
+ 
+@@ -922,6 +925,7 @@ static int __init pci_epf_test_init(void)
+ 
+ 	ret = pci_epf_register_driver(&test_driver);
+ 	if (ret) {
++		destroy_workqueue(kpcitest_workqueue);
+ 		pr_err("Failed to register pci epf test driver --> %d\n", ret);
+ 		return ret;
+ 	}
+@@ -932,6 +936,8 @@ module_init(pci_epf_test_init);
+ 
+ static void __exit pci_epf_test_exit(void)
+ {
++	if (kpcitest_workqueue)
++		destroy_workqueue(kpcitest_workqueue);
+ 	pci_epf_unregister_driver(&test_driver);
+ }
+ module_exit(pci_epf_test_exit);
+diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c
+index 2c5c552994e4c..d0bcd141ac9c6 100644
+--- a/drivers/pci/pcie/rcec.c
++++ b/drivers/pci/pcie/rcec.c
+@@ -32,7 +32,7 @@ static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep)
+ 
+ 	/* Same bus, so check bitmap */
+ 	for_each_set_bit(devn, &bitmap, 32)
+-		if (devn == rciep->devfn)
++		if (devn == PCI_SLOT(rciep->devfn))
+ 			return true;
+ 
+ 	return false;
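The rcec one-liner fixes a unit mismatch: the association bitmap is indexed by device number, while devfn also packs the function number into its low three bits, so comparing the raw devfn against a bitmap bit almost never matched. The kernel's accessors, from include/uapi/linux/pci.h:

    #define PCI_SLOT(devfn)  (((devfn) >> 3) & 0x1f)  /* device: bits 7:3 */
    #define PCI_FUNC(devfn)  ((devfn) & 0x07)         /* function: bits 2:0 */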
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 953f15abc850a..be51670572fa6 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -2353,6 +2353,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
+ 	pci_set_of_node(dev);
+ 
+ 	if (pci_setup_device(dev)) {
++		pci_release_of_node(dev);
+ 		pci_bus_put(dev->bus);
+ 		kfree(dev);
+ 		return NULL;
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
+index 0cd7f33cdf257..2b99f4130e1e5 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
+@@ -55,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
+ 	struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+ 	unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
+-	unsigned long mask;
++	unsigned int mask;
+ 	unsigned long flags;
+ 
+ 	raw_spin_lock_irqsave(&bank->slock, flags);
+@@ -83,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
+ 	struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+ 	unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
+-	unsigned long mask;
++	unsigned int mask;
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -483,7 +483,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
+ 	chained_irq_exit(chip, desc);
+ }
+ 
+-static inline void exynos_irq_demux_eint(unsigned long pend,
++static inline void exynos_irq_demux_eint(unsigned int pend,
+ 						struct irq_domain *domain)
+ {
+ 	unsigned int irq;
+@@ -500,8 +500,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
+ {
+ 	struct irq_chip *chip = irq_desc_get_chip(desc);
+ 	struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
+-	unsigned long pend;
+-	unsigned long mask;
++	unsigned int pend;
++	unsigned int mask;
+ 	int i;
+ 
+ 	chained_irq_enter(chip, desc);
+diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
+index 5813339b597b9..3292158157b68 100644
+--- a/drivers/pwm/pwm-atmel.c
++++ b/drivers/pwm/pwm-atmel.c
+@@ -319,7 +319,7 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 		cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
+ 					  atmel_pwm->data->regs.duty);
+-		tmp = (u64)cdty * NSEC_PER_SEC;
++		tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
+ 		tmp <<= pres;
+ 		state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
+ 
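The atmel PWM readback was inverted: judging by this fix, CDTY holds the ticks spent at the inactive level, so the active duty is the remainder of the period. The corrected readback, annotated:

    /* assumption (inferred from this fix): CDTY counts inactive ticks */
    tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;   /* active ticks, not cdty */
    tmp <<= pres;                              /* undo the clock prescaler */
    state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);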
+diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
+index dcb380e868dfd..549ed3fed6259 100644
+--- a/drivers/remoteproc/pru_rproc.c
++++ b/drivers/remoteproc/pru_rproc.c
+@@ -266,12 +266,17 @@ static void pru_rproc_create_debug_entries(struct rproc *rproc)
+ 
+ static void pru_dispose_irq_mapping(struct pru_rproc *pru)
+ {
+-	while (pru->evt_count--) {
++	if (!pru->mapped_irq)
++		return;
++
++	while (pru->evt_count) {
++		pru->evt_count--;
+ 		if (pru->mapped_irq[pru->evt_count] > 0)
+ 			irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
+ 	}
+ 
+ 	kfree(pru->mapped_irq);
++	pru->mapped_irq = NULL;
+ }
+ 
+ /*
+@@ -284,7 +289,7 @@ static int pru_handle_intrmap(struct rproc *rproc)
+ 	struct pru_rproc *pru = rproc->priv;
+ 	struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
+ 	struct irq_fwspec fwspec;
+-	struct device_node *irq_parent;
++	struct device_node *parent, *irq_parent;
+ 	int i, ret = 0;
+ 
+ 	/* not having pru_interrupt_map is not an error */
+@@ -307,16 +312,31 @@ static int pru_handle_intrmap(struct rproc *rproc)
+ 	pru->evt_count = rsc->num_evts;
+ 	pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
+ 				  GFP_KERNEL);
+-	if (!pru->mapped_irq)
++	if (!pru->mapped_irq) {
++		pru->evt_count = 0;
+ 		return -ENOMEM;
++	}
+ 
+ 	/*
+ 	 * parse and fill in system event to interrupt channel and
+-	 * channel-to-host mapping
++	 * channel-to-host mapping. The interrupt controller to be used
++	 * for these mappings for a given PRU remoteproc is always its
++	 * corresponding sibling PRUSS INTC node.
+ 	 */
+-	irq_parent = of_irq_find_parent(pru->dev->of_node);
++	parent = of_get_parent(dev_of_node(pru->dev));
++	if (!parent) {
++		kfree(pru->mapped_irq);
++		pru->mapped_irq = NULL;
++		pru->evt_count = 0;
++		return -ENODEV;
++	}
++
++	irq_parent = of_get_child_by_name(parent, "interrupt-controller");
++	of_node_put(parent);
+ 	if (!irq_parent) {
+ 		kfree(pru->mapped_irq);
++		pru->mapped_irq = NULL;
++		pru->evt_count = 0;
+ 		return -ENODEV;
+ 	}
+ 
+@@ -332,16 +352,20 @@ static int pru_handle_intrmap(struct rproc *rproc)
+ 
+ 		pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
+ 		if (!pru->mapped_irq[i]) {
+-			dev_err(dev, "failed to get virq\n");
+-			ret = pru->mapped_irq[i];
++			dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
++				i, fwspec.param[0], fwspec.param[1],
++				fwspec.param[2]);
++			ret = -EINVAL;
+ 			goto map_fail;
+ 		}
+ 	}
++	of_node_put(irq_parent);
+ 
+ 	return ret;
+ 
+ map_fail:
+ 	pru_dispose_irq_mapping(pru);
++	of_node_put(irq_parent);
+ 
+ 	return ret;
+ }
+@@ -387,8 +411,7 @@ static int pru_rproc_stop(struct rproc *rproc)
+ 	pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
+ 
+ 	/* dispose irq mapping - new firmware can provide new mapping */
+-	if (pru->mapped_irq)
+-		pru_dispose_irq_mapping(pru);
++	pru_dispose_irq_mapping(pru);
+ 
+ 	return 0;
+ }
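pru_dispose_irq_mapping() is reshaped into an idempotent cleanup: it returns early when there is nothing to undo, counts evt_count down without decrementing past zero, and nulls the pointer so the now-unconditional call from pru_rproc_stop() is safe to repeat. The general shape:

    if (!pru->mapped_irq)                /* already disposed, or never set up */
        return;

    while (pru->evt_count) {
        pru->evt_count--;
        if (pru->mapped_irq[pru->evt_count] > 0)
            irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
    }

    kfree(pru->mapped_irq);
    pru->mapped_irq = NULL;              /* make repeat calls a no-op */

The old 'while (pru->evt_count--)' form also left evt_count decremented one step too far after the loop exited.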
+diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
+index 66106ba25ba30..14e0ce5f18f5f 100644
+--- a/drivers/remoteproc/qcom_q6v5_mss.c
++++ b/drivers/remoteproc/qcom_q6v5_mss.c
+@@ -1210,6 +1210,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
+ 			goto release_firmware;
+ 		}
+ 
++		if (phdr->p_filesz > phdr->p_memsz) {
++			dev_err(qproc->dev,
++				"refusing to load segment %d with p_filesz > p_memsz\n",
++				i);
++			ret = -EINVAL;
++			goto release_firmware;
++		}
++
+ 		ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
+ 		if (!ptr) {
+ 			dev_err(qproc->dev,
+@@ -1241,6 +1249,16 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
+ 				goto release_firmware;
+ 			}
+ 
++			if (seg_fw->size != phdr->p_filesz) {
++				dev_err(qproc->dev,
++					"failed to load segment %d from truncated file %s\n",
++					i, fw_name);
++				ret = -EINVAL;
++				release_firmware(seg_fw);
++				memunmap(ptr);
++				goto release_firmware;
++			}
++
+ 			release_firmware(seg_fw);
+ 		}
+ 
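MPSS segment loading gains two consistency checks, closing paths where a crafted ELF header or a truncated firmware file could drive the copy into the carved-out memory region past what was mapped or actually read. The two guards in isolation:

    /* header sanity: file data must fit within the memory segment */
    if (phdr->p_filesz > phdr->p_memsz)
        return -EINVAL;   /* after cleanup, as in the hunk above */

    /* blob sanity: the file on disk must match the header's claim */
    if (seg_fw->size != phdr->p_filesz)
        return -EINVAL;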
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index 27a05167c18c3..4840886532ff7 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
+ 			dev_err(glink->dev,
+ 				"no intent found for channel %s intent %d",
+ 				channel->name, liid);
++			ret = -ENOENT;
+ 			goto advance_rx;
+ 		}
+ 	}
+diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
+index cd8e438bc9c46..8752620d8e34a 100644
+--- a/drivers/rtc/rtc-ds1307.c
++++ b/drivers/rtc/rtc-ds1307.c
+@@ -296,7 +296,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
+ 	t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
+ 	tmp = regs[DS1307_REG_HOUR] & 0x3f;
+ 	t->tm_hour = bcd2bin(tmp);
+-	t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
++	/* rx8130 stores the weekday as a bit position, not BCD */
++	if (ds1307->type == rx_8130)
++		t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);
++	else
++		t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
+ 	t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
+ 	tmp = regs[DS1307_REG_MONTH] & 0x1f;
+ 	t->tm_mon = bcd2bin(tmp) - 1;
+@@ -343,7 +347,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
+ 	regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
+ 	regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
+ 	regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
+-	regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
++	/* rx8130 stores the weekday as a bit position, not BCD */
++	if (ds1307->type == rx_8130)
++		regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
++	else
++		regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
+ 	regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
+ 	regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
+ 
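On the RX8130 the weekday register is a one-hot field rather than BCD, hence the 1 << tm_wday write and the fls() read above. Worth noting when tracing the round trip: fls() returns the 1-based position of the highest set bit (fls(0x01) == 1), so as written the read path reports one more than the value the write path stored:

    regs[DS1307_REG_WDAY] = 1 << t->tm_wday;         /* write: one-hot */
    t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);  /* read: fls(1 << n) == n + 1 */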
+diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
+index 57cc09d0a8067..c0df49fb978ce 100644
+--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
++++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
+@@ -310,6 +310,7 @@ static const struct of_device_id ftm_rtc_match[] = {
+ 	{ .compatible = "fsl,lx2160a-ftm-alarm", },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, ftm_rtc_match);
+ 
+ static const struct acpi_device_id ftm_imx_acpi_ids[] = {
+ 	{"NXP0014",},
+diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
+index 288abb1abdb8d..bc89c62ccb9b5 100644
+--- a/drivers/rtc/rtc-tps65910.c
++++ b/drivers/rtc/rtc-tps65910.c
+@@ -18,6 +18,7 @@
+ #include <linux/rtc.h>
+ #include <linux/bcd.h>
+ #include <linux/math64.h>
++#include <linux/property.h>
+ #include <linux/platform_device.h>
+ #include <linux/interrupt.h>
+ #include <linux/mfd/tps65910.h>
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index f01f07116bd3e..8cb0574cfa91b 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1194,6 +1194,9 @@ static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+ {
+ 	struct qla_work_evt *e;
+ 
++	if (vha->host->active_mode == MODE_TARGET)
++		return QLA_FUNCTION_FAILED;
++
+ 	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
+ 	if (!e)
+ 		return QLA_FUNCTION_FAILED;
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index d3d05e997c135..0c71a159d08f1 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -8599,7 +8599,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
+ 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
+ 		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+ 		vcc_off = true;
+-		if (!ufshcd_is_link_active(hba)) {
++		if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
+ 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+ 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
+ 		}
+@@ -8621,7 +8621,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
+ 	    !hba->dev_info.is_lu_power_on_wp) {
+ 		ret = ufshcd_setup_vreg(hba, true);
+ 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
+-		if (!ret && !ufshcd_is_link_active(hba)) {
++		if (!ufshcd_is_link_active(hba)) {
+ 			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+ 			if (ret)
+ 				goto vcc_disable;
+@@ -8978,10 +8978,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
+ 	if (!hba->is_powered)
+ 		return 0;
+ 
++	cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
++
+ 	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ 	     hba->curr_dev_pwr_mode) &&
+ 	    (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ 	     hba->uic_link_state) &&
++	     pm_runtime_suspended(hba->dev) &&
+ 	     !hba->dev_info.b_rpm_dev_flush_capable)
+ 		goto out;
+ 
+diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
+index 3e8ee5dabb437..654c717e54671 100644
+--- a/drivers/soc/mediatek/mt8173-pm-domains.h
++++ b/drivers/soc/mediatek/mt8173-pm-domains.h
+@@ -12,24 +12,28 @@
+ 
+ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
+ 	[MT8173_POWER_DOMAIN_VDEC] = {
++		.name = "vdec",
+ 		.sta_mask = PWR_STATUS_VDEC,
+ 		.ctl_offs = SPM_VDE_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_VENC] = {
++		.name = "venc",
+ 		.sta_mask = PWR_STATUS_VENC,
+ 		.ctl_offs = SPM_VEN_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(15, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_ISP] = {
++		.name = "isp",
+ 		.sta_mask = PWR_STATUS_ISP,
+ 		.ctl_offs = SPM_ISP_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(13, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_MM] = {
++		.name = "mm",
+ 		.sta_mask = PWR_STATUS_DISP,
+ 		.ctl_offs = SPM_DIS_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+@@ -40,18 +44,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
+ 		},
+ 	},
+ 	[MT8173_POWER_DOMAIN_VENC_LT] = {
++		.name = "venc_lt",
+ 		.sta_mask = PWR_STATUS_VENC_LT,
+ 		.ctl_offs = SPM_VEN2_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(15, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_AUDIO] = {
++		.name = "audio",
+ 		.sta_mask = PWR_STATUS_AUDIO,
+ 		.ctl_offs = SPM_AUDIO_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(15, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_USB] = {
++		.name = "usb",
+ 		.sta_mask = PWR_STATUS_USB,
+ 		.ctl_offs = SPM_USB_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+@@ -59,18 +66,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
+ 		.caps = MTK_SCPD_ACTIVE_WAKEUP,
+ 	},
+ 	[MT8173_POWER_DOMAIN_MFG_ASYNC] = {
++		.name = "mfg_async",
+ 		.sta_mask = PWR_STATUS_MFG_ASYNC,
+ 		.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = 0,
+ 	},
+ 	[MT8173_POWER_DOMAIN_MFG_2D] = {
++		.name = "mfg_2d",
+ 		.sta_mask = PWR_STATUS_MFG_2D,
+ 		.ctl_offs = SPM_MFG_2D_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(13, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_MFG] = {
++		.name = "mfg",
+ 		.sta_mask = PWR_STATUS_MFG,
+ 		.ctl_offs = SPM_MFG_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(13, 8),
+diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
+index aa5230e6c12f8..98a9940d05fbb 100644
+--- a/drivers/soc/mediatek/mt8183-pm-domains.h
++++ b/drivers/soc/mediatek/mt8183-pm-domains.h
+@@ -12,12 +12,14 @@
+ 
+ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 	[MT8183_POWER_DOMAIN_AUDIO] = {
++		.name = "audio",
+ 		.sta_mask = PWR_STATUS_AUDIO,
+ 		.ctl_offs = 0x0314,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(15, 12),
+ 	},
+ 	[MT8183_POWER_DOMAIN_CONN] = {
++		.name = "conn",
+ 		.sta_mask = PWR_STATUS_CONN,
+ 		.ctl_offs = 0x032c,
+ 		.sram_pdn_bits = 0,
+@@ -28,12 +30,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_MFG_ASYNC] = {
++		.name = "mfg_async",
+ 		.sta_mask = PWR_STATUS_MFG_ASYNC,
+ 		.ctl_offs = 0x0334,
+ 		.sram_pdn_bits = 0,
+ 		.sram_pdn_ack_bits = 0,
+ 	},
+ 	[MT8183_POWER_DOMAIN_MFG] = {
++		.name = "mfg",
+ 		.sta_mask = PWR_STATUS_MFG,
+ 		.ctl_offs = 0x0338,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -41,18 +45,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		.caps = MTK_SCPD_DOMAIN_SUPPLY,
+ 	},
+ 	[MT8183_POWER_DOMAIN_MFG_CORE0] = {
++		.name = "mfg_core0",
+ 		.sta_mask = BIT(7),
+ 		.ctl_offs = 0x034c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8183_POWER_DOMAIN_MFG_CORE1] = {
++		.name = "mfg_core1",
+ 		.sta_mask = BIT(20),
+ 		.ctl_offs = 0x0310,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8183_POWER_DOMAIN_MFG_2D] = {
++		.name = "mfg_2d",
+ 		.sta_mask = PWR_STATUS_MFG_2D,
+ 		.ctl_offs = 0x0348,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -65,6 +72,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_DISP] = {
++		.name = "disp",
+ 		.sta_mask = PWR_STATUS_DISP,
+ 		.ctl_offs = 0x030c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -83,6 +91,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_CAM] = {
++		.name = "cam",
+ 		.sta_mask = BIT(25),
+ 		.ctl_offs = 0x0344,
+ 		.sram_pdn_bits = GENMASK(9, 8),
+@@ -105,6 +114,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_ISP] = {
++		.name = "isp",
+ 		.sta_mask = PWR_STATUS_ISP,
+ 		.ctl_offs = 0x0308,
+ 		.sram_pdn_bits = GENMASK(9, 8),
+@@ -127,6 +137,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_VDEC] = {
++		.name = "vdec",
+ 		.sta_mask = BIT(31),
+ 		.ctl_offs = 0x0300,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -139,6 +150,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_VENC] = {
++		.name = "venc",
+ 		.sta_mask = PWR_STATUS_VENC,
+ 		.ctl_offs = 0x0304,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+@@ -151,6 +163,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_VPU_TOP] = {
++		.name = "vpu_top",
+ 		.sta_mask = BIT(26),
+ 		.ctl_offs = 0x0324,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -177,6 +190,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_VPU_CORE0] = {
++		.name = "vpu_core0",
+ 		.sta_mask = BIT(27),
+ 		.ctl_offs = 0x33c,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+@@ -194,6 +208,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		.caps = MTK_SCPD_SRAM_ISO,
+ 	},
+ 	[MT8183_POWER_DOMAIN_VPU_CORE1] = {
++		.name = "vpu_core1",
+ 		.sta_mask = BIT(28),
+ 		.ctl_offs = 0x0340,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
+index 0fdf6dc6231f4..543dda70de014 100644
+--- a/drivers/soc/mediatek/mt8192-pm-domains.h
++++ b/drivers/soc/mediatek/mt8192-pm-domains.h
+@@ -12,6 +12,7 @@
+ 
+ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 	[MT8192_POWER_DOMAIN_AUDIO] = {
++		.name = "audio",
+ 		.sta_mask = BIT(21),
+ 		.ctl_offs = 0x0354,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -24,6 +25,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_CONN] = {
++		.name = "conn",
+ 		.sta_mask = PWR_STATUS_CONN,
+ 		.ctl_offs = 0x0304,
+ 		.sram_pdn_bits = 0,
+@@ -45,12 +47,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		.caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG0] = {
++		.name = "mfg0",
+ 		.sta_mask = BIT(2),
+ 		.ctl_offs = 0x0308,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG1] = {
++		.name = "mfg1",
+ 		.sta_mask = BIT(3),
+ 		.ctl_offs = 0x030c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -75,36 +79,42 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG2] = {
++		.name = "mfg2",
+ 		.sta_mask = BIT(4),
+ 		.ctl_offs = 0x0310,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG3] = {
++		.name = "mfg3",
+ 		.sta_mask = BIT(5),
+ 		.ctl_offs = 0x0314,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG4] = {
++		.name = "mfg4",
+ 		.sta_mask = BIT(6),
+ 		.ctl_offs = 0x0318,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG5] = {
++		.name = "mfg5",
+ 		.sta_mask = BIT(7),
+ 		.ctl_offs = 0x031c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG6] = {
++		.name = "mfg6",
+ 		.sta_mask = BIT(8),
+ 		.ctl_offs = 0x0320,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_DISP] = {
++		.name = "disp",
+ 		.sta_mask = BIT(20),
+ 		.ctl_offs = 0x0350,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -133,6 +143,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_IPE] = {
++		.name = "ipe",
+ 		.sta_mask = BIT(14),
+ 		.ctl_offs = 0x0338,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -149,6 +160,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_ISP] = {
++		.name = "isp",
+ 		.sta_mask = BIT(12),
+ 		.ctl_offs = 0x0330,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -165,6 +177,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_ISP2] = {
++		.name = "isp2",
+ 		.sta_mask = BIT(13),
+ 		.ctl_offs = 0x0334,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -181,6 +194,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_MDP] = {
++		.name = "mdp",
+ 		.sta_mask = BIT(19),
+ 		.ctl_offs = 0x034c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -197,6 +211,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_VENC] = {
++		.name = "venc",
+ 		.sta_mask = BIT(17),
+ 		.ctl_offs = 0x0344,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -213,6 +228,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_VDEC] = {
++		.name = "vdec",
+ 		.sta_mask = BIT(15),
+ 		.ctl_offs = 0x033c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -229,12 +245,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_VDEC2] = {
++		.name = "vdec2",
+ 		.sta_mask = BIT(16),
+ 		.ctl_offs = 0x0340,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_CAM] = {
++		.name = "cam",
+ 		.sta_mask = BIT(23),
+ 		.ctl_offs = 0x035c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -263,18 +281,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_CAM_RAWA] = {
++		.name = "cam_rawa",
+ 		.sta_mask = BIT(24),
+ 		.ctl_offs = 0x0360,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_CAM_RAWB] = {
++		.name = "cam_rawb",
+ 		.sta_mask = BIT(25),
+ 		.ctl_offs = 0x0364,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_CAM_RAWC] = {
++		.name = "cam_rawc",
+ 		.sta_mask = BIT(26),
+ 		.ctl_offs = 0x0368,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
+index 06aaf03b194c0..0af00efa0ef83 100644
+--- a/drivers/soc/mediatek/mtk-pm-domains.c
++++ b/drivers/soc/mediatek/mtk-pm-domains.c
+@@ -438,7 +438,11 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
+ 		goto err_unprepare_subsys_clocks;
+ 	}
+ 
+-	pd->genpd.name = node->name;
++	if (!pd->data->name)
++		pd->genpd.name = node->name;
++	else
++		pd->genpd.name = pd->data->name;
++
+ 	pd->genpd.power_off = scpsys_power_off;
+ 	pd->genpd.power_on = scpsys_power_on;
+ 
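The fallback added here prefers the static per-domain .name and only falls back to the device-tree node name when none is set. The same selection is sometimes written with the GNU ?: shorthand (name = data->name ?: node->name); a tiny user-space sketch of the fallback:

#include <stdio.h>

/* Prefer an explicit name, fall back to the DT node name. */
static const char *domain_name(const char *data_name, const char *node_name)
{
    return data_name ? data_name : node_name;
}

int main(void)
{
    printf("%s\n", domain_name("vdec", "power-domain@0"));    /* "vdec" */
    printf("%s\n", domain_name(NULL, "power-domain@0"));      /* node name */
    return 0;
}
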
+diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
+index 141dc76054e69..21a4e113bbecb 100644
+--- a/drivers/soc/mediatek/mtk-pm-domains.h
++++ b/drivers/soc/mediatek/mtk-pm-domains.h
+@@ -76,6 +76,7 @@ struct scpsys_bus_prot_data {
+ 
+ /**
+  * struct scpsys_domain_data - scp domain data for power on/off flow
++ * @name: The name of the power domain.
+  * @sta_mask: The mask for power on/off status bit.
+  * @ctl_offs: The offset for main power control register.
+  * @sram_pdn_bits: The mask for sram power control bits.
+@@ -85,6 +86,7 @@ struct scpsys_bus_prot_data {
+  * @bp_smi: bus protection for smi subsystem
+  */
+ struct scpsys_domain_data {
++	const char *name;
+ 	u32 sta_mask;
+ 	int ctl_offs;
+ 	u32 sram_pdn_bits;
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index 5f0219d117fb8..d821661d30f38 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -1072,7 +1072,7 @@ static struct platform_driver rkvdec_driver = {
+ 	.remove = rkvdec_remove,
+ 	.driver = {
+ 		   .name = "rkvdec",
+-		   .of_match_table = of_match_ptr(of_rkvdec_match),
++		   .of_match_table = of_rkvdec_match,
+ 		   .pm = &rkvdec_pm_ops,
+ 	},
+ };
+diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
+index d8ce3a687b80d..3c4c0516e58ab 100644
+--- a/drivers/thermal/qcom/tsens.c
++++ b/drivers/thermal/qcom/tsens.c
+@@ -755,8 +755,10 @@ int __init init_common(struct tsens_priv *priv)
+ 		for (i = VER_MAJOR; i <= VER_STEP; i++) {
+ 			priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map,
+ 							      priv->fields[i]);
+-			if (IS_ERR(priv->rf[i]))
+-				return PTR_ERR(priv->rf[i]);
++			if (IS_ERR(priv->rf[i])) {
++				ret = PTR_ERR(priv->rf[i]);
++				goto err_put_device;
++			}
+ 		}
+ 		ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor);
+ 		if (ret)
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 69ef12f852b7d..5b76f9a1280d5 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -704,14 +704,17 @@ static int thermal_of_populate_bind_params(struct device_node *np,
+ 
+ 	count = of_count_phandle_with_args(np, "cooling-device",
+ 					   "#cooling-cells");
+-	if (!count) {
++	if (count <= 0) {
+ 		pr_err("Add a cooling_device property with at least one device\n");
++		ret = -ENOENT;
+ 		goto end;
+ 	}
+ 
+ 	__tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
+-	if (!__tcbp)
++	if (!__tcbp) {
++		ret = -ENOMEM;
+ 		goto end;
++	}
+ 
+ 	for (i = 0; i < count; i++) {
+ 		ret = of_parse_phandle_with_args(np, "cooling-device",
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 508b1c3f8b731..d1e4a7379bebd 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -321,12 +321,23 @@ exit:
+ 
+ }
+ 
+-static void kill_urbs(struct wdm_device *desc)
++static void poison_urbs(struct wdm_device *desc)
+ {
+ 	/* the order here is essential */
+-	usb_kill_urb(desc->command);
+-	usb_kill_urb(desc->validity);
+-	usb_kill_urb(desc->response);
++	usb_poison_urb(desc->command);
++	usb_poison_urb(desc->validity);
++	usb_poison_urb(desc->response);
++}
++
++static void unpoison_urbs(struct wdm_device *desc)
++{
++	/*
++	 *  the order here is not essential
++	 *  it is symmetrical just to be nice
++	 */
++	usb_unpoison_urb(desc->response);
++	usb_unpoison_urb(desc->validity);
++	usb_unpoison_urb(desc->command);
+ }
+ 
+ static void free_urbs(struct wdm_device *desc)
+@@ -741,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
+ 	if (!desc->count) {
+ 		if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ 			dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
+-			kill_urbs(desc);
++			poison_urbs(desc);
+ 			spin_lock_irq(&desc->iuspin);
+ 			desc->resp_count = 0;
+ 			spin_unlock_irq(&desc->iuspin);
+ 			desc->manage_power(desc->intf, 0);
++			unpoison_urbs(desc);
+ 		} else {
+ 			/* must avoid dev_printk here as desc->intf is invalid */
+ 			pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
+@@ -1037,9 +1049,9 @@ static void wdm_disconnect(struct usb_interface *intf)
+ 	wake_up_all(&desc->wait);
+ 	mutex_lock(&desc->rlock);
+ 	mutex_lock(&desc->wlock);
++	poison_urbs(desc);
+ 	cancel_work_sync(&desc->rxwork);
+ 	cancel_work_sync(&desc->service_outs_intr);
+-	kill_urbs(desc);
+ 	mutex_unlock(&desc->wlock);
+ 	mutex_unlock(&desc->rlock);
+ 
+@@ -1080,9 +1092,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
+ 		set_bit(WDM_SUSPENDING, &desc->flags);
+ 		spin_unlock_irq(&desc->iuspin);
+ 		/* callback submits work - order is essential */
+-		kill_urbs(desc);
++		poison_urbs(desc);
+ 		cancel_work_sync(&desc->rxwork);
+ 		cancel_work_sync(&desc->service_outs_intr);
++		unpoison_urbs(desc);
+ 	}
+ 	if (!PMSG_IS_AUTO(message)) {
+ 		mutex_unlock(&desc->wlock);
+@@ -1140,7 +1153,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
+ 	wake_up_all(&desc->wait);
+ 	mutex_lock(&desc->rlock);
+ 	mutex_lock(&desc->wlock);
+-	kill_urbs(desc);
++	poison_urbs(desc);
+ 	cancel_work_sync(&desc->rxwork);
+ 	cancel_work_sync(&desc->service_outs_intr);
+ 	return 0;
+@@ -1151,6 +1164,7 @@ static int wdm_post_reset(struct usb_interface *intf)
+ 	struct wdm_device *desc = wdm_find_device(intf);
+ 	int rv;
+ 
++	unpoison_urbs(desc);
+ 	clear_bit(WDM_OVERFLOW, &desc->flags);
+ 	clear_bit(WDM_RESETTING, &desc->flags);
+ 	rv = recover_from_urb_loss(desc);
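Unlike usb_kill_urb(), usb_poison_urb() both cancels an in-flight URB and makes every later submission fail until usb_unpoison_urb() is called, which closes the window where a completion handler or queued work could resubmit during teardown; that is why the suspend and post-reset paths now unpoison once the race is over. A toy user-space model of the poison semantics (the -1 stands in for the -EPERM that usb_submit_urb() returns on a poisoned URB):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of URB poisoning: submission fails while poisoned. */
struct urb { bool poisoned; bool in_flight; };

static int submit(struct urb *u)
{
    if (u->poisoned)
        return -1;        /* like -EPERM from usb_submit_urb() */
    u->in_flight = true;
    return 0;
}

static void poison(struct urb *u)   { u->poisoned = true; u->in_flight = false; }
static void unpoison(struct urb *u) { u->poisoned = false; }

int main(void)
{
    struct urb u = { 0 };

    poison(&u);        /* teardown starts */
    /* a racing work item tries to resubmit ... and is refused */
    printf("resubmit while poisoned: %d\n", submit(&u));
    unpoison(&u);      /* suspend path re-arms for resume */
    printf("resubmit after unpoison: %d\n", submit(&u));
    return 0;
}
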
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 404507d1b76f1..13fe37fbbd2c8 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3593,9 +3593,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 		 * sequence.
+ 		 */
+ 		status = hub_port_status(hub, port1, &portstatus, &portchange);
+-
+-		/* TRSMRCY = 10 msec */
+-		msleep(10);
+ 	}
+ 
+  SuspendCleared:
+@@ -3610,6 +3607,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 				usb_clear_port_feature(hub->hdev, port1,
+ 						USB_PORT_FEAT_C_SUSPEND);
+ 		}
++
++		/* TRSMRCY = 10 msec */
++		msleep(10);
+ 	}
+ 
+ 	if (udev->persist_enabled)
+diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
+index 7161344c65221..641e4251cb7f1 100644
+--- a/drivers/usb/dwc2/core.h
++++ b/drivers/usb/dwc2/core.h
+@@ -112,6 +112,7 @@ struct dwc2_hsotg_req;
+  * @debugfs: File entry for debugfs file for this endpoint.
+  * @dir_in: Set to true if this endpoint is of the IN direction, which
+  *          means that it is sending data to the Host.
++ * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
+  * @index: The index for the endpoint registers.
+  * @mc: Multi Count - number of transactions per microframe
+  * @interval: Interval for periodic endpoints, in frames or microframes.
+@@ -161,6 +162,7 @@ struct dwc2_hsotg_ep {
+ 	unsigned short		fifo_index;
+ 
+ 	unsigned char           dir_in;
++	unsigned char           map_dir;
+ 	unsigned char           index;
+ 	unsigned char           mc;
+ 	u16                     interval;
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index ad4c94366dadf..d2f623d83bf78 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -422,7 +422,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
+ {
+ 	struct usb_request *req = &hs_req->req;
+ 
+-	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
++	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
+ }
+ 
+ /*
+@@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
+ {
+ 	int ret;
+ 
++	hs_ep->map_dir = hs_ep->dir_in;
+ 	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
+ 	if (ret)
+ 		goto dma_error;
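The unmap fix works because hs_ep->dir_in can change between mapping and unmapping a request (a control transfer turns the endpoint around), so the DMA buffer could be unmapped with the wrong direction; snapshotting the direction at map time pairs the two calls. A minimal user-space sketch of the pattern (struct and names are illustrative, not the driver's):

#include <stdio.h>

enum dma_dir { DMA_FROM_DEVICE, DMA_TO_DEVICE };

struct endpoint {
    enum dma_dir dir;      /* may flip while a request is queued */
    enum dma_dir map_dir;  /* snapshot taken when the buffer is mapped */
};

static void map_request(struct endpoint *ep)
{
    ep->map_dir = ep->dir;    /* remember the direction actually used */
    printf("mapped %s\n", ep->map_dir == DMA_TO_DEVICE ? "to-dev" : "from-dev");
}

static void unmap_request(struct endpoint *ep)
{
    /* unmap with the recorded direction, not the current one */
    printf("unmapped %s\n", ep->map_dir == DMA_TO_DEVICE ? "to-dev" : "from-dev");
}

int main(void)
{
    struct endpoint ep = { .dir = DMA_TO_DEVICE };

    map_request(&ep);
    ep.dir = DMA_FROM_DEVICE;    /* control transfer turns around */
    unmap_request(&ep);          /* still uses the mapped direction */
    return 0;
}
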
+diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
+index 75f0042b998b1..84c1a4ac24449 100644
+--- a/drivers/usb/dwc3/dwc3-imx8mp.c
++++ b/drivers/usb/dwc3/dwc3-imx8mp.c
+@@ -167,6 +167,7 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
+ 
+ 	dwc3_np = of_get_child_by_name(node, "dwc3");
+ 	if (!dwc3_np) {
++		err = -ENODEV;
+ 		dev_err(dev, "failed to find dwc3 core child\n");
+ 		goto disable_rpm;
+ 	}
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index 3db17806e92e7..e196673f5c647 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
+ 
+ 		if (extcon_get_state(edev, EXTCON_USB) == true)
+ 			dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
++		else
++			dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
++
+ 		if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
+ 			dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
++		else
++			dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
+ 
+ 		omap->edev = edev;
+ 	}
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 95f954a1d7be9..19789e94bbd00 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -123,6 +123,7 @@ static const struct property_entry dwc3_pci_mrfld_properties[] = {
+ 	PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
+ 	PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+ 	PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
++	PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
+ 	PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ 	{}
+ };
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index f85eda6bc988e..8585b56d9f2df 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1676,7 +1676,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+ 		}
+ 	}
+ 
+-	return __dwc3_gadget_kick_transfer(dep);
++	__dwc3_gadget_kick_transfer(dep);
++
++	return 0;
+ }
+ 
+ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
+@@ -2302,6 +2304,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+ 	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
+ 		reg |= DWC3_DEVTEN_ULSTCNGEN;
+ 
++	/* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
++	if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
++		reg |= DWC3_DEVTEN_EOPFEN;
++
+ 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+ }
+ 
+@@ -4024,8 +4030,9 @@ err0:
+ 
+ void dwc3_gadget_exit(struct dwc3 *dwc)
+ {
+-	usb_del_gadget_udc(dwc->gadget);
++	usb_del_gadget(dwc->gadget);
+ 	dwc3_gadget_free_endpoints(dwc);
++	usb_put_gadget(dwc->gadget);
+ 	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
+ 			  dwc->bounce_addr);
+ 	kfree(dwc->setup_buf);
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index 5617ef30530a6..f0e4a315cc81b 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
+ 	struct usb_hcd *hcd;
+ 	struct resource *res;
+ 	int irq;
+-	int retval = -ENODEV;
++	int retval;
+ 	struct fotg210_hcd *fotg210;
+ 
+ 	if (usb_disabled())
+@@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
+ 	hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
+ 			dev_name(dev));
+ 	if (!hcd) {
+-		dev_err(dev, "failed to create hcd with err %d\n", retval);
++		dev_err(dev, "failed to create hcd\n");
+ 		retval = -ENOMEM;
+ 		goto fail_create_hcd;
+ 	}
+diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
+index fa59b242cd515..e8af0a125f84b 100644
+--- a/drivers/usb/host/xhci-ext-caps.h
++++ b/drivers/usb/host/xhci-ext-caps.h
+@@ -7,8 +7,9 @@
+  * Author: Sarah Sharp
+  * Some code borrowed from the Linux EHCI driver.
+  */
+-/* Up to 16 ms to halt an HC */
+-#define XHCI_MAX_HALT_USEC	(16*1000)
++
++/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
++#define XHCI_MAX_HALT_USEC	(32 * 1000)
+ /* HC not running - set to 1 when run/stop bit is cleared. */
+ #define XHCI_STS_HALT		(1<<0)
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 5bbccc9a0179f..7bc18cf8042cc 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -57,6 +57,7 @@
+ #define PCI_DEVICE_ID_INTEL_CML_XHCI			0xa3af
+ #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI		0x9a13
+ #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI		0x1138
++#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI		0x461e
+ 
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
+@@ -166,8 +167,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	    (pdev->device == 0x15e0 || pdev->device == 0x15e1))
+ 		xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
+ 
+-	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
++	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
+ 		xhci->quirks |= XHCI_DISABLE_SPARSE;
++		xhci->quirks |= XHCI_RESET_ON_RESUME;
++	}
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+@@ -243,7 +246,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
+-	     pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
++	     pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
+ 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 59d41d2c200df..6cdea0d00d194 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -863,7 +863,7 @@ done:
+ 	return ret;
+ }
+ 
+-static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
++static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
+ 				struct xhci_virt_ep *ep, unsigned int stream_id,
+ 				struct xhci_td *td,
+ 				enum xhci_ep_reset_type reset_type)
+@@ -876,7 +876,7 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
+ 	 * Device will be reset soon to recover the link so don't do anything
+ 	 */
+ 	if (ep->vdev->flags & VDEV_PORT_ERROR)
+-		return;
++		return -ENODEV;
+ 
+ 	/* add td to cancelled list and let reset ep handler take care of it */
+ 	if (reset_type == EP_HARD_RESET) {
+@@ -889,16 +889,18 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
+ 
+ 	if (ep->ep_state & EP_HALTED) {
+ 		xhci_dbg(xhci, "Reset ep command already pending\n");
+-		return;
++		return 0;
+ 	}
+ 
+ 	err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
+ 	if (err)
+-		return;
++		return err;
+ 
+ 	ep->ep_state |= EP_HALTED;
+ 
+ 	xhci_ring_cmd_db(xhci);
++
++	return 0;
+ }
+ 
+ /*
+@@ -1015,6 +1017,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ 	struct xhci_td *td = NULL;
+ 	enum xhci_ep_reset_type reset_type;
+ 	struct xhci_command *command;
++	int err;
+ 
+ 	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
+ 		if (!xhci->devs[slot_id])
+@@ -1059,7 +1062,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ 					td->status = -EPROTO;
+ 			}
+ 			/* reset ep, reset handler cleans up cancelled tds */
+-			xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type);
++			err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
++							  reset_type);
++			if (err)
++				break;
+ 			xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ 			return;
+ 		case EP_STATE_RUNNING:
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 6672c2f403034..0d2f1c37ab745 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1514,7 +1514,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+  * we need to issue an evaluate context command and wait on it.
+  */
+ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
+-		unsigned int ep_index, struct urb *urb)
++		unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
+ {
+ 	struct xhci_container_ctx *out_ctx;
+ 	struct xhci_input_control_ctx *ctrl_ctx;
+@@ -1545,7 +1545,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
+ 		 * changes max packet sizes.
+ 		 */
+ 
+-		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
++		command = xhci_alloc_command(xhci, true, mem_flags);
+ 		if (!command)
+ 			return -ENOMEM;
+ 
+@@ -1639,7 +1639,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ 		 */
+ 		if (urb->dev->speed == USB_SPEED_FULL) {
+ 			ret = xhci_check_maxpacket(xhci, slot_id,
+-					ep_index, urb);
++					ep_index, urb, mem_flags);
+ 			if (ret < 0) {
+ 				xhci_urb_free_priv(urb_priv);
+ 				urb->hcpriv = NULL;
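xhci_urb_enqueue() may run in atomic context, so the command allocation inside xhci_check_maxpacket() has to honour the caller's mem_flags rather than hard-coding GFP_KERNEL, which can sleep. The general shape, sketched against kernel headers, is to thread the gfp_t down from the outermost caller that knows the context:

#include <linux/errno.h>
#include <linux/slab.h>

/*
 * The helper must not pick its own flags: only the outermost caller
 * knows whether sleeping is allowed, so the gfp_t is threaded down.
 */
static void *alloc_scratch(size_t len, gfp_t gfp)
{
    return kzalloc(len, gfp);
}

static int enqueue(size_t len, gfp_t mem_flags)
{
    void *buf = alloc_scratch(len, mem_flags);    /* not GFP_KERNEL */

    if (!buf)
        return -ENOMEM;
    kfree(buf);
    return 0;
}
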
+diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
+index eebeadd269461..6b92d037d8fc8 100644
+--- a/drivers/usb/musb/mediatek.c
++++ b/drivers/usb/musb/mediatek.c
+@@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
+ 
+ 	glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ 	if (IS_ERR(glue->xceiv)) {
+-		dev_err(dev, "fail to getting usb-phy %d\n", ret);
+ 		ret = PTR_ERR(glue->xceiv);
++		dev_err(dev, "fail to getting usb-phy %d\n", ret);
+ 		goto err_unregister_usb_phy;
+ 	}
+ 
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 1a086ba254d23..52acc884a61f1 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1877,7 +1877,6 @@ static void vdm_run_state_machine(struct tcpm_port *port)
+ 			}
+ 
+ 			if (res < 0) {
+-				port->vdm_sm_running = false;
+ 				return;
+ 			}
+ 		}
+@@ -1893,6 +1892,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
+ 		port->vdo_data[0] = port->vdo_retry;
+ 		port->vdo_count = 1;
+ 		port->vdm_state = VDM_STATE_READY;
++		tcpm_ams_finish(port);
+ 		break;
+ 	case VDM_STATE_BUSY:
+ 		port->vdm_state = VDM_STATE_ERR_TMOUT;
+@@ -1958,7 +1958,7 @@ static void vdm_state_machine_work(struct kthread_work *work)
+ 		 port->vdm_state != VDM_STATE_BUSY &&
+ 		 port->vdm_state != VDM_STATE_SEND_MESSAGE);
+ 
+-	if (port->vdm_state == VDM_STATE_ERR_TMOUT)
++	if (port->vdm_state < VDM_STATE_READY)
+ 		port->vdm_sm_running = false;
+ 
+ 	mutex_unlock(&port->lock);
+@@ -2387,7 +2387,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
+ 		port->nr_sink_caps = cnt;
+ 		port->sink_cap_done = true;
+ 		if (port->ams == GET_SINK_CAPABILITIES)
+-			tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
++			tcpm_set_state(port, ready_state(port), 0);
+ 		/* Unexpected Sink Capabilities */
+ 		else
+ 			tcpm_pd_handle_msg(port,
+@@ -2549,6 +2549,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 			port->sink_cap_done = true;
+ 			tcpm_set_state(port, ready_state(port), 0);
+ 			break;
++		case SRC_READY:
++		case SNK_READY:
++			if (port->vdm_state > VDM_STATE_READY) {
++				port->vdm_state = VDM_STATE_DONE;
++				if (tcpm_vdm_ams(port))
++					tcpm_ams_finish(port);
++				mod_vdm_delayed_work(port, 0);
++				break;
++			}
++			fallthrough;
+ 		default:
+ 			tcpm_pd_handle_state(port,
+ 					     port->pwr_role == TYPEC_SOURCE ?
+@@ -3135,10 +3145,10 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
+ 		port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
+ 						  pdo_pps_apdo_max_voltage(snk));
+ 		port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
+-		port->pps_data.req_out_volt = min(port->pps_data.max_volt,
+-						  max(port->pps_data.min_volt,
++		port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
++						  max(port->pps_data.req_min_volt,
+ 						      port->pps_data.req_out_volt));
+-		port->pps_data.req_op_curr = min(port->pps_data.max_curr,
++		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
+ 						 port->pps_data.req_op_curr);
+ 	}
+ 
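The PPS fix clamps the requested output voltage and operating current against the negotiated request window (req_min_volt..req_max_volt, up to req_max_curr) instead of the advertised maxima. The expression is the standard clamp, min(hi, max(lo, v)); a small user-space sketch with made-up millivolt values:

#include <stdio.h>

/* clamp(v, lo, hi) = min(hi, max(lo, v)) */
static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
    /* e.g. request 5.8 V inside a 3.3 V .. 5.9 V PPS window (in mV) */
    printf("%u\n", clamp_uint(5800, 3300, 5900));    /* 5800 */
    printf("%u\n", clamp_uint(9000, 3300, 5900));    /* clamped to 5900 */
    return 0;
}
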
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 244270755ae61..1e266f083bf8a 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -495,7 +495,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
+ 	}
+ }
+ 
+-static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
++static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
++			 u32 *pdos, int offset, int num_pdos)
+ {
+ 	struct ucsi *ucsi = con->ucsi;
+ 	u64 command;
+@@ -503,17 +504,39 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
+ 
+ 	command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
+ 	command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
+-	command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
++	command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
++	command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
+ 	command |= UCSI_GET_PDOS_SRC_PDOS;
+-	ret = ucsi_send_command(ucsi, command, con->src_pdos,
+-			       sizeof(con->src_pdos));
+-	if (ret < 0) {
++	ret = ucsi_send_command(ucsi, command, pdos + offset,
++				num_pdos * sizeof(u32));
++	if (ret < 0)
+ 		dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
++	if (ret == 0 && offset == 0)
++		dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
++
++	return ret;
++}
++
++static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
++{
++	int ret;
++
++	/* UCSI max payload means only getting at most 4 PDOs at a time */
++	ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
++	if (ret < 0)
+ 		return;
+-	}
++
+ 	con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
+-	if (ret == 0)
+-		dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
++	if (con->num_pdos < UCSI_MAX_PDOS)
++		return;
++
++	/* get the remaining PDOs, if any */
++	ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
++			    PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
++	if (ret < 0)
++		return;
++
++	con->num_pdos += ret / sizeof(u32);
+ }
+ 
+ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
+@@ -522,7 +545,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
+ 	case UCSI_CONSTAT_PWR_OPMODE_PD:
+ 		con->rdo = con->status.request_data_obj;
+ 		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
+-		ucsi_get_pdos(con, 1);
++		ucsi_get_src_pdos(con, 1);
+ 		break;
+ 	case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ 		con->rdo = 0;
+@@ -999,6 +1022,7 @@ static const struct typec_operations ucsi_ops = {
+ 	.pr_set = ucsi_pr_swap
+ };
+ 
++/* Caller must call fwnode_handle_put() after use */
+ static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
+ {
+ 	struct fwnode_handle *fwnode;
+@@ -1033,7 +1057,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
+ 	command |= UCSI_CONNECTOR_NUMBER(con->num);
+ 	ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
+ 	if (ret < 0)
+-		goto out;
++		goto out_unlock;
+ 
+ 	if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
+ 		cap->data = TYPEC_PORT_DRD;
+@@ -1151,6 +1175,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
+ 	trace_ucsi_register_port(con->num, &con->status);
+ 
+ out:
++	fwnode_handle_put(cap->fwnode);
++out_unlock:
+ 	mutex_unlock(&con->lock);
+ 	return ret;
+ }
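As the new comment notes, a single UCSI GET_PDOS command returns at most four PDOs, so a source that advertises up to PDO_MAX_OBJECTS of them has to be read in chunks at increasing offsets. A generic user-space sketch of the chunked fetch (fetch() is a stand-in for ucsi_get_pdos(), with six PDOs assumed available):

#include <stdio.h>

#define CHUNK   4    /* UCSI returns at most 4 PDOs per command */
#define MAX_PDO 7    /* PDO_MAX_OBJECTS in the USB PD code */

/* Stand-in for ucsi_get_pdos(): copies up to n PDOs starting at off,
 * returns how many were actually available. */
static int fetch(unsigned int *dst, int off, int n)
{
    static const unsigned int source[6] = { 1, 2, 3, 4, 5, 6 };
    int copied = 0;

    for (int i = 0; i < n && off + i < 6; i++, copied++)
        dst[off + i] = source[off + i];
    return copied;
}

int main(void)
{
    unsigned int pdos[MAX_PDO];
    int n = fetch(pdos, 0, CHUNK);

    if (n == CHUNK)    /* maybe more than one chunk's worth */
        n += fetch(pdos, CHUNK, MAX_PDO - CHUNK);
    printf("got %d PDOs\n", n);    /* 6 */
    return 0;
}
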
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 3920e20a9e9ef..cee666790907e 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -8,6 +8,7 @@
+ #include <linux/power_supply.h>
+ #include <linux/types.h>
+ #include <linux/usb/typec.h>
++#include <linux/usb/pd.h>
+ #include <linux/usb/role.h>
+ 
+ /* -------------------------------------------------------------------------- */
+@@ -134,7 +135,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
+ 
+ /* GET_PDOS command bits */
+ #define UCSI_GET_PDOS_PARTNER_PDO(_r_)		((u64)(_r_) << 23)
++#define UCSI_GET_PDOS_PDO_OFFSET(_r_)		((u64)(_r_) << 24)
+ #define UCSI_GET_PDOS_NUM_PDOS(_r_)		((u64)(_r_) << 32)
++#define UCSI_MAX_PDOS				(4)
+ #define UCSI_GET_PDOS_SRC_PDOS			((u64)1 << 34)
+ 
+ /* -------------------------------------------------------------------------- */
+@@ -302,7 +305,6 @@ struct ucsi {
+ 
+ #define UCSI_MAX_SVID		5
+ #define UCSI_MAX_ALTMODES	(UCSI_MAX_SVID * 6)
+-#define UCSI_MAX_PDOS		(4)
+ 
+ #define UCSI_TYPEC_VSAFE5V	5000
+ #define UCSI_TYPEC_1_5_CURRENT	1500
+@@ -330,7 +332,7 @@ struct ucsi_connector {
+ 	struct power_supply *psy;
+ 	struct power_supply_desc psy_desc;
+ 	u32 rdo;
+-	u32 src_pdos[UCSI_MAX_PDOS];
++	u32 src_pdos[PDO_MAX_OBJECTS];
+ 	int num_pdos;
+ 
+ 	struct usb_role_switch *usb_role_sw;
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index f01d58c7a042e..a3e7be96527d7 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -1017,8 +1017,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 		err = mmu_interval_notifier_insert_locked(
+ 			&map->notifier, vma->vm_mm, vma->vm_start,
+ 			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
+-		if (err)
++		if (err) {
++			map->vma = NULL;
+ 			goto out_unlock_put;
++		}
+ 	}
+ 	mutex_unlock(&priv->lock);
+ 
+diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
+index e64e6befc63b7..87e6b7db892f5 100644
+--- a/drivers/xen/unpopulated-alloc.c
++++ b/drivers/xen/unpopulated-alloc.c
+@@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages)
+ 	}
+ 
+ 	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
+-	if (!pgmap)
++	if (!pgmap) {
++		ret = -ENOMEM;
+ 		goto err_pgmap;
++	}
+ 
+ 	pgmap->type = MEMORY_DEVICE_GENERIC;
+ 	pgmap->range = (struct range) {
+diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
+index 649f04f112dc2..59c32c9b799fc 100644
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -86,8 +86,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
+ 		 * to work.
+ 		 */
+ 		writeback_fid = v9fs_writeback_fid(file_dentry(file));
+-		if (IS_ERR(fid)) {
+-			err = PTR_ERR(fid);
++		if (IS_ERR(writeback_fid)) {
++			err = PTR_ERR(writeback_fid);
+ 			mutex_unlock(&v9inode->v_mutex);
+ 			goto out_error;
+ 		}
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 9ae776ab39676..29ef969035df2 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3110,7 +3110,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ 			       struct btrfs_inode *inode, u64 new_size,
+ 			       u32 min_type);
+ 
+-int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
++int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
+ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
+ 			       bool in_reclaim_context);
+ int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 36a3c973fda10..5b82050b871a7 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1340,12 +1340,16 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
+ 		stripe = bbio->stripes;
+ 		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
+ 			u64 bytes;
++			struct btrfs_device *device = stripe->dev;
+ 
+-			if (!stripe->dev->bdev) {
++			if (!device->bdev) {
+ 				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
+ 				continue;
+ 			}
+ 
++			if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
++				continue;
++
+ 			ret = do_discard_extent(stripe, &bytes);
+ 			if (!ret) {
+ 				discarded_bytes += bytes;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 6eb72c9b15a7d..abee4b62741da 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2067,6 +2067,30 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
+ 	return ret;
+ }
+ 
++static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
++{
++	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
++
++	if (btrfs_inode_in_log(inode, fs_info->generation) &&
++	    list_empty(&ctx->ordered_extents))
++		return true;
++
++	/*
++	 * If we are doing a fast fsync we can not bail out if the inode's
++	 * last_trans is <= then the last committed transaction, because we only
++	 * update the last_trans of the inode during ordered extent completion,
++	 * and for a fast fsync we don't wait for that, we only wait for the
++	 * writeback to complete.
++	 */
++	if (inode->last_trans <= fs_info->last_trans_committed &&
++	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
++	     list_empty(&ctx->ordered_extents)))
++		return true;
++
++	return false;
++}
++
+ /*
+  * fsync call for both files and directories.  This logs the inode into
+  * the tree log instead of forcing full commits whenever possible.
+@@ -2185,17 +2209,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 
+ 	atomic_inc(&root->log_batch);
+ 
+-	/*
+-	 * If we are doing a fast fsync we can not bail out if the inode's
+-	 * last_trans is <= then the last committed transaction, because we only
+-	 * update the last_trans of the inode during ordered extent completion,
+-	 * and for a fast fsync we don't wait for that, we only wait for the
+-	 * writeback to complete.
+-	 */
+ 	smp_mb();
+-	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
+-	    (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
+-	     (full_sync || list_empty(&ctx.ordered_extents)))) {
++	if (skip_inode_logging(&ctx)) {
+ 		/*
+ 		 * We've had everything committed since the last time we were
+ 		 * modified so clear this flag in case it was set for whatever
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 9988decd5717b..ac9c2691376d6 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -3942,7 +3942,7 @@ static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
+ {
+ 	struct btrfs_block_group *block_group;
+ 	struct rb_node *node;
+-	int ret;
++	int ret = 0;
+ 
+ 	btrfs_info(fs_info, "cleaning free space cache v1");
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a922c3bcb65e1..8c4d2eaa5d58b 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9672,7 +9672,7 @@ out:
+ 	return ret;
+ }
+ 
+-int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
++int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
+ {
+ 	struct writeback_control wbc = {
+ 		.nr_to_write = LONG_MAX,
+@@ -9685,7 +9685,7 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
+ 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+ 		return -EROFS;
+ 
+-	return start_delalloc_inodes(root, &wbc, true, false);
++	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
+ }
+ 
+ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 5efd6c7fe6206..f9ecb6c0bf15e 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1046,7 +1046,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
+ 	 */
+ 	btrfs_drew_read_lock(&root->snapshot_lock);
+ 
+-	ret = btrfs_start_delalloc_snapshot(root);
++	ret = btrfs_start_delalloc_snapshot(root, false);
+ 	if (ret)
+ 		goto out;
+ 
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 985a215584370..043e3fa961e03 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -995,7 +995,7 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
+ 
+ 	if (pre)
+ 		ret = clone_ordered_extent(ordered, 0, pre);
+-	if (post)
++	if (ret == 0 && post)
+ 		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
+ 					   post);
+ 
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index f0b9ef13153ad..2991287a71a87 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3579,7 +3579,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
+ 		return 0;
+ 	}
+ 
+-	ret = btrfs_start_delalloc_snapshot(root);
++	ret = btrfs_start_delalloc_snapshot(root, true);
+ 	if (ret < 0)
+ 		goto out;
+ 	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 8f323859156b5..8ae8f1732fd25 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -7139,7 +7139,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
+ 	int i;
+ 
+ 	if (root) {
+-		ret = btrfs_start_delalloc_snapshot(root);
++		ret = btrfs_start_delalloc_snapshot(root, false);
+ 		if (ret)
+ 			return ret;
+ 		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+@@ -7147,7 +7147,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
+ 
+ 	for (i = 0; i < sctx->clone_roots_cnt; i++) {
+ 		root = sctx->clone_roots[i].root;
+-		ret = btrfs_start_delalloc_snapshot(root);
++		ret = btrfs_start_delalloc_snapshot(root, false);
+ 		if (ret)
+ 			return ret;
+ 		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 72c4b66ed5163..47e76e79b3d6b 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -6060,7 +6060,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
+ 	 * (since logging them is pointless, a link count of 0 means they
+ 	 * will never be accessible).
+ 	 */
+-	if (btrfs_inode_in_log(inode, trans->transid) ||
++	if ((btrfs_inode_in_log(inode, trans->transid) &&
++	     list_empty(&ctx->ordered_extents)) ||
+ 	    inode->vfs_inode.i_nlink == 0) {
+ 		ret = BTRFS_NO_LOG_SYNC;
+ 		goto end_no_trans;
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 70b23a0d03b10..304ce64c70a44 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1126,6 +1126,11 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 			goto out;
+ 		}
+ 
++		if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
++			ret = -EIO;
++			goto out;
++		}
++
+ 		switch (zone.cond) {
+ 		case BLK_ZONE_COND_OFFLINE:
+ 		case BLK_ZONE_COND_READONLY:
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index e088843a7734c..baa6368bece59 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -178,8 +178,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
+ 		return ERR_CAST(inode);
+ 	/* We need LINK caps to reliably check i_nlink */
+ 	err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false);
+-	if (err)
++	if (err) {
++		iput(inode);
+ 		return ERR_PTR(err);
++	}
+ 	/* -ESTALE if inode as been unlinked and no file is open */
+ 	if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
+ 		iput(inode);
+diff --git a/fs/dax.c b/fs/dax.c
+index b3d27fdc67752..df5485b4bddf1 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
+ 	struct exceptional_entry_key key;
+ };
+ 
++/**
++ * enum dax_wake_mode: waitqueue wakeup behaviour
++ * @WAKE_ALL: wake all waiters in the waitqueue
++ * @WAKE_NEXT: wake only the first waiter in the waitqueue
++ */
++enum dax_wake_mode {
++	WAKE_ALL,
++	WAKE_NEXT,
++};
++
+ static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
+ 		void *entry, struct exceptional_entry_key *key)
+ {
+@@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
+  * The important information it's conveying is whether the entry at
+  * this index used to be a PMD entry.
+  */
+-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
++static void dax_wake_entry(struct xa_state *xas, void *entry,
++			   enum dax_wake_mode mode)
+ {
+ 	struct exceptional_entry_key key;
+ 	wait_queue_head_t *wq;
+@@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
+ 	 * must be in the waitqueue and the following check will see them.
+ 	 */
+ 	if (waitqueue_active(wq))
+-		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
++		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
+ }
+ 
+ /*
+@@ -264,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+ 	finish_wait(wq, &ewait.wait);
+ }
+ 
+-static void put_unlocked_entry(struct xa_state *xas, void *entry)
++static void put_unlocked_entry(struct xa_state *xas, void *entry,
++			       enum dax_wake_mode mode)
+ {
+-	/* If we were the only waiter woken, wake the next one */
+ 	if (entry && !dax_is_conflict(entry))
+-		dax_wake_entry(xas, entry, false);
++		dax_wake_entry(xas, entry, mode);
+ }
+ 
+ /*
+@@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
+ 	old = xas_store(xas, entry);
+ 	xas_unlock_irq(xas);
+ 	BUG_ON(!dax_is_locked(old));
+-	dax_wake_entry(xas, entry, false);
++	dax_wake_entry(xas, entry, WAKE_NEXT);
+ }
+ 
+ /*
+@@ -524,7 +535,7 @@ retry:
+ 
+ 		dax_disassociate_entry(entry, mapping, false);
+ 		xas_store(xas, NULL);	/* undo the PMD join */
+-		dax_wake_entry(xas, entry, true);
++		dax_wake_entry(xas, entry, WAKE_ALL);
+ 		mapping->nrexceptional--;
+ 		entry = NULL;
+ 		xas_set(xas, index);
+@@ -622,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
+ 			entry = get_unlocked_entry(&xas, 0);
+ 		if (entry)
+ 			page = dax_busy_page(entry);
+-		put_unlocked_entry(&xas, entry);
++		put_unlocked_entry(&xas, entry, WAKE_NEXT);
+ 		if (page)
+ 			break;
+ 		if (++scanned % XA_CHECK_SCHED)
+@@ -664,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
+ 	mapping->nrexceptional--;
+ 	ret = 1;
+ out:
+-	put_unlocked_entry(&xas, entry);
++	put_unlocked_entry(&xas, entry, WAKE_ALL);
+ 	xas_unlock_irq(&xas);
+ 	return ret;
+ }
+@@ -937,13 +948,13 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
+ 	xas_lock_irq(xas);
+ 	xas_store(xas, entry);
+ 	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
+-	dax_wake_entry(xas, entry, false);
++	dax_wake_entry(xas, entry, WAKE_NEXT);
+ 
+ 	trace_dax_writeback_one(mapping->host, index, count);
+ 	return ret;
+ 
+  put_unlocked:
+-	put_unlocked_entry(xas, entry);
++	put_unlocked_entry(xas, entry, WAKE_NEXT);
+ 	return ret;
+ }
+ 
+@@ -1684,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
+ 	/* Did we race with someone splitting entry or so? */
+ 	if (!entry || dax_is_conflict(entry) ||
+ 	    (order == 0 && !dax_is_pte_entry(entry))) {
+-		put_unlocked_entry(&xas, entry);
++		put_unlocked_entry(&xas, entry, WAKE_NEXT);
+ 		xas_unlock_irq(&xas);
+ 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
+ 						      VM_FAULT_NOPAGE);
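Swapping the bool wake_all argument for enum dax_wake_mode makes call sites self-documenting: dax_wake_entry(xas, entry, WAKE_NEXT) reads unambiguously where a bare false did not, and the enum maps straight onto __wake_up()'s nr_exclusive argument. A user-space sketch of the idiom:

#include <stdio.h>

enum wake_mode { WAKE_ALL, WAKE_NEXT };

static void wake_entry(enum wake_mode mode)
{
    /* nr_exclusive = 0 wakes everyone, 1 wakes a single waiter,
     * mirroring the __wake_up() call in the patch */
    int nr_exclusive = (mode == WAKE_ALL) ? 0 : 1;

    printf("waking %s\n", nr_exclusive ? "one waiter" : "all waiters");
}

int main(void)
{
    wake_entry(WAKE_NEXT);    /* self-documenting at the call site */
    wake_entry(WAKE_ALL);
    return 0;
}
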
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 22e86ae4dd5a8..1d252164d97b6 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -35,7 +35,7 @@
+ static struct vfsmount *debugfs_mount;
+ static int debugfs_mount_count;
+ static bool debugfs_registered;
+-static unsigned int debugfs_allow = DEFAULT_DEBUGFS_ALLOW_BITS;
++static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
+ 
+ /*
+  * Don't allow access attributes to be changed whilst the kernel is locked down
+diff --git a/fs/dlm/config.c b/fs/dlm/config.c
+index 49c5f9407098e..88d95d96e36c5 100644
+--- a/fs/dlm/config.c
++++ b/fs/dlm/config.c
+@@ -125,7 +125,7 @@ static ssize_t cluster_cluster_name_store(struct config_item *item,
+ CONFIGFS_ATTR(cluster_, cluster_name);
+ 
+ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
+-			   int *info_field, bool (*check_cb)(unsigned int x),
++			   int *info_field, int (*check_cb)(unsigned int x),
+ 			   const char *buf, size_t len)
+ {
+ 	unsigned int x;
+@@ -137,8 +137,11 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
+ 	if (rc)
+ 		return rc;
+ 
+-	if (check_cb && check_cb(x))
+-		return -EINVAL;
++	if (check_cb) {
++		rc = check_cb(x);
++		if (rc)
++			return rc;
++	}
+ 
+ 	*cl_field = x;
+ 	*info_field = x;
+@@ -161,17 +164,53 @@ static ssize_t cluster_##name##_show(struct config_item *item, char *buf)     \
+ }                                                                             \
+ CONFIGFS_ATTR(cluster_, name);
+ 
+-static bool dlm_check_zero(unsigned int x)
++static int dlm_check_protocol_and_dlm_running(unsigned int x)
++{
++	switch (x) {
++	case 0:
++		/* TCP */
++		break;
++	case 1:
++		/* SCTP */
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	if (dlm_allow_conn)
++		return -EBUSY;
++
++	return 0;
++}
++
++static int dlm_check_zero_and_dlm_running(unsigned int x)
++{
++	if (!x)
++		return -EINVAL;
++
++	if (dlm_allow_conn)
++		return -EBUSY;
++
++	return 0;
++}
++
++static int dlm_check_zero(unsigned int x)
+ {
+-	return !x;
++	if (!x)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+-static bool dlm_check_buffer_size(unsigned int x)
++static int dlm_check_buffer_size(unsigned int x)
+ {
+-	return (x < DEFAULT_BUFFER_SIZE);
++	if (x < DEFAULT_BUFFER_SIZE)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+-CLUSTER_ATTR(tcp_port, dlm_check_zero);
++CLUSTER_ATTR(tcp_port, dlm_check_zero_and_dlm_running);
+ CLUSTER_ATTR(buffer_size, dlm_check_buffer_size);
+ CLUSTER_ATTR(rsbtbl_size, dlm_check_zero);
+ CLUSTER_ATTR(recover_timer, dlm_check_zero);
+@@ -179,7 +218,7 @@ CLUSTER_ATTR(toss_secs, dlm_check_zero);
+ CLUSTER_ATTR(scan_secs, dlm_check_zero);
+ CLUSTER_ATTR(log_debug, NULL);
+ CLUSTER_ATTR(log_info, NULL);
+-CLUSTER_ATTR(protocol, NULL);
++CLUSTER_ATTR(protocol, dlm_check_protocol_and_dlm_running);
+ CLUSTER_ATTR(mark, NULL);
+ CLUSTER_ATTR(timewarn_cs, dlm_check_zero);
+ CLUSTER_ATTR(waitwarn_us, NULL);
+@@ -688,6 +727,7 @@ static ssize_t comm_mark_show(struct config_item *item, char *buf)
+ static ssize_t comm_mark_store(struct config_item *item, const char *buf,
+ 			       size_t len)
+ {
++	struct dlm_comm *comm;
+ 	unsigned int mark;
+ 	int rc;
+ 
+@@ -695,7 +735,15 @@ static ssize_t comm_mark_store(struct config_item *item, const char *buf,
+ 	if (rc)
+ 		return rc;
+ 
+-	config_item_to_comm(item)->mark = mark;
++	if (mark == 0)
++		mark = dlm_config.ci_mark;
++
++	comm = config_item_to_comm(item);
++	rc = dlm_lowcomms_nodes_set_mark(comm->nodeid, mark);
++	if (rc)
++		return rc;
++
++	comm->mark = mark;
+ 	return len;
+ }
+ 
+@@ -870,24 +918,6 @@ int dlm_comm_seq(int nodeid, uint32_t *seq)
+ 	return 0;
+ }
+ 
+-void dlm_comm_mark(int nodeid, unsigned int *mark)
+-{
+-	struct dlm_comm *cm;
+-
+-	cm = get_comm(nodeid);
+-	if (!cm) {
+-		*mark = dlm_config.ci_mark;
+-		return;
+-	}
+-
+-	if (cm->mark)
+-		*mark = cm->mark;
+-	else
+-		*mark = dlm_config.ci_mark;
+-
+-	put_comm(cm);
+-}
+-
+ int dlm_our_nodeid(void)
+ {
+ 	return local_comm ? local_comm->nodeid : 0;
+diff --git a/fs/dlm/config.h b/fs/dlm/config.h
+index c210250a25818..d2cd4bd20313f 100644
+--- a/fs/dlm/config.h
++++ b/fs/dlm/config.h
+@@ -48,7 +48,6 @@ void dlm_config_exit(void);
+ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
+ 		     int *count_out);
+ int dlm_comm_seq(int nodeid, uint32_t *seq);
+-void dlm_comm_mark(int nodeid, unsigned int *mark);
+ int dlm_our_nodeid(void);
+ int dlm_our_addr(struct sockaddr_storage *addr, int num);
+ 
+diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
+index d6bbccb0ed152..d5bd990bcab8b 100644
+--- a/fs/dlm/debug_fs.c
++++ b/fs/dlm/debug_fs.c
+@@ -542,6 +542,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
+ 
+ 		if (bucket >= ls->ls_rsbtbl_size) {
+ 			kfree(ri);
++			++*pos;
+ 			return NULL;
+ 		}
+ 		tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
+diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
+index 561dcad08ad6e..c14cf2b7faab3 100644
+--- a/fs/dlm/lockspace.c
++++ b/fs/dlm/lockspace.c
+@@ -404,12 +404,6 @@ static int threads_start(void)
+ 	return error;
+ }
+ 
+-static void threads_stop(void)
+-{
+-	dlm_scand_stop();
+-	dlm_lowcomms_stop();
+-}
+-
+ static int new_lockspace(const char *name, const char *cluster,
+ 			 uint32_t flags, int lvblen,
+ 			 const struct dlm_lockspace_ops *ops, void *ops_arg,
+@@ -702,8 +696,11 @@ int dlm_new_lockspace(const char *name, const char *cluster,
+ 		ls_count++;
+ 	if (error > 0)
+ 		error = 0;
+-	if (!ls_count)
+-		threads_stop();
++	if (!ls_count) {
++		dlm_scand_stop();
++		dlm_lowcomms_shutdown();
++		dlm_lowcomms_stop();
++	}
+  out:
+ 	mutex_unlock(&ls_lock);
+ 	return error;
+@@ -788,6 +785,11 @@ static int release_lockspace(struct dlm_ls *ls, int force)
+ 
+ 	dlm_recoverd_stop(ls);
+ 
++	if (ls_count == 1) {
++		dlm_scand_stop();
++		dlm_lowcomms_shutdown();
++	}
++
+ 	dlm_callback_stop(ls);
+ 
+ 	remove_lockspace(ls);
+@@ -880,7 +882,7 @@ int dlm_release_lockspace(void *lockspace, int force)
+ 	if (!error)
+ 		ls_count--;
+ 	if (!ls_count)
+-		threads_stop();
++		dlm_lowcomms_stop();
+ 	mutex_unlock(&ls_lock);
+ 
+ 	return error;
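
The lockspace.c changes split teardown into two phases: dlm_lowcomms_shutdown() quiesces (clear dlm_allow_conn, flush the work queues, close the listen socket) while dlm_lowcomms_stop() later frees the connections. A loose userspace sketch of the same two-phase pattern, with all names hypothetical and POSIX threads standing in for workqueues:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

static atomic_int allow_work = 1;
static pthread_t worker;

static void *worker_fn(void *arg)
{
	(void)arg;
	while (atomic_load(&allow_work))
		;			/* pretend to service connections */
	return NULL;
}

static void lowcomms_shutdown(void)	/* phase 1: quiesce and flush */
{
	atomic_store(&allow_work, 0);	/* like dlm_allow_conn = 0 */
	pthread_join(worker, NULL);	/* like flush_workqueue() */
}

static void lowcomms_stop(void)
{
	/* phase 2: free connections, destroy queues */
}

int main(void)
{
	pthread_create(&worker, NULL, worker_fn, NULL);
	lowcomms_shutdown();
	lowcomms_stop();
	return 0;
}

Separating the phases is what allows release_lockspace() to shut communications down while the final dlm_lowcomms_stop() still happens under ls_lock.
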
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index f7d2c52791f8f..45c2fdaf34c4d 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -116,6 +116,7 @@ struct writequeue_entry {
+ struct dlm_node_addr {
+ 	struct list_head list;
+ 	int nodeid;
++	int mark;
+ 	int addr_count;
+ 	int curr_addr_index;
+ 	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
+@@ -134,7 +135,7 @@ static DEFINE_SPINLOCK(dlm_node_addrs_spin);
+ static struct listen_connection listen_con;
+ static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
+ static int dlm_local_count;
+-static int dlm_allow_conn;
++int dlm_allow_conn;
+ 
+ /* Work queues */
+ static struct workqueue_struct *recv_workqueue;
+@@ -303,7 +304,8 @@ static int addr_compare(const struct sockaddr_storage *x,
+ }
+ 
+ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
+-			  struct sockaddr *sa_out, bool try_new_addr)
++			  struct sockaddr *sa_out, bool try_new_addr,
++			  unsigned int *mark)
+ {
+ 	struct sockaddr_storage sas;
+ 	struct dlm_node_addr *na;
+@@ -331,6 +333,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
+ 	if (!na->addr_count)
+ 		return -ENOENT;
+ 
++	*mark = na->mark;
++
+ 	if (sas_out)
+ 		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
+ 
+@@ -350,7 +354,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
+ 	return 0;
+ }
+ 
+-static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
++static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
++			  unsigned int *mark)
+ {
+ 	struct dlm_node_addr *na;
+ 	int rv = -EEXIST;
+@@ -364,6 +369,7 @@ static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
+ 		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
+ 			if (addr_compare(na->addr[addr_i], addr)) {
+ 				*nodeid = na->nodeid;
++				*mark = na->mark;
+ 				rv = 0;
+ 				goto unlock;
+ 			}
+@@ -412,6 +418,7 @@ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
+ 		new_node->nodeid = nodeid;
+ 		new_node->addr[0] = new_addr;
+ 		new_node->addr_count = 1;
++		new_node->mark = dlm_config.ci_mark;
+ 		list_add(&new_node->list, &dlm_node_addrs);
+ 		spin_unlock(&dlm_node_addrs_spin);
+ 		return 0;
+@@ -519,6 +526,23 @@ int dlm_lowcomms_connect_node(int nodeid)
+ 	return 0;
+ }
+ 
++int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
++{
++	struct dlm_node_addr *na;
++
++	spin_lock(&dlm_node_addrs_spin);
++	na = find_node_addr(nodeid);
++	if (!na) {
++		spin_unlock(&dlm_node_addrs_spin);
++		return -ENOENT;
++	}
++
++	na->mark = mark;
++	spin_unlock(&dlm_node_addrs_spin);
++
++	return 0;
++}
++
+ static void lowcomms_error_report(struct sock *sk)
+ {
+ 	struct connection *con;
+@@ -685,10 +709,7 @@ static void shutdown_connection(struct connection *con)
+ {
+ 	int ret;
+ 
+-	if (cancel_work_sync(&con->swork)) {
+-		log_print("canceled swork for node %d", con->nodeid);
+-		clear_bit(CF_WRITE_PENDING, &con->flags);
+-	}
++	flush_work(&con->swork);
+ 
+ 	mutex_lock(&con->sock_mutex);
+ 	/* nothing to shutdown */
+@@ -867,7 +888,7 @@ static int accept_from_sock(struct listen_connection *con)
+ 
+ 	/* Get the new node's NODEID */
+ 	make_sockaddr(&peeraddr, 0, &len);
+-	if (addr_to_nodeid(&peeraddr, &nodeid)) {
++	if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
+ 		unsigned char *b=(unsigned char *)&peeraddr;
+ 		log_print("connect from non cluster node");
+ 		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, 
+@@ -876,9 +897,6 @@ static int accept_from_sock(struct listen_connection *con)
+ 		return -1;
+ 	}
+ 
+-	dlm_comm_mark(nodeid, &mark);
+-	sock_set_mark(newsock->sk, mark);
+-
+ 	log_print("got connection from %d", nodeid);
+ 
+ 	/*  Check to see if we already have a connection to this node. This
+@@ -892,6 +910,8 @@ static int accept_from_sock(struct listen_connection *con)
+ 		goto accept_err;
+ 	}
+ 
++	sock_set_mark(newsock->sk, mark);
++
+ 	mutex_lock(&newcon->sock_mutex);
+ 	if (newcon->sock) {
+ 		struct connection *othercon = newcon->othercon;
+@@ -1016,8 +1036,6 @@ static void sctp_connect_to_sock(struct connection *con)
+ 	struct socket *sock;
+ 	unsigned int mark;
+ 
+-	dlm_comm_mark(con->nodeid, &mark);
+-
+ 	mutex_lock(&con->sock_mutex);
+ 
+ 	/* Some odd races can cause double-connects, ignore them */
+@@ -1030,7 +1048,7 @@ static void sctp_connect_to_sock(struct connection *con)
+ 	}
+ 
+ 	memset(&daddr, 0, sizeof(daddr));
+-	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
++	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true, &mark);
+ 	if (result < 0) {
+ 		log_print("no address for nodeid %d", con->nodeid);
+ 		goto out;
+@@ -1105,13 +1123,11 @@ out:
+ static void tcp_connect_to_sock(struct connection *con)
+ {
+ 	struct sockaddr_storage saddr, src_addr;
++	unsigned int mark;
+ 	int addr_len;
+ 	struct socket *sock = NULL;
+-	unsigned int mark;
+ 	int result;
+ 
+-	dlm_comm_mark(con->nodeid, &mark);
+-
+ 	mutex_lock(&con->sock_mutex);
+ 	if (con->retries++ > MAX_CONNECT_RETRIES)
+ 		goto out;
+@@ -1126,15 +1142,15 @@ static void tcp_connect_to_sock(struct connection *con)
+ 	if (result < 0)
+ 		goto out_err;
+ 
+-	sock_set_mark(sock->sk, mark);
+-
+ 	memset(&saddr, 0, sizeof(saddr));
+-	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
++	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false, &mark);
+ 	if (result < 0) {
+ 		log_print("no address for nodeid %d", con->nodeid);
+ 		goto out_err;
+ 	}
+ 
++	sock_set_mark(sock->sk, mark);
++
+ 	add_sock(sock, con);
+ 
+ 	/* Bind to our cluster-known address connecting to avoid
+@@ -1356,9 +1372,11 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
+ 	struct writequeue_entry *e;
+ 	int offset = 0;
+ 
+-	if (len > LOWCOMMS_MAX_TX_BUFFER_LEN) {
+-		BUILD_BUG_ON(PAGE_SIZE < LOWCOMMS_MAX_TX_BUFFER_LEN);
++	if (len > DEFAULT_BUFFER_SIZE ||
++	    len < sizeof(struct dlm_header)) {
++		BUILD_BUG_ON(PAGE_SIZE < DEFAULT_BUFFER_SIZE);
+ 		log_print("failed to allocate a buffer of size %d", len);
++		WARN_ON(1);
+ 		return NULL;
+ 	}
+ 
+@@ -1590,6 +1608,29 @@ static int work_start(void)
+ 	return 0;
+ }
+ 
++static void shutdown_conn(struct connection *con)
++{
++	if (con->shutdown_action)
++		con->shutdown_action(con);
++}
++
++void dlm_lowcomms_shutdown(void)
++{
++	/* Set all the flags to prevent any
++	 * socket activity.
++	 */
++	dlm_allow_conn = 0;
++
++	if (recv_workqueue)
++		flush_workqueue(recv_workqueue);
++	if (send_workqueue)
++		flush_workqueue(send_workqueue);
++
++	dlm_close_sock(&listen_con.sock);
++
++	foreach_conn(shutdown_conn);
++}
++
+ static void _stop_conn(struct connection *con, bool and_other)
+ {
+ 	mutex_lock(&con->sock_mutex);
+@@ -1611,12 +1652,6 @@ static void stop_conn(struct connection *con)
+ 	_stop_conn(con, true);
+ }
+ 
+-static void shutdown_conn(struct connection *con)
+-{
+-	if (con->shutdown_action)
+-		con->shutdown_action(con);
+-}
+-
+ static void connection_release(struct rcu_head *rcu)
+ {
+ 	struct connection *con = container_of(rcu, struct connection, rcu);
+@@ -1673,19 +1708,6 @@ static void work_flush(void)
+ 
+ void dlm_lowcomms_stop(void)
+ {
+-	/* Set all the flags to prevent any
+-	   socket activity.
+-	*/
+-	dlm_allow_conn = 0;
+-
+-	if (recv_workqueue)
+-		flush_workqueue(recv_workqueue);
+-	if (send_workqueue)
+-		flush_workqueue(send_workqueue);
+-
+-	dlm_close_sock(&listen_con.sock);
+-
+-	foreach_conn(shutdown_conn);
+ 	work_flush();
+ 	foreach_conn(free_conn);
+ 	work_stop();
+diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
+index 0918f9376489f..48bbc4e187619 100644
+--- a/fs/dlm/lowcomms.h
++++ b/fs/dlm/lowcomms.h
+@@ -14,13 +14,18 @@
+ 
+ #define LOWCOMMS_MAX_TX_BUFFER_LEN	4096
+ 
++/* switch to check if dlm is running */
++extern int dlm_allow_conn;
++
+ int dlm_lowcomms_start(void);
++void dlm_lowcomms_shutdown(void);
+ void dlm_lowcomms_stop(void);
+ void dlm_lowcomms_exit(void);
+ int dlm_lowcomms_close(int nodeid);
+ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc);
+ void dlm_lowcomms_commit_buffer(void *mh);
+ int dlm_lowcomms_connect_node(int nodeid);
++int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
+ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
+ 
+ #endif				/* __LOWCOMMS_DOT_H__ */
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index fde3a6afe4bea..0bedfa8606a26 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -49,9 +49,10 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
+ 		 * cannot deliver this message to upper layers
+ 		 */
+ 		msglen = get_unaligned_le16(&hd->h_length);
+-		if (msglen > DEFAULT_BUFFER_SIZE) {
+-			log_print("received invalid length header: %u, will abort message parsing",
+-				  msglen);
++		if (msglen > DEFAULT_BUFFER_SIZE ||
++		    msglen < sizeof(struct dlm_header)) {
++			log_print("received invalid length header: %u from node %d, will abort message parsing",
++				  msglen, nodeid);
+ 			return -EBADMSG;
+ 		}
+ 
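
The midcomms.c hunk tightens header validation to a two-sided check: a length is rejected not only when it exceeds the buffer size but also when it is shorter than the fixed header, which would otherwise let later field reads run past the received bytes. The same check in isolation; the struct and limit below are stand-ins, not the dlm definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct hdr {			/* stand-in for struct dlm_header */
	uint32_t h_length;
	uint32_t h_cmd;
};

#define BUF_MAX 4096		/* stand-in for DEFAULT_BUFFER_SIZE */

static bool msg_len_valid(size_t len)
{
	/* Too short: struct hdr fields would be read past the end.
	 * Too long: cannot fit the page-backed receive buffer. */
	return len >= sizeof(struct hdr) && len <= BUF_MAX;
}

The matching hunk in dlm_lowcomms_get_buffer() applies the same bounds on the send side.
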
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 312273ed8a9fe..eda14f630def4 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1736,7 +1736,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
+ 		}
+ 
+ 		/* Range is mapped and needs a state change */
+-		jbd_debug(1, "Converting from %d to %d %lld",
++		jbd_debug(1, "Converting from %ld to %d %lld",
+ 				map.m_flags & EXT4_MAP_UNWRITTEN,
+ 			ext4_ext_is_unwritten(ex), map.m_pblk);
+ 		ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
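
This ext4 hunk and the jbd2/recovery.c hunk further down fix the same class of bug: a printf-style format string whose conversion specifiers do not match the arguments (or, in the jbd2 case, a %d with no argument at all), which is undefined behavior. In miniature:

#include <stdio.h>

int main(void)
{
	unsigned long flags = 0x10;

	printf("flags=%lu\n", flags);	/* specifier matches the type */
	/* printf("flags=%d\n", flags);    width mismatch: undefined  */
	/* printf("seq %d\n");             missing argument: undefined */
	return 0;
}

Compilers catch these when the format string is literal (-Wformat), which is how such debug statements usually get flushed out.
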
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 77fa342de38f1..66dd525a85542 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -123,19 +123,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
+ 	f2fs_drop_rpages(cc, len, true);
+ }
+ 
+-static void f2fs_put_rpages_mapping(struct address_space *mapping,
+-				pgoff_t start, int len)
+-{
+-	int i;
+-
+-	for (i = 0; i < len; i++) {
+-		struct page *page = find_get_page(mapping, start + i);
+-
+-		put_page(page);
+-		put_page(page);
+-	}
+-}
+-
+ static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
+ 		struct writeback_control *wbc, bool redirty, int unlock)
+ {
+@@ -164,13 +151,14 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
+ 	return cc->rpages ? 0 : -ENOMEM;
+ }
+ 
+-void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
++void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
+ {
+ 	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
+ 	cc->rpages = NULL;
+ 	cc->nr_rpages = 0;
+ 	cc->nr_cpages = 0;
+-	cc->cluster_idx = NULL_CLUSTER;
++	if (!reuse)
++		cc->cluster_idx = NULL_CLUSTER;
+ }
+ 
+ void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
+@@ -1048,7 +1036,7 @@ retry:
+ 		}
+ 
+ 		if (PageUptodate(page))
+-			unlock_page(page);
++			f2fs_put_page(page, 1);
+ 		else
+ 			f2fs_compress_ctx_add_page(cc, page);
+ 	}
+@@ -1058,33 +1046,35 @@ retry:
+ 
+ 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
+ 					&last_block_in_bio, false, true);
+-		f2fs_destroy_compress_ctx(cc);
++		f2fs_put_rpages(cc);
++		f2fs_destroy_compress_ctx(cc, true);
+ 		if (ret)
+-			goto release_pages;
++			goto out;
+ 		if (bio)
+ 			f2fs_submit_bio(sbi, bio, DATA);
+ 
+ 		ret = f2fs_init_compress_ctx(cc);
+ 		if (ret)
+-			goto release_pages;
++			goto out;
+ 	}
+ 
+ 	for (i = 0; i < cc->cluster_size; i++) {
+ 		f2fs_bug_on(sbi, cc->rpages[i]);
+ 
+ 		page = find_lock_page(mapping, start_idx + i);
+-		f2fs_bug_on(sbi, !page);
++		if (!page) {
++			/* page can be truncated */
++			goto release_and_retry;
++		}
+ 
+ 		f2fs_wait_on_page_writeback(page, DATA, true, true);
+-
+ 		f2fs_compress_ctx_add_page(cc, page);
+-		f2fs_put_page(page, 0);
+ 
+ 		if (!PageUptodate(page)) {
++release_and_retry:
++			f2fs_put_rpages(cc);
+ 			f2fs_unlock_rpages(cc, i + 1);
+-			f2fs_put_rpages_mapping(mapping, start_idx,
+-					cc->cluster_size);
+-			f2fs_destroy_compress_ctx(cc);
++			f2fs_destroy_compress_ctx(cc, true);
+ 			goto retry;
+ 		}
+ 	}
+@@ -1115,10 +1105,10 @@ retry:
+ 	}
+ 
+ unlock_pages:
++	f2fs_put_rpages(cc);
+ 	f2fs_unlock_rpages(cc, i);
+-release_pages:
+-	f2fs_put_rpages_mapping(mapping, start_idx, i);
+-	f2fs_destroy_compress_ctx(cc);
++	f2fs_destroy_compress_ctx(cc, true);
++out:
+ 	return ret;
+ }
+ 
+@@ -1153,7 +1143,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
+ 		set_cluster_dirty(&cc);
+ 
+ 	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
+-	f2fs_destroy_compress_ctx(&cc);
++	f2fs_destroy_compress_ctx(&cc, false);
+ 
+ 	return first_index;
+ }
+@@ -1372,7 +1362,7 @@ unlock_continue:
+ 	f2fs_put_rpages(cc);
+ 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ 	cc->cpages = NULL;
+-	f2fs_destroy_compress_ctx(cc);
++	f2fs_destroy_compress_ctx(cc, false);
+ 	return 0;
+ 
+ out_destroy_crypt:
+@@ -1383,7 +1373,8 @@ out_destroy_crypt:
+ 	for (i = 0; i < cc->nr_cpages; i++) {
+ 		if (!cc->cpages[i])
+ 			continue;
+-		f2fs_put_page(cc->cpages[i], 1);
++		f2fs_compress_free_page(cc->cpages[i]);
++		cc->cpages[i] = NULL;
+ 	}
+ out_put_cic:
+ 	kmem_cache_free(cic_entry_slab, cic);
+@@ -1533,7 +1524,7 @@ write:
+ 	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
+ 	f2fs_put_rpages_wbc(cc, wbc, false, 0);
+ destroy_out:
+-	f2fs_destroy_compress_ctx(cc);
++	f2fs_destroy_compress_ctx(cc, false);
+ 	return err;
+ }
+ 
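
A note on the compress.c rework above: the removed f2fs_put_rpages_mapping() dropped two references per page, the one taken by its own find_get_page() lookup plus the caller's original one, which made the accounting easy to get wrong across the retry path. The rework keeps reference counting symmetric, pairing each get with exactly one put where the reference was taken. The invariant in a toy form (hypothetical type, not f2fs code):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
};

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	/* Frees on the last put; a helper that puts twice per get
	 * underflows the counter and frees while users remain. */
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		free(o);
}

The new bool argument to f2fs_destroy_compress_ctx() serves the same retry path: it preserves cluster_idx when the context is about to be reused.
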
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 4e5257c763d01..8804a5d513801 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2276,7 +2276,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
+ 							max_nr_pages,
+ 							&last_block_in_bio,
+ 							rac != NULL, false);
+-				f2fs_destroy_compress_ctx(&cc);
++				f2fs_destroy_compress_ctx(&cc, false);
+ 				if (ret)
+ 					goto set_error_page;
+ 			}
+@@ -2321,7 +2321,7 @@ next_page:
+ 							max_nr_pages,
+ 							&last_block_in_bio,
+ 							rac != NULL, false);
+-				f2fs_destroy_compress_ctx(&cc);
++				f2fs_destroy_compress_ctx(&cc, false);
+ 			}
+ 		}
+ #endif
+@@ -3022,7 +3022,7 @@ next:
+ 		}
+ 	}
+ 	if (f2fs_compressed_file(inode))
+-		f2fs_destroy_compress_ctx(&cc);
++		f2fs_destroy_compress_ctx(&cc, false);
+ #endif
+ 	if (retry) {
+ 		index = 0;
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index e2d302ae3a46d..f3fabb1edfe97 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3376,6 +3376,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
+ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
+ void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
+ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
++bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
+ void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
+ void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
+ void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
+@@ -3383,7 +3384,7 @@ void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
+ 			unsigned int *newseg, bool new_sec, int dir);
+ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+ 					unsigned int start, unsigned int end);
+-void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
++void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type);
+ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
+ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
+ bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
+@@ -3547,7 +3548,7 @@ void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
+ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
+ void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
+ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
+-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
++int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
+ 			unsigned int segno);
+ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
+ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
+@@ -3949,7 +3950,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
+ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
+ void f2fs_put_page_dic(struct page *page);
+ int f2fs_init_compress_ctx(struct compress_ctx *cc);
+-void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
++void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
+ void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
+ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
+ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index d26ff2ae3f5eb..dc79694e512ce 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1619,9 +1619,10 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
+ 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
+ 			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
+ 			.m_may_create = true };
+-	pgoff_t pg_end;
++	pgoff_t pg_start, pg_end;
+ 	loff_t new_size = i_size_read(inode);
+ 	loff_t off_end;
++	block_t expanded = 0;
+ 	int err;
+ 
+ 	err = inode_newsize_ok(inode, (len + offset));
+@@ -1634,11 +1635,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
+ 
+ 	f2fs_balance_fs(sbi, true);
+ 
++	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
+ 	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
+ 	off_end = (offset + len) & (PAGE_SIZE - 1);
+ 
+-	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
+-	map.m_len = pg_end - map.m_lblk;
++	map.m_lblk = pg_start;
++	map.m_len = pg_end - pg_start;
+ 	if (off_end)
+ 		map.m_len++;
+ 
+@@ -1646,19 +1648,15 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
+ 		return 0;
+ 
+ 	if (f2fs_is_pinned_file(inode)) {
+-		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
+-					sbi->log_blocks_per_seg;
+-		block_t done = 0;
+-
+-		if (map.m_len % sbi->blocks_per_seg)
+-			len += sbi->blocks_per_seg;
++		block_t sec_blks = BLKS_PER_SEC(sbi);
++		block_t sec_len = roundup(map.m_len, sec_blks);
+ 
+-		map.m_len = sbi->blocks_per_seg;
++		map.m_len = sec_blks;
+ next_alloc:
+ 		if (has_not_enough_free_secs(sbi, 0,
+ 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
+ 			down_write(&sbi->gc_lock);
+-			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
++			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
+ 			if (err && err != -ENODATA && err != -EAGAIN)
+ 				goto out_err;
+ 		}
+@@ -1666,7 +1664,7 @@ next_alloc:
+ 		down_write(&sbi->pin_sem);
+ 
+ 		f2fs_lock_op(sbi);
+-		f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
++		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
+ 		f2fs_unlock_op(sbi);
+ 
+ 		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
+@@ -1674,24 +1672,25 @@ next_alloc:
+ 
+ 		up_write(&sbi->pin_sem);
+ 
+-		done += map.m_len;
+-		len -= map.m_len;
++		expanded += map.m_len;
++		sec_len -= map.m_len;
+ 		map.m_lblk += map.m_len;
+-		if (!err && len)
++		if (!err && sec_len)
+ 			goto next_alloc;
+ 
+-		map.m_len = done;
++		map.m_len = expanded;
+ 	} else {
+ 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
++		expanded = map.m_len;
+ 	}
+ out_err:
+ 	if (err) {
+ 		pgoff_t last_off;
+ 
+-		if (!map.m_len)
++		if (!expanded)
+ 			return err;
+ 
+-		last_off = map.m_lblk + map.m_len - 1;
++		last_off = pg_start + expanded - 1;
+ 
+ 		/* update new size to the failed position */
+ 		new_size = (last_off == pg_end) ? offset + len :
+@@ -2489,7 +2488,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
+ 		down_write(&sbi->gc_lock);
+ 	}
+ 
+-	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
++	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
+ out:
+ 	mnt_drop_write_file(filp);
+ 	return ret;
+@@ -2525,7 +2524,8 @@ do_more:
+ 		down_write(&sbi->gc_lock);
+ 	}
+ 
+-	ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
++	ret = f2fs_gc(sbi, range->sync, true, false,
++				GET_SEGNO(sbi, range->start));
+ 	if (ret) {
+ 		if (ret == -EBUSY)
+ 			ret = -EAGAIN;
+@@ -2978,7 +2978,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
+ 		sm->last_victim[GC_CB] = end_segno + 1;
+ 		sm->last_victim[GC_GREEDY] = end_segno + 1;
+ 		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
+-		ret = f2fs_gc(sbi, true, true, start_segno);
++		ret = f2fs_gc(sbi, true, true, true, start_segno);
+ 		if (ret == -EAGAIN)
+ 			ret = 0;
+ 		else if (ret < 0)
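
In the expand_inode_data() hunk above, the pinned-file allocation size is now computed with roundup(map.m_len, BLKS_PER_SEC(sbi)) instead of the open-coded shift-and-add, so sec_len is always a whole number of sections. The kernel's roundup() rounds its first argument up to the next multiple of the second; the macro below reproduces that behavior for illustration:

#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	printf("%d\n", roundup(1000, 512));	/* 1024 */
	printf("%d\n", roundup(1024, 512));	/* 1024: already aligned */
	return 0;
}
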
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 39330ad3c44e6..a8567cb476213 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -112,7 +112,7 @@ do_gc:
+ 		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+ 
+ 		/* if return value is not zero, no victim was selected */
+-		if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO))
++		if (f2fs_gc(sbi, sync_mode, true, false, NULL_SEGNO))
+ 			wait_ms = gc_th->no_gc_sleep_time;
+ 
+ 		trace_f2fs_background_gc(sbi->sb, wait_ms,
+@@ -392,10 +392,6 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
+ 		if (p->gc_mode == GC_AT &&
+ 			get_valid_blocks(sbi, segno, true) == 0)
+ 			return;
+-
+-		if (p->alloc_mode == AT_SSR &&
+-			get_seg_entry(sbi, segno)->ckpt_valid_blocks == 0)
+-			return;
+ 	}
+ 
+ 	for (i = 0; i < sbi->segs_per_sec; i++)
+@@ -728,11 +724,27 @@ retry:
+ 
+ 		if (sec_usage_check(sbi, secno))
+ 			goto next;
++
+ 		/* Don't touch checkpointed data */
+-		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+-					get_ckpt_valid_blocks(sbi, segno) &&
+-					p.alloc_mode == LFS))
+-			goto next;
++		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
++			if (p.alloc_mode == LFS) {
++				/*
++				 * LFS is set to find source section during GC.
++				 * The victim should have no checkpointed data.
++				 */
++				if (get_ckpt_valid_blocks(sbi, segno, true))
++					goto next;
++			} else {
++				/*
++				 * SSR | AT_SSR are set to find target segment
++				 * for writes which can be full by checkpointed
++				 * and newly written blocks.
++				 */
++				if (!f2fs_segment_has_free_slot(sbi, segno))
++					goto next;
++			}
++		}
++
+ 		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
+ 			goto next;
+ 
+@@ -1354,7 +1366,8 @@ out:
+  * the victim data block is ignored.
+  */
+ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+-		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
++		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
++		bool force_migrate)
+ {
+ 	struct super_block *sb = sbi->sb;
+ 	struct f2fs_summary *entry;
+@@ -1383,8 +1396,8 @@ next_step:
+ 		 * race condition along with SSR block allocation.
+ 		 */
+ 		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
+-				get_valid_blocks(sbi, segno, true) ==
+-							BLKS_PER_SEC(sbi))
++			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
++							BLKS_PER_SEC(sbi)))
+ 			return submitted;
+ 
+ 		if (check_valid_map(sbi, segno, off) == 0)
+@@ -1519,7 +1532,8 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
+ 
+ static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ 				unsigned int start_segno,
+-				struct gc_inode_list *gc_list, int gc_type)
++				struct gc_inode_list *gc_list, int gc_type,
++				bool force_migrate)
+ {
+ 	struct page *sum_page;
+ 	struct f2fs_summary_block *sum;
+@@ -1606,7 +1620,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ 								gc_type);
+ 		else
+ 			submitted += gc_data_segment(sbi, sum->entries, gc_list,
+-							segno, gc_type);
++							segno, gc_type,
++							force_migrate);
+ 
+ 		stat_inc_seg_count(sbi, type, gc_type);
+ 		migrated++;
+@@ -1634,7 +1649,7 @@ skip:
+ }
+ 
+ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
+-			bool background, unsigned int segno)
++			bool background, bool force, unsigned int segno)
+ {
+ 	int gc_type = sync ? FG_GC : BG_GC;
+ 	int sec_freed = 0, seg_freed = 0, total_freed = 0;
+@@ -1696,7 +1711,7 @@ gc_more:
+ 	if (ret)
+ 		goto stop;
+ 
+-	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
++	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
+ 	if (gc_type == FG_GC &&
+ 		seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
+ 		sec_freed++;
+@@ -1835,7 +1850,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
+ 			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
+ 		};
+ 
+-		do_garbage_collect(sbi, segno, &gc_list, FG_GC);
++		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
+ 		put_gc_inode(&gc_list);
+ 
+ 		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
+@@ -1974,7 +1989,20 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
+ 
+ 	/* stop CP to protect MAIN_SEC in free_segment_range */
+ 	f2fs_lock_op(sbi);
++
++	spin_lock(&sbi->stat_lock);
++	if (shrunk_blocks + valid_user_blocks(sbi) +
++		sbi->current_reserved_blocks + sbi->unusable_block_count +
++		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
++		err = -ENOSPC;
++	spin_unlock(&sbi->stat_lock);
++
++	if (err)
++		goto out_unlock;
++
+ 	err = free_segment_range(sbi, secs, true);
++
++out_unlock:
+ 	f2fs_unlock_op(sbi);
+ 	up_write(&sbi->gc_lock);
+ 	if (err)
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 993caefcd2bb0..92652ca7a7c8b 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -219,7 +219,8 @@ out:
+ 
+ 	f2fs_put_page(page, 1);
+ 
+-	f2fs_balance_fs(sbi, dn.node_changed);
++	if (!err)
++		f2fs_balance_fs(sbi, dn.node_changed);
+ 
+ 	return err;
+ }
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index c2866561263e9..77456d228f2a1 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -324,23 +324,27 @@ void f2fs_drop_inmem_pages(struct inode *inode)
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	struct f2fs_inode_info *fi = F2FS_I(inode);
+ 
+-	while (!list_empty(&fi->inmem_pages)) {
++	do {
+ 		mutex_lock(&fi->inmem_lock);
++		if (list_empty(&fi->inmem_pages)) {
++			fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
++
++			spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
++			if (!list_empty(&fi->inmem_ilist))
++				list_del_init(&fi->inmem_ilist);
++			if (f2fs_is_atomic_file(inode)) {
++				clear_inode_flag(inode, FI_ATOMIC_FILE);
++				sbi->atomic_files--;
++			}
++			spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
++
++			mutex_unlock(&fi->inmem_lock);
++			break;
++		}
+ 		__revoke_inmem_pages(inode, &fi->inmem_pages,
+ 						true, false, true);
+ 		mutex_unlock(&fi->inmem_lock);
+-	}
+-
+-	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
+-
+-	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+-	if (!list_empty(&fi->inmem_ilist))
+-		list_del_init(&fi->inmem_ilist);
+-	if (f2fs_is_atomic_file(inode)) {
+-		clear_inode_flag(inode, FI_ATOMIC_FILE);
+-		sbi->atomic_files--;
+-	}
+-	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
++	} while (1);
+ }
+ 
+ void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
+@@ -504,7 +508,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
+ 	 */
+ 	if (has_not_enough_free_secs(sbi, 0, 0)) {
+ 		down_write(&sbi->gc_lock);
+-		f2fs_gc(sbi, false, false, NULL_SEGNO);
++		f2fs_gc(sbi, false, false, false, NULL_SEGNO);
+ 	}
+ }
+ 
+@@ -861,7 +865,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
+ 	mutex_lock(&dirty_i->seglist_lock);
+ 
+ 	valid_blocks = get_valid_blocks(sbi, segno, false);
+-	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);
++	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
+ 
+ 	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
+ 		ckpt_valid_blocks == usable_blocks)) {
+@@ -946,7 +950,7 @@ static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
+ 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+ 		if (get_valid_blocks(sbi, segno, false))
+ 			continue;
+-		if (get_ckpt_valid_blocks(sbi, segno))
++		if (get_ckpt_valid_blocks(sbi, segno, false))
+ 			continue;
+ 		mutex_unlock(&dirty_i->seglist_lock);
+ 		return segno;
+@@ -2636,6 +2640,23 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
+ 		seg->next_blkoff++;
+ }
+ 
++bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
++{
++	struct seg_entry *se = get_seg_entry(sbi, segno);
++	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
++	unsigned long *target_map = SIT_I(sbi)->tmp_map;
++	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
++	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
++	int i, pos;
++
++	for (i = 0; i < entries; i++)
++		target_map[i] = ckpt_map[i] | cur_map[i];
++
++	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, 0);
++
++	return pos < sbi->blocks_per_seg;
++}
++
+ /*
+  * This function always allocates a used segment(from dirty seglist) by SSR
+  * manner, so it should recover the existing segment information of valid blocks
+@@ -2893,7 +2914,8 @@ unlock:
+ 	up_read(&SM_I(sbi)->curseg_lock);
+ }
+ 
+-static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
++static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
++								bool new_sec)
+ {
+ 	struct curseg_info *curseg = CURSEG_I(sbi, type);
+ 	unsigned int old_segno;
+@@ -2901,32 +2923,42 @@ static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
+ 	if (!curseg->inited)
+ 		goto alloc;
+ 
+-	if (!curseg->next_blkoff &&
+-		!get_valid_blocks(sbi, curseg->segno, false) &&
+-		!get_ckpt_valid_blocks(sbi, curseg->segno))
+-		return;
++	if (curseg->next_blkoff ||
++		get_valid_blocks(sbi, curseg->segno, new_sec))
++		goto alloc;
+ 
++	if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
++		return;
+ alloc:
+ 	old_segno = curseg->segno;
+ 	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
+ 	locate_dirty_segment(sbi, old_segno);
+ }
+ 
+-void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type)
++static void __allocate_new_section(struct f2fs_sb_info *sbi, int type)
+ {
++	__allocate_new_segment(sbi, type, true);
++}
++
++void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type)
++{
++	down_read(&SM_I(sbi)->curseg_lock);
+ 	down_write(&SIT_I(sbi)->sentry_lock);
+-	__allocate_new_segment(sbi, type);
++	__allocate_new_section(sbi, type);
+ 	up_write(&SIT_I(sbi)->sentry_lock);
++	up_read(&SM_I(sbi)->curseg_lock);
+ }
+ 
+ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
+ {
+ 	int i;
+ 
++	down_read(&SM_I(sbi)->curseg_lock);
+ 	down_write(&SIT_I(sbi)->sentry_lock);
+ 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
+-		__allocate_new_segment(sbi, i);
++		__allocate_new_segment(sbi, i, false);
+ 	up_write(&SIT_I(sbi)->sentry_lock);
++	up_read(&SM_I(sbi)->curseg_lock);
+ }
+ 
+ static const struct segment_allocation default_salloc_ops = {
+@@ -3365,12 +3397,12 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ 		f2fs_inode_chksum_set(sbi, page);
+ 	}
+ 
+-	if (F2FS_IO_ALIGNED(sbi))
+-		fio->retry = false;
+-
+ 	if (fio) {
+ 		struct f2fs_bio_info *io;
+ 
++		if (F2FS_IO_ALIGNED(sbi))
++			fio->retry = false;
++
+ 		INIT_LIST_HEAD(&fio->list);
+ 		fio->in_list = true;
+ 		io = sbi->write_io[fio->type] + fio->temp;
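
The new f2fs_segment_has_free_slot() above ORs the checkpointed and current validity bitmaps and then searches for a zero bit: a block is usable for SSR allocation only if it is free in both views. The core of that test, standalone; the kernel version uses __find_rev_next_zero_bit() over blocks_per_seg bits, this sketch spells the bit walk out:

#include <stdbool.h>

#define BITS_PER_LONG (8 * (unsigned int)sizeof(unsigned long))

static bool has_free_slot(const unsigned long *ckpt_map,
			  const unsigned long *cur_map, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++) {
		unsigned long word = ckpt_map[i / BITS_PER_LONG] |
				     cur_map[i / BITS_PER_LONG];

		if (!(word & (1UL << (i % BITS_PER_LONG))))
			return true;	/* free in both views */
	}
	return false;
}

This is what lets the gc.c hunk keep SSR target segments that still have slots even when they carry checkpointed data.
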
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index e9a7a637d6887..afb175739de5e 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -361,8 +361,20 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
+ }
+ 
+ static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
+-				unsigned int segno)
++				unsigned int segno, bool use_section)
+ {
++	if (use_section && __is_large_section(sbi)) {
++		unsigned int start_segno = START_SEGNO(segno);
++		unsigned int blocks = 0;
++		int i;
++
++		for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
++			struct seg_entry *se = get_seg_entry(sbi, start_segno);
++
++			blocks += se->ckpt_valid_blocks;
++		}
++		return blocks;
++	}
+ 	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+ }
+ 
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 82592b19b4e02..d852d96773a32 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1865,7 +1865,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
+ 
+ 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+ 		down_write(&sbi->gc_lock);
+-		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
++		err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
+ 		if (err == -ENODATA) {
+ 			err = 0;
+ 			break;
+@@ -3929,10 +3929,18 @@ try_onemore:
+ 		 * previous checkpoint was not done by clean system shutdown.
+ 		 */
+ 		if (f2fs_hw_is_readonly(sbi)) {
+-			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))
+-				f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
+-			else
+-				f2fs_info(sbi, "write access unavailable, skipping recovery");
++			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
++				err = f2fs_recover_fsync_data(sbi, true);
++				if (err > 0) {
++					err = -EROFS;
++					f2fs_err(sbi, "Need to recover fsync data, but "
++						"write access unavailable, please try "
++						"mount w/ disable_roll_forward or norecovery");
++				}
++				if (err < 0)
++					goto free_meta;
++			}
++			f2fs_info(sbi, "write access unavailable, skipping recovery");
+ 			goto reset_checkpoint;
+ 		}
+ 
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index 45082269e6982..a37528b51798b 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -627,6 +627,8 @@ static int __init cuse_init(void)
+ 	cuse_channel_fops.owner		= THIS_MODULE;
+ 	cuse_channel_fops.open		= cuse_channel_open;
+ 	cuse_channel_fops.release	= cuse_channel_release;
++	/* CUSE is not prepared for FUSE_DEV_IOC_CLONE */
++	cuse_channel_fops.unlocked_ioctl	= NULL;
+ 
+ 	cuse_class = class_create(THIS_MODULE, "cuse");
+ 	if (IS_ERR(cuse_class))
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index eff4abaa87da0..6e6d1e5998691 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1776,8 +1776,17 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
+ 		container_of(args, typeof(*wpa), ia.ap.args);
+ 	struct inode *inode = wpa->inode;
+ 	struct fuse_inode *fi = get_fuse_inode(inode);
++	struct fuse_conn *fc = get_fuse_conn(inode);
+ 
+ 	mapping_set_error(inode->i_mapping, error);
++	/*
++	 * A writeback finished and this might have updated mtime/ctime on
++	 * server making local mtime/ctime stale.  Hence invalidate attrs.
++	 * Do this only if writeback_cache is not enabled.  If writeback_cache
++	 * is enabled, we trust local ctime/mtime.
++	 */
++	if (!fc->writeback_cache)
++		fuse_invalidate_attr(inode);
+ 	spin_lock(&fi->lock);
+ 	rb_erase(&wpa->writepages_entry, &fi->writepages);
+ 	while (wpa->next) {
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index 1e5affed158e9..005209b1cd50e 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -1437,8 +1437,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
+ 	if (!fm)
+ 		goto out_err;
+ 
+-	fuse_conn_init(fc, fm, get_user_ns(current_user_ns()),
+-		       &virtio_fs_fiq_ops, fs);
++	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
+ 	fc->release = fuse_free_conn;
+ 	fc->delete_stale = true;
+ 	fc->auto_submounts = true;
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index a930ddd156819..7054a542689f9 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode *inode)
+ 		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
+ 		if (res)
+ 			break;
+-		hfs_brec_remove(&fd);
+ 
+-		mutex_unlock(&fd.tree->tree_lock);
+ 		start = hip->cached_start;
++		if (blk_cnt <= start)
++			hfs_brec_remove(&fd);
++		mutex_unlock(&fd.tree->tree_lock);
+ 		hfsplus_free_extents(sb, hip->cached_extents,
+ 				     alloc_cnt - start, alloc_cnt - blk_cnt);
+ 		hfsplus_dump_extent(hip->cached_extents);
++		mutex_lock(&fd.tree->tree_lock);
+ 		if (blk_cnt > start) {
+ 			hip->extent_state |= HFSPLUS_EXT_DIRTY;
+ 			break;
+@@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode *inode)
+ 		alloc_cnt = start;
+ 		hip->cached_start = hip->cached_blocks = 0;
+ 		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
+-		mutex_lock(&fd.tree->tree_lock);
+ 	}
+ 	hfs_find_exit(&fd);
+ 
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 701c82c361383..99df69b848224 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -131,6 +131,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
+ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+ 	struct inode *inode = file_inode(file);
++	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+ 	loff_t len, vma_len;
+ 	int ret;
+ 	struct hstate *h = hstate_file(file);
+@@ -146,6 +147,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+ 	vma->vm_ops = &hugetlb_vm_ops;
+ 
++	ret = seal_check_future_write(info->seals, vma);
++	if (ret)
++		return ret;
++
+ 	/*
+ 	 * page based offset in vm_pgoff could be sufficiently large to
+ 	 * overflow a loff_t when converted to byte offset.  This can
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 69f18fe209236..d47a0d96bf309 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -245,15 +245,14 @@ static int fc_do_one_pass(journal_t *journal,
+ 		return 0;
+ 
+ 	while (next_fc_block <= journal->j_fc_last) {
+-		jbd_debug(3, "Fast commit replay: next block %ld",
++		jbd_debug(3, "Fast commit replay: next block %ld\n",
+ 			  next_fc_block);
+ 		err = jread(&bh, journal, next_fc_block);
+ 		if (err) {
+-			jbd_debug(3, "Fast commit replay: read error");
++			jbd_debug(3, "Fast commit replay: read error\n");
+ 			break;
+ 		}
+ 
+-		jbd_debug(3, "Processing fast commit blk with seq %d");
+ 		err = journal->j_fc_replay_callback(journal, bh, pass,
+ 					next_fc_block - journal->j_fc_first,
+ 					expected_commit_id);
+diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
+index f7786e00a6a7f..ed9d580826f5a 100644
+--- a/fs/nfs/callback_proc.c
++++ b/fs/nfs/callback_proc.c
+@@ -137,12 +137,12 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
+ 		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
+ 			if (!pnfs_layout_is_valid(lo))
+ 				continue;
+-			if (stateid != NULL &&
+-			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
++			if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
+ 				continue;
+-			if (!nfs_sb_active(server->super))
+-				continue;
+-			inode = igrab(lo->plh_inode);
++			if (nfs_sb_active(server->super))
++				inode = igrab(lo->plh_inode);
++			else
++				inode = ERR_PTR(-EAGAIN);
+ 			rcu_read_unlock();
+ 			if (inode)
+ 				return inode;
+@@ -176,9 +176,10 @@ static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
+ 				continue;
+ 			if (nfsi->layout != lo)
+ 				continue;
+-			if (!nfs_sb_active(server->super))
+-				continue;
+-			inode = igrab(lo->plh_inode);
++			if (nfs_sb_active(server->super))
++				inode = igrab(lo->plh_inode);
++			else
++				inode = ERR_PTR(-EAGAIN);
+ 			rcu_read_unlock();
+ 			if (inode)
+ 				return inode;
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index fc4f490f2d785..0cd7c59a6601c 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -866,6 +866,8 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
+ 			break;
+ 		}
+ 
++		verf_arg = verf_res;
++
+ 		status = nfs_readdir_page_filler(desc, entry, pages, pglen,
+ 						 arrays, narrays);
+ 	} while (!status && nfs_readdir_page_needs_filling(page));
+@@ -927,7 +929,12 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
+ 			}
+ 			return res;
+ 		}
+-		memcpy(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf));
++		/*
++		 * Set the cookie verifier if the page cache was empty
++		 */
++		if (desc->page_index == 0)
++			memcpy(nfsi->cookieverf, verf,
++			       sizeof(nfsi->cookieverf));
+ 	}
+ 	res = nfs_readdir_search_array(desc);
+ 	if (res == 0) {
+@@ -974,10 +981,10 @@ static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc)
+ /*
+  * Once we've found the start of the dirent within a page: fill 'er up...
+  */
+-static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
++static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
++			   const __be32 *verf)
+ {
+ 	struct file	*file = desc->file;
+-	struct nfs_inode *nfsi = NFS_I(file_inode(file));
+ 	struct nfs_cache_array *array;
+ 	unsigned int i = 0;
+ 
+@@ -991,7 +998,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
+ 			desc->eof = true;
+ 			break;
+ 		}
+-		memcpy(desc->verf, nfsi->cookieverf, sizeof(desc->verf));
++		memcpy(desc->verf, verf, sizeof(desc->verf));
+ 		if (i < (array->size-1))
+ 			desc->dir_cookie = array->array[i+1].cookie;
+ 		else
+@@ -1048,7 +1055,7 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
+ 
+ 	for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
+ 		desc->page = arrays[i];
+-		nfs_do_filldir(desc);
++		nfs_do_filldir(desc, verf);
+ 	}
+ 	desc->page = NULL;
+ 
+@@ -1069,6 +1076,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ {
+ 	struct dentry	*dentry = file_dentry(file);
+ 	struct inode	*inode = d_inode(dentry);
++	struct nfs_inode *nfsi = NFS_I(inode);
+ 	struct nfs_open_dir_context *dir_ctx = file->private_data;
+ 	struct nfs_readdir_descriptor *desc;
+ 	int res;
+@@ -1122,7 +1130,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ 			break;
+ 		}
+ 		if (res == -ETOOSMALL && desc->plus) {
+-			clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
++			clear_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
+ 			nfs_zap_caches(inode);
+ 			desc->page_index = 0;
+ 			desc->plus = false;
+@@ -1132,7 +1140,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ 		if (res < 0)
+ 			break;
+ 
+-		nfs_do_filldir(desc);
++		nfs_do_filldir(desc, nfsi->cookieverf);
+ 		nfs_readdir_page_unlock_and_put_cached(desc);
+ 	} while (!desc->eof);
+ 
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 872112bffcab2..d383de00d4868 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -106,7 +106,7 @@ static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
+ 	if (unlikely(!p))
+ 		return -ENOBUFS;
+ 	fh->size = be32_to_cpup(p++);
+-	if (fh->size > sizeof(struct nfs_fh)) {
++	if (fh->size > NFS_MAXFHSIZE) {
+ 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
+ 		       fh->size);
+ 		return -EOVERFLOW;
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index a7fb076a5f44b..7cfeee3eeef7f 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1662,10 +1662,10 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
+  */
+ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
+ {
+-	const struct nfs_inode *nfsi = NFS_I(inode);
++	unsigned long attr_gencount = NFS_I(inode)->attr_gencount;
+ 
+-	return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
+-		((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
++	return (long)(fattr->gencount - attr_gencount) > 0 ||
++	       (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0;
+ }
+ 
+ static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
+@@ -2094,7 +2094,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			nfsi->attrtimeo_timestamp = now;
+ 		}
+ 		/* Set the barrier to be more recent than this fattr */
+-		if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
++		if ((long)(fattr->gencount - nfsi->attr_gencount) > 0)
+ 			nfsi->attr_gencount = fattr->gencount;
+ 	}
+ 
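
The nfs/inode.c hunk rewrites the generation-counter comparisons from (long)a - (long)b > 0 to (long)(a - b) > 0. Subtracting in unsigned arithmetic, which is well defined modulo 2^n, and casting only the result keeps the ordering test correct and avoids the signed-overflow undefined behavior the per-operand casts could hit once the counters straddle LONG_MAX. This is classic serial-number arithmetic; a sketch:

#include <limits.h>
#include <stdio.h>

/* Wrap-safe "a is after b": only the final cast interprets the
 * unsigned distance as signed. */
static int after(unsigned long a, unsigned long b)
{
	return (long)(a - b) > 0;
}

int main(void)
{
	unsigned long b = (unsigned long)LONG_MAX;	/* at the sign boundary */
	unsigned long a = b + 3;

	printf("%d\n", after(a, b));	/* 1: ordering preserved */
	/* (long)a - (long)b would overflow signed long here: UB. */
	return 0;
}
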
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 094024b0aca19..3875120ef3ef0 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -46,11 +46,12 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ {
+ 	struct inode *inode = file_inode(filep);
+ 	struct nfs_server *server = NFS_SERVER(inode);
++	u32 bitmask[3];
+ 	struct nfs42_falloc_args args = {
+ 		.falloc_fh	= NFS_FH(inode),
+ 		.falloc_offset	= offset,
+ 		.falloc_length	= len,
+-		.falloc_bitmask	= nfs4_fattr_bitmap,
++		.falloc_bitmask	= bitmask,
+ 	};
+ 	struct nfs42_falloc_res res = {
+ 		.falloc_server	= server,
+@@ -68,6 +69,10 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 		return status;
+ 	}
+ 
++	memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
++	if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
++		bitmask[1] |= FATTR4_WORD1_SPACE_USED;
++
+ 	res.falloc_fattr = nfs_alloc_fattr();
+ 	if (!res.falloc_fattr)
+ 		return -ENOMEM;
+@@ -75,7 +80,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 	status = nfs4_call_sync(server->client, server, msg,
+ 				&args.seq_args, &res.seq_res, 0);
+ 	if (status == 0)
+-		status = nfs_post_op_update_inode(inode, res.falloc_fattr);
++		status = nfs_post_op_update_inode_force_wcc(inode,
++							    res.falloc_fattr);
+ 
+ 	kfree(res.falloc_fattr);
+ 	return status;
+@@ -84,7 +90,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 				loff_t offset, loff_t len)
+ {
+-	struct nfs_server *server = NFS_SERVER(file_inode(filep));
++	struct inode *inode = file_inode(filep);
++	struct nfs_server *server = NFS_SERVER(inode);
+ 	struct nfs4_exception exception = { };
+ 	struct nfs_lock_context *lock;
+ 	int err;
+@@ -93,9 +100,13 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 	if (IS_ERR(lock))
+ 		return PTR_ERR(lock);
+ 
+-	exception.inode = file_inode(filep);
++	exception.inode = inode;
+ 	exception.state = lock->open_context->state;
+ 
++	err = nfs_sync_inode(inode);
++	if (err)
++		goto out;
++
+ 	do {
+ 		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
+ 		if (err == -ENOTSUPP) {
+@@ -104,7 +115,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 		}
+ 		err = nfs4_handle_exception(server, err, &exception);
+ 	} while (exception.retry);
+-
++out:
+ 	nfs_put_lock_context(lock);
+ 	return err;
+ }
+@@ -142,16 +153,13 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
+ 		return -EOPNOTSUPP;
+ 
+ 	inode_lock(inode);
+-	err = nfs_sync_inode(inode);
+-	if (err)
+-		goto out_unlock;
+ 
+ 	err = nfs42_proc_fallocate(&msg, filep, offset, len);
+ 	if (err == 0)
+ 		truncate_pagecache_range(inode, offset, (offset + len) -1);
+ 	if (err == -EOPNOTSUPP)
+ 		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
+-out_unlock:
++
+ 	inode_unlock(inode);
+ 	return err;
+ }
+@@ -261,6 +269,33 @@ out:
+ 	return status;
+ }
+ 
++/**
++ * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
++ * @inode: pointer to destination inode
++ * @pos: destination offset
++ * @len: copy length
++ *
++ * Punch a hole in the inode page cache, so that the NFS client will
++ * know to retrieve new data.
++ * Update the file size if necessary, and then mark the inode as having
++ * invalid cached values for change attribute, ctime, mtime and space used.
++ */
++static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
++{
++	loff_t newsize = pos + len;
++	loff_t end = newsize - 1;
++
++	truncate_pagecache_range(inode, pos, end);
++	spin_lock(&inode->i_lock);
++	if (newsize > i_size_read(inode))
++		i_size_write(inode, newsize);
++	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
++					     NFS_INO_INVALID_CTIME |
++					     NFS_INO_INVALID_MTIME |
++					     NFS_INO_INVALID_BLOCKS);
++	spin_unlock(&inode->i_lock);
++}
++
+ static ssize_t _nfs42_proc_copy(struct file *src,
+ 				struct nfs_lock_context *src_lock,
+ 				struct file *dst,
+@@ -354,14 +389,8 @@ static ssize_t _nfs42_proc_copy(struct file *src,
+ 			goto out;
+ 	}
+ 
+-	truncate_pagecache_range(dst_inode, pos_dst,
+-				 pos_dst + res->write_res.count);
+-	spin_lock(&dst_inode->i_lock);
+-	nfs_set_cache_invalid(
+-		dst_inode, NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED |
+-				   NFS_INO_INVALID_SIZE | NFS_INO_INVALID_ATTR |
+-				   NFS_INO_INVALID_DATA);
+-	spin_unlock(&dst_inode->i_lock);
++	nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
++
+ 	spin_lock(&src_inode->i_lock);
+ 	nfs_set_cache_invalid(src_inode, NFS_INO_REVAL_PAGECACHE |
+ 						 NFS_INO_REVAL_FORCED |
+@@ -659,7 +688,10 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
+ 	if (status)
+ 		return status;
+ 
+-	return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
++	if (whence == SEEK_DATA && res.sr_eof)
++		return -NFS4ERR_NXIO;
++	else
++		return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
+ }
+ 
+ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+@@ -1044,8 +1076,10 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
+ 
+ 	status = nfs4_call_sync(server->client, server, msg,
+ 				&args.seq_args, &res.seq_res, 0);
+-	if (status == 0)
++	if (status == 0) {
++		nfs42_copy_dest_done(dst_inode, dst_offset, count);
+ 		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
++	}
+ 
+ 	kfree(res.dst_fattr);
+ 	return status;
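
The llseek hunk above maps sr_eof on a SEEK_DATA request to -NFS4ERR_NXIO, matching local filesystem behavior: seeking for data at or beyond end-of-file fails with ENXIO instead of returning an offset. That behavior is observable from userspace on any filesystem with SEEK_DATA support; the file path here is arbitrary:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);
	off_t end;

	if (fd < 0)
		return 1;
	end = lseek(fd, 0, SEEK_END);
	if (lseek(fd, end, SEEK_DATA) < 0)
		perror("lseek(SEEK_DATA)");	/* expected: ENXIO */
	return 0;
}
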
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index c65c4b41e2c19..820abae88cf04 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -108,9 +108,10 @@ static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
+ static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
+ 		const struct cred *, bool);
+ #endif
+-static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
+-		struct nfs_server *server,
+-		struct nfs4_label *label);
++static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ],
++			     const __u32 *src, struct inode *inode,
++			     struct nfs_server *server,
++			     struct nfs4_label *label);
+ 
+ #ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ static inline struct nfs4_label *
+@@ -3591,6 +3592,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 	struct nfs4_closedata *calldata = data;
+ 	struct nfs4_state *state = calldata->state;
+ 	struct inode *inode = calldata->inode;
++	struct nfs_server *server = NFS_SERVER(inode);
+ 	struct pnfs_layout_hdr *lo;
+ 	bool is_rdonly, is_wronly, is_rdwr;
+ 	int call_close = 0;
+@@ -3647,8 +3649,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 	if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
+ 		/* Close-to-open cache consistency revalidation */
+ 		if (!nfs4_have_delegation(inode, FMODE_READ)) {
+-			calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
+-			nfs4_bitmask_adjust(calldata->arg.bitmask, inode, NFS_SERVER(inode), NULL);
++			nfs4_bitmask_set(calldata->arg.bitmask_store,
++					 server->cache_consistency_bitmask,
++					 inode, server, NULL);
++			calldata->arg.bitmask = calldata->arg.bitmask_store;
+ 		} else
+ 			calldata->arg.bitmask = NULL;
+ 	}
+@@ -5416,19 +5420,17 @@ bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
+ 	return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
+ }
+ 
+-static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
+-				struct nfs_server *server,
+-				struct nfs4_label *label)
++static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
++			     struct inode *inode, struct nfs_server *server,
++			     struct nfs4_label *label)
+ {
+-
+ 	unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
++	unsigned int i;
+ 
+-	if ((cache_validity & NFS_INO_INVALID_DATA) ||
+-		(cache_validity & NFS_INO_REVAL_PAGECACHE) ||
+-		(cache_validity & NFS_INO_REVAL_FORCED) ||
+-		(cache_validity & NFS_INO_INVALID_OTHER))
+-		nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
++	memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
+ 
++	if (cache_validity & (NFS_INO_INVALID_CHANGE | NFS_INO_REVAL_PAGECACHE))
++		bitmask[0] |= FATTR4_WORD0_CHANGE;
+ 	if (cache_validity & NFS_INO_INVALID_ATIME)
+ 		bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
+ 	if (cache_validity & NFS_INO_INVALID_OTHER)
+@@ -5437,16 +5439,22 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
+ 				FATTR4_WORD1_NUMLINKS;
+ 	if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
+ 		bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
+-	if (cache_validity & NFS_INO_INVALID_CHANGE)
+-		bitmask[0] |= FATTR4_WORD0_CHANGE;
+ 	if (cache_validity & NFS_INO_INVALID_CTIME)
+ 		bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
+ 	if (cache_validity & NFS_INO_INVALID_MTIME)
+ 		bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
+-	if (cache_validity & NFS_INO_INVALID_SIZE)
+-		bitmask[0] |= FATTR4_WORD0_SIZE;
+ 	if (cache_validity & NFS_INO_INVALID_BLOCKS)
+ 		bitmask[1] |= FATTR4_WORD1_SPACE_USED;
++
++	if (nfs4_have_delegation(inode, FMODE_READ) &&
++	    !(cache_validity & NFS_INO_REVAL_FORCED))
++		bitmask[0] &= ~FATTR4_WORD0_SIZE;
++	else if (cache_validity &
++		 (NFS_INO_INVALID_SIZE | NFS_INO_REVAL_PAGECACHE))
++		bitmask[0] |= FATTR4_WORD0_SIZE;
++
++	for (i = 0; i < NFS4_BITMASK_SZ; i++)
++		bitmask[i] &= server->attr_bitmask[i];
+ }
+ 
+ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
+@@ -5459,8 +5467,10 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
+ 		hdr->args.bitmask = NULL;
+ 		hdr->res.fattr = NULL;
+ 	} else {
+-		hdr->args.bitmask = server->cache_consistency_bitmask;
+-		nfs4_bitmask_adjust(hdr->args.bitmask, hdr->inode, server, NULL);
++		nfs4_bitmask_set(hdr->args.bitmask_store,
++				 server->cache_consistency_bitmask,
++				 hdr->inode, server, NULL);
++		hdr->args.bitmask = hdr->args.bitmask_store;
+ 	}
+ 
+ 	if (!hdr->pgio_done_cb)
+@@ -6502,8 +6512,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
+ 
+ 	data->args.fhandle = &data->fh;
+ 	data->args.stateid = &data->stateid;
+-	data->args.bitmask = server->cache_consistency_bitmask;
+-	nfs4_bitmask_adjust(data->args.bitmask, inode, server, NULL);
++	nfs4_bitmask_set(data->args.bitmask_store,
++			 server->cache_consistency_bitmask, inode, server,
++			 NULL);
++	data->args.bitmask = data->args.bitmask_store;
+ 	nfs_copy_fh(&data->fh, NFS_FH(inode));
+ 	nfs4_stateid_copy(&data->stateid, stateid);
+ 	data->res.fattr = &data->fattr;
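
The nfs4proc.c changes replace nfs4_bitmask_adjust(), which OR'ed bits into the server-wide cache_consistency_bitmask in place, with nfs4_bitmask_set(), which copies the template into per-request storage (the new bitmask_store arrays) before adjusting it. The underlying rule in isolation, with hypothetical names:

#include <string.h>

#define MASK_SZ 3

static const unsigned int shared_template[MASK_SZ] = { 0x1, 0x2, 0x4 };

static void build_mask(unsigned int out[MASK_SZ], unsigned int extra)
{
	/* Copy first: per-request bits must never leak into the
	 * shared template, which other requests read concurrently. */
	memcpy(out, shared_template, sizeof(shared_template));
	out[0] |= extra;
}

Mutating the shared array meant one request's cache-validity bits could silently widen or narrow every later request's GETATTR; per-request copies make each mask independent.
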
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 97447a64bad0b..886e50ed07c20 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4869,6 +4869,11 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
+ 	if (nf)
+ 		nfsd_file_put(nf);
+ 
++	status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
++								access));
++	if (status)
++		goto out_put_access;
++
+ 	status = nfsd4_truncate(rqstp, cur_fh, open);
+ 	if (status)
+ 		goto out_put_access;
+@@ -6849,11 +6854,20 @@ out:
+ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
+ {
+ 	struct nfsd_file *nf;
+-	__be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
+-	if (!err) {
+-		err = nfserrno(vfs_test_lock(nf->nf_file, lock));
+-		nfsd_file_put(nf);
+-	}
++	__be32 err;
++
++	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
++	if (err)
++		return err;
++	fh_lock(fhp); /* to block new leases till after test_lock: */
++	err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
++							NFSD_MAY_READ));
++	if (err)
++		goto out;
++	err = nfserrno(vfs_test_lock(nf->nf_file, lock));
++out:
++	fh_unlock(fhp);
++	nfsd_file_put(nf);
+ 	return err;
+ }
+ 
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index bc86aa87cc41a..5600da30e289b 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -756,7 +756,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
+ 	while (1) {
+ 		next = pde_subdir_first(de);
+ 		if (next) {
+-			if (unlikely(pde_is_permanent(root))) {
++			if (unlikely(pde_is_permanent(next))) {
+ 				write_unlock(&proc_subdir_lock);
+ 				WARN(1, "removing permanent /proc entry '%s/%s'",
+ 					next->parent->name, next->name);
+diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
+index 7b1128398976e..89d492916deaf 100644
+--- a/fs/squashfs/file.c
++++ b/fs/squashfs/file.c
+@@ -211,11 +211,11 @@ failure:
+  * If the skip factor is limited in this way then the file will use multiple
+  * slots.
+  */
+-static inline int calculate_skip(int blocks)
++static inline int calculate_skip(u64 blocks)
+ {
+-	int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
++	u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
+ 		 * SQUASHFS_META_INDEXES);
+-	return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
++	return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
+ }
+ 
+ 
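
For reference, the hunk above widens calculate_skip()'s block count from int to u64: a corrupted or enormous file size can overflow the int argument and derail the skip arithmetic. A standalone sketch of the truncation, using placeholder constants rather than squashfs's real ones:

/* Illustrative only; constants are placeholders, not squashfs's. */
#include <stdio.h>
#include <stdint.h>

#define META_SLOT_SPAN	(128 * 65536)	/* (ENTRIES + 1) * INDEXES, made up */
#define CACHED_BLKS	8

static int calculate_skip_old(int blocks)	/* pre-patch: int argument */
{
	int skip = blocks / META_SLOT_SPAN;
	return skip + 1 < CACHED_BLKS - 1 ? skip + 1 : CACHED_BLKS - 1;
}

int main(void)
{
	uint64_t blocks = 0x1FFFFFFFFULL;	/* block count above INT_MAX */

	/* The implicit conversion truncates to -1 here: */
	printf("skip = %d\n", calculate_skip_old(blocks));
	return 0;
}
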
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index f14adb8823381..cc7c3fda2aa6d 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -135,6 +135,7 @@ enum cpuhp_state {
+ 	CPUHP_AP_RISCV_TIMER_STARTING,
+ 	CPUHP_AP_CLINT_TIMER_STARTING,
+ 	CPUHP_AP_CSKY_TIMER_STARTING,
++	CPUHP_AP_TI_GP_TIMER_STARTING,
+ 	CPUHP_AP_HYPERV_TIMER_STARTING,
+ 	CPUHP_AP_KVM_STARTING,
+ 	CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
+diff --git a/include/linux/elevator.h b/include/linux/elevator.h
+index 1fe8e105b83bf..dcb2f9022c1df 100644
+--- a/include/linux/elevator.h
++++ b/include/linux/elevator.h
+@@ -34,7 +34,7 @@ struct elevator_mq_ops {
+ 	void (*depth_updated)(struct blk_mq_hw_ctx *);
+ 
+ 	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
+-	bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
++	bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
+ 	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
+ 	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
+ 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index 56622658b2158..a670ae129f4b9 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -687,6 +687,8 @@ struct i2c_adapter_quirks {
+ #define I2C_AQ_NO_ZERO_LEN_READ		BIT(5)
+ #define I2C_AQ_NO_ZERO_LEN_WRITE	BIT(6)
+ #define I2C_AQ_NO_ZERO_LEN		(I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
++/* adapter cannot do repeated START */
++#define I2C_AQ_NO_REP_START		BIT(7)
+ 
+ /*
+  * i2c_adapter is the structure used to identify a physical i2c bus along
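
The new I2C_AQ_NO_REP_START quirk lets an adapter declare that it cannot issue a repeated START between messages. A sketch of how a client driver might consult it; the two fallback helpers are hypothetical, only i2c_check_quirks() is meant as an existing helper:

/* Sketch: honoring I2C_AQ_NO_REP_START in a client (illustrative). */
#include <linux/i2c.h>

static int my_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	if (i2c_check_quirks(client->adapter, I2C_AQ_NO_REP_START))
		/* hypothetical: write reg, STOP, then a separate read */
		return my_split_read(client, reg, val);

	/* hypothetical: combined write + read using a repeated START */
	return my_combined_read(client, reg, val);
}
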
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 8ba434287387b..6c1b29bb35636 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -3170,5 +3170,37 @@ extern int sysctl_nr_trim_pages;
+ 
+ void mem_dump_obj(void *object);
+ 
++/**
++ * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
++ * @seals: the seals to check
++ * @vma: the vma to operate on
++ *
++ * Check whether F_SEAL_FUTURE_WRITE is set; if so, do the proper checks and
++ * handling on the vma flags.  Return 0 if the check passes, or <0 on error.
++ */
++static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
++{
++	if (seals & F_SEAL_FUTURE_WRITE) {
++		/*
++		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
++		 * "future write" seal active.
++		 */
++		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
++			return -EPERM;
++
++		/*
++		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
++		 * MAP_SHARED and read-only, take care to not allow mprotect to
++		 * revert protections on such mappings. Do this only for shared
++		 * mappings. For private mappings, we don't need to mask
++		 * VM_MAYWRITE as we still want them to be COW-writable.
++		 */
++		if (vma->vm_flags & VM_SHARED)
++			vma->vm_flags &= ~(VM_MAYWRITE);
++	}
++
++	return 0;
++}
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
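
seal_check_future_write() centralizes the F_SEAL_FUTURE_WRITE mmap policy so shmem and related filesystems can share it. From userspace the observable behaviour is roughly the following; a minimal sketch assuming a glibc recent enough to expose memfd_create() and F_SEAL_FUTURE_WRITE:

/* Illustrative userspace demo of F_SEAL_FUTURE_WRITE semantics. */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

int main(void)
{
	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
	void *p;

	ftruncate(fd, 4096);
	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);

	/* A shared writable mapping should now fail with EPERM... */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	printf("MAP_SHARED  rw: %s\n", p == MAP_FAILED ? strerror(errno) : "ok");

	/* ...while a private mapping stays COW-writable. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	printf("MAP_PRIVATE rw: %s\n", p == MAP_FAILED ? strerror(errno) : "ok");
	return 0;
}
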
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 6613b26a88946..5aacc1c10a45a 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -97,10 +97,10 @@ struct page {
+ 		};
+ 		struct {	/* page_pool used by netstack */
+ 			/**
+-			 * @dma_addr: might require a 64-bit value even on
++			 * @dma_addr: might require a 64-bit value on
+ 			 * 32-bit architectures.
+ 			 */
+-			dma_addr_t dma_addr;
++			unsigned long dma_addr[2];
+ 		};
+ 		struct {	/* slab, slob and slub */
+ 			union {
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 3327239fa2f9a..cc29dee508f74 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -15,6 +15,8 @@
+ #define NFS_DEF_FILE_IO_SIZE	(4096U)
+ #define NFS_MIN_FILE_IO_SIZE	(1024U)
+ 
++#define NFS_BITMASK_SZ		3
++
+ struct nfs4_string {
+ 	unsigned int len;
+ 	char *data;
+@@ -525,7 +527,8 @@ struct nfs_closeargs {
+ 	struct nfs_seqid *	seqid;
+ 	fmode_t			fmode;
+ 	u32			share_access;
+-	u32 *			bitmask;
++	const u32 *		bitmask;
++	u32			bitmask_store[NFS_BITMASK_SZ];
+ 	struct nfs4_layoutreturn_args *lr_args;
+ };
+ 
+@@ -608,7 +611,8 @@ struct nfs4_delegreturnargs {
+ 	struct nfs4_sequence_args	seq_args;
+ 	const struct nfs_fh *fhandle;
+ 	const nfs4_stateid *stateid;
+-	u32 * bitmask;
++	const u32 *bitmask;
++	u32 bitmask_store[NFS_BITMASK_SZ];
+ 	struct nfs4_layoutreturn_args *lr_args;
+ };
+ 
+@@ -648,7 +652,8 @@ struct nfs_pgio_args {
+ 	union {
+ 		unsigned int		replen;			/* used by read */
+ 		struct {
+-			u32 *			bitmask;	/* used by write */
++			const u32 *		bitmask;	/* used by write */
++			u32 bitmask_store[NFS_BITMASK_SZ];	/* used by write */
+ 			enum nfs3_stable_how	stable;		/* used by write */
+ 		};
+ 	};
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 1a12e4436b5b0..8644b097dea30 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -493,6 +493,7 @@ struct macsec_ops;
+  * @loopback_enabled: Set true if this PHY has been loopbacked successfully.
+  * @downshifted_rate: Set true if link speed has been downshifted.
+  * @is_on_sfp_module: Set true if PHY is located on an SFP module.
++ * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
+  * @state: State of the PHY for management purposes
+  * @dev_flags: Device-specific flags used by the PHY driver.
+  * @irq: IRQ number of the PHY's interrupt (-1 if none)
+@@ -567,6 +568,7 @@ struct phy_device {
+ 	unsigned loopback_enabled:1;
+ 	unsigned downshifted_rate:1;
+ 	unsigned is_on_sfp_module:1;
++	unsigned mac_managed_pm:1;
+ 
+ 	unsigned autoneg:1;
+ 	/* The most recently read link state */
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 482313a8ccfc1..628718697679a 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -602,6 +602,7 @@ struct dev_pm_info {
+ 	unsigned int		idle_notification:1;
+ 	unsigned int		request_pending:1;
+ 	unsigned int		deferred_resume:1;
++	unsigned int		needs_force_resume:1;
+ 	unsigned int		runtime_auto:1;
+ 	bool			ignore_children:1;
+ 	unsigned int		no_callbacks:1;
+diff --git a/include/net/page_pool.h b/include/net/page_pool.h
+index b5b1953053468..e05744b9a1bc2 100644
+--- a/include/net/page_pool.h
++++ b/include/net/page_pool.h
+@@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
+ 
+ static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+ {
+-	return page->dma_addr;
++	dma_addr_t ret = page->dma_addr[0];
++	if (sizeof(dma_addr_t) > sizeof(unsigned long))
++		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
++	return ret;
++}
++
++static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
++{
++	page->dma_addr[0] = addr;
++	if (sizeof(dma_addr_t) > sizeof(unsigned long))
++		page->dma_addr[1] = upper_32_bits(addr);
+ }
+ 
+ static inline bool is_page_pool_compiled_in(void)
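
Splitting page->dma_addr into two unsigned longs keeps struct page's layout stable while still holding a 64-bit DMA address on 32-bit machines. The double shift "<< 16 << 16" is how the code spells "<< 32" without undefined behaviour in configurations where dma_addr_t is itself only 32 bits wide. A standalone model of the accessors (illustrative, not kernel code):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;	/* pretend: 64-bit DMA addresses   */
static unsigned long slot[2];	/* stands in for page->dma_addr[2] */

static void set_dma(dma_addr_t addr)
{
	slot[0] = (unsigned long)addr;
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		slot[1] = (unsigned long)(addr >> 16 >> 16);
}

static dma_addr_t get_dma(void)
{
	dma_addr_t ret = slot[0];

	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)slot[1] << 16 << 16;
	return ret;
}

int main(void)
{
	set_dma(0x123456789abcdef0ULL);
	printf("%llx\n", (unsigned long long)get_dma());	/* round-trips */
	return 0;
}
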
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index 036eb1f5c1335..2f01314de73a6 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -1141,7 +1141,6 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
+ 
+ DEFINE_WRITELOCK_EVENT(reserve_xprt);
+ DEFINE_WRITELOCK_EVENT(release_xprt);
+-DEFINE_WRITELOCK_EVENT(transmit_queued);
+ 
+ DECLARE_EVENT_CLASS(xprt_cong_event,
+ 	TP_PROTO(
+diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h
+index 1f2a708413f5d..beb2cadba8a9c 100644
+--- a/include/uapi/linux/netfilter/xt_SECMARK.h
++++ b/include/uapi/linux/netfilter/xt_SECMARK.h
+@@ -20,4 +20,10 @@ struct xt_secmark_target_info {
+ 	char secctx[SECMARK_SECCTX_MAX];
+ };
+ 
++struct xt_secmark_target_info_v1 {
++	__u8 mode;
++	char secctx[SECMARK_SECCTX_MAX];
++	__u32 secid;
++};
++
+ #endif /*_XT_SECMARK_H_target */
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index c10e855a03bc1..fe4c01c14ab2c 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -608,7 +608,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
+ 		enum dma_data_direction dir, unsigned long attrs)
+ {
+ 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+-	unsigned int index, i;
++	unsigned int i;
++	int index;
+ 	phys_addr_t tlb_addr;
+ 
+ 	if (no_iotlb_memory)
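
The index variable becomes signed here, presumably so a -1 failure value from the slot search keeps its sign; stored in an unsigned int, a "< 0" error check can never fire. The trap in isolation:

/* Illustrative: error sentinels need a signed home. */
int find_slot(void);	/* returns a slot index, or -1 on failure */

void demo(void)
{
	unsigned int u = find_slot();
	if ((int)u < 0) { }	/* needs the cast; "u < 0" is always false */

	int s = find_slot();
	if (s < 0) { }		/* works as intended */
}
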
+diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
+index 5c3447cf7ad58..33400ff051a84 100644
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -740,8 +740,10 @@ static int kexec_calculate_store_digests(struct kimage *image)
+ 
+ 	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
+ 	sha_regions = vzalloc(sha_region_sz);
+-	if (!sha_regions)
++	if (!sha_regions) {
++		ret = -ENOMEM;
+ 		goto out_free_desc;
++	}
+ 
+ 	desc->tfm   = tfm;
+ 
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 627e61b0c1241..16e0c7e8ed241 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -457,7 +457,7 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
+ {
+ 	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ 
+-	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
++	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
+ 				     arg, func);
+ }
+ 
+@@ -470,7 +470,7 @@ int walk_mem_res(u64 start, u64 end, void *arg,
+ {
+ 	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ 
+-	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
++	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
+ 				     arg, func);
+ }
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 17ad829a114c8..814200541f8f5 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -928,7 +928,7 @@ DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
+ 
+ static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
+ {
+-	return clamp_value / UCLAMP_BUCKET_DELTA;
++	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
+ }
+ 
+ static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
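
uclamp_bucket_id() now clamps its result because a clamp value at the very top of the range can index one bucket past the end. A worked example, assuming CONFIG_UCLAMP_BUCKETS_COUNT=20:

/* Illustrative arithmetic for a 20-bucket configuration:        */
/*   UCLAMP_BUCKET_DELTA = DIV_ROUND_CLOSEST(1024, 20) = 51      */
/*   old: 1024 / 51 == 20, one past the valid range [0..19]      */
/*   new: min(1024 / 51, 20 - 1) == 19, stays in the last bucket */
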
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index d078767f677f4..a073a839cd065 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6212,7 +6212,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
+ 	}
+ 
+ 	if (has_idle_core)
+-		set_idle_cores(this, false);
++		set_idle_cores(target, false);
+ 
+ 	if (sched_feat(SIS_PROP) && !has_idle_core) {
+ 		time = cpu_clock(this) - time;
+@@ -10915,16 +10915,22 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
+ {
+ 	struct cfs_rq *cfs_rq;
+ 
++	list_add_leaf_cfs_rq(cfs_rq_of(se));
++
+ 	/* Start to propagate at parent */
+ 	se = se->parent;
+ 
+ 	for_each_sched_entity(se) {
+ 		cfs_rq = cfs_rq_of(se);
+ 
+-		if (cfs_rq_throttled(cfs_rq))
+-			break;
++		if (!cfs_rq_throttled(cfs_rq)) {
++			update_load_avg(cfs_rq, se, UPDATE_TG);
++			list_add_leaf_cfs_rq(cfs_rq);
++			continue;
++		}
+ 
+-		update_load_avg(cfs_rq, se, UPDATE_TG);
++		if (list_add_leaf_cfs_rq(cfs_rq))
++			break;
+ 	}
+ }
+ #else
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 4d94e2b5499d8..a7924fedf479e 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -92,7 +92,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
+ 	if (rtcdev)
+ 		return -EBUSY;
+ 
+-	if (!rtc->ops->set_alarm)
++	if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
+ 		return -1;
+ 	if (!device_may_wakeup(rtc->dev.parent))
+ 		return -1;
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 107bc38b19450..8cf0678378d29 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -154,7 +154,11 @@ static void lockup_detector_update_enable(void)
+ 
+ #ifdef CONFIG_SOFTLOCKUP_DETECTOR
+ 
+-#define SOFTLOCKUP_RESET	ULONG_MAX
++/*
++ * Delay the softlockup report when running known slow code.
++ * It does _not_ affect the timestamp of the last successful reschedule.
++ */
++#define SOFTLOCKUP_DELAY_REPORT	ULONG_MAX
+ 
+ #ifdef CONFIG_SMP
+ int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+@@ -169,10 +173,12 @@ unsigned int __read_mostly softlockup_panic =
+ static bool softlockup_initialized __read_mostly;
+ static u64 __read_mostly sample_period;
+ 
++/* Timestamp taken after the last successful reschedule. */
+ static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
++/* Timestamp of the last softlockup report. */
++static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
+ static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
+ static DEFINE_PER_CPU(bool, softlockup_touch_sync);
+-static DEFINE_PER_CPU(bool, soft_watchdog_warn);
+ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
+ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+ static unsigned long soft_lockup_nmi_warn;
+@@ -235,10 +241,16 @@ static void set_sample_period(void)
+ 	watchdog_update_hrtimer_threshold(sample_period);
+ }
+ 
++static void update_report_ts(void)
++{
++	__this_cpu_write(watchdog_report_ts, get_timestamp());
++}
++
+ /* Commands for resetting the watchdog */
+-static void __touch_watchdog(void)
++static void update_touch_ts(void)
+ {
+ 	__this_cpu_write(watchdog_touch_ts, get_timestamp());
++	update_report_ts();
+ }
+ 
+ /**
+@@ -252,10 +264,10 @@ static void __touch_watchdog(void)
+ notrace void touch_softlockup_watchdog_sched(void)
+ {
+ 	/*
+-	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
+-	 * gets zeroed here, so use the raw_ operation.
++	 * Preemption can be enabled.  It doesn't matter which CPU's watchdog
++	 * report period gets restarted here, so use the raw_ operation.
+ 	 */
+-	raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
++	raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
+ }
+ 
+ notrace void touch_softlockup_watchdog(void)
+@@ -279,7 +291,7 @@ void touch_all_softlockup_watchdogs(void)
+ 	 * the softlockup check.
+ 	 */
+ 	for_each_cpu(cpu, &watchdog_allowed_mask) {
+-		per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
++		per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
+ 		wq_watchdog_touch(cpu);
+ 	}
+ }
+@@ -287,16 +299,16 @@ void touch_all_softlockup_watchdogs(void)
+ void touch_softlockup_watchdog_sync(void)
+ {
+ 	__this_cpu_write(softlockup_touch_sync, true);
+-	__this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
++	__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
+ }
+ 
+-static int is_softlockup(unsigned long touch_ts)
++static int is_softlockup(unsigned long touch_ts, unsigned long period_ts)
+ {
+ 	unsigned long now = get_timestamp();
+ 
+ 	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
+ 		/* Warn about unreasonable delays. */
+-		if (time_after(now, touch_ts + get_softlockup_thresh()))
++		if (time_after(now, period_ts + get_softlockup_thresh()))
+ 			return now - touch_ts;
+ 	}
+ 	return 0;
+@@ -332,7 +344,7 @@ static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
+  */
+ static int softlockup_fn(void *data)
+ {
+-	__touch_watchdog();
++	update_touch_ts();
+ 	complete(this_cpu_ptr(&softlockup_completion));
+ 
+ 	return 0;
+@@ -342,6 +354,7 @@ static int softlockup_fn(void *data)
+ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ {
+ 	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
++	unsigned long period_ts = __this_cpu_read(watchdog_report_ts);
+ 	struct pt_regs *regs = get_irq_regs();
+ 	int duration;
+ 	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
+@@ -363,7 +376,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ 	/* .. and repeat */
+ 	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
+ 
+-	if (touch_ts == SOFTLOCKUP_RESET) {
++	/* Reset the interval when touched externally by known slow code. */
++	if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
+ 		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
+ 			/*
+ 			 * If the time stamp was touched atomically
+@@ -375,7 +389,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ 
+ 		/* Clear the guest paused flag on watchdog reset */
+ 		kvm_check_and_clear_guest_paused();
+-		__touch_watchdog();
++		update_report_ts();
++
+ 		return HRTIMER_RESTART;
+ 	}
+ 
+@@ -385,7 +400,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ 	 * indicate it is getting cpu time.  If it hasn't then
+ 	 * this is a good indication some task is hogging the cpu
+ 	 */
+-	duration = is_softlockup(touch_ts);
++	duration = is_softlockup(touch_ts, period_ts);
+ 	if (unlikely(duration)) {
+ 		/*
+ 		 * If a virtual machine is stopped by the host it can look to
+@@ -395,21 +410,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ 		if (kvm_check_and_clear_guest_paused())
+ 			return HRTIMER_RESTART;
+ 
+-		/* only warn once */
+-		if (__this_cpu_read(soft_watchdog_warn) == true)
+-			return HRTIMER_RESTART;
+-
++		/*
++		 * Prevent multiple soft-lockup reports if one cpu is already
++		 * engaged in dumping all cpu back traces.
++		 */
+ 		if (softlockup_all_cpu_backtrace) {
+-			/* Prevent multiple soft-lockup reports if one cpu is already
+-			 * engaged in dumping cpu back traces
+-			 */
+-			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
+-				/* Someone else will report us. Let's give up */
+-				__this_cpu_write(soft_watchdog_warn, true);
++			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
+ 				return HRTIMER_RESTART;
+-			}
+ 		}
+ 
++		/* Start period for the next softlockup warning. */
++		update_report_ts();
++
+ 		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
+ 			smp_processor_id(), duration,
+ 			current->comm, task_pid_nr(current));
+@@ -421,22 +433,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ 			dump_stack();
+ 
+ 		if (softlockup_all_cpu_backtrace) {
+-			/* Avoid generating two back traces for current
+-			 * given that one is already made above
+-			 */
+ 			trigger_allbutself_cpu_backtrace();
+-
+-			clear_bit(0, &soft_lockup_nmi_warn);
+-			/* Barrier to sync with other cpus */
+-			smp_mb__after_atomic();
++			clear_bit_unlock(0, &soft_lockup_nmi_warn);
+ 		}
+ 
+ 		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
+ 		if (softlockup_panic)
+ 			panic("softlockup: hung tasks");
+-		__this_cpu_write(soft_watchdog_warn, true);
+-	} else
+-		__this_cpu_write(soft_watchdog_warn, false);
++	}
+ 
+ 	return HRTIMER_RESTART;
+ }
+@@ -461,7 +465,7 @@ static void watchdog_enable(unsigned int cpu)
+ 		      HRTIMER_MODE_REL_PINNED_HARD);
+ 
+ 	/* Initialize timestamp */
+-	__touch_watchdog();
++	update_touch_ts();
+ 	/* Enable the perf event */
+ 	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
+ 		watchdog_nmi_enable(cpu);
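
The watchdog rework above splits the single shared timestamp in two: watchdog_touch_ts records the last genuine reschedule, while watchdog_report_ts only controls when the next report may fire. Touching the watchdog from known slow code now delays the report without faking a reschedule, so the printed stall duration stays honest. A minimal model of the resulting check:

/* Minimal model of the two-timestamp scheme (illustrative): */
static unsigned long touch_ts;	/* last successful reschedule */
static unsigned long report_ts;	/* start of the report period */

static unsigned long check_softlockup(unsigned long now, unsigned long thresh)
{
	if (now > report_ts + thresh)	/* report window expired... */
		return now - touch_ts;	/* ...blame the real stall  */
	return 0;
}
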
+diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
+index 78f50ccb3b45f..e641add339475 100644
+--- a/lib/Kconfig.kfence
++++ b/lib/Kconfig.kfence
+@@ -7,6 +7,7 @@ menuconfig KFENCE
+ 	bool "KFENCE: low-overhead sampling-based memory safety error detector"
+ 	depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
+ 	select STACKTRACE
++	select IRQ_WORK
+ 	help
+ 	  KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
+ 	  access, use-after-free, and invalid-free errors. KFENCE is designed
+diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
+index 7998affa45d49..c87d5b6a8a55a 100644
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
+ 
+ static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
+ {
++	int buffer_size = sizeof(env->buf) - env->buflen;
+ 	int len;
+ 
+-	len = strlcpy(&env->buf[env->buflen], subsystem,
+-		      sizeof(env->buf) - env->buflen);
+-	if (len >= (sizeof(env->buf) - env->buflen)) {
+-		WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
++	len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
++	if (len >= buffer_size) {
++		pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
++			buffer_size, len);
+ 		return -ENOMEM;
+ 	}
+ 
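
init_uevent_argv() leans on strlcpy() returning the length it tried to copy, i.e. strlen(src); a return value >= the buffer size means truncation. The rewrite hoists the size computation and reports the numbers instead of WARNing. The same check standalone, assuming a BSD-style strlcpy (e.g. from libbsd):

#include <stdio.h>
#include <bsd/string.h>	/* strlcpy: copies at most size - 1 bytes + NUL */

static int append(char *buf, size_t remaining, const char *subsystem)
{
	size_t len = strlcpy(buf, subsystem, remaining);

	if (len >= remaining) {
		fprintf(stderr, "buffer size of %zu too small, needed %zu\n",
			remaining, len + 1);
		return -1;
	}
	return 0;
}
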
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index 5b6116e81f9f2..1d051ef66afe5 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -828,7 +828,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
+ 	int attrlen = nla_len(nla);
+ 	int d;
+ 
+-	if (attrlen > 0 && buf[attrlen - 1] == '\0')
++	while (attrlen > 0 && buf[attrlen - 1] == '\0')
+ 		attrlen--;
+ 
+ 	d = attrlen - len;
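
Turning the "if" into a "while" makes nla_strcmp() strip every trailing NUL from the attribute payload before comparing, not just one, so a payload of "foo\0\0" now matches "foo". In isolation:

/* Illustrative: trailing-NUL stripping before the compare. */
#include <string.h>

int main(void)
{
	const char buf[5] = { 'f', 'o', 'o', '\0', '\0' };
	int attrlen = 5;

	while (attrlen > 0 && buf[attrlen - 1] == '\0')
		attrlen--;	/* the old "if" stopped at attrlen == 4 */

	/* attrlen == 3, so the compare against "foo" now matches */
	return attrlen == 3 && !memcmp(buf, "foo", attrlen) ? 0 : 1;
}
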
+diff --git a/lib/test_kasan.c b/lib/test_kasan.c
+index e5647d147b350..be69c3aa615a7 100644
+--- a/lib/test_kasan.c
++++ b/lib/test_kasan.c
+@@ -646,8 +646,20 @@ static char global_array[10];
+ 
+ static void kasan_global_oob(struct kunit *test)
+ {
+-	volatile int i = 3;
+-	char *p = &global_array[ARRAY_SIZE(global_array) + i];
++	/*
++	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
++	 * from failing here and panicking the kernel, access the array via a
++	 * volatile pointer, which will prevent the compiler from being able to
++	 * determine the array bounds.
++	 *
++	 * This access uses a volatile pointer to char (char *volatile) rather
++	 * than the more conventional pointer to volatile char (volatile char *)
++	 * because we want to prevent the compiler from making inferences about
++	 * the pointer itself (i.e. its array bounds), not the data that it
++	 * refers to.
++	 */
++	char *volatile array = global_array;
++	char *p = &array[ARRAY_SIZE(global_array) + 3];
+ 
+ 	/* Only generic mode instruments globals. */
+ 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+@@ -695,8 +707,9 @@ static void ksize_uaf(struct kunit *test)
+ static void kasan_stack_oob(struct kunit *test)
+ {
+ 	char stack_array[10];
+-	volatile int i = OOB_TAG_OFF;
+-	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
++	/* See comment in kasan_global_oob. */
++	char *volatile array = stack_array;
++	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
+ 
+ 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
+ 
+@@ -707,7 +720,9 @@ static void kasan_alloca_oob_left(struct kunit *test)
+ {
+ 	volatile int i = 10;
+ 	char alloca_array[i];
+-	char *p = alloca_array - 1;
++	/* See comment in kasan_global_oob. */
++	char *volatile array = alloca_array;
++	char *p = array - 1;
+ 
+ 	/* Only generic mode instruments dynamic allocas. */
+ 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+@@ -720,7 +735,9 @@ static void kasan_alloca_oob_right(struct kunit *test)
+ {
+ 	volatile int i = 10;
+ 	char alloca_array[i];
+-	char *p = alloca_array + i;
++	/* See comment in kasan_global_oob. */
++	char *volatile array = alloca_array;
++	char *p = array + i;
+ 
+ 	/* Only generic mode instruments dynamic allocas. */
+ 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
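
The pointer-placement trick these tests now rely on is easy to get backwards, so it is worth spelling out which qualifier goes where:

/* Illustrative: the two placements of volatile. */
char buf[10];

volatile char *a = buf;	/* the data is volatile; the compiler still
			 * knows a points into buf and sees its bounds */
char *volatile b = buf;	/* the pointer is volatile; it must be reloaded
			 * and what it points into cannot be inferred  */
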
+diff --git a/mm/gup.c b/mm/gup.c
+index ef7d2da9f03ff..333f5dfd89423 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1551,54 +1551,60 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
+ 					struct vm_area_struct **vmas,
+ 					unsigned int gup_flags)
+ {
+-	unsigned long i;
+-	unsigned long step;
+-	bool drain_allow = true;
+-	bool migrate_allow = true;
++	unsigned long i, isolation_error_count;
++	bool drain_allow;
+ 	LIST_HEAD(cma_page_list);
+ 	long ret = nr_pages;
++	struct page *prev_head, *head;
+ 	struct migration_target_control mtc = {
+ 		.nid = NUMA_NO_NODE,
+ 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
+ 	};
+ 
+ check_again:
+-	for (i = 0; i < nr_pages;) {
+-
+-		struct page *head = compound_head(pages[i]);
+-
+-		/*
+-		 * gup may start from a tail page. Advance step by the left
+-		 * part.
+-		 */
+-		step = compound_nr(head) - (pages[i] - head);
++	prev_head = NULL;
++	isolation_error_count = 0;
++	drain_allow = true;
++	for (i = 0; i < nr_pages; i++) {
++		head = compound_head(pages[i]);
++		if (head == prev_head)
++			continue;
++		prev_head = head;
+ 		/*
+ 		 * If we get a page from the CMA zone, since we are going to
+ 		 * be pinning these entries, we might as well move them out
+ 		 * of the CMA zone if possible.
+ 		 */
+ 		if (is_migrate_cma_page(head)) {
+-			if (PageHuge(head))
+-				isolate_huge_page(head, &cma_page_list);
+-			else {
++			if (PageHuge(head)) {
++				if (!isolate_huge_page(head, &cma_page_list))
++					isolation_error_count++;
++			} else {
+ 				if (!PageLRU(head) && drain_allow) {
+ 					lru_add_drain_all();
+ 					drain_allow = false;
+ 				}
+ 
+-				if (!isolate_lru_page(head)) {
+-					list_add_tail(&head->lru, &cma_page_list);
+-					mod_node_page_state(page_pgdat(head),
+-							    NR_ISOLATED_ANON +
+-							    page_is_file_lru(head),
+-							    thp_nr_pages(head));
++				if (isolate_lru_page(head)) {
++					isolation_error_count++;
++					continue;
+ 				}
++				list_add_tail(&head->lru, &cma_page_list);
++				mod_node_page_state(page_pgdat(head),
++						    NR_ISOLATED_ANON +
++						    page_is_file_lru(head),
++						    thp_nr_pages(head));
+ 			}
+ 		}
+-
+-		i += step;
+ 	}
+ 
++	/*
++	 * If the list is empty and there were no isolation errors, all pages
++	 * are in the correct zone.
++	 */
++	if (list_empty(&cma_page_list) && !isolation_error_count)
++		return ret;
++
+ 	if (!list_empty(&cma_page_list)) {
+ 		/*
+ 		 * drop the above get_user_pages reference.
+@@ -1609,34 +1615,28 @@ check_again:
+ 			for (i = 0; i < nr_pages; i++)
+ 				put_page(pages[i]);
+ 
+-		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
+-			(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+-			/*
+-			 * some of the pages failed migration. Do get_user_pages
+-			 * without migration.
+-			 */
+-			migrate_allow = false;
+-
++		ret = migrate_pages(&cma_page_list, alloc_migration_target,
++				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
++				    MR_CONTIG_RANGE);
++		if (ret) {
+ 			if (!list_empty(&cma_page_list))
+ 				putback_movable_pages(&cma_page_list);
++			return ret > 0 ? -ENOMEM : ret;
+ 		}
+-		/*
+-		 * We did migrate all the pages, Try to get the page references
+-		 * again migrating any new CMA pages which we failed to isolate
+-		 * earlier.
+-		 */
+-		ret = __get_user_pages_locked(mm, start, nr_pages,
+-						   pages, vmas, NULL,
+-						   gup_flags);
+-
+-		if ((ret > 0) && migrate_allow) {
+-			nr_pages = ret;
+-			drain_allow = true;
+-			goto check_again;
+-		}
++
++		/* We unpinned pages before migration, pin them again */
++		ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
++					      NULL, gup_flags);
++		if (ret <= 0)
++			return ret;
++		nr_pages = ret;
+ 	}
+ 
+-	return ret;
++	/*
++	 * check again because pages were unpinned, and we also might have
++	 * had isolation errors and need more pages to migrate.
++	 */
++	goto check_again;
+ }
+ #else
+ static long check_and_migrate_cma_pages(struct mm_struct *mm,
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index a86a58ef132d5..96b722af092e7 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -743,13 +743,20 @@ void hugetlb_fix_reserve_counts(struct inode *inode)
+ {
+ 	struct hugepage_subpool *spool = subpool_inode(inode);
+ 	long rsv_adjust;
++	bool reserved = false;
+ 
+ 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
+-	if (rsv_adjust) {
++	if (rsv_adjust > 0) {
+ 		struct hstate *h = hstate_inode(inode);
+ 
+-		hugetlb_acct_memory(h, 1);
++		if (!hugetlb_acct_memory(h, 1))
++			reserved = true;
++	} else if (!rsv_adjust) {
++		reserved = true;
+ 	}
++
++	if (!reserved)
++		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
+ }
+ 
+ /*
+@@ -3898,6 +3905,7 @@ again:
+ 				 * See Documentation/vm/mmu_notifier.rst
+ 				 */
+ 				huge_ptep_set_wrprotect(src, addr, src_pte);
++				entry = huge_pte_wrprotect(entry);
+ 			}
+ 
+ 			page_dup_rmap(ptepage, true);
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index d53c91f881a41..f0be2c5038b5d 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -10,6 +10,7 @@
+ #include <linux/atomic.h>
+ #include <linux/bug.h>
+ #include <linux/debugfs.h>
++#include <linux/irq_work.h>
+ #include <linux/kcsan-checks.h>
+ #include <linux/kfence.h>
+ #include <linux/kmemleak.h>
+@@ -586,6 +587,17 @@ late_initcall(kfence_debugfs_init);
+ 
+ /* === Allocation Gate Timer ================================================ */
+ 
++#ifdef CONFIG_KFENCE_STATIC_KEYS
++/* Wait queue to wake up allocation-gate timer task. */
++static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
++
++static void wake_up_kfence_timer(struct irq_work *work)
++{
++	wake_up(&allocation_wait);
++}
++static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
++#endif
++
+ /*
+  * Set up delayed work, which will enable and disable the static key. We need to
+  * use a work queue (rather than a simple timer), since enabling and disabling a
+@@ -603,25 +615,13 @@ static void toggle_allocation_gate(struct work_struct *work)
+ 	if (!READ_ONCE(kfence_enabled))
+ 		return;
+ 
+-	/* Enable static key, and await allocation to happen. */
+ 	atomic_set(&kfence_allocation_gate, 0);
+ #ifdef CONFIG_KFENCE_STATIC_KEYS
++	/* Enable static key, and await allocation to happen. */
+ 	static_branch_enable(&kfence_allocation_key);
+-	/*
+-	 * Await an allocation. Timeout after 1 second, in case the kernel stops
+-	 * doing allocations, to avoid stalling this worker task for too long.
+-	 */
+-	{
+-		unsigned long end_wait = jiffies + HZ;
+-
+-		do {
+-			set_current_state(TASK_UNINTERRUPTIBLE);
+-			if (atomic_read(&kfence_allocation_gate) != 0)
+-				break;
+-			schedule_timeout(1);
+-		} while (time_before(jiffies, end_wait));
+-		__set_current_state(TASK_RUNNING);
+-	}
++
++	wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), HZ);
++
+ 	/* Disable static key and reset timer. */
+ 	static_branch_disable(&kfence_allocation_key);
+ #endif
+@@ -728,6 +728,19 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
+ 	 */
+ 	if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
+ 		return NULL;
++#ifdef CONFIG_KFENCE_STATIC_KEYS
++	/*
++	 * waitqueue_active() is fully ordered after the update of
++	 * kfence_allocation_gate per atomic_inc_return().
++	 */
++	if (waitqueue_active(&allocation_wait)) {
++		/*
++		 * Calling wake_up() here may deadlock when allocations happen
++		 * from within timer code. Use an irq_work to defer it.
++		 */
++		irq_work_queue(&wake_up_kfence_timer_work);
++	}
++#endif
+ 
+ 	if (!READ_ONCE(kfence_enabled))
+ 		return NULL;
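
The deadlock being avoided: the patch comment notes that calling wake_up() here may deadlock when allocations happen from within timer code, since wake_up() takes the waitqueue lock, so the wake-up is bounced through an irq_work. The pattern, reduced to its skeleton:

/* Skeleton of deferring a wake-up out of atomic context: */
#include <linux/irq_work.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);

static void deferred_wake(struct irq_work *work)
{
	wake_up(&wq);			/* safe in irq_work context */
}
static DEFINE_IRQ_WORK(wake_work, deferred_wake);

static void hot_path(void)
{
	if (waitqueue_active(&wq))	/* cheap, lockless check */
		irq_work_queue(&wake_work);
}
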
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index a7d6cb912b051..2680d5ffee7f2 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -716,17 +716,17 @@ next:
+ 		if (pte_write(pteval))
+ 			writable = true;
+ 	}
+-	if (likely(writable)) {
+-		if (likely(referenced)) {
+-			result = SCAN_SUCCEED;
+-			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
+-							    referenced, writable, result);
+-			return 1;
+-		}
+-	} else {
++
++	if (unlikely(!writable)) {
+ 		result = SCAN_PAGE_RO;
++	} else if (unlikely(!referenced)) {
++		result = SCAN_LACK_REFERENCED_PAGE;
++	} else {
++		result = SCAN_SUCCEED;
++		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
++						    referenced, writable, result);
++		return 1;
+ 	}
+-
+ out:
+ 	release_pte_pages(pte, _pte, compound_pagelist);
+ 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 9694ee2c71de5..b32391ccf6d57 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -794,6 +794,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
+ 		stable_node->rmap_hlist_len--;
+ 
+ 		put_anon_vma(rmap_item->anon_vma);
++		rmap_item->head = NULL;
+ 		rmap_item->address &= PAGE_MASK;
+ 
+ 	} else if (rmap_item->address & UNSTABLE_FLAG) {
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 62b81d5257aaa..773622cffe779 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -2973,6 +2973,13 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
+ 
+ 			swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
+ 			entry = swp_entry_to_pte(swp_entry);
++		} else {
++			/*
++			 * For now we only support migrating to un-addressable
++			 * device memory.
++			 */
++			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
++			goto abort;
+ 		}
+ 	} else {
+ 		entry = mk_pte(page, vma->vm_page_prot);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index b2db4ed0fbc7c..6e99a4ad6e1f3 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2258,25 +2258,11 @@ out_nomem:
+ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+ 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
++	int ret;
+ 
+-	if (info->seals & F_SEAL_FUTURE_WRITE) {
+-		/*
+-		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+-		 * "future write" seal active.
+-		 */
+-		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+-			return -EPERM;
+-
+-		/*
+-		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+-		 * MAP_SHARED and read-only, take care to not allow mprotect to
+-		 * revert protections on such mappings. Do this only for shared
+-		 * mappings. For private mappings, don't need to mask
+-		 * VM_MAYWRITE as we still want them to be COW-writable.
+-		 */
+-		if (vma->vm_flags & VM_SHARED)
+-			vma->vm_flags &= ~(VM_MAYWRITE);
+-	}
++	ret = seal_check_future_write(info->seals, vma);
++	if (ret)
++		return ret;
+ 
+ 	/* arm64 - allow memory tagging on RAM-based files */
+ 	vma->vm_flags |= VM_MTE_ALLOWED;
+@@ -2375,8 +2361,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
+ 	pgoff_t offset, max_off;
+ 
+ 	ret = -ENOMEM;
+-	if (!shmem_inode_acct_block(inode, 1))
++	if (!shmem_inode_acct_block(inode, 1)) {
++		/*
++		 * We may have got a page, returned -ENOENT triggering a retry,
++		 * and now we find ourselves with -ENOMEM. Release the page, to
++		 * avoid a BUG_ON in our caller.
++		 */
++		if (unlikely(*pagep)) {
++			put_page(*pagep);
++			*pagep = NULL;
++		}
+ 		goto out;
++	}
+ 
+ 	if (!*pagep) {
+ 		page = shmem_alloc_page(gfp, info, pgoff);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 7a3e42e752350..82f4973a011d9 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5912,7 +5912,7 @@ static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ 
+ 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ 
+-	if (!ev->status)
++	if (ev->status)
+ 		return;
+ 
+ 	hci_dev_lock(hdev);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 72c2f5226d673..53ddbee459b99 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -451,6 +451,8 @@ struct l2cap_chan *l2cap_chan_create(void)
+ 	if (!chan)
+ 		return NULL;
+ 
++	skb_queue_head_init(&chan->tx_q);
++	skb_queue_head_init(&chan->srej_q);
+ 	mutex_init(&chan->lock);
+ 
+ 	/* Set default lock nesting level */
+@@ -516,7 +518,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
+ 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
++
+ 	chan->conf_state = 0;
++	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
+ 
+ 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ }
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index f1b1edd0b6974..c99d65ef13b1e 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -179,9 +179,17 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
+ 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ 	struct sockaddr_l2 la;
+ 	int len, err = 0;
++	bool zapped;
+ 
+ 	BT_DBG("sk %p", sk);
+ 
++	lock_sock(sk);
++	zapped = sock_flag(sk, SOCK_ZAPPED);
++	release_sock(sk);
++
++	if (zapped)
++		return -EINVAL;
++
+ 	if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+ 	    addr->sa_family != AF_BLUETOOTH)
+ 		return -EINVAL;
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 74971b4bd4570..939c6f77fecc2 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -7976,7 +7976,6 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
+ 		goto unlock;
+ 	}
+ 
+-	hdev->cur_adv_instance = cp->instance;
+ 	/* Submit request for advertising params if ext adv available */
+ 	if (ext_adv_capable(hdev)) {
+ 		hci_req_init(&req, hdev);
+diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
+index dfec65eca8a6e..3db1def4437b3 100644
+--- a/net/bridge/br_arp_nd_proxy.c
++++ b/net/bridge/br_arp_nd_proxy.c
+@@ -160,7 +160,9 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
+ 	if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
+ 		if (p && (p->flags & BR_NEIGH_SUPPRESS))
+ 			return;
+-		if (ipv4_is_zeronet(sip) || sip == tip) {
++		if (parp->ar_op != htons(ARPOP_RREQUEST) &&
++		    parp->ar_op != htons(ARPOP_RREPLY) &&
++		    (ipv4_is_zeronet(sip) || sip == tip)) {
+ 			/* prevent flooding to neigh suppress ports */
+ 			BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
+ 			return;
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 229309d7b4ff9..226bb05c3b42d 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1593,7 +1593,8 @@ out:
+ 	spin_unlock(&br->multicast_lock);
+ }
+ 
+-static void br_mc_disabled_update(struct net_device *dev, bool value)
++static int br_mc_disabled_update(struct net_device *dev, bool value,
++				 struct netlink_ext_ack *extack)
+ {
+ 	struct switchdev_attr attr = {
+ 		.orig_dev = dev,
+@@ -1602,11 +1603,13 @@ static void br_mc_disabled_update(struct net_device *dev, bool value)
+ 		.u.mc_disabled = !value,
+ 	};
+ 
+-	switchdev_port_attr_set(dev, &attr, NULL);
++	return switchdev_port_attr_set(dev, &attr, extack);
+ }
+ 
+ int br_multicast_add_port(struct net_bridge_port *port)
+ {
++	int err;
++
+ 	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
+ 	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
+ 
+@@ -1618,8 +1621,12 @@ int br_multicast_add_port(struct net_bridge_port *port)
+ 	timer_setup(&port->ip6_own_query.timer,
+ 		    br_ip6_multicast_port_query_expired, 0);
+ #endif
+-	br_mc_disabled_update(port->dev,
+-			      br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
++	err = br_mc_disabled_update(port->dev,
++				    br_opt_get(port->br,
++					       BROPT_MULTICAST_ENABLED),
++				    NULL);
++	if (err && err != -EOPNOTSUPP)
++		return err;
+ 
+ 	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
+ 	if (!port->mcast_stats)
+@@ -3543,16 +3550,23 @@ static void br_multicast_start_querier(struct net_bridge *br,
+ 	rcu_read_unlock();
+ }
+ 
+-int br_multicast_toggle(struct net_bridge *br, unsigned long val)
++int br_multicast_toggle(struct net_bridge *br, unsigned long val,
++			struct netlink_ext_ack *extack)
+ {
+ 	struct net_bridge_port *port;
+ 	bool change_snoopers = false;
++	int err = 0;
+ 
+ 	spin_lock_bh(&br->multicast_lock);
+ 	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
+ 		goto unlock;
+ 
+-	br_mc_disabled_update(br->dev, val);
++	err = br_mc_disabled_update(br->dev, val, extack);
++	if (err == -EOPNOTSUPP)
++		err = 0;
++	if (err)
++		goto unlock;
++
+ 	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
+ 	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
+ 		change_snoopers = true;
+@@ -3590,7 +3604,7 @@ unlock:
+ 			br_multicast_leave_snoopers(br);
+ 	}
+ 
+-	return 0;
++	return err;
+ }
+ 
+ bool br_multicast_enabled(const struct net_device *dev)
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index f2b1343f8332a..0456593aceec1 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -1293,7 +1293,9 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
+ 	if (data[IFLA_BR_MCAST_SNOOPING]) {
+ 		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
+ 
+-		br_multicast_toggle(br, mcast_snooping);
++		err = br_multicast_toggle(br, mcast_snooping, extack);
++		if (err)
++			return err;
+ 	}
+ 
+ 	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index d7d167e10b705..af3430c2d6ea8 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -810,7 +810,8 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
+ 			struct sk_buff *skb, bool local_rcv, bool local_orig);
+ int br_multicast_set_router(struct net_bridge *br, unsigned long val);
+ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
+-int br_multicast_toggle(struct net_bridge *br, unsigned long val);
++int br_multicast_toggle(struct net_bridge *br, unsigned long val,
++			struct netlink_ext_ack *extack);
+ int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
+ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
+ int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val);
+diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
+index 072e29840082a..381467b691d5f 100644
+--- a/net/bridge/br_sysfs_br.c
++++ b/net/bridge/br_sysfs_br.c
+@@ -409,17 +409,11 @@ static ssize_t multicast_snooping_show(struct device *d,
+ 	return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_ENABLED));
+ }
+ 
+-static int toggle_multicast(struct net_bridge *br, unsigned long val,
+-			    struct netlink_ext_ack *extack)
+-{
+-	return br_multicast_toggle(br, val);
+-}
+-
+ static ssize_t multicast_snooping_store(struct device *d,
+ 					struct device_attribute *attr,
+ 					const char *buf, size_t len)
+ {
+-	return store_bridge_parm(d, buf, len, toggle_multicast);
++	return store_bridge_parm(d, buf, len, br_multicast_toggle);
+ }
+ static DEVICE_ATTR_RW(multicast_snooping);
+ 
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index a96a4f5de0ce2..3f36b04d86a0a 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -828,8 +828,10 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
+ 		key_addrs = skb_flow_dissector_target(flow_dissector,
+ 						      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ 						      target_container);
+-		memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
+-		       sizeof(key_addrs->v6addrs));
++		memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
++		       sizeof(key_addrs->v6addrs.src));
++		memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
++		       sizeof(key_addrs->v6addrs.dst));
+ 		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ 	}
+ 
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index ad8b0707af04b..f014fd8c19a6b 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
+ 					  struct page *page,
+ 					  unsigned int dma_sync_size)
+ {
++	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
++
+ 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
+-	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
++	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
+ 					 pool->p.offset, dma_sync_size,
+ 					 pool->p.dma_dir);
+ }
+@@ -226,7 +228,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
+ 		put_page(page);
+ 		return NULL;
+ 	}
+-	page->dma_addr = dma;
++	page_pool_set_dma_addr(page, dma);
+ 
+ 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+@@ -294,13 +296,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
+ 		 */
+ 		goto skip_dma_unmap;
+ 
+-	dma = page->dma_addr;
++	dma = page_pool_get_dma_addr(page);
+ 
+-	/* When page is unmapped, it cannot be returned our pool */
++	/* When page is unmapped, it cannot be returned to our pool */
+ 	dma_unmap_page_attrs(pool->p.dev, dma,
+ 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+ 			     DMA_ATTR_SKIP_CPU_SYNC);
+-	page->dma_addr = 0;
++	page_pool_set_dma_addr(page, 0);
+ skip_dma_unmap:
+ 	/* This may be the last page returned, releasing the pool, so
+ 	 * it is not safe to reference pool afterwards.
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 771688e1b0da9..2603966da904d 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -489,7 +489,7 @@ store_link_ksettings_for_user(void __user *to,
+ {
+ 	struct ethtool_link_usettings link_usettings;
+ 
+-	memcpy(&link_usettings.base, &from->base, sizeof(link_usettings));
++	memcpy(&link_usettings, from, sizeof(link_usettings));
+ 	bitmap_to_arr32(link_usettings.link_modes.supported,
+ 			from->link_modes.supported,
+ 			__ETHTOOL_LINK_MODE_MASK_NBITS);
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index 50d3c8896f917..25a55086d2b66 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -384,7 +384,8 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
+ 	int ret;
+ 
+ 	ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+-			   &ethtool_genl_family, 0, ctx->ops->reply_cmd);
++			   &ethtool_genl_family, NLM_F_MULTI,
++			   ctx->ops->reply_cmd);
+ 	if (!ehdr)
+ 		return -EMSGSIZE;
+ 
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index e0cc32e458801..932b15b13053d 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -193,7 +193,6 @@ static int vti6_tnl_create2(struct net_device *dev)
+ 
+ 	strcpy(t->parms.name, dev->name);
+ 
+-	dev_hold(dev);
+ 	vti6_tnl_link(ip6n, t);
+ 
+ 	return 0;
+@@ -934,6 +933,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
+ 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ 	if (!dev->tstats)
+ 		return -ENOMEM;
++	dev_hold(dev);
+ 	return 0;
+ }
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 96f487fc00713..0fe91dc9817eb 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1295,6 +1295,11 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
+ 
+ 	sdata->vif.csa_active = false;
+ 	ifmgd->csa_waiting_bcn = false;
++	/*
++	 * If the CSA IE is still present on the beacon after the switch,
++	 * we need to consider it as a new CSA (possibly to self).
++	 */
++	ifmgd->beacon_crc_valid = false;
+ 
+ 	ret = drv_post_channel_switch(sdata);
+ 	if (ret) {
+@@ -1400,11 +1405,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+ 		ch_switch.delay = csa_ie.max_switch_time;
+ 	}
+ 
+-	if (res < 0) {
+-		ieee80211_queue_work(&local->hw,
+-				     &ifmgd->csa_connection_drop_work);
+-		return;
+-	}
++	if (res < 0)
++		goto lock_and_drop_connection;
+ 
+ 	if (beacon && sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) {
+ 		if (res)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 3b3bcefbf6577..28422d6870967 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2267,17 +2267,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
+ 						    payload[7]);
+ 	}
+ 
+-	/* Initialize skb->priority for QoS frames. If the DONT_REORDER flag
+-	 * is set, stick to the default value for skb->priority to assure
+-	 * frames injected with this flag are not reordered relative to each
+-	 * other.
+-	 */
+-	if (ieee80211_is_data_qos(hdr->frame_control) &&
+-	    !(info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) {
+-		u8 *p = ieee80211_get_qos_ctl(hdr);
+-		skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
+-	}
+-
+ 	rcu_read_lock();
+ 
+ 	/*
+@@ -2341,6 +2330,15 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
+ 
+ 	info->band = chandef->chan->band;
+ 
++	/* Initialize skb->priority according to frame type and TID class,
++	 * with respect to the sub interface that the frame will actually
++	 * be transmitted on. If the DONT_REORDER flag is set, the original
++	 * skb-priority is preserved to assure frames injected with this
++	 * skb->priority is preserved to assure frames injected with this
++	 */
++	ieee80211_select_queue_80211(sdata, skb, hdr);
++	skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
++
+ 	/* remove the injection radiotap header */
+ 	skb_pull(skb, len_rthdr);
+ 
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index d17d39ccdf34b..4fe7acaa472f2 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -524,8 +524,7 @@ static void mptcp_sock_destruct(struct sock *sk)
+ 	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
+ 	 * Both result in warnings from inet_sock_destruct.
+ 	 */
+-
+-	if (sk->sk_state == TCP_ESTABLISHED) {
++	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
+ 		sk->sk_state = TCP_CLOSE;
+ 		WARN_ON_ONCE(sk->sk_socket);
+ 		sock_orphan(sk);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 589d2f6978d38..878ed49d0c569 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6246,9 +6246,9 @@ err_obj_ht:
+ 	INIT_LIST_HEAD(&obj->list);
+ 	return err;
+ err_trans:
+-	kfree(obj->key.name);
+-err_userdata:
+ 	kfree(obj->udata);
++err_userdata:
++	kfree(obj->key.name);
+ err_strdup:
+ 	if (obj->ops->destroy)
+ 		obj->ops->destroy(&ctx, obj);
+diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
+index 916a3c7f9eafe..79fbf37291f38 100644
+--- a/net/netfilter/nfnetlink_osf.c
++++ b/net/netfilter/nfnetlink_osf.c
+@@ -186,6 +186,8 @@ static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
+ 
+ 		ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
+ 				sizeof(struct tcphdr), ctx->optsize, opts);
++		if (!ctx->optp)
++			return NULL;
+ 	}
+ 
+ 	return tcp;
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index bf618b7ec1aea..560c2cda52ee3 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -406,9 +406,17 @@ static void nft_rhash_destroy(const struct nft_set *set)
+ 				    (void *)set);
+ }
+ 
++/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
++#define NFT_MAX_BUCKETS (1U << 31)
++
+ static u32 nft_hash_buckets(u32 size)
+ {
+-	return roundup_pow_of_two(size * 4 / 3);
++	u64 val = div_u64((u64)size * 4, 3);
++
++	if (val >= NFT_MAX_BUCKETS)
++		return NFT_MAX_BUCKETS;
++
++	return roundup_pow_of_two(val);
+ }
+ 
+ static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
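
nft_hash_buckets() previously did the "size * 4" multiply in u32, which wraps for set sizes above UINT_MAX / 4 and can hand roundup_pow_of_two() a nonsense value; the patched version multiplies in 64 bits and caps the bucket count. The wrap in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t size = 0x40000001u;		/* > UINT32_MAX / 4       */
	uint32_t wrapped = size * 4 / 3;	/* (4 * size) mod 2^32    */
	uint64_t wide = (uint64_t)size * 4 / 3;	/* the patched arithmetic */

	printf("wrapped=%u wide=%llu\n", wrapped, (unsigned long long)wide);
	return 0;
}
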
+diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
+index 75625d13e976c..498a0bf6f0444 100644
+--- a/net/netfilter/xt_SECMARK.c
++++ b/net/netfilter/xt_SECMARK.c
+@@ -24,10 +24,9 @@ MODULE_ALIAS("ip6t_SECMARK");
+ static u8 mode;
+ 
+ static unsigned int
+-secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
++secmark_tg(struct sk_buff *skb, const struct xt_secmark_target_info_v1 *info)
+ {
+ 	u32 secmark = 0;
+-	const struct xt_secmark_target_info *info = par->targinfo;
+ 
+ 	switch (mode) {
+ 	case SECMARK_MODE_SEL:
+@@ -41,7 +40,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
+ 	return XT_CONTINUE;
+ }
+ 
+-static int checkentry_lsm(struct xt_secmark_target_info *info)
++static int checkentry_lsm(struct xt_secmark_target_info_v1 *info)
+ {
+ 	int err;
+ 
+@@ -73,15 +72,15 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
+ 	return 0;
+ }
+ 
+-static int secmark_tg_check(const struct xt_tgchk_param *par)
++static int
++secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info)
+ {
+-	struct xt_secmark_target_info *info = par->targinfo;
+ 	int err;
+ 
+-	if (strcmp(par->table, "mangle") != 0 &&
+-	    strcmp(par->table, "security") != 0) {
++	if (strcmp(table, "mangle") != 0 &&
++	    strcmp(table, "security") != 0) {
+ 		pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
+-				    par->table);
++				    table);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -116,25 +115,76 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
+ 	}
+ }
+ 
+-static struct xt_target secmark_tg_reg __read_mostly = {
+-	.name       = "SECMARK",
+-	.revision   = 0,
+-	.family     = NFPROTO_UNSPEC,
+-	.checkentry = secmark_tg_check,
+-	.destroy    = secmark_tg_destroy,
+-	.target     = secmark_tg,
+-	.targetsize = sizeof(struct xt_secmark_target_info),
+-	.me         = THIS_MODULE,
++static int secmark_tg_check_v0(const struct xt_tgchk_param *par)
++{
++	struct xt_secmark_target_info *info = par->targinfo;
++	struct xt_secmark_target_info_v1 newinfo = {
++		.mode	= info->mode,
++	};
++	int ret;
++
++	memcpy(newinfo.secctx, info->secctx, SECMARK_SECCTX_MAX);
++
++	ret = secmark_tg_check(par->table, &newinfo);
++	info->secid = newinfo.secid;
++
++	return ret;
++}
++
++static unsigned int
++secmark_tg_v0(struct sk_buff *skb, const struct xt_action_param *par)
++{
++	const struct xt_secmark_target_info *info = par->targinfo;
++	struct xt_secmark_target_info_v1 newinfo = {
++		.secid	= info->secid,
++	};
++
++	return secmark_tg(skb, &newinfo);
++}
++
++static int secmark_tg_check_v1(const struct xt_tgchk_param *par)
++{
++	return secmark_tg_check(par->table, par->targinfo);
++}
++
++static unsigned int
++secmark_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
++{
++	return secmark_tg(skb, par->targinfo);
++}
++
++static struct xt_target secmark_tg_reg[] __read_mostly = {
++	{
++		.name		= "SECMARK",
++		.revision	= 0,
++		.family		= NFPROTO_UNSPEC,
++		.checkentry	= secmark_tg_check_v0,
++		.destroy	= secmark_tg_destroy,
++		.target		= secmark_tg_v0,
++		.targetsize	= sizeof(struct xt_secmark_target_info),
++		.me		= THIS_MODULE,
++	},
++	{
++		.name		= "SECMARK",
++		.revision	= 1,
++		.family		= NFPROTO_UNSPEC,
++		.checkentry	= secmark_tg_check_v1,
++		.destroy	= secmark_tg_destroy,
++		.target		= secmark_tg_v1,
++		.targetsize	= sizeof(struct xt_secmark_target_info_v1),
++		.usersize	= offsetof(struct xt_secmark_target_info_v1, secid),
++		.me		= THIS_MODULE,
++	},
+ };
+ 
+ static int __init secmark_tg_init(void)
+ {
+-	return xt_register_target(&secmark_tg_reg);
++	return xt_register_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
+ }
+ 
+ static void __exit secmark_tg_exit(void)
+ {
+-	xt_unregister_target(&secmark_tg_reg);
++	xt_unregister_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
+ }
+ 
+ module_init(secmark_tg_init);
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index c69a4ba9c33fd..3035f96c6e6c8 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -209,16 +209,16 @@ static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
+ 				  struct fl_flow_key *key,
+ 				  struct fl_flow_key *mkey)
+ {
+-	__be16 min_mask, max_mask, min_val, max_val;
++	u16 min_mask, max_mask, min_val, max_val;
+ 
+-	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
+-	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
+-	min_val = htons(filter->key.tp_range.tp_min.dst);
+-	max_val = htons(filter->key.tp_range.tp_max.dst);
++	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
++	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
++	min_val = ntohs(filter->key.tp_range.tp_min.dst);
++	max_val = ntohs(filter->key.tp_range.tp_max.dst);
+ 
+ 	if (min_mask && max_mask) {
+-		if (htons(key->tp_range.tp.dst) < min_val ||
+-		    htons(key->tp_range.tp.dst) > max_val)
++		if (ntohs(key->tp_range.tp.dst) < min_val ||
++		    ntohs(key->tp_range.tp.dst) > max_val)
+ 			return false;
+ 
+ 		/* skb does not have min and max values */
+@@ -232,16 +232,16 @@ static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
+ 				  struct fl_flow_key *key,
+ 				  struct fl_flow_key *mkey)
+ {
+-	__be16 min_mask, max_mask, min_val, max_val;
++	u16 min_mask, max_mask, min_val, max_val;
+ 
+-	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
+-	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
+-	min_val = htons(filter->key.tp_range.tp_min.src);
+-	max_val = htons(filter->key.tp_range.tp_max.src);
++	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
++	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
++	min_val = ntohs(filter->key.tp_range.tp_min.src);
++	max_val = ntohs(filter->key.tp_range.tp_max.src);
+ 
+ 	if (min_mask && max_mask) {
+-		if (htons(key->tp_range.tp.src) < min_val ||
+-		    htons(key->tp_range.tp.src) > max_val)
++		if (ntohs(key->tp_range.tp.src) < min_val ||
++		    ntohs(key->tp_range.tp.src) > max_val)
+ 			return false;
+ 
+ 		/* skb does not have min and max values */
+@@ -783,16 +783,16 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
+ 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
+ 
+ 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
+-	    htons(key->tp_range.tp_max.dst) <=
+-	    htons(key->tp_range.tp_min.dst)) {
++	    ntohs(key->tp_range.tp_max.dst) <=
++	    ntohs(key->tp_range.tp_min.dst)) {
+ 		NL_SET_ERR_MSG_ATTR(extack,
+ 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
+ 				    "Invalid destination port range (min must be strictly smaller than max)");
+ 		return -EINVAL;
+ 	}
+ 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
+-	    htons(key->tp_range.tp_max.src) <=
+-	    htons(key->tp_range.tp_min.src)) {
++	    ntohs(key->tp_range.tp_max.src) <=
++	    ntohs(key->tp_range.tp_min.src)) {
+ 		NL_SET_ERR_MSG_ATTR(extack,
+ 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
+ 				    "Invalid source port range (min must be strictly smaller than max)");
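The cls_flower hunks swap htons() for ntohs() where port values are compared as numbers. On common architectures the two functions compile to the same byte swap, so this is largely a semantic fix: the stored values are big-endian, and ordered comparison must happen in host order. A minimal standalone illustration, with hypothetical port values:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t lo = htons(1);    /* port 1 as stored in the flow key */
	uint16_t hi = htons(256);  /* port 256, likewise network order */

	/* Raw comparison of the big-endian values: on little-endian hosts
	 * htons(1) == 0x0100 == 256 and htons(256) == 0x0001 == 1, so the
	 * ordering inverts. (On big-endian both lines agree.) */
	printf("raw   : 1 < 256 ? %d\n", lo < hi);
	/* Convert to host order first, as the patched code does. */
	printf("ntohs : 1 < 256 ? %d\n", ntohs(lo) < ntohs(hi));
	return 0;
}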
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 8287894541e3c..909c798b74030 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -901,6 +901,12 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+ 
+ 		list_for_each_entry(entry, &new->entries, list)
+ 			cycle = ktime_add_ns(cycle, entry->interval);
++
++		if (!cycle) {
++			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
++			return -EINVAL;
++		}
++
+ 		new->cycle_time = cycle;
+ 	}
+ 
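The taprio hunk rejects schedules whose entry intervals sum to zero, since cycle_time is later used as a divisor and modulus. A small sketch of the same validation, with plain arrays standing in for the taprio entry list:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Sum the schedule's intervals; refuse a zero cycle before it can ever be
 * used as a divisor. */
static int set_cycle_time(const uint64_t *intervals, size_t n, uint64_t *cycle)
{
	uint64_t sum = 0;

	for (size_t i = 0; i < n; i++)
		sum += intervals[i];
	if (!sum)
		return -EINVAL; /* "'cycle_time' can never be 0" */
	*cycle = sum;
	return 0;
}

int main(void)
{
	uint64_t cycle;
	uint64_t empty[] = { 0, 0 };

	printf("%d\n", set_cycle_time(empty, 2, &cycle)); /* prints -22 */
	return 0;
}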
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index f77484df097b7..da4ce0947c3aa 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -3147,7 +3147,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
+ 		 * primary.
+ 		 */
+ 		if (af->is_any(&addr))
+-			memcpy(&addr.v4, sctp_source(asconf), sizeof(addr));
++			memcpy(&addr, sctp_source(asconf), sizeof(addr));
+ 
+ 		if (security_sctp_bind_connect(asoc->ep->base.sk,
+ 					       SCTP_PARAM_SET_PRIMARY,
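The sctp one-liner matters because sizeof(addr) covers the whole union while &addr.v4 names only its IPv4 member, so fortified memcpy() checks see the old form as writing past the member object. A standalone sketch with a hypothetical two-member address union:

#include <netinet/in.h>
#include <string.h>

/* Hypothetical address union, analogous in shape to SCTP's. */
union addr {
	struct sockaddr_in  v4;
	struct sockaddr_in6 v6;
};

static void copy_addr(union addr *dst, const union addr *src)
{
	/* Correct: the destination type matches the sizeof() used. */
	memcpy(dst, src, sizeof(*dst));
	/*
	 * Buggy shape from the old code:
	 *   memcpy(&dst->v4, src, sizeof(*dst));
	 * The copy length is the union size, but the destination object, as
	 * the compiler sees it, is only the smaller v4 member.
	 */
}

int main(void)
{
	union addr a = { 0 }, b = { 0 };

	copy_addr(&a, &b);
	return (int)a.v4.sin_family;
}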
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index af2b7041fa4eb..73bb4c6e9201a 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -1852,20 +1852,35 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
+ 			SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
+ 
+-	repl = sctp_make_cookie_ack(new_asoc, chunk);
++	/* Update the content of current association. */
++	if (sctp_assoc_update((struct sctp_association *)asoc, new_asoc)) {
++		struct sctp_chunk *abort;
++
++		abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
++		if (abort) {
++			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
++			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
++		}
++		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
++		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
++				SCTP_PERR(SCTP_ERROR_RSRC_LOW));
++		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
++		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
++		goto nomem;
++	}
++
++	repl = sctp_make_cookie_ack(asoc, chunk);
+ 	if (!repl)
+ 		goto nomem;
+ 
+ 	/* Report association restart to upper layer. */
+ 	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
+-					     new_asoc->c.sinit_num_ostreams,
+-					     new_asoc->c.sinit_max_instreams,
++					     asoc->c.sinit_num_ostreams,
++					     asoc->c.sinit_max_instreams,
+ 					     NULL, GFP_ATOMIC);
+ 	if (!ev)
+ 		goto nomem_ev;
+ 
+-	/* Update the content of current association. */
+-	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+ 	if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
+ 	     sctp_state(asoc, SHUTDOWN_SENT)) &&
+@@ -1929,7 +1944,8 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ 			SCTP_STATE(SCTP_STATE_ESTABLISHED));
+-	SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
++	if (asoc->state < SCTP_STATE_ESTABLISHED)
++		SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
+ 
+ 	repl = sctp_make_cookie_ack(new_asoc, chunk);
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 47340b3b514f3..cb23cca72c24c 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2162,6 +2162,9 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
+ 	struct smc_sock *smc;
+ 	int val, rc;
+ 
++	if (level == SOL_TCP && optname == TCP_ULP)
++		return -EOPNOTSUPP;
++
+ 	smc = smc_sk(sk);
+ 
+ 	/* generic setsockopts reaching us here always apply to the
+@@ -2186,7 +2189,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
+ 	if (rc || smc->use_fallback)
+ 		goto out;
+ 	switch (optname) {
+-	case TCP_ULP:
+ 	case TCP_FASTOPEN:
+ 	case TCP_FASTOPEN_CONNECT:
+ 	case TCP_FASTOPEN_KEY:
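The af_smc hunks move TCP_ULP out of the fallback switch and fail it up front with -EOPNOTSUPP, before any socket state is touched, since swapping the ULP underneath an SMC socket is never valid. A minimal sketch of that guard-first dispatch, with hypothetical option numbers:

#include <errno.h>
#include <stdio.h>

enum { OPT_ULP = 31, OPT_FASTOPEN = 23 }; /* hypothetical option numbers */

static int setsockopt_like(int is_tcp_level, int optname)
{
	/* Fail fast: an option that can never be valid for this socket type
	 * is rejected before any lock is taken or state consulted. */
	if (is_tcp_level && optname == OPT_ULP)
		return -EOPNOTSUPP;

	switch (optname) {
	case OPT_FASTOPEN:
		/* ... options that merely force a fallback path ... */
		return 0;
	default:
		return 0;
	}
}

int main(void)
{
	printf("%d\n", setsockopt_like(1, OPT_ULP)); /* -EOPNOTSUPP */
	return 0;
}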
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 612f0a641f4cf..f555d335e910d 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1799,7 +1799,6 @@ call_allocate(struct rpc_task *task)
+ 
+ 	status = xprt->ops->buf_alloc(task);
+ 	trace_rpc_buf_alloc(task, status);
+-	xprt_inject_disconnect(xprt);
+ 	if (status == 0)
+ 		return;
+ 	if (status != -ENOMEM) {
+@@ -2457,12 +2456,6 @@ call_decode(struct rpc_task *task)
+ 		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
+ 	}
+ 
+-	/*
+-	 * Ensure that we see all writes made by xprt_complete_rqst()
+-	 * before it changed req->rq_reply_bytes_recvd.
+-	 */
+-	smp_rmb();
+-
+ 	/*
+ 	 * Did we ever call xprt_complete_rqst()? If not, we should assume
+ 	 * the message is incomplete.
+@@ -2471,6 +2464,11 @@ call_decode(struct rpc_task *task)
+ 	if (!req->rq_reply_bytes_recvd)
+ 		goto out;
+ 
++	/* Ensure that we see all writes made by xprt_complete_rqst()
++	 * before it changed req->rq_reply_bytes_recvd.
++	 */
++	smp_rmb();
++
+ 	req->rq_rcv_buf.len = req->rq_private_buf.len;
+ 	trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
+ 
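In call_decode() the read barrier moves to after the early-return test: the barrier only has to order the data reads against the flag read on the path that actually observed rq_reply_bytes_recvd set. The C11 sketch below shows the same flag-then-fence pattern, using an explicit acquire fence where the kernel uses smp_rmb():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int reply_received;	/* the rq_reply_bytes_recvd analogue */
static int reply_data;		/* written by the producer before the flag */

static int try_decode(void)
{
	if (!atomic_load_explicit(&reply_received, memory_order_relaxed))
		return -1;	/* early return: no barrier needed here */

	/* Flag observed set: order the data reads after the flag read,
	 * the role smp_rmb() plays in call_decode(). */
	atomic_thread_fence(memory_order_acquire);
	return reply_data;
}

int main(void)
{
	reply_data = 42;
	atomic_store_explicit(&reply_received, 1, memory_order_release);
	printf("%d\n", try_decode());
	return 0;
}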
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index d76dc9d95d163..0de918cb3d90d 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -846,7 +846,8 @@ void
+ svc_rqst_free(struct svc_rqst *rqstp)
+ {
+ 	svc_release_buffer(rqstp);
+-	put_page(rqstp->rq_scratch_page);
++	if (rqstp->rq_scratch_page)
++		put_page(rqstp->rq_scratch_page);
+ 	kfree(rqstp->rq_resp);
+ 	kfree(rqstp->rq_argp);
+ 	kfree(rqstp->rq_auth_data);
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 2e2f007dfc9f1..7cde41a936a43 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1171,7 +1171,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
+ 	tcp_sock_set_cork(svsk->sk_sk, true);
+ 	err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent);
+ 	xdr_free_bvec(xdr);
+-	trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
++	trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
+ 	if (err < 0 || sent != (xdr->len + sizeof(marker)))
+ 		goto out_close;
+ 	if (atomic_dec_and_test(&svsk->sk_sendqlen))
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 691ccf8049a48..20fe31b1b776f 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -698,9 +698,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
+ 	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
+ 	int status = 0;
+ 
+-	if (time_before(jiffies, req->rq_minortimeo))
+-		return status;
+ 	if (time_before(jiffies, req->rq_majortimeo)) {
++		if (time_before(jiffies, req->rq_minortimeo))
++			return status;
+ 		if (to->to_exponential)
+ 			req->rq_timeout <<= 1;
+ 		else
+@@ -1469,8 +1469,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
+ 	struct rpc_xprt	*xprt = req->rq_xprt;
+ 
+ 	if (!xprt_lock_write(xprt, task)) {
+-		trace_xprt_transmit_queued(xprt, task);
+-
+ 		/* Race breaker: someone may have transmitted us */
+ 		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+ 			rpc_wake_up_queued_task_set_status(&xprt->sending,
+@@ -1483,7 +1481,10 @@ bool xprt_prepare_transmit(struct rpc_task *task)
+ 
+ void xprt_end_transmit(struct rpc_task *task)
+ {
+-	xprt_release_write(task->tk_rqstp->rq_xprt, task);
++	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
++
++	xprt_inject_disconnect(xprt);
++	xprt_release_write(xprt, task);
+ }
+ 
+ /**
+@@ -1885,7 +1886,6 @@ void xprt_release(struct rpc_task *task)
+ 	spin_unlock(&xprt->transport_lock);
+ 	if (req->rq_buffer)
+ 		xprt->ops->buf_free(task);
+-	xprt_inject_disconnect(xprt);
+ 	xdr_free_bvec(&req->rq_rcv_buf);
+ 	xdr_free_bvec(&req->rq_snd_buf);
+ 	if (req->rq_cred != NULL)
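Among the xprt.c changes, xprt_adjust_timeout() now nests the minor-timeout early return inside the not-yet-major branch, so an expired major timeout is always acted on. A sketch of that control-flow fix, with hypothetical time values in place of jiffies:

#include <stdio.h>

/* Returns 0 = wait longer, 1 = minor expiry (back off), 2 = major expiry. */
static int adjust_timeout(long now, long minor, long major)
{
	if (now < major) {
		if (now < minor)
			return 0;
		return 1;	/* e.g. double the retransmit timeout */
	}
	return 2;		/* reset timers, report timeout status */
}

int main(void)
{
	/* now is past the major deadline but before the minor one; the
	 * pre-patch ordering (minor check first) would return 0 here. */
	printf("%d\n", adjust_timeout(100, 150, 90)); /* prints 2 */
	return 0;
}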
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index 766a1048a48ad..aca2228095db3 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -257,6 +257,7 @@ int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
+ 	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
+ 	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
+ 	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
++	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
+ 	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
+ 
+ 	ep->re_max_rdma_segs =
+@@ -575,7 +576,6 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ 		mr = container_of(frwr, struct rpcrdma_mr, frwr);
+ 		bad_wr = bad_wr->next;
+ 
+-		list_del_init(&mr->mr_list);
+ 		frwr_mr_recycle(mr);
+ 	}
+ }
+diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
+index 292f066d006e5..21ddd78a8c351 100644
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -1430,9 +1430,10 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
+ 		credits = 1;	/* don't deadlock */
+ 	else if (credits > r_xprt->rx_ep->re_max_requests)
+ 		credits = r_xprt->rx_ep->re_max_requests;
++	rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
++			   false);
+ 	if (buf->rb_credits != credits)
+ 		rpcrdma_update_cwnd(r_xprt, credits);
+-	rpcrdma_post_recvs(r_xprt, false);
+ 
+ 	req = rpcr_to_rdmar(rqst);
+ 	if (unlikely(req->rl_reply))
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index 78d29d1bcc203..09953597d055a 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -262,8 +262,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
+  * xprt_rdma_inject_disconnect - inject a connection fault
+  * @xprt: transport context
+  *
+- * If @xprt is connected, disconnect it to simulate spurious connection
+- * loss.
++ * If @xprt is connected, disconnect it to simulate spurious
++ * connection loss. Caller must hold @xprt's send lock to
++ * ensure that data structures and hardware resources are
++ * stable during the rdma_disconnect() call.
+  */
+ static void
+ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index ec912cf9c618c..f3fffc74ab0fa 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -535,7 +535,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
+ 	 * outstanding Receives.
+ 	 */
+ 	rpcrdma_ep_get(ep);
+-	rpcrdma_post_recvs(r_xprt, true);
++	rpcrdma_post_recvs(r_xprt, 1, true);
+ 
+ 	rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
+ 	if (rc)
+@@ -1364,21 +1364,21 @@ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ /**
+  * rpcrdma_post_recvs - Refill the Receive Queue
+  * @r_xprt: controlling transport instance
+- * @temp: mark Receive buffers to be deleted after use
++ * @needed: current credit grant
++ * @temp: mark Receive buffers to be deleted after one use
+  *
+  */
+-void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
++void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
+ {
+ 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
+ 	struct ib_recv_wr *wr, *bad_wr;
+ 	struct rpcrdma_rep *rep;
+-	int needed, count, rc;
++	int count, rc;
+ 
+ 	rc = 0;
+ 	count = 0;
+ 
+-	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
+ 	if (likely(ep->re_receive_count > needed))
+ 		goto out;
+ 	needed -= ep->re_receive_count;
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index fe3be985e239a..28af11fbe6438 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -461,7 +461,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
+ void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
+ 
+ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+-void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
++void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
+ 
+ /*
+  * Buffer calls - xprtrdma/verbs.c
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 5a1ce64039f72..0749df80454d4 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -696,7 +696,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
+ 	if (err)
+ 		return err;
+ 
+-	link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
++	link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
+ 	link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
+ 	nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+ 		    TIPC_MAX_LINK_NAME);
+diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
+index 2823b7c3302d0..40f359bf20440 100644
+--- a/net/xdp/xsk_queue.h
++++ b/net/xdp/xsk_queue.h
+@@ -128,13 +128,12 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
+ static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
+ 					    struct xdp_desc *desc)
+ {
+-	u64 chunk, chunk_end;
++	u64 chunk;
+ 
+-	chunk = xp_aligned_extract_addr(pool, desc->addr);
+-	chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
+-	if (chunk != chunk_end)
++	if (desc->len > pool->chunk_size)
+ 		return false;
+ 
++	chunk = xp_aligned_extract_addr(pool, desc->addr);
+ 	if (chunk >= pool->addrs_cnt)
+ 		return false;
+ 
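The xsk hunk replaces a chunk-of(addr + len) comparison with a direct length check: a descriptor that fills its chunk exactly ends on the next chunk boundary and was wrongly rejected by the old form. A simplified standalone sketch with hypothetical pool geometry (the real validator performs further checks on the chunk index and page contiguity):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE 2048u	/* hypothetical aligned-mode chunk size */

static bool valid_desc(uint64_t addr, uint32_t len)
{
	if (len > CHUNK_SIZE)
		return false;
	/* remaining check in this sketch: offset + len stays in the chunk */
	return (addr % CHUNK_SIZE) + len <= CHUNK_SIZE;
}

int main(void)
{
	/* A chunk-filling descriptor: valid now, but rejected by the old
	 * chunk != chunk_end comparison. */
	printf("%d\n", valid_desc(0, CHUNK_SIZE));
	return 0;
}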
+diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
+index 3f4599c9a2022..ef30d2b353b0f 100644
+--- a/samples/bpf/tracex1_kern.c
++++ b/samples/bpf/tracex1_kern.c
+@@ -26,7 +26,7 @@
+ SEC("kprobe/__netif_receive_skb_core")
+ int bpf_prog1(struct pt_regs *ctx)
+ {
+-	/* attaches to kprobe netif_receive_skb,
++	/* attaches to kprobe __netif_receive_skb_core,
+ 	 * looks for packets on loopback device and prints them
+ 	 */
+ 	char devname[IFNAMSIZ];
+@@ -35,7 +35,7 @@ int bpf_prog1(struct pt_regs *ctx)
+ 	int len;
+ 
+ 	/* non-portable! works for the given kernel only */
+-	skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
++	bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
+ 	dev = _(skb->dev);
+ 	len = _(skb->len);
+ 
+diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
+index 066beffca09af..4ca5579af4e4a 100644
+--- a/scripts/Makefile.modpost
++++ b/scripts/Makefile.modpost
+@@ -68,7 +68,20 @@ else
+ ifeq ($(KBUILD_EXTMOD),)
+ 
+ input-symdump := vmlinux.symvers
+-output-symdump := Module.symvers
++output-symdump := modules-only.symvers
++
++quiet_cmd_cat = GEN     $@
++      cmd_cat = cat $(real-prereqs) > $@
++
++ifneq ($(wildcard vmlinux.symvers),)
++
++__modpost: Module.symvers
++Module.symvers: vmlinux.symvers modules-only.symvers FORCE
++	$(call if_changed,cat)
++
++targets += Module.symvers
++
++endif
+ 
+ else
+ 
+diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
+index e0f9655291665..af814b39b8765 100644
+--- a/scripts/kconfig/nconf.c
++++ b/scripts/kconfig/nconf.c
+@@ -504,8 +504,8 @@ static int get_mext_match(const char *match_str, match_f flag)
+ 	else if (flag == FIND_NEXT_MATCH_UP)
+ 		--match_start;
+ 
++	match_start = (match_start + items_num) % items_num;
+ 	index = match_start;
+-	index = (index + items_num) % items_num;
+ 	while (true) {
+ 		char *str = k_menu_items[index].str;
+ 		if (strcasestr(str, match_str) != NULL)
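The nconf change normalizes match_start itself rather than only the copy in index: the loop uses match_start as its wrap-around sentinel, so the sentinel must hold the same canonical value the running index reduces to. A tiny sketch of such a wrap-around search:

#include <stdio.h>

static int find_from(const char *items[], int n, int start, char needle)
{
	int i;

	start = (start + n) % n;	/* canonicalize -1 and n alike */
	i = start;
	do {
		if (items[i][0] == needle)
			return i;
		i = (i + 1) % n;
	} while (i != start);		/* sentinel comparison now terminates */
	return -1;
}

int main(void)
{
	const char *items[] = { "alpha", "beta", "gamma" };

	printf("%d\n", find_from(items, 3, -1, 'b')); /* -1 wraps to 2 */
	return 0;
}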
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 24725e50c7b4b..10c3fba26f03a 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -2423,19 +2423,6 @@ fail:
+ 	fatal("parse error in symbol dump file\n");
+ }
+ 
+-/* For normal builds always dump all symbols.
+- * For external modules only dump symbols
+- * that are not read from kernel Module.symvers.
+- **/
+-static int dump_sym(struct symbol *sym)
+-{
+-	if (!external_module)
+-		return 1;
+-	if (sym->module->from_dump)
+-		return 0;
+-	return 1;
+-}
+-
+ static void write_dump(const char *fname)
+ {
+ 	struct buffer buf = { };
+@@ -2446,7 +2433,7 @@ static void write_dump(const char *fname)
+ 	for (n = 0; n < SYMBOL_HASH_SIZE ; n++) {
+ 		symbol = symbolhash[n];
+ 		while (symbol) {
+-			if (dump_sym(symbol)) {
++			if (!symbol->module->from_dump) {
+ 				namespace = symbol->namespace;
+ 				buf_printf(&buf, "0x%08x\t%s\t%s\t%s\t%s\n",
+ 					   symbol->crc, symbol->name,
+diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
+index 1e13c9f7ea8c1..56c9b48460d9e 100644
+--- a/security/keys/trusted-keys/trusted_tpm1.c
++++ b/security/keys/trusted-keys/trusted_tpm1.c
+@@ -500,10 +500,12 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
+ 
+ 	ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
+ 	if (ret < 0)
+-		return ret;
++		goto out;
+ 
+-	if (ret != TPM_NONCE_SIZE)
+-		return -EIO;
++	if (ret != TPM_NONCE_SIZE) {
++		ret = -EIO;
++		goto out;
++	}
+ 
+ 	ordinal = htonl(TPM_ORD_SEAL);
+ 	datsize = htonl(datalen);
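The tpm_seal() hunks convert early returns into goto out so that the error paths run the same cleanup as the success path (the early returns skipped a buffer freed at the out label). A generic sketch of the single-exit idiom, with a hypothetical resource and a stub helper:

#include <errno.h>
#include <stdlib.h>

/* Stub standing in for tpm_get_random(): pretend len bytes were produced. */
static int get_nonce(char *buf, size_t len)
{
	(void)buf;
	return (int)len;
}

static int seal_like(void)
{
	char *scratch = malloc(64);	/* freed only at the single exit */
	int ret;

	if (!scratch)
		return -ENOMEM;

	ret = get_nonce(scratch, 20);
	if (ret < 0)
		goto out;		/* was "return ret": leaked scratch */
	if (ret != 20) {
		ret = -EIO;
		goto out;		/* was "return -EIO": same leak */
	}
	ret = 0;			/* ... sealing work would go here ... */
out:
	free(scratch);
	return ret;
}

int main(void)
{
	return seal_like();
}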
+diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
+index bbae04793c50e..c18017e0a3d95 100644
+--- a/sound/firewire/bebob/bebob_stream.c
++++ b/sound/firewire/bebob/bebob_stream.c
+@@ -517,20 +517,22 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob)
+ static int keep_resources(struct snd_bebob *bebob, struct amdtp_stream *stream,
+ 			  unsigned int rate, unsigned int index)
+ {
+-	struct snd_bebob_stream_formation *formation;
++	unsigned int pcm_channels;
++	unsigned int midi_ports;
+ 	struct cmp_connection *conn;
+ 	int err;
+ 
+ 	if (stream == &bebob->tx_stream) {
+-		formation = bebob->tx_stream_formations + index;
++		pcm_channels = bebob->tx_stream_formations[index].pcm;
++		midi_ports = bebob->midi_input_ports;
+ 		conn = &bebob->out_conn;
+ 	} else {
+-		formation = bebob->rx_stream_formations + index;
++		pcm_channels = bebob->rx_stream_formations[index].pcm;
++		midi_ports = bebob->midi_output_ports;
+ 		conn = &bebob->in_conn;
+ 	}
+ 
+-	err = amdtp_am824_set_parameters(stream, rate, formation->pcm,
+-					 formation->midi, false);
++	err = amdtp_am824_set_parameters(stream, rate, pcm_channels, midi_ports, false);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/sound/pci/hda/ideapad_s740_helper.c b/sound/pci/hda/ideapad_s740_helper.c
+new file mode 100644
+index 0000000000000..564b9086e52db
+--- /dev/null
++++ b/sound/pci/hda/ideapad_s740_helper.c
+@@ -0,0 +1,492 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Fixes for Lenovo Ideapad S740, to be included from codec driver */
++
++static const struct hda_verb alc285_ideapad_s740_coefs[] = {
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x10 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0320 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{}
++};
++
++static void alc285_fixup_ideapad_s740_coef(struct hda_codec *codec,
++					   const struct hda_fixup *fix,
++					   int action)
++{
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		snd_hda_add_verbs(codec, alc285_ideapad_s740_coefs);
++		break;
++	}
++}
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 45ae845e82df6..4b2cc8cb55c49 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1848,16 +1848,12 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
+ 	 */
+ 	if (spec->intel_hsw_fixup) {
+ 		/*
+-		 * On Intel platforms, device entries number is
+-		 * changed dynamically. If there is a DP MST
+-		 * hub connected, the device entries number is 3.
+-		 * Otherwise, it is 1.
+-		 * Here we manually set dev_num to 3, so that
+-		 * we can initialize all the device entries when
+-		 * bootup statically.
++		 * On Intel platforms, device entries count returned
++		 * by AC_PAR_DEVLIST_LEN is dynamic, and depends on
++		 * the type of receiver that is connected. Allocate pin
++		 * structures based on worst case.
+ 		 */
+-		dev_num = 3;
+-		spec->dev_num = 3;
++		dev_num = spec->dev_num;
+ 	} else if (spec->dyn_pcm_assign && codec->dp_mst) {
+ 		dev_num = snd_hda_get_num_devices(codec, pin_nid) + 1;
+ 		/*
+@@ -2658,7 +2654,7 @@ static void generic_acomp_pin_eld_notify(void *audio_ptr, int port, int dev_id)
+ 	/* skip notification during system suspend (but not in runtime PM);
+ 	 * the state will be updated at resume
+ 	 */
+-	if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
++	if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
+ 		return;
+ 	/* ditto during suspend/resume process itself */
+ 	if (snd_hdac_is_in_pm(&codec->core))
+@@ -2844,7 +2840,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
+ 	/* skip notification during system suspend (but not in runtime PM);
+ 	 * the state will be updated at resume
+ 	 */
+-	if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
++	if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
+ 		return;
+ 	/* ditto during suspend/resume process itself */
+ 	if (snd_hdac_is_in_pm(&codec->core))
+@@ -2942,7 +2938,7 @@ static int parse_intel_hdmi(struct hda_codec *codec)
+ 
+ /* Intel Haswell and onwards; audio component with eld notifier */
+ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
+-				 const int *port_map, int port_num)
++				 const int *port_map, int port_num, int dev_num)
+ {
+ 	struct hdmi_spec *spec;
+ 	int err;
+@@ -2957,6 +2953,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
+ 	spec->port_map = port_map;
+ 	spec->port_num = port_num;
+ 	spec->intel_hsw_fixup = true;
++	spec->dev_num = dev_num;
+ 
+ 	intel_haswell_enable_all_pins(codec, true);
+ 	intel_haswell_fixup_enable_dp12(codec);
+@@ -2982,12 +2979,12 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
+ 
+ static int patch_i915_hsw_hdmi(struct hda_codec *codec)
+ {
+-	return intel_hsw_common_init(codec, 0x08, NULL, 0);
++	return intel_hsw_common_init(codec, 0x08, NULL, 0, 3);
+ }
+ 
+ static int patch_i915_glk_hdmi(struct hda_codec *codec)
+ {
+-	return intel_hsw_common_init(codec, 0x0b, NULL, 0);
++	return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3);
+ }
+ 
+ static int patch_i915_icl_hdmi(struct hda_codec *codec)
+@@ -2998,7 +2995,7 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
+ 	 */
+ 	static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
+ 
+-	return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
++	return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3);
+ }
+ 
+ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
+@@ -3010,7 +3007,7 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
+ 	static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+ 	int ret;
+ 
+-	ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
++	ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4);
+ 	if (!ret) {
+ 		struct hdmi_spec *spec = codec->spec;
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8ec57bd351dfe..1fe70f2fe4fe8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6282,6 +6282,9 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
+ /* for alc295_fixup_hp_top_speakers */
+ #include "hp_x360_helper.c"
+ 
++/* for alc285_fixup_ideapad_s740_coef() */
++#include "ideapad_s740_helper.c"
++
+ enum {
+ 	ALC269_FIXUP_GPIO2,
+ 	ALC269_FIXUP_SONY_VAIO,
+@@ -6481,6 +6484,7 @@ enum {
+ 	ALC282_FIXUP_ACER_DISABLE_LINEOUT,
+ 	ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
+ 	ALC256_FIXUP_ACER_HEADSET_MIC,
++	ALC285_FIXUP_IDEAPAD_S740_COEF,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7973,6 +7977,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ 	},
++	[ALC285_FIXUP_IDEAPAD_S740_COEF] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_ideapad_s740_coef,
++		.chained = true,
++		.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -8320,6 +8330,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
++	SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
+index 4cf879c42dc4c..720297cbdf875 100644
+--- a/sound/pci/rme9652/hdsp.c
++++ b/sound/pci/rme9652/hdsp.c
+@@ -5390,7 +5390,8 @@ static int snd_hdsp_free(struct hdsp *hdsp)
+ 	if (hdsp->port)
+ 		pci_release_regions(hdsp->pci);
+ 
+-	pci_disable_device(hdsp->pci);
++	if (pci_is_enabled(hdsp->pci))
++		pci_disable_device(hdsp->pci);
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index 8d900c132f0f4..97a0bff96b288 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -6883,7 +6883,8 @@ static int snd_hdspm_free(struct hdspm * hdspm)
+ 	if (hdspm->port)
+ 		pci_release_regions(hdspm->pci);
+ 
+-	pci_disable_device(hdspm->pci);
++	if (pci_is_enabled(hdspm->pci))
++		pci_disable_device(hdspm->pci);
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
+index 4df992e846f23..7a4d395abceeb 100644
+--- a/sound/pci/rme9652/rme9652.c
++++ b/sound/pci/rme9652/rme9652.c
+@@ -1731,7 +1731,8 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652)
+ 	if (rme9652->port)
+ 		pci_release_regions(rme9652->pci);
+ 
+-	pci_disable_device(rme9652->pci);
++	if (pci_is_enabled(rme9652->pci))
++		pci_disable_device(rme9652->pci);
+ 	return 0;
+ }
+ 
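All three RME drivers above gain the same guard: the free path can run for a card whose PCI device was never enabled, or was already disabled, and disabling twice trips a refcount warning. A generic sketch with a boolean flag standing in for pci_is_enabled():

#include <stdbool.h>
#include <stdio.h>

struct dev { bool enabled; };

static void dev_disable(struct dev *d)
{
	if (!d->enabled) {
		fprintf(stderr, "disable imbalance!\n"); /* the WARN avoided */
		return;
	}
	d->enabled = false;
}

static void card_free(struct dev *d)
{
	if (d->enabled)		/* the pci_is_enabled()-style guard */
		dev_disable(d);
	/* ... release regions, free memory ... */
}

int main(void)
{
	struct dev d = { .enabled = false };	/* probe failed before enable */

	card_free(&d);		/* safe: the guard skips the disable */
	return 0;
}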
+diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
+index 8abe232ca4a4c..ff23a7d4d2ac5 100644
+--- a/sound/soc/codecs/rt286.c
++++ b/sound/soc/codecs/rt286.c
+@@ -171,6 +171,9 @@ static bool rt286_readable_register(struct device *dev, unsigned int reg)
+ 	case RT286_PROC_COEF:
+ 	case RT286_SET_AMP_GAIN_ADC_IN1:
+ 	case RT286_SET_AMP_GAIN_ADC_IN2:
++	case RT286_SET_GPIO_MASK:
++	case RT286_SET_GPIO_DIRECTION:
++	case RT286_SET_GPIO_DATA:
+ 	case RT286_SET_POWER(RT286_DAC_OUT1):
+ 	case RT286_SET_POWER(RT286_DAC_OUT2):
+ 	case RT286_SET_POWER(RT286_ADC_IN1):
+@@ -1117,12 +1120,11 @@ static const struct dmi_system_id force_combo_jack_table[] = {
+ 	{ }
+ };
+ 
+-static const struct dmi_system_id dmi_dell_dino[] = {
++static const struct dmi_system_id dmi_dell[] = {
+ 	{
+-		.ident = "Dell Dino",
++		.ident = "Dell",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
+ 		}
+ 	},
+ 	{ }
+@@ -1133,7 +1135,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
+ {
+ 	struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ 	struct rt286_priv *rt286;
+-	int i, ret, val;
++	int i, ret, vendor_id;
+ 
+ 	rt286 = devm_kzalloc(&i2c->dev,	sizeof(*rt286),
+ 				GFP_KERNEL);
+@@ -1149,14 +1151,15 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
+ 	}
+ 
+ 	ret = regmap_read(rt286->regmap,
+-		RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
++		RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &vendor_id);
+ 	if (ret != 0) {
+ 		dev_err(&i2c->dev, "I2C error %d\n", ret);
+ 		return ret;
+ 	}
+-	if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
++	if (vendor_id != RT286_VENDOR_ID && vendor_id != RT288_VENDOR_ID) {
+ 		dev_err(&i2c->dev,
+-			"Device with ID register %#x is not rt286\n", val);
++			"Device with ID register %#x is not rt286\n",
++			vendor_id);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1180,8 +1183,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
+ 	if (pdata)
+ 		rt286->pdata = *pdata;
+ 
+-	if (dmi_check_system(force_combo_jack_table) ||
+-		dmi_check_system(dmi_dell_dino))
++	if ((vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) ||
++		dmi_check_system(force_combo_jack_table))
+ 		rt286->pdata.cbj_en = true;
+ 
+ 	regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3);
+@@ -1220,7 +1223,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
+ 	regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737);
+ 	regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f);
+ 
+-	if (dmi_check_system(dmi_dell_dino)) {
++	if (vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) {
+ 		regmap_update_bits(rt286->regmap,
+ 			RT286_SET_GPIO_MASK, 0x40, 0x40);
+ 		regmap_update_bits(rt286->regmap,
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
+index 4063aac2a4431..dd69d874bad2a 100644
+--- a/sound/soc/codecs/rt5670.c
++++ b/sound/soc/codecs/rt5670.c
+@@ -2980,6 +2980,18 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
+ 						 RT5670_GPIO1_IS_IRQ |
+ 						 RT5670_JD_MODE3),
+ 	},
++	{
++		.callback = rt5670_quirk_cb,
++		.ident = "Dell Venue 10 Pro 5055",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
++		},
++		.driver_data = (unsigned long *)(RT5670_DMIC_EN |
++						 RT5670_DMIC2_INR |
++						 RT5670_GPIO1_IS_IRQ |
++						 RT5670_JD_MODE1),
++	},
+ 	{
+ 		.callback = rt5670_quirk_cb,
+ 		.ident = "Aegex 10 tablet (RU2)",
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 5d48cc359c3da..22912cab5e638 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -482,6 +482,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
+ 		},
+ 		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
++					BYT_RT5640_JD_SRC_JD2_IN4N |
++					BYT_RT5640_OVCD_TH_2000UA |
++					BYT_RT5640_OVCD_SF_0P75 |
+ 					BYT_RT5640_MONO_SPEAKER |
+ 					BYT_RT5640_DIFF_MIC |
+ 					BYT_RT5640_SSP0_AIF2 |
+@@ -515,6 +518,23 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{
++		/* Chuwi Hi8 (CWI509) */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
++			DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
++			DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
++		},
++		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
++					BYT_RT5640_JD_SRC_JD2_IN4N |
++					BYT_RT5640_OVCD_TH_2000UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_MONO_SPEAKER |
++					BYT_RT5640_DIFF_MIC |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 8adce6417b021..ecd3f90f4bbea 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -187,6 +187,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					SOF_RT715_DAI_ID_FIX |
+ 					SOF_SDW_FOUR_SPK),
+ 	},
++	/* AlderLake devices */
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Alder Lake Client Platform"),
++		},
++		.driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
++					SOF_SDW_TGL_HDMI |
++					SOF_SDW_PCH_DMIC),
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index 1029d8d9d800a..d2b4632d9c2a4 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -1428,8 +1428,75 @@ static int rsnd_hw_params(struct snd_soc_component *component,
+ 		}
+ 		if (io->converted_chan)
+ 			dev_dbg(dev, "convert channels = %d\n", io->converted_chan);
+-		if (io->converted_rate)
++		if (io->converted_rate) {
++			/*
++			 * SRC supports convert rates from params_rate(hw_params)/k_down
++			 * to params_rate(hw_params)*k_up, where k_up is always 6, and
++			 * k_down depends on number of channels and SRC unit.
++			 * So all SRC units can upsample audio up to 6 times regardless
++			 * of its number of channels. And all SRC units can downsample
++			 * 2 channel audio up to 6 times too.
++			 */
++			int k_up = 6;
++			int k_down = 6;
++			int channel;
++			struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
++
+ 			dev_dbg(dev, "convert rate     = %d\n", io->converted_rate);
++
++			channel = io->converted_chan ? io->converted_chan :
++				  params_channels(hw_params);
++
++			switch (rsnd_mod_id(src_mod)) {
++			/*
++			 * SRC0 can downsample 4, 6 and 8 channel audio up to 4 times.
++			 * SRC1, SRC3 and SRC4 can downsample 4 channel audio
++			 * up to 4 times.
++			 * SRC1, SRC3 and SRC4 can downsample 6 and 8 channel audio
++			 * no more than twice.
++			 */
++			case 1:
++			case 3:
++			case 4:
++				if (channel > 4) {
++					k_down = 2;
++					break;
++				}
++				fallthrough;
++			case 0:
++				if (channel > 2)
++					k_down = 4;
++				break;
++
++			/* Other SRC units do not support more than 2 channels */
++			default:
++				if (channel > 2)
++					return -EINVAL;
++			}
++
++			if (params_rate(hw_params) > io->converted_rate * k_down) {
++				hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
++					io->converted_rate * k_down;
++				hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
++					io->converted_rate * k_down;
++				hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
++			} else if (params_rate(hw_params) * k_up < io->converted_rate) {
++				hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
++					(io->converted_rate + k_up - 1) / k_up;
++				hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
++					(io->converted_rate + k_up - 1) / k_up;
++				hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
++			}
++
++			/*
++			 * TBD: Max SRC input and output rates also depend on number
++			 * of channels and SRC unit:
++			 * SRC1, SRC3 and SRC4 do not support more than 128kHz
++			 * for 6 channel and 96kHz for 8 channel audio.
++			 * Perhaps this function should return EINVAL if the input or
++			 * the output rate exceeds the limitation.
++			 */
++		}
+ 	}
+ 
+ 	return rsnd_dai_call(hw_params, io, substream, hw_params);
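The R-Car hunk clamps the requested stream rate into the window the sample-rate converter can actually bridge: at most k_down times above and k_up times below the converted rate, with k_down reduced for the SRC units and channel counts enumerated in the comment. A standalone sketch of the clamp, with hypothetical ratios:

#include <stdio.h>

static unsigned clamp_rate(unsigned requested, unsigned converted,
			   unsigned k_up, unsigned k_down)
{
	if (requested > converted * k_down)
		return converted * k_down;	/* too fast to downsample */
	if (requested * k_up < converted)	/* too slow to upsample */
		return (converted + k_up - 1) / k_up;	/* round up */
	return requested;
}

int main(void)
{
	/* 8 kHz in, 48 kHz out, k_up = 6: exactly reachable, unchanged. */
	printf("%u\n", clamp_rate(8000, 48000, 6, 6));
	/* 7 kHz would need x6.86 upsampling: clamped to ceil(48000/6). */
	printf("%u\n", clamp_rate(7000, 48000, 6, 6));
	return 0;
}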
+diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
+index d0ded427a8363..042207c116514 100644
+--- a/sound/soc/sh/rcar/ssi.c
++++ b/sound/soc/sh/rcar/ssi.c
+@@ -507,10 +507,15 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
+ 			 struct rsnd_priv *priv)
+ {
+ 	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
++	int ret;
+ 
+ 	if (!rsnd_ssi_is_run_mods(mod, io))
+ 		return 0;
+ 
++	ret = rsnd_ssi_master_clk_start(mod, io);
++	if (ret < 0)
++		return ret;
++
+ 	ssi->usrcnt++;
+ 
+ 	rsnd_mod_power_on(mod);
+@@ -792,7 +797,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
+ 						       SSI_SYS_STATUS(i * 2),
+ 						       0xf << (id * 4));
+ 					stop = true;
+-					break;
+ 				}
+ 			}
+ 			break;
+@@ -810,7 +814,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
+ 						SSI_SYS_STATUS((i * 2) + 1),
+ 						0xf << 4);
+ 					stop = true;
+-					break;
+ 				}
+ 			}
+ 			break;
+@@ -1060,13 +1063,6 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
+ 	return 0;
+ }
+ 
+-static int rsnd_ssi_prepare(struct rsnd_mod *mod,
+-			    struct rsnd_dai_stream *io,
+-			    struct rsnd_priv *priv)
+-{
+-	return rsnd_ssi_master_clk_start(mod, io);
+-}
+-
+ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
+ 	.name		= SSI_NAME,
+ 	.probe		= rsnd_ssi_common_probe,
+@@ -1079,7 +1075,6 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
+ 	.pointer	= rsnd_ssi_pio_pointer,
+ 	.pcm_new	= rsnd_ssi_pcm_new,
+ 	.hw_params	= rsnd_ssi_hw_params,
+-	.prepare	= rsnd_ssi_prepare,
+ 	.get_status	= rsnd_ssi_get_status,
+ };
+ 
+@@ -1166,7 +1161,6 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
+ 	.pcm_new	= rsnd_ssi_pcm_new,
+ 	.fallback	= rsnd_ssi_fallback,
+ 	.hw_params	= rsnd_ssi_hw_params,
+-	.prepare	= rsnd_ssi_prepare,
+ 	.get_status	= rsnd_ssi_get_status,
+ };
+ 
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index 246a5e32e22a2..b4810266f5e5d 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -153,7 +153,9 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
+ 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
+ 	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+ 
++	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
+ 	snd_soc_runtime_activate(fe, stream);
++	mutex_unlock(&fe->card->pcm_mutex);
+ 
+ 	mutex_unlock(&fe->card->mutex);
+ 
+@@ -181,7 +183,9 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
+ 
+ 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ 
++	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
+ 	snd_soc_runtime_deactivate(fe, stream);
++	mutex_unlock(&fe->card->pcm_mutex);
+ 
+ 	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+ 
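
The two hunks above wrap runtime (de)activation in the card's pcm_mutex, taken with mutex_lock_nested() so lockdep can tell the acquisition apart by subclass while the card mutex is already held. A minimal sketch of the pattern with hypothetical locks; the real code passes fe->card->pcm_subclass as the subclass:

#include <linux/mutex.h>

static DEFINE_MUTEX(outer_mutex);
static DEFINE_MUTEX(pcm_mutex);

/* mutex_lock_nested() locks like mutex_lock() but tags the acquisition
 * with a subclass, so lockdep can validate nesting of locks that would
 * otherwise look identical to it.
 */
static void activate_under_locks(void)
{
	mutex_lock(&outer_mutex);
	mutex_lock_nested(&pcm_mutex, SINGLE_DEPTH_NESTING);

	/* runtime activate/deactivate would happen here */

	mutex_unlock(&pcm_mutex);
	mutex_unlock(&outer_mutex);
}
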
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 48facd2626585..8a8fe2b980a18 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3827,6 +3827,69 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 		}
+ 	}
+ },
++{
++	/*
++	 * Pioneer DJ DJM-850
++	 * 8 channels playback and 8 channels capture @ 44.1/48/96kHz S24LE
++	 * Playback on EP 0x05
++	 * Capture on EP 0x86
++	 */
++	USB_DEVICE_VENDOR_SPEC(0x08e4, 0x0163),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 8,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x05,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++					    USB_ENDPOINT_SYNC_ASYNC|
++						USB_ENDPOINT_USAGE_DATA,
++					.rates = SNDRV_PCM_RATE_44100|
++						SNDRV_PCM_RATE_48000|
++						SNDRV_PCM_RATE_96000,
++					.rate_min = 44100,
++					.rate_max = 96000,
++					.nr_rates = 3,
++					.rate_table = (unsigned int[]) { 44100, 48000, 96000 }
++				}
++			},
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 8,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x86,
++					.ep_idx = 1,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC|
++						USB_ENDPOINT_USAGE_DATA,
++					.rates = SNDRV_PCM_RATE_44100|
++						SNDRV_PCM_RATE_48000|
++						SNDRV_PCM_RATE_96000,
++					.rate_min = 44100,
++					.rate_max = 96000,
++					.nr_rates = 3,
++					.rate_table = (unsigned int[]) { 44100, 48000, 96000 }
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
+ {
+ 	/*
+ 	 * Pioneer DJ DJM-450
+diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
+index e7a8d847161f2..1d80ad4e0de8d 100644
+--- a/tools/lib/bpf/ringbuf.c
++++ b/tools/lib/bpf/ringbuf.c
+@@ -202,9 +202,11 @@ static inline int roundup_len(__u32 len)
+ 	return (len + 7) / 8 * 8;
+ }
+ 
+-static int ringbuf_process_ring(struct ring* r)
++static int64_t ringbuf_process_ring(struct ring* r)
+ {
+-	int *len_ptr, len, err, cnt = 0;
++	int *len_ptr, len, err;
++	/* 64-bit to avoid overflow in case of extreme application behavior */
++	int64_t cnt = 0;
+ 	unsigned long cons_pos, prod_pos;
+ 	bool got_new_data;
+ 	void *sample;
+@@ -244,12 +246,14 @@ done:
+ }
+ 
+ /* Consume available ring buffer(s) data without event polling.
+- * Returns number of records consumed across all registered ring buffers, or
+- * negative number if any of the callbacks return error.
++ * Returns number of records consumed across all registered ring buffers (or
++ * INT_MAX, whichever is less), or negative number if any of the callbacks
++ * return error.
+  */
+ int ring_buffer__consume(struct ring_buffer *rb)
+ {
+-	int i, err, res = 0;
++	int64_t err, res = 0;
++	int i;
+ 
+ 	for (i = 0; i < rb->ring_cnt; i++) {
+ 		struct ring *ring = &rb->rings[i];
+@@ -259,18 +263,24 @@ int ring_buffer__consume(struct ring_buffer *rb)
+ 			return err;
+ 		res += err;
+ 	}
++	if (res > INT_MAX)
++		return INT_MAX;
+ 	return res;
+ }
+ 
+ /* Poll for available data and consume records, if any are available.
+- * Returns number of records consumed, or negative number, if any of the
+- * registered callbacks returned error.
++ * Returns number of records consumed (or INT_MAX, whichever is less), or
++ * negative number, if any of the registered callbacks returned error.
+  */
+ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
+ {
+-	int i, cnt, err, res = 0;
++	int i, cnt;
++	int64_t err, res = 0;
+ 
+ 	cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
++	if (cnt < 0)
++		return -errno;
++
+ 	for (i = 0; i < cnt; i++) {
+ 		__u32 ring_id = rb->events[i].data.fd;
+ 		struct ring *ring = &rb->rings[ring_id];
+@@ -280,7 +290,9 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
+ 			return err;
+ 		res += err;
+ 	}
+-	return cnt < 0 ? -errno : res;
++	if (res > INT_MAX)
++		return INT_MAX;
++	return res;
+ }
+ 
+ /* Get an fd that can be used to sleep until data is available in the ring(s) */
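
Both entry points above now accumulate in a 64-bit counter and cap the return value: with enough rings and records, a plain int sum can wrap negative, which callers would misread as an error code. A self-contained illustration of the hazard and the clamp:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Accumulating per-ring record counts in a plain int can wrap negative;
 * summing in int64_t and clamping to INT_MAX keeps the int return type
 * honest, as in the ringbuf hunks above.
 */
static int sum_records(const int *per_ring, int n)
{
	int64_t res = 0;
	int i;

	for (i = 0; i < n; i++)
		res += per_ring[i];
	if (res > INT_MAX)
		return INT_MAX;
	return (int)res;
}

int main(void)
{
	int counts[2] = { INT_MAX, 5 };

	/* a 32-bit accumulator would wrap to a negative value here */
	printf("%d\n", sum_records(counts, 2)); /* -> 2147483647 */
	return 0;
}
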
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index d8e59d31399a5..c955cd683e220 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -530,6 +530,7 @@ ifndef NO_LIBELF
+       ifdef LIBBPF_DYNAMIC
+         ifeq ($(feature-libbpf), 1)
+           EXTLIBS += -lbpf
++          $(call detected,CONFIG_LIBBPF_DYNAMIC)
+         else
+           dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
+         endif
+diff --git a/tools/perf/util/Build b/tools/perf/util/Build
+index e3e12f9d4733e..5a296ac694157 100644
+--- a/tools/perf/util/Build
++++ b/tools/perf/util/Build
+@@ -141,7 +141,14 @@ perf-$(CONFIG_LIBELF) += symbol-elf.o
+ perf-$(CONFIG_LIBELF) += probe-file.o
+ perf-$(CONFIG_LIBELF) += probe-event.o
+ 
++ifdef CONFIG_LIBBPF_DYNAMIC
++  hashmap := 1
++endif
+ ifndef CONFIG_LIBBPF
++  hashmap := 1
++endif
++
++ifdef hashmap
+ perf-y += hashmap.o
+ endif
+ 
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
+index 6f3a70df63bc6..e00435753008a 100644
+--- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
+@@ -120,12 +120,13 @@ __mirror_gre_test()
+ 	sleep 5
+ 
+ 	for ((i = 0; i < count; ++i)); do
++		local sip=$(mirror_gre_ipv6_addr 1 $i)::1
+ 		local dip=$(mirror_gre_ipv6_addr 1 $i)::2
+ 		local htun=h3-gt6-$i
+ 		local message
+ 
+ 		icmp6_capture_install $htun
+-		mirror_test v$h1 "" $dip $htun 100 10
++		mirror_test v$h1 $sip $dip $htun 100 10
+ 		icmp6_capture_uninstall $htun
+ 	done
+ }
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
+index b0cb1aaffddab..33ddd01689bee 100644
+--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
+@@ -507,8 +507,8 @@ do_red_test()
+ 	check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
+ 	local diff=$((limit - backlog))
+ 	pct=$((100 * diff / limit))
+-	((0 <= pct && pct <= 5))
+-	check_err $? "backlog $backlog / $limit expected <= 5% distance"
++	((0 <= pct && pct <= 10))
++	check_err $? "backlog $backlog / $limit expected <= 10% distance"
+ 	log_test "TC $((vlan - 10)): RED backlog > limit"
+ 
+ 	stop_traffic
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index be17462fe1467..0af84ad48aa77 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -1,6 +1,10 @@
+ # This mimics the top-level Makefile. We do it explicitly here so that this
+ # Makefile can operate with or without the kbuild infrastructure.
++ifneq ($(LLVM),)
++CC := clang
++else
+ CC := $(CROSS_COMPILE)gcc
++endif
+ 
+ ifeq (0,$(MAKELEVEL))
+     ifeq ($(OUTPUT),)
+diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
+index 13db1cb50e57b..6406cd76a19d8 100644
+--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
+@@ -20,6 +20,13 @@ mirror_uninstall()
+ 	tc filter del dev $swp1 $direction pref 1000
+ }
+ 
++is_ipv6()
++{
++	local addr=$1; shift
++
++	[[ -z ${addr//[0-9a-fA-F:]/} ]]
++}
++
+ mirror_test()
+ {
+ 	local vrf_name=$1; shift
+@@ -29,9 +36,17 @@ mirror_test()
+ 	local pref=$1; shift
+ 	local expect=$1; shift
+ 
++	if is_ipv6 $dip; then
++		local proto=-6
++		local type="icmp6 type=128" # Echo request.
++	else
++		local proto=
++		local type="icmp echoreq"
++	fi
++
+ 	local t0=$(tc_rule_stats_get $dev $pref)
+-	$MZ $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
+-	    -c 10 -d 100msec -t icmp type=8
++	$MZ $proto $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
++	    -c 10 -d 100msec -t $type
+ 	sleep 0.5
+ 	local t1=$(tc_rule_stats_get $dev $pref)
+ 	local delta=$((t1 - t0))
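
The shell is_ipv6() above classifies an address as IPv6 when stripping every hex digit and colon leaves an empty string. The same heuristic in C, using strspn(); note this is a test-input classifier, not a full address validator:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* An address counts as IPv6 if it contains nothing but hex digits and
 * colons, mirroring [[ -z ${addr//[0-9a-fA-F:]/} ]] above.
 */
static bool is_ipv6(const char *addr)
{
	return addr[strspn(addr, "0123456789abcdefABCDEF:")] == '\0';
}

int main(void)
{
	printf("%d\n", is_ipv6("2001:db8::1")); /* 1 */
	printf("%d\n", is_ipv6("192.0.2.1"));   /* 0: dots fall outside the set */
	return 0;
}
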
+diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
+index 39edce4f541c2..2674ba20d5249 100755
+--- a/tools/testing/selftests/net/mptcp/diag.sh
++++ b/tools/testing/selftests/net/mptcp/diag.sh
+@@ -5,8 +5,9 @@ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+ ns="ns1-$rndh"
+ ksft_skip=4
+ test_cnt=1
++timeout_poll=100
++timeout_test=$((timeout_poll * 2 + 1))
+ ret=0
+-pids=()
+ 
+ flush_pids()
+ {
+@@ -14,18 +15,14 @@ flush_pids()
+ 	# give it some time
+ 	sleep 1.1
+ 
+-	for pid in ${pids[@]}; do
+-		[ -d /proc/$pid ] && kill -SIGUSR1 $pid >/dev/null 2>&1
+-	done
+-	pids=()
++	ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGUSR1 &>/dev/null
+ }
+ 
+ cleanup()
+ {
++	ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
++
+ 	ip netns del $ns
+-	for pid in ${pids[@]}; do
+-		[ -d /proc/$pid ] && kill -9 $pid >/dev/null 2>&1
+-	done
+ }
+ 
+ ip -Version > /dev/null 2>&1
+@@ -79,39 +76,57 @@ trap cleanup EXIT
+ ip netns add $ns
+ ip -n $ns link set dev lo up
+ 
+-echo "a" | ip netns exec $ns ./mptcp_connect -p 10000 -l 0.0.0.0 -t 100 >/dev/null &
++echo "a" | \
++	timeout ${timeout_test} \
++		ip netns exec $ns \
++			./mptcp_connect -p 10000 -l -t ${timeout_poll} \
++				0.0.0.0 >/dev/null &
+ sleep 0.1
+-pids[0]=$!
+ chk_msk_nr 0 "no msk on netns creation"
+ 
+-echo "b" | ip netns exec $ns ./mptcp_connect -p 10000 127.0.0.1 -j -t 100 >/dev/null &
++echo "b" | \
++	timeout ${timeout_test} \
++		ip netns exec $ns \
++			./mptcp_connect -p 10000 -j -t ${timeout_poll} \
++				127.0.0.1 >/dev/null &
+ sleep 0.1
+-pids[1]=$!
+ chk_msk_nr 2 "after MPC handshake "
+ chk_msk_remote_key_nr 2 "....chk remote_key"
+ chk_msk_fallback_nr 0 "....chk no fallback"
+ flush_pids
+ 
+ 
+-echo "a" | ip netns exec $ns ./mptcp_connect -p 10001 -s TCP -l 0.0.0.0 -t 100 >/dev/null &
+-pids[0]=$!
++echo "a" | \
++	timeout ${timeout_test} \
++		ip netns exec $ns \
++			./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
++				0.0.0.0 >/dev/null &
+ sleep 0.1
+-echo "b" | ip netns exec $ns ./mptcp_connect -p 10001 127.0.0.1 -j -t 100 >/dev/null &
+-pids[1]=$!
++echo "b" | \
++	timeout ${timeout_test} \
++		ip netns exec $ns \
++			./mptcp_connect -p 10001 -j -t ${timeout_poll} \
++				127.0.0.1 >/dev/null &
+ sleep 0.1
+ chk_msk_fallback_nr 1 "check fallback"
+ flush_pids
+ 
+ NR_CLIENTS=100
+ for I in `seq 1 $NR_CLIENTS`; do
+-	echo "a" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) -l 0.0.0.0 -t 100 -w 10 >/dev/null  &
+-	pids[$((I*2))]=$!
++	echo "a" | \
++		timeout ${timeout_test} \
++			ip netns exec $ns \
++				./mptcp_connect -p $((I+10001)) -l -w 10 \
++					-t ${timeout_poll} 0.0.0.0 >/dev/null &
+ done
+ sleep 0.1
+ 
+ for I in `seq 1 $NR_CLIENTS`; do
+-	echo "b" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) 127.0.0.1 -t 100 -w 10 >/dev/null &
+-	pids[$((I*2 + 1))]=$!
++	echo "b" | \
++		timeout ${timeout_test} \
++			ip netns exec $ns \
++				./mptcp_connect -p $((I+10001)) -w 10 \
++					-t ${timeout_poll} 127.0.0.1 >/dev/null &
+ done
+ sleep 1.5
+ 
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index 10a030b53b23e..65b3b983efc26 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -11,7 +11,8 @@ cin=""
+ cout=""
+ ksft_skip=4
+ capture=false
+-timeout=30
++timeout_poll=30
++timeout_test=$((timeout_poll * 2 + 1))
+ ipv6=true
+ ethtool_random_on=true
+ tc_delay="$((RANDOM%50))"
+@@ -273,7 +274,7 @@ check_mptcp_disabled()
+ 	ip netns exec ${disabled_ns} sysctl -q net.mptcp.enabled=0
+ 
+ 	local err=0
+-	LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -t $timeout -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
++	LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
+ 		grep -q "^socket: Protocol not available$" && err=1
+ 	ip netns delete ${disabled_ns}
+ 
+@@ -430,14 +431,20 @@ do_transfer()
+ 	local stat_cookietx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
+ 	local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ 
+-	ip netns exec ${listener_ns} ./mptcp_connect -t $timeout -l -p $port -s ${srv_proto} $extra_args $local_addr < "$sin" > "$sout" &
++	timeout ${timeout_test} \
++		ip netns exec ${listener_ns} \
++			./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
++				$extra_args $local_addr < "$sin" > "$sout" &
+ 	local spid=$!
+ 
+ 	wait_local_port_listen "${listener_ns}" "${port}"
+ 
+ 	local start
+ 	start=$(date +%s%3N)
+-	ip netns exec ${connector_ns} ./mptcp_connect -t $timeout -p $port -s ${cl_proto} $extra_args $connect_addr < "$cin" > "$cout" &
++	timeout ${timeout_test} \
++		ip netns exec ${connector_ns} \
++			./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
++				$extra_args $connect_addr < "$cin" > "$cout" &
+ 	local cpid=$!
+ 
+ 	wait $cpid
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index ad32240fbfdad..43ed99de77343 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -8,7 +8,8 @@ cin=""
+ cinsent=""
+ cout=""
+ ksft_skip=4
+-timeout=30
++timeout_poll=30
++timeout_test=$((timeout_poll * 2 + 1))
+ mptcp_connect=""
+ capture=0
+ do_all_tests=1
+@@ -245,17 +246,26 @@ do_transfer()
+ 		local_addr="0.0.0.0"
+ 	fi
+ 
+-	ip netns exec ${listener_ns} $mptcp_connect -t $timeout -l -p $port \
+-		-s ${srv_proto} ${local_addr} < "$sin" > "$sout" &
++	timeout ${timeout_test} \
++		ip netns exec ${listener_ns} \
++			$mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
++				${local_addr} < "$sin" > "$sout" &
+ 	spid=$!
+ 
+ 	sleep 1
+ 
+ 	if [ "$test_link_fail" -eq 0 ];then
+-		ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
++		timeout ${timeout_test} \
++			ip netns exec ${connector_ns} \
++				$mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
++					$connect_addr < "$cin" > "$cout" &
+ 	else
+-		( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | tee "$cinsent" | \
+-		ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr > "$cout" &
++		( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | \
++			tee "$cinsent" | \
++			timeout ${timeout_test} \
++				ip netns exec ${connector_ns} \
++					$mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
++						$connect_addr > "$cout" &
+ 	fi
+ 	cpid=$!
+ 
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
+index f039ee57eb3c7..3aeef3bcb1018 100755
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
+@@ -7,7 +7,8 @@ ns2="ns2-$rndh"
+ ns3="ns3-$rndh"
+ capture=false
+ ksft_skip=4
+-timeout=30
++timeout_poll=30
++timeout_test=$((timeout_poll * 2 + 1))
+ test_cnt=1
+ ret=0
+ bail=0
+@@ -157,14 +158,20 @@ do_transfer()
+ 		sleep 1
+ 	fi
+ 
+-	ip netns exec ${ns3} ./mptcp_connect -jt $timeout -l -p $port 0.0.0.0 < "$sin" > "$sout" &
++	timeout ${timeout_test} \
++		ip netns exec ${ns3} \
++			./mptcp_connect -jt ${timeout_poll} -l -p $port \
++				0.0.0.0 < "$sin" > "$sout" &
+ 	local spid=$!
+ 
+ 	wait_local_port_listen "${ns3}" "${port}"
+ 
+ 	local start
+ 	start=$(date +%s%3N)
+-	ip netns exec ${ns1} ./mptcp_connect -jt $timeout -p $port 10.0.3.3 < "$cin" > "$cout" &
++	timeout ${timeout_test} \
++		ip netns exec ${ns1} \
++			./mptcp_connect -jt ${timeout_poll} -p $port \
++				10.0.3.3 < "$cin" > "$cout" &
+ 	local cpid=$!
+ 
+ 	wait $cpid
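
Across these MPTCP scripts the old single timeout splits in two: timeout_poll is handed to mptcp_connect's own poll loop, while timeout(1) enforces a hard outer bound of 2 * timeout_poll + 1 seconds, so a wedged connection fails the test instead of hanging it. A C sketch of that watchdog idea; the command and budget below are just examples:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t child;

static void on_alarm(int sig)
{
	(void)sig;
	kill(child, SIGKILL);		/* budget exhausted: kill the job */
}

/* Minimal watchdog in the spirit of "timeout <secs> cmd": run a command
 * and SIGKILL it if it outlives its budget.
 */
static int run_with_timeout(char *const argv[], unsigned int secs)
{
	int status;

	signal(SIGALRM, on_alarm);
	child = fork();
	if (child == 0) {
		execvp(argv[0], argv);
		_exit(127);
	}
	alarm(secs);
	waitpid(child, &status, 0);
	alarm(0);
	return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}

int main(void)
{
	char *const argv[] = { "sleep", "1", NULL };

	/* scripts above use timeout_test = 2 * timeout_poll + 1 */
	printf("%d\n", run_with_timeout(argv, 201));
	return 0;
}
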
+diff --git a/tools/testing/selftests/powerpc/security/entry_flush.c b/tools/testing/selftests/powerpc/security/entry_flush.c
+index 78cf914fa3217..68ce377b205e9 100644
+--- a/tools/testing/selftests/powerpc/security/entry_flush.c
++++ b/tools/testing/selftests/powerpc/security/entry_flush.c
+@@ -53,7 +53,7 @@ int entry_flush_test(void)
+ 
+ 	entry_flush = entry_flush_orig;
+ 
+-	fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
++	fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
+ 	FAIL_IF(fd < 0);
+ 
+ 	p = (char *)memalign(zero_size, CACHELINE_SIZE);
+diff --git a/tools/testing/selftests/powerpc/security/flush_utils.h b/tools/testing/selftests/powerpc/security/flush_utils.h
+index 07a5eb3014669..7a3d60292916e 100644
+--- a/tools/testing/selftests/powerpc/security/flush_utils.h
++++ b/tools/testing/selftests/powerpc/security/flush_utils.h
+@@ -9,6 +9,10 @@
+ 
+ #define CACHELINE_SIZE 128
+ 
++#define PERF_L1D_READ_MISS_CONFIG	((PERF_COUNT_HW_CACHE_L1D) | 		\
++					(PERF_COUNT_HW_CACHE_OP_READ << 8) |	\
++					(PERF_COUNT_HW_CACHE_RESULT_MISS << 16))
++
+ void syscall_loop(char *p, unsigned long iterations,
+ 		  unsigned long zero_size);
+ 
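
PERF_L1D_READ_MISS_CONFIG above uses the generic hardware-cache event encoding from perf_event_open(2): the cache id in the low byte, the operation in the next byte, and the wanted result in the third, replacing the CPU-specific raw event the tests used before. A small sketch of the encoding:

#include <linux/perf_event.h>
#include <stdio.h>

/* PERF_TYPE_HW_CACHE events encode (cache) | (op << 8) | (result << 16),
 * per perf_event_open(2); this mirrors PERF_L1D_READ_MISS_CONFIG above.
 */
static unsigned long hw_cache_config(unsigned int cache, unsigned int op,
				     unsigned int result)
{
	return cache | (op << 8) | (result << 16);
}

int main(void)
{
	printf("0x%lx\n", hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
					  PERF_COUNT_HW_CACHE_OP_READ,
					  PERF_COUNT_HW_CACHE_RESULT_MISS));
	return 0;
}
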
+diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
+index 7565fd786640f..f73484a6470fa 100644
+--- a/tools/testing/selftests/powerpc/security/rfi_flush.c
++++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
+@@ -54,7 +54,7 @@ int rfi_flush_test(void)
+ 
+ 	rfi_flush = rfi_flush_orig;
+ 
+-	fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
++	fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
+ 	FAIL_IF(fd < 0);
+ 
+ 	p = (char *)memalign(zero_size, CACHELINE_SIZE);
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index ab1fa6f92c825..5cabc6c748db1 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2758,8 +2758,8 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
+ 	if (val < grow_start)
+ 		val = grow_start;
+ 
+-	if (val > halt_poll_ns)
+-		val = halt_poll_ns;
++	if (val > vcpu->kvm->max_halt_poll_ns)
++		val = vcpu->kvm->max_halt_poll_ns;
+ 
+ 	vcpu->halt_poll_ns = val;
+ out:
+@@ -2838,7 +2838,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
+ 				goto out;
+ 			}
+ 			poll_end = cur = ktime_get();
+-		} while (single_task_running() && ktime_before(cur, stop));
++		} while (single_task_running() && !need_resched() &&
++			 ktime_before(cur, stop));
+ 	}
+ 
+ 	prepare_to_rcuwait(&vcpu->wait);
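
The second kvm_main.c hunk stops the halt-poll busy loop as soon as the scheduler wants the CPU back, and the first caps poll growth at the per-VM max_halt_poll_ns rather than the global module parameter. A schematic of the resulting loop shape; struct vcpu, now_ns() and event_pending() here are hypothetical stand-ins:

#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/types.h>

struct vcpu;					/* hypothetical */
extern u64 now_ns(void);			/* hypothetical */
extern bool event_pending(struct vcpu *v);	/* hypothetical */

/* Poll for a wake-up only while the deadline has not passed, no other
 * task is runnable on this CPU, and the scheduler has not asked us to
 * yield -- mirroring the single_task_running() && !need_resched() &&
 * ktime_before() condition above.
 */
static bool poll_for_event(struct vcpu *v, u64 deadline)
{
	while (now_ns() < deadline) {
		if (event_pending(v))
			return true;	/* poll succeeded, skip the sleep */
		if (!single_task_running() || need_resched())
			break;		/* stop burning the CPU */
		cpu_relax();
	}
	return false;			/* fall back to blocking */
}
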


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-05-14 14:02 Alice Ferrazzi
  0 siblings, 0 replies; 33+ messages in thread
From: Alice Ferrazzi @ 2021-05-14 14:02 UTC (permalink / raw
  To: gentoo-commits

commit:     0c96d61b12324bd9d6e4ea92c8c39ad05645f1af
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri May 14 14:01:09 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri May 14 14:01:26 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0c96d61b

Linux patch 5.12.4

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |     4 +
 1003_linux-5.12.4.patch | 25407 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 25411 insertions(+)

diff --git a/0000_README b/0000_README
index 00f2c30..9e34155 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-5.12.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.3
 
+Patch:  1003_linux-5.12.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-5.12.4.patch b/1003_linux-5.12.4.patch
new file mode 100644
index 0000000..29dd42c
--- /dev/null
+++ b/1003_linux-5.12.4.patch
@@ -0,0 +1,25407 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 04545725f187f..835f810f2f26d 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1869,13 +1869,6 @@
+ 			bypassed by not enabling DMAR with this option. In
+ 			this case, gfx device will use physical address for
+ 			DMA.
+-		forcedac [X86-64]
+-			With this option iommu will not optimize to look
+-			for io virtual address below 32-bit forcing dual
+-			address cycle on pci bus for cards supporting greater
+-			than 32-bit addressing. The default is to look
+-			for translation below 32-bit and if not available
+-			then look in the higher range.
+ 		strict [Default Off]
+ 			With this option on every unmap_single operation will
+ 			result in a hardware IOTLB flush operation as opposed
+@@ -1964,6 +1957,14 @@
+ 		nobypass	[PPC/POWERNV]
+ 			Disable IOMMU bypass, using IOMMU for PCI devices.
+ 
++	iommu.forcedac=	[ARM64, X86] Control IOVA allocation for PCI devices.
++			Format: { "0" | "1" }
++			0 - Try to allocate a 32-bit DMA address first, before
++			  falling back to the full range if needed.
++			1 - Allocate directly from the full usable range,
++			  forcing Dual Address Cycle for PCI cards supporting
++			  greater than 32-bit addressing.
++
+ 	iommu.strict=	[ARM64] Configure TLB invalidation behaviour
+ 			Format: { "0" | "1" }
+ 			0 - Lazy mode.
+diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
+index 8631678283f9a..865be05083c3d 100644
+--- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
++++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
+@@ -80,7 +80,8 @@ required:
+   - interrupts
+   - clocks
+ 
+-additionalProperties: false
++additionalProperties:
++  type: object
+ 
+ examples:
+   - |
+diff --git a/Documentation/driver-api/xilinx/eemi.rst b/Documentation/driver-api/xilinx/eemi.rst
+index 9dcbc6f18d75d..c1bc47b9000dc 100644
+--- a/Documentation/driver-api/xilinx/eemi.rst
++++ b/Documentation/driver-api/xilinx/eemi.rst
+@@ -16,35 +16,8 @@ components running across different processing clusters on a chip or
+ device to communicate with a power management controller (PMC) on a
+ device to issue or respond to power management requests.
+ 
+-EEMI ops is a structure containing all eemi APIs supported by Zynq MPSoC.
+-The zynqmp-firmware driver maintain all EEMI APIs in zynqmp_eemi_ops
+-structure. Any driver who want to communicate with PMC using EEMI APIs
+-can call zynqmp_pm_get_eemi_ops().
+-
+-Example of EEMI ops::
+-
+-	/* zynqmp-firmware driver maintain all EEMI APIs */
+-	struct zynqmp_eemi_ops {
+-		int (*get_api_version)(u32 *version);
+-		int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
+-	};
+-
+-	static const struct zynqmp_eemi_ops eemi_ops = {
+-		.get_api_version = zynqmp_pm_get_api_version,
+-		.query_data = zynqmp_pm_query_data,
+-	};
+-
+-Example of EEMI ops usage::
+-
+-	static const struct zynqmp_eemi_ops *eemi_ops;
+-	u32 ret_payload[PAYLOAD_ARG_CNT];
+-	int ret;
+-
+-	eemi_ops = zynqmp_pm_get_eemi_ops();
+-	if (IS_ERR(eemi_ops))
+-		return PTR_ERR(eemi_ops);
+-
+-	ret = eemi_ops->query_data(qdata, ret_payload);
++Any driver that wants to communicate with the PMC using EEMI APIs
++should use the functions provided for each EEMI call.
+ 
+ IOCTL
+ ------
+diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst
+index 7f16cbe46e5c2..e6a9faa811973 100644
+--- a/Documentation/userspace-api/media/v4l/subdev-formats.rst
++++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst
+@@ -1567,8 +1567,8 @@ The following tables list existing packed RGB formats.
+       - MEDIA_BUS_FMT_RGB101010_1X30
+       - 0x1018
+       -
+-      - 0
+-      - 0
++      -
++      -
+       - r\ :sub:`9`
+       - r\ :sub:`8`
+       - r\ :sub:`7`
+diff --git a/Makefile b/Makefile
+index 53a4b1cb7bb06..0b1852621615c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+index 6c9804d2f3b4f..6df1ce5450618 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+@@ -713,9 +713,9 @@
+ 	multi-master;
+ 	status = "okay";
+ 
+-	si7021-a20@20 {
++	si7021-a20@40 {
+ 		compatible = "silabs,si7020";
+-		reg = <0x20>;
++		reg = <0x40>;
+ 	};
+ 
+ 	tmp275@48 {
+diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
+index 304a8ee2364c5..d98c78207aaf1 100644
+--- a/arch/arm/boot/dts/exynos4210-i9100.dts
++++ b/arch/arm/boot/dts/exynos4210-i9100.dts
+@@ -136,7 +136,7 @@
+ 			compatible = "maxim,max17042";
+ 
+ 			interrupt-parent = <&gpx2>;
+-			interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ 
+ 			pinctrl-0 = <&max17042_fuel_irq>;
+ 			pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
+index 111c32bae02c0..fc77c1bfd844e 100644
+--- a/arch/arm/boot/dts/exynos4412-midas.dtsi
++++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
+@@ -173,7 +173,7 @@
+ 		pmic@66 {
+ 			compatible = "maxim,max77693";
+ 			interrupt-parent = <&gpx1>;
+-			interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&max77693_irq>;
+ 			reg = <0x66>;
+@@ -221,7 +221,7 @@
+ 		fuel-gauge@36 {
+ 			compatible = "maxim,max17047";
+ 			interrupt-parent = <&gpx2>;
+-			interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&max77693_fuel_irq>;
+ 			reg = <0x36>;
+@@ -665,7 +665,7 @@
+ 	max77686: pmic@9 {
+ 		compatible = "maxim,max77686";
+ 		interrupt-parent = <&gpx0>;
+-		interrupts = <7 IRQ_TYPE_NONE>;
++		interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-0 = <&max77686_irq>;
+ 		pinctrl-names = "default";
+ 		reg = <0x09>;
+diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+index 2b20d9095d9f2..eebe6a3952ce8 100644
+--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
++++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+@@ -278,7 +278,7 @@
+ 	max77686: pmic@9 {
+ 		compatible = "maxim,max77686";
+ 		interrupt-parent = <&gpx3>;
+-		interrupts = <2 IRQ_TYPE_NONE>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&max77686_irq>;
+ 		reg = <0x09>;
+diff --git a/arch/arm/boot/dts/exynos4412-p4note.dtsi b/arch/arm/boot/dts/exynos4412-p4note.dtsi
+index b2f9d5448a188..9e750890edb87 100644
+--- a/arch/arm/boot/dts/exynos4412-p4note.dtsi
++++ b/arch/arm/boot/dts/exynos4412-p4note.dtsi
+@@ -146,7 +146,7 @@
+ 			pinctrl-0 = <&fuel_alert_irq>;
+ 			pinctrl-names = "default";
+ 			interrupt-parent = <&gpx2>;
+-			interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ 			maxim,rsns-microohm = <10000>;
+ 			maxim,over-heat-temp = <600>;
+ 			maxim,over-volt = <4300>;
+@@ -322,7 +322,7 @@
+ 	max77686: pmic@9 {
+ 		compatible = "maxim,max77686";
+ 		interrupt-parent = <&gpx0>;
+-		interrupts = <7 IRQ_TYPE_NONE>;
++		interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-0 = <&max77686_irq>;
+ 		pinctrl-names = "default";
+ 		reg = <0x09>;
+diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
+index 8b5a79a8720c6..39bbe18145cf2 100644
+--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
++++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
+@@ -134,7 +134,7 @@
+ 		compatible = "maxim,max77686";
+ 		reg = <0x09>;
+ 		interrupt-parent = <&gpx3>;
+-		interrupts = <2 IRQ_TYPE_NONE>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&max77686_irq>;
+ 		#clock-cells = <1>;
+diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+index 6635f6184051e..2335c46873494 100644
+--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
++++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+@@ -292,7 +292,7 @@
+ 	max77686: pmic@9 {
+ 		compatible = "maxim,max77686";
+ 		interrupt-parent = <&gpx3>;
+-		interrupts = <2 IRQ_TYPE_NONE>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&max77686_irq>;
+ 		wakeup-source;
+diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
+index 0cda654371ae7..56ee02ceba7d4 100644
+--- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
++++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
+@@ -575,7 +575,7 @@
+ 			maxim,rcomp = /bits/ 8 <0x4d>;
+ 
+ 			interrupt-parent = <&msmgpio>;
+-			interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&fuelgauge_pin>;
+diff --git a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
+index a0f7f461f48c8..2dadb836c5fed 100644
+--- a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
++++ b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
+@@ -717,7 +717,7 @@
+ 			maxim,rcomp = /bits/ 8 <0x56>;
+ 
+ 			interrupt-parent = <&pma8084_gpios>;
+-			interrupts = <21 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&fuelgauge_pin>;
+diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
+index 09a152b915575..1d6f0c5d02e9a 100644
+--- a/arch/arm/boot/dts/r8a7790-lager.dts
++++ b/arch/arm/boot/dts/r8a7790-lager.dts
+@@ -53,6 +53,9 @@
+ 		i2c11 = &i2cexio1;
+ 		i2c12 = &i2chdmi;
+ 		i2c13 = &i2cpwr;
++		mmc0 = &mmcif1;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
+index f603cba5441fc..6af1727b82690 100644
+--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
++++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
+@@ -53,6 +53,9 @@
+ 		i2c12 = &i2cexio1;
+ 		i2c13 = &i2chdmi;
+ 		i2c14 = &i2cexio4;
++		mmc0 = &sdhi0;
++		mmc1 = &sdhi1;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
+index c6d563fb7ec7c..bf51e29c793a3 100644
+--- a/arch/arm/boot/dts/r8a7791-porter.dts
++++ b/arch/arm/boot/dts/r8a7791-porter.dts
+@@ -28,6 +28,8 @@
+ 		serial0 = &scif0;
+ 		i2c9 = &gpioi2c2;
+ 		i2c10 = &i2chdmi;
++		mmc0 = &sdhi0;
++		mmc1 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
+index abf487e8fe0f3..2b59a04913500 100644
+--- a/arch/arm/boot/dts/r8a7793-gose.dts
++++ b/arch/arm/boot/dts/r8a7793-gose.dts
+@@ -49,6 +49,9 @@
+ 		i2c10 = &gpioi2c4;
+ 		i2c11 = &i2chdmi;
+ 		i2c12 = &i2cexio4;
++		mmc0 = &sdhi0;
++		mmc1 = &sdhi1;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
+index 3f1cc5bbf3297..32025986b3b9b 100644
+--- a/arch/arm/boot/dts/r8a7794-alt.dts
++++ b/arch/arm/boot/dts/r8a7794-alt.dts
+@@ -19,6 +19,9 @@
+ 		i2c10 = &gpioi2c4;
+ 		i2c11 = &i2chdmi;
+ 		i2c12 = &i2cexio4;
++		mmc0 = &mmcif0;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi1;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
+index 677596f6c9c9a..af066ee5e2754 100644
+--- a/arch/arm/boot/dts/r8a7794-silk.dts
++++ b/arch/arm/boot/dts/r8a7794-silk.dts
+@@ -31,6 +31,8 @@
+ 		serial0 = &scif2;
+ 		i2c9 = &gpioi2c1;
+ 		i2c10 = &i2chdmi;
++		mmc0 = &mmcif0;
++		mmc1 = &sdhi1;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+index ca064359dd308..b47d8300e536e 100644
+--- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
++++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+@@ -115,7 +115,7 @@
+ 	compatible = "maxim,max77836-battery";
+ 
+ 	interrupt-parent = <&gph3>;
+-	interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
++	interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ 
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&fg_irq>;
+diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+index 7b4249ed19833..060baa8b7e9d4 100644
+--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
++++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+@@ -1891,10 +1891,15 @@
+ 	usart2_idle_pins_c: usart2-idle-2 {
+ 		pins1 {
+ 			pinmux = <STM32_PINMUX('D', 5, ANALOG)>, /* USART2_TX */
+-				 <STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */
+ 				 <STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */
+ 		};
+ 		pins2 {
++			pinmux = <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */
++			bias-disable;
++			drive-push-pull;
++			slew-rate = <3>;
++		};
++		pins3 {
+ 			pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
+ 			bias-disable;
+ 		};
+@@ -1940,10 +1945,15 @@
+ 	usart3_idle_pins_b: usart3-idle-1 {
+ 		pins1 {
+ 			pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
+-				 <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
+ 				 <STM32_PINMUX('I', 10, ANALOG)>; /* USART3_CTS_NSS */
+ 		};
+ 		pins2 {
++			pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
++			bias-disable;
++			drive-push-pull;
++			slew-rate = <0>;
++		};
++		pins3 {
+ 			pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
+ 			bias-disable;
+ 		};
+@@ -1976,10 +1986,15 @@
+ 	usart3_idle_pins_c: usart3-idle-2 {
+ 		pins1 {
+ 			pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
+-				 <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
+ 				 <STM32_PINMUX('B', 13, ANALOG)>; /* USART3_CTS_NSS */
+ 		};
+ 		pins2 {
++			pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
++			bias-disable;
++			drive-push-pull;
++			slew-rate = <0>;
++		};
++		pins3 {
+ 			pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
+ 			bias-disable;
+ 		};
+diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
+index b0b15c97306b8..e81e5937a60ae 100644
+--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
++++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
+@@ -583,7 +583,7 @@
+ 			clocks = <&sys_clk 6>;
+ 			reset-names = "ether";
+ 			resets = <&sys_rst 6>;
+-			phy-mode = "rgmii";
++			phy-mode = "rgmii-id";
+ 			local-mac-address = [00 00 00 00 00 00];
+ 			socionext,syscon-phy-mode = <&soc_glue 0>;
+ 
+diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S
+index bed897e9a181a..86345751bbf3a 100644
+--- a/arch/arm/crypto/blake2s-core.S
++++ b/arch/arm/crypto/blake2s-core.S
+@@ -8,6 +8,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <asm/assembler.h>
+ 
+ 	// Registers used to hold message words temporarily.  There aren't
+ 	// enough ARM registers to hold the whole message block, so we have to
+@@ -38,6 +39,23 @@
+ #endif
+ .endm
+ 
++.macro _le32_bswap	a, tmp
++#ifdef __ARMEB__
++	rev_l		\a, \tmp
++#endif
++.endm
++
++.macro _le32_bswap_8x	a, b, c, d, e, f, g, h,  tmp
++	_le32_bswap	\a, \tmp
++	_le32_bswap	\b, \tmp
++	_le32_bswap	\c, \tmp
++	_le32_bswap	\d, \tmp
++	_le32_bswap	\e, \tmp
++	_le32_bswap	\f, \tmp
++	_le32_bswap	\g, \tmp
++	_le32_bswap	\h, \tmp
++.endm
++
+ // Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
+ // (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
+ // columns/diagonals.  s0-s1 are the word offsets to the message words the first
+@@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch)
+ 	tst		r1, #3
+ 	bne		.Lcopy_block_misaligned
+ 	ldmia		r1!, {r2-r9}
++	_le32_bswap_8x	r2, r3, r4, r5, r6, r7, r8, r9,  r14
+ 	stmia		r12!, {r2-r9}
+ 	ldmia		r1!, {r2-r9}
++	_le32_bswap_8x	r2, r3, r4, r5, r6, r7, r8, r9,  r14
+ 	stmia		r12, {r2-r9}
+ .Lcopy_block_done:
+ 	str		r1, [sp, #68]		// Update message pointer
+@@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch)
+ 1:
+ #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ 	ldr		r3, [r1], #4
++	_le32_bswap	r3, r4
+ #else
+ 	ldrb		r3, [r1, #0]
+ 	ldrb		r4, [r1, #1]
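
The _le32_bswap macros above byte-swap each message word on big-endian builds (via rev), because BLAKE2s consumes its input as little-endian 32-bit words. The C equivalents of the two ways to get that right:

#include <stdint.h>
#include <stdio.h>

/* Explicit 32-bit byte swap, the C analogue of the "rev" instruction
 * the macros above emit on big-endian ARM.
 */
static uint32_t swap32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

/* Byte-wise little-endian load: endian-independent by construction. */
static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	const uint8_t msg[4] = { 0x01, 0x02, 0x03, 0x04 };

	printf("0x%08x\n", load_le32(msg));     /* 0x04030201 everywhere */
	printf("0x%08x\n", swap32(0x01020304)); /* 0x04030201 */
	return 0;
}
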
+diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
+index 3023c1acfa194..c31bd8f7c0927 100644
+--- a/arch/arm/crypto/poly1305-glue.c
++++ b/arch/arm/crypto/poly1305-glue.c
+@@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
+ 
+ static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+ 
+-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
+ {
+ 	poly1305_init_arm(&dctx->h, key);
+ 	dctx->s[0] = get_unaligned_le32(key + 16);
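
Giving the key parameter an explicit bound, as the prototype change above does, documents the required buffer size and lets compilers that check array parameters (gcc's -Warray-parameter) flag declarations that disagree; the ABI is unchanged since the array still decays to a pointer. A minimal sketch with a stand-in size:

#include <stdint.h>
#include <string.h>

#define KEY_SIZE 32	/* stand-in for POLY1305_KEY_SIZE */

/* The explicit bound documents the contract in the prototype itself;
 * callers passing a differently-declared buffer can now be warned
 * about, while codegen is identical to a plain pointer parameter.
 */
static void init_key(uint8_t out[KEY_SIZE], const uint8_t key[KEY_SIZE])
{
	memcpy(out, key, KEY_SIZE);
}

int main(void)
{
	uint8_t raw[KEY_SIZE] = { 0 };
	uint8_t state[KEY_SIZE];

	init_key(state, raw);
	return 0;
}
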
+diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
+index 6e4ad66ff5360..8d5d368dbe90e 100644
+--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
++++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
+@@ -65,6 +65,7 @@
+ 	port@7 {
+ 		label = "sw";
+ 		reg = <7>;
++		phy-mode = "rgmii";
+ 
+ 		fixed-link {
+ 			speed = <1000>;
+diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
+index 9354077f74cd6..9e799328c6dbc 100644
+--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
++++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
+@@ -131,7 +131,7 @@
+ 			status = "disabled";
+ 		};
+ 
+-		ethernet-switch@80000 {
++		bus@80000 {
+ 			compatible = "simple-bus";
+ 			#size-cells = <1>;
+ 			#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+index 6dffada2e66b4..28aa634c9780e 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+@@ -294,7 +294,7 @@
+ 
+ &pwrap {
+ 	/* Only MT8173 E1 needs USB power domain */
+-	power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
++	power-domains = <&spm MT8173_POWER_DOMAIN_USB>;
+ 
+ 	pmic: mt6397 {
+ 		compatible = "mediatek,mt6397";
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 80519a145f13f..16f4b1fc0fb92 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -983,6 +983,9 @@
+ 			compatible = "mediatek,mt8183-mmsys", "syscon";
+ 			reg = <0 0x14000000 0 0x1000>;
+ 			#clock-cells = <1>;
++			mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>,
++				 <&gce 1 CMDQ_THR_PRIO_HIGHEST>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
+ 		};
+ 
+ 		ovl0: ovl@14008000 {
+@@ -1058,6 +1061,7 @@
+ 			interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>;
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ 			clocks = <&mmsys CLK_MM_DISP_CCORR0>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xf000 0x1000>;
+ 		};
+ 
+ 		aal0: aal@14010000 {
+@@ -1067,6 +1071,7 @@
+ 			interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_LOW>;
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ 			clocks = <&mmsys CLK_MM_DISP_AAL0>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0 0x1000>;
+ 		};
+ 
+ 		gamma0: gamma@14011000 {
+@@ -1075,6 +1080,7 @@
+ 			interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>;
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ 			clocks = <&mmsys CLK_MM_DISP_GAMMA0>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x1000>;
+ 		};
+ 
+ 		dither0: dither@14012000 {
+@@ -1083,6 +1089,7 @@
+ 			interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_LOW>;
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ 			clocks = <&mmsys CLK_MM_DISP_DITHER0>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x2000 0x1000>;
+ 		};
+ 
+ 		dsi0: dsi@14014000 {
+diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+index 63fd70086bb85..9f27e7ed5e225 100644
+--- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
++++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+@@ -56,7 +56,7 @@
+ 	tca6416: gpio@20 {
+ 		compatible = "ti,tca6416";
+ 		reg = <0x20>;
+-		reset-gpios = <&pio 65 GPIO_ACTIVE_HIGH>;
++		reset-gpios = <&pio 65 GPIO_ACTIVE_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&tca6416_pins>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+index 07c8b2c926c0c..b8f7cf5cbdab1 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+@@ -22,9 +22,11 @@
+ 			thermal-sensors = <&pm6150_adc_tm 1>;
+ 
+ 			trips {
+-				temperature = <125000>;
+-				hysteresis = <1000>;
+-				type = "critical";
++				charger-crit {
++					temperature = <125000>;
++					hysteresis = <1000>;
++					type = "critical";
++				};
+ 			};
+ 		};
+ 	};
+@@ -768,17 +770,17 @@ hp_i2c: &i2c9 {
+ };
+ 
+ &spi0 {
+-	pinctrl-0 = <&qup_spi0_cs_gpio>;
++	pinctrl-0 = <&qup_spi0_cs_gpio_init_high>, <&qup_spi0_cs_gpio>;
+ 	cs-gpios = <&tlmm 37 GPIO_ACTIVE_LOW>;
+ };
+ 
+ &spi6 {
+-	pinctrl-0 = <&qup_spi6_cs_gpio>;
++	pinctrl-0 = <&qup_spi6_cs_gpio_init_high>, <&qup_spi6_cs_gpio>;
+ 	cs-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
+ };
+ 
+ ap_spi_fp: &spi10 {
+-	pinctrl-0 = <&qup_spi10_cs_gpio>;
++	pinctrl-0 = <&qup_spi10_cs_gpio_init_high>, <&qup_spi10_cs_gpio>;
+ 	cs-gpios = <&tlmm 89 GPIO_ACTIVE_LOW>;
+ 
+ 	cros_ec_fp: ec@0 {
+@@ -1339,6 +1341,27 @@ ap_spi_fp: &spi10 {
+ 		};
+ 	};
+ 
++	qup_spi0_cs_gpio_init_high: qup-spi0-cs-gpio-init-high {
++		pinconf {
++			pins = "gpio37";
++			output-high;
++		};
++	};
++
++	qup_spi6_cs_gpio_init_high: qup-spi6-cs-gpio-init-high {
++		pinconf {
++			pins = "gpio62";
++			output-high;
++		};
++	};
++
++	qup_spi10_cs_gpio_init_high: qup-spi10-cs-gpio-init-high {
++		pinconf {
++			pins = "gpio89";
++			output-high;
++		};
++	};
++
+ 	qup_uart3_sleep: qup-uart3-sleep {
+ 		pinmux {
+ 			pins = "gpio38", "gpio39",
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index c4ac6f5dc008d..96d36b38f2696 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -1015,7 +1015,7 @@
+ 		left_spkr: wsa8810-left{
+ 			compatible = "sdw10217201000";
+ 			reg = <0 1>;
+-			powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
++			powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
+ 			#thermal-sensor-cells = <0>;
+ 			sound-name-prefix = "SpkrLeft";
+ 			#sound-dai-cells = <0>;
+@@ -1023,7 +1023,7 @@
+ 
+ 		right_spkr: wsa8810-right{
+ 			compatible = "sdw10217201000";
+-			powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
++			powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
+ 			reg = <0 2>;
+ 			#thermal-sensor-cells = <0>;
+ 			sound-name-prefix = "SpkrRight";
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 454f794af5479..6a2ed02d383d0 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -2382,7 +2382,7 @@
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+-			gpio-ranges = <&tlmm 0 0 150>;
++			gpio-ranges = <&tlmm 0 0 151>;
+ 			wakeup-parent = <&pdc_intc>;
+ 
+ 			cci0_default: cci0-default {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index e5bb17bc2f46b..778613d3410b1 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -914,7 +914,7 @@
+ 			      <0x0 0x03D00000 0x0 0x300000>;
+ 			reg-names = "west", "east", "north", "south";
+ 			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+-			gpio-ranges = <&tlmm 0 0 175>;
++			gpio-ranges = <&tlmm 0 0 176>;
+ 			gpio-controller;
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 947e1accae3ab..46a6c18cea91e 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -279,7 +279,7 @@
+ 
+ 	pmu {
+ 		compatible = "arm,armv8-pmuv3";
+-		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
++		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ 	};
+ 
+ 	psci {
+@@ -2327,10 +2327,9 @@
+ 			reg = <0 0x0ae00000 0 0x1000>;
+ 			reg-names = "mdss";
+ 
+-			interconnects = <&gem_noc MASTER_AMPSS_M0 &config_noc SLAVE_DISPLAY_CFG>,
+-					<&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
++			interconnects = <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
+ 					<&mmss_noc MASTER_MDP_PORT1 &mc_virt SLAVE_EBI_CH0>;
+-			interconnect-names = "notused", "mdp0-mem", "mdp1-mem";
++			interconnect-names = "mdp0-mem", "mdp1-mem";
+ 
+ 			power-domains = <&dispcc MDSS_GDSC>;
+ 
+@@ -2580,7 +2579,7 @@
+ 
+ 		dispcc: clock-controller@af00000 {
+ 			compatible = "qcom,sm8250-dispcc";
+-			reg = <0 0x0af00000 0 0x20000>;
++			reg = <0 0x0af00000 0 0x10000>;
+ 			mmcx-supply = <&mmcx_reg>;
+ 			clocks = <&rpmhcc RPMH_CXO_CLK>,
+ 				 <&dsi0_phy 0>,
+@@ -2588,28 +2587,14 @@
+ 				 <&dsi1_phy 0>,
+ 				 <&dsi1_phy 1>,
+ 				 <0>,
+-				 <0>,
+-				 <0>,
+-				 <0>,
+-				 <0>,
+-				 <0>,
+-				 <0>,
+-				 <0>,
+-				 <&sleep_clk>;
++				 <0>;
+ 			clock-names = "bi_tcxo",
+ 				      "dsi0_phy_pll_out_byteclk",
+ 				      "dsi0_phy_pll_out_dsiclk",
+ 				      "dsi1_phy_pll_out_byteclk",
+ 				      "dsi1_phy_pll_out_dsiclk",
+-				      "dp_link_clk_divsel_ten",
+-				      "dp_vco_divided_clk_src_mux",
+-				      "dptx1_phy_pll_link_clk",
+-				      "dptx1_phy_pll_vco_div_clk",
+-				      "dptx2_phy_pll_link_clk",
+-				      "dptx2_phy_pll_vco_div_clk",
+-				      "edp_phy_pll_link_clk",
+-				      "edp_phy_pll_vco_div_clk",
+-				      "sleep_clk";
++				      "dp_phy_pll_link_clk",
++				      "dp_phy_pll_vco_div_clk";
+ 			#clock-cells = <1>;
+ 			#reset-cells = <1>;
+ 			#power-domain-cells = <1>;
+@@ -2689,7 +2674,7 @@
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+-			gpio-ranges = <&tlmm 0 0 180>;
++			gpio-ranges = <&tlmm 0 0 181>;
+ 			wakeup-parent = <&pdc>;
+ 
+ 			pri_mi2s_active: pri-mi2s-active {
+@@ -3754,7 +3739,7 @@
+ 				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 11
+ 				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+-			     <GIC_PPI 12
++			     <GIC_PPI 10
+ 				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 5ef460458f5c3..e2fca420e5183 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -153,7 +153,7 @@
+ 
+ 	pmu {
+ 		compatible = "arm,armv8-pmuv3";
+-		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
++		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ 	};
+ 
+ 	psci {
+@@ -382,7 +382,7 @@
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+-			gpio-ranges = <&tlmm 0 0 203>;
++			gpio-ranges = <&tlmm 0 0 204>;
+ 
+ 			qup_uart3_default_state: qup-uart3-default-state {
+ 				rx {
+diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
+index 7a3da9b06f677..0c7e6f7905902 100644
+--- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
++++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
+@@ -12,6 +12,9 @@
+ 	aliases {
+ 		serial0 = &scif2;
+ 		serial1 = &hscif0;
++		mmc0 = &sdhi3;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
+index 501cb05da228d..3cf2e076940f3 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
++++ b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
+@@ -21,6 +21,9 @@
+ 		serial4 = &hscif2;
+ 		serial5 = &scif5;
+ 		ethernet0 = &avb;
++		mmc0 = &sdhi3;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
+index 71763f4402a7c..3c0d59def8ee5 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
++++ b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
+@@ -22,6 +22,9 @@
+ 		serial5 = &scif5;
+ 		serial6 = &scif4;
+ 		ethernet0 = &avb;
++		mmc0 = &sdhi3;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
+index ea87cb5a459c8..33257c6440b2c 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
++++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
+@@ -17,6 +17,8 @@
+ 	aliases {
+ 		serial0 = &scif2;
+ 		serial1 = &hscif2;
++		mmc0 = &sdhi0;
++		mmc1 = &sdhi3;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
+index 273f062f29093..7b6649a3ded02 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
++++ b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
+@@ -22,6 +22,9 @@
+ 		serial5 = &scif5;
+ 		serial6 = &scif4;
+ 		ethernet0 = &avb;
++		mmc0 = &sdhi3;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+index ec7ca72399ec4..1ffa4a995a7ab 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+@@ -992,8 +992,8 @@
+ 
+ 					reg = <1>;
+ 
+-					vin4csi41: endpoint@2 {
+-						reg = <2>;
++					vin4csi41: endpoint@3 {
++						reg = <3>;
+ 						remote-endpoint = <&csi41vin4>;
+ 					};
+ 				};
+@@ -1020,8 +1020,8 @@
+ 
+ 					reg = <1>;
+ 
+-					vin5csi41: endpoint@2 {
+-						reg = <2>;
++					vin5csi41: endpoint@3 {
++						reg = <3>;
+ 						remote-endpoint = <&csi41vin5>;
+ 					};
+ 				};
+@@ -1048,8 +1048,8 @@
+ 
+ 					reg = <1>;
+ 
+-					vin6csi41: endpoint@2 {
+-						reg = <2>;
++					vin6csi41: endpoint@3 {
++						reg = <3>;
+ 						remote-endpoint = <&csi41vin6>;
+ 					};
+ 				};
+@@ -1076,8 +1076,8 @@
+ 
+ 					reg = <1>;
+ 
+-					vin7csi41: endpoint@2 {
+-						reg = <2>;
++					vin7csi41: endpoint@3 {
++						reg = <3>;
+ 						remote-endpoint = <&csi41vin7>;
+ 					};
+ 				};
+diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+index f74f8b9993f1d..6d6cdc4c324ba 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
++++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+@@ -16,6 +16,9 @@
+ 	aliases {
+ 		serial0 = &scif2;
+ 		ethernet0 = &avb;
++		mmc0 = &sdhi3;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi1;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+index dfd6ae8b564fb..86ac48e2c8491 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+@@ -60,10 +60,7 @@
+ 
+ 	pmu_a76 {
+ 		compatible = "arm,cortex-a76-pmu";
+-		interrupts-extended = <&gic GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+-				      <&gic GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+-				      <&gic GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+-				      <&gic GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
++		interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ 	};
+ 
+ 	/* External SCIF clock - to be overridden by boards that provide it */
+diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+index c22bb38994e80..15bb1eeb66010 100644
+--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
++++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+@@ -36,6 +36,9 @@
+ 		serial0 = &scif2;
+ 		serial1 = &hscif1;
+ 		ethernet0 = &avb;
++		mmc0 = &sdhi2;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi3;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+index e9ed2597f1c20..61bd4df09df0d 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+@@ -16,6 +16,7 @@
+ 	aliases {
+ 		serial1 = &hscif0;
+ 		serial2 = &scif1;
++		mmc2 = &sdhi3;
+ 	};
+ 
+ 	clksndsel: clksndsel {
+diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
+index a04eae55dd6c4..3d88e95c65a53 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
+@@ -23,6 +23,8 @@
+ 	aliases {
+ 		serial0 = &scif2;
+ 		ethernet0 = &avb;
++		mmc0 = &sdhi2;
++		mmc1 = &sdhi0;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+index a87b8a6787196..8f2c1c1e2c64e 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
++++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+@@ -734,7 +734,7 @@
+ 			clocks = <&sys_clk 6>;
+ 			reset-names = "ether";
+ 			resets = <&sys_rst 6>;
+-			phy-mode = "rgmii";
++			phy-mode = "rgmii-id";
+ 			local-mac-address = [00 00 00 00 00 00];
+ 			socionext,syscon-phy-mode = <&soc_glue 0>;
+ 
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+index 0e52dadf54b3a..be97da1322580 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
++++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+@@ -564,7 +564,7 @@
+ 			clocks = <&sys_clk 6>;
+ 			reset-names = "ether";
+ 			resets = <&sys_rst 6>;
+-			phy-mode = "rgmii";
++			phy-mode = "rgmii-id";
+ 			local-mac-address = [00 00 00 00 00 00];
+ 			socionext,syscon-phy-mode = <&soc_glue 0>;
+ 
+@@ -585,7 +585,7 @@
+ 			clocks = <&sys_clk 7>;
+ 			reset-names = "ether";
+ 			resets = <&sys_rst 7>;
+-			phy-mode = "rgmii";
++			phy-mode = "rgmii-id";
+ 			local-mac-address = [00 00 00 00 00 00];
+ 			socionext,syscon-phy-mode = <&soc_glue 1>;
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+index 8c84dafb7125c..f1e7da3dfa276 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+@@ -1042,13 +1042,16 @@
+ 		assigned-clocks = <&k3_clks 91 1>;
+ 		assigned-clock-parents = <&k3_clks 91 2>;
+ 		bus-width = <8>;
+-		mmc-hs400-1_8v;
++		mmc-hs200-1_8v;
+ 		mmc-ddr-1_8v;
+ 		ti,otap-del-sel-legacy = <0xf>;
+ 		ti,otap-del-sel-mmc-hs = <0xf>;
+ 		ti,otap-del-sel-ddr52 = <0x5>;
+ 		ti,otap-del-sel-hs200 = <0x6>;
+ 		ti,otap-del-sel-hs400 = <0x0>;
++		ti,itap-del-sel-legacy = <0x10>;
++		ti,itap-del-sel-mmc-hs = <0xa>;
++		ti,itap-del-sel-ddr52 = <0x3>;
+ 		ti,trm-icp = <0x8>;
+ 		ti,strobe-sel = <0x77>;
+ 		dma-coherent;
+@@ -1069,9 +1072,15 @@
+ 		ti,otap-del-sel-sdr25 = <0xf>;
+ 		ti,otap-del-sel-sdr50 = <0xc>;
+ 		ti,otap-del-sel-ddr50 = <0xc>;
++		ti,itap-del-sel-legacy = <0x0>;
++		ti,itap-del-sel-sd-hs = <0x0>;
++		ti,itap-del-sel-sdr12 = <0x0>;
++		ti,itap-del-sel-sdr25 = <0x0>;
++		ti,itap-del-sel-ddr50 = <0x2>;
+ 		ti,trm-icp = <0x8>;
+ 		ti,clkbuf-sel = <0x7>;
+ 		dma-coherent;
++		sdhci-caps-mask = <0x2 0x0>;
+ 	};
+ 
+ 	main_sdhci2: mmc@4f98000 {
+@@ -1089,9 +1098,15 @@
+ 		ti,otap-del-sel-sdr25 = <0xf>;
+ 		ti,otap-del-sel-sdr50 = <0xc>;
+ 		ti,otap-del-sel-ddr50 = <0xc>;
++		ti,itap-del-sel-legacy = <0x0>;
++		ti,itap-del-sel-sd-hs = <0x0>;
++		ti,itap-del-sel-sdr12 = <0x0>;
++		ti,itap-del-sel-sdr25 = <0x0>;
++		ti,itap-del-sel-ddr50 = <0x2>;
+ 		ti,trm-icp = <0x8>;
+ 		ti,clkbuf-sel = <0x7>;
+ 		dma-coherent;
++		sdhci-caps-mask = <0x2 0x0>;
+ 	};
+ 
+ 	usbss0: cdns-usb@4104000 {
+diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
+index bbdb54702aa7a..247011356d110 100644
+--- a/arch/arm64/crypto/aes-modes.S
++++ b/arch/arm64/crypto/aes-modes.S
+@@ -359,6 +359,7 @@ ST5(	mov		v4.16b, vctr.16b		)
+ 	ins		vctr.d[0], x8
+ 
+ 	/* apply carry to N counter blocks for N := x12 */
++	cbz		x12, 2f
+ 	adr		x16, 1f
+ 	sub		x16, x16, x12, lsl #3
+ 	br		x16
+diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
+index 683de671741a7..9c3d86e397bf3 100644
+--- a/arch/arm64/crypto/poly1305-glue.c
++++ b/arch/arm64/crypto/poly1305-glue.c
+@@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
+ 
+ static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+ 
+-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
+ {
+ 	poly1305_init_arm64(&dctx->h, key);
+ 	dctx->s[0] = get_unaligned_le32(key + 16);
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 3d10e6527f7de..858c2fcfc043b 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -713,6 +713,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+ static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+ 
+ void kvm_arm_init_debug(void);
++void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
+ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
+ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
+ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 7f06ba76698d8..84b5f79c9eab4 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -580,6 +580,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ 
+ 	vcpu->arch.has_run_once = true;
+ 
++	kvm_arm_vcpu_init_debug(vcpu);
++
+ 	if (likely(irqchip_in_kernel(kvm))) {
+ 		/*
+ 		 * Map the VGIC hardware resources before running a vcpu the
+@@ -1808,8 +1810,10 @@ static int init_hyp_mode(void)
+ 	if (is_protected_kvm_enabled()) {
+ 		init_cpu_logical_map();
+ 
+-		if (!init_psci_relay())
++		if (!init_psci_relay()) {
++			err = -ENODEV;
+ 			goto out_err;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
+index dbc8905116311..2484b2cca74bc 100644
+--- a/arch/arm64/kvm/debug.c
++++ b/arch/arm64/kvm/debug.c
+@@ -68,6 +68,64 @@ void kvm_arm_init_debug(void)
+ 	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
+ }
+ 
++/**
++ * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
++ *
++ * @vcpu:	the vcpu pointer
++ *
++ * This ensures we will trap access to:
++ *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
++ *  - Debug ROM Address (MDCR_EL2_TDRA)
++ *  - OS related registers (MDCR_EL2_TDOSA)
++ *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
++ *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
++ */
++static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
++{
++	/*
++	 * This also clears MDCR_EL2_E2PB_MASK to disable guest access
++	 * to the profiling buffer.
++	 */
++	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
++	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
++				MDCR_EL2_TPMS |
++				MDCR_EL2_TTRF |
++				MDCR_EL2_TPMCR |
++				MDCR_EL2_TDRA |
++				MDCR_EL2_TDOSA);
++
++	/* Is the VM being debugged by userspace? */
++	if (vcpu->guest_debug)
++		/* Route all software debug exceptions to EL2 */
++		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
++
++	/*
++	 * Trap debug register access when one of the following is true:
++	 *  - Userspace is using the hardware to debug the guest
++	 *  (KVM_GUESTDBG_USE_HW is set).
++	 *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
++	 */
++	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
++	    !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
++		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
++
++	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
++}
++
++/**
++ * kvm_arm_vcpu_init_debug - setup vcpu debug traps
++ *
++ * @vcpu:	the vcpu pointer
++ *
++ * Set vcpu initial mdcr_el2 value.
++ */
++void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
++{
++	preempt_disable();
++	kvm_arm_setup_mdcr_el2(vcpu);
++	preempt_enable();
++}
++
+ /**
+  * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
+  */
+@@ -83,13 +141,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+  * @vcpu:	the vcpu pointer
+  *
+  * This is called before each entry into the hypervisor to setup any
+- * debug related registers. Currently this just ensures we will trap
+- * access to:
+- *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
+- *  - Debug ROM Address (MDCR_EL2_TDRA)
+- *  - OS related registers (MDCR_EL2_TDOSA)
+- *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+- *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
++ * debug related registers.
+  *
+  * Additionally, KVM only traps guest accesses to the debug registers if
+  * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
+@@ -101,28 +153,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+ 
+ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+ {
+-	bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
+ 	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
+ 
+ 	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
+ 
+-	/*
+-	 * This also clears MDCR_EL2_E2PB_MASK to disable guest access
+-	 * to the profiling buffer.
+-	 */
+-	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+-	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
+-				MDCR_EL2_TPMS |
+-				MDCR_EL2_TTRF |
+-				MDCR_EL2_TPMCR |
+-				MDCR_EL2_TDRA |
+-				MDCR_EL2_TDOSA);
++	kvm_arm_setup_mdcr_el2(vcpu);
+ 
+ 	/* Is Guest debugging in effect? */
+ 	if (vcpu->guest_debug) {
+-		/* Route all software debug exceptions to EL2 */
+-		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
+-
+ 		/* Save guest debug state */
+ 		save_guest_debug_regs(vcpu);
+ 
+@@ -176,7 +214,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+ 
+ 			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
+ 			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+-			trap_debug = true;
+ 
+ 			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+ 						&vcpu->arch.debug_ptr->dbg_bcr[0],
+@@ -191,10 +228,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+ 	BUG_ON(!vcpu->guest_debug &&
+ 		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
+ 
+-	/* Trap debug register access */
+-	if (trap_debug)
+-		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
+-
+ 	/* If KDE or MDE are set, perform a full save/restore cycle. */
+ 	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
+ 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+@@ -203,7 +236,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+ 	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
+ 		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+ 
+-	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
+ 	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
+ }
+ 
+diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
+index ead02c6a76289..6bc88a756cb7a 100644
+--- a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
++++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
+@@ -50,6 +50,18 @@
+ #ifndef R_AARCH64_ABS64
+ #define R_AARCH64_ABS64			257
+ #endif
++#ifndef R_AARCH64_PREL64
++#define R_AARCH64_PREL64		260
++#endif
++#ifndef R_AARCH64_PREL32
++#define R_AARCH64_PREL32		261
++#endif
++#ifndef R_AARCH64_PREL16
++#define R_AARCH64_PREL16		262
++#endif
++#ifndef R_AARCH64_PLT32
++#define R_AARCH64_PLT32			314
++#endif
+ #ifndef R_AARCH64_LD_PREL_LO19
+ #define R_AARCH64_LD_PREL_LO19		273
+ #endif
+@@ -371,6 +383,12 @@ static void emit_rela_section(Elf64_Shdr *sh_rela)
+ 		case R_AARCH64_ABS64:
+ 			emit_rela_abs64(rela, sh_orig_name);
+ 			break;
++		/* Allow position-relative data relocations. */
++		case R_AARCH64_PREL64:
++		case R_AARCH64_PREL32:
++		case R_AARCH64_PREL16:
++		case R_AARCH64_PLT32:
++			break;
+ 		/* Allow relocations to generate PC-relative addressing. */
+ 		case R_AARCH64_LD_PREL_LO19:
+ 		case R_AARCH64_ADR_PREL_LO21:
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index bd354cd45d286..4b5acd84b8c87 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -242,6 +242,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ 
+ 	/* Reset core registers */
+ 	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
++	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
++	vcpu->arch.ctxt.spsr_abt = 0;
++	vcpu->arch.ctxt.spsr_und = 0;
++	vcpu->arch.ctxt.spsr_irq = 0;
++	vcpu->arch.ctxt.spsr_fiq = 0;
+ 	vcpu_gp_regs(vcpu)->pstate = pstate;
+ 
+ 	/* Reset system registers */
+diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+index 44419679f91ad..7740995de982e 100644
+--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
++++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+@@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
+ 			r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
+ 			goto out;
+ 		}
+-		rdreg = list_first_entry(&vgic->rd_regions,
+-					 struct vgic_redist_region, list);
++		rdreg = list_first_entry_or_null(&vgic->rd_regions,
++						 struct vgic_redist_region, list);
+ 		if (!rdreg)
+ 			addr_ptr = &undef_value;
+ 		else
+@@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
+ 		u64 addr;
+ 		unsigned long type = (unsigned long)attr->attr;
+ 
++		if (copy_from_user(&addr, uaddr, sizeof(addr)))
++			return -EFAULT;
++
+ 		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
+ 		if (r)
+ 			return (r == -ENODEV) ? -ENXIO : r;
+diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
+index a5636524af769..e2af6b172200e 100644
+--- a/arch/ia64/kernel/acpi.c
++++ b/arch/ia64/kernel/acpi.c
+@@ -446,7 +446,8 @@ void __init acpi_numa_fixup(void)
+ 	if (srat_num_cpus == 0) {
+ 		node_set_online(0);
+ 		node_cpuid[0].phys_id = hard_smp_processor_id();
+-		return;
++		slit_distance(0, 0) = LOCAL_DISTANCE;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -489,7 +490,7 @@ void __init acpi_numa_fixup(void)
+ 			for (j = 0; j < MAX_NUMNODES; j++)
+ 				slit_distance(i, j) = i == j ?
+ 					LOCAL_DISTANCE : REMOTE_DISTANCE;
+-		return;
++		goto out;
+ 	}
+ 
+ 	memset(numa_slit, -1, sizeof(numa_slit));
+@@ -514,6 +515,8 @@ void __init acpi_numa_fixup(void)
+ 		printk("\n");
+ 	}
+ #endif
++out:
++	node_possible_map = node_online_map;
+ }
+ #endif				/* CONFIG_ACPI_NUMA */
+ 
+diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
+index c5fe21de46a81..31149e41f9be0 100644
+--- a/arch/ia64/kernel/efi.c
++++ b/arch/ia64/kernel/efi.c
+@@ -415,10 +415,10 @@ efi_get_pal_addr (void)
+ 		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+ 
+ 		printk(KERN_INFO "CPU %d: mapping PAL code "
+-                       "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
+-                       smp_processor_id(), md->phys_addr,
+-                       md->phys_addr + efi_md_size(md),
+-                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
++			"[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
++			smp_processor_id(), md->phys_addr,
++			md->phys_addr + efi_md_size(md),
++			vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
+ #endif
+ 		return __va(md->phys_addr);
+ 	}
+@@ -560,6 +560,7 @@ efi_init (void)
+ 	{
+ 		efi_memory_desc_t *md;
+ 		void *p;
++		unsigned int i;
+ 
+ 		for (i = 0, p = efi_map_start; p < efi_map_end;
+ 		     ++i, p += efi_desc_size)
+@@ -586,7 +587,7 @@ efi_init (void)
+ 			}
+ 
+ 			printk("mem%02d: %s "
+-			       "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
++			       "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
+ 			       i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ 			       md->phys_addr,
+ 			       md->phys_addr + efi_md_size(md), size, unit);
+diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
+index 257b29184af91..e28eb1c0e0bfb 100644
+--- a/arch/m68k/include/asm/mvme147hw.h
++++ b/arch/m68k/include/asm/mvme147hw.h
+@@ -66,6 +66,9 @@ struct pcc_regs {
+ #define PCC_INT_ENAB		0x08
+ 
+ #define PCC_TIMER_INT_CLR	0x80
++
++#define PCC_TIMER_TIC_EN	0x01
++#define PCC_TIMER_COC_EN	0x02
+ #define PCC_TIMER_CLR_OVF	0x04
+ 
+ #define PCC_LEVEL_ABORT		0x07
+diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
+index 1c235d8f53f36..f55bdcb8e4f15 100644
+--- a/arch/m68k/kernel/sys_m68k.c
++++ b/arch/m68k/kernel/sys_m68k.c
+@@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+ 		ret = -EPERM;
+ 		if (!capable(CAP_SYS_ADMIN))
+ 			goto out;
++
++		mmap_read_lock(current->mm);
+ 	} else {
+ 		struct vm_area_struct *vma;
+ 
+diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
+index cfdc7f912e14e..e1e90c49a4962 100644
+--- a/arch/m68k/mvme147/config.c
++++ b/arch/m68k/mvme147/config.c
+@@ -114,8 +114,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+-	m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
+-	m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
++	m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
++			     PCC_TIMER_TIC_EN;
++	m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
++				 PCC_LEVEL_TIMER1;
+ 	clk_total += PCC_TIMER_CYCLES;
+ 	legacy_timer_tick(1);
+ 	local_irq_restore(flags);
+@@ -133,10 +135,10 @@ void mvme147_sched_init (void)
+ 	/* Init the clock with a value */
+ 	/* The clock counter increments until 0xFFFF then reloads */
+ 	m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
+-	m147_pcc->t1_cntrl = 0x0;	/* clear timer */
+-	m147_pcc->t1_cntrl = 0x3;	/* start timer */
+-	m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;  /* clear pending ints */
+-	m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
++	m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
++			     PCC_TIMER_TIC_EN;
++	m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
++				 PCC_LEVEL_TIMER1;
+ 
+ 	clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
+ }
+diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
+index 30357fe4ba6c8..b59593c7cfb9d 100644
+--- a/arch/m68k/mvme16x/config.c
++++ b/arch/m68k/mvme16x/config.c
+@@ -366,6 +366,7 @@ static u32 clk_total;
+ #define PCCTOVR1_COC_EN      0x02
+ #define PCCTOVR1_OVR_CLR     0x04
+ 
++#define PCCTIC1_INT_LEVEL    6
+ #define PCCTIC1_INT_CLR      0x08
+ #define PCCTIC1_INT_EN       0x10
+ 
+@@ -374,8 +375,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+-	out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
+-	out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
++	out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
++	out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
+ 	clk_total += PCC_TIMER_CYCLES;
+ 	legacy_timer_tick(1);
+ 	local_irq_restore(flags);
+@@ -389,14 +390,15 @@ void mvme16x_sched_init(void)
+     int irq;
+ 
+     /* Using PCCchip2 or MC2 chip tick timer 1 */
+-    out_be32(PCCTCNT1, 0);
+-    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
+-    out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
+-    out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
+     if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
+                     NULL))
+ 	panic ("Couldn't register timer int");
+ 
++    out_be32(PCCTCNT1, 0);
++    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
++    out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
++    out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
++
+     clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
+ 
+     if (brdno == 0x0162 || brdno == 0x172)
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index d89efba3d8a44..e89d63cd92d10 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -6,6 +6,7 @@ config MIPS
+ 	select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
+ 	select ARCH_HAS_FORTIFY_SOURCE
+ 	select ARCH_HAS_KCOV
++	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
+ 	select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
+ 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
+index 69cbef4723775..d4b2b430dad01 100644
+--- a/arch/mips/boot/dts/brcm/bcm3368.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
+@@ -59,7 +59,7 @@
+ 
+ 		periph_cntl: syscon@fff8c008 {
+ 			compatible = "syscon";
+-			reg = <0xfff8c000 0x4>;
++			reg = <0xfff8c008 0x4>;
+ 			native-endian;
+ 		};
+ 
+diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
+index e0021ff9f144d..9405944368726 100644
+--- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
+@@ -59,7 +59,7 @@
+ 
+ 		periph_cntl: syscon@10000008 {
+ 			compatible = "syscon";
+-			reg = <0x10000000 0xc>;
++			reg = <0x10000008 0x4>;
+ 			native-endian;
+ 		};
+ 
+diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
+index 9d93e7f5e6fc7..d79c88c2fc9ca 100644
+--- a/arch/mips/boot/dts/brcm/bcm6358.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
+@@ -59,7 +59,7 @@
+ 
+ 		periph_cntl: syscon@fffe0008 {
+ 			compatible = "syscon";
+-			reg = <0xfffe0000 0x4>;
++			reg = <0xfffe0008 0x4>;
+ 			native-endian;
+ 		};
+ 
+diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
+index eb10341b75bae..8a21cb761ffd4 100644
+--- a/arch/mips/boot/dts/brcm/bcm6362.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
+@@ -59,7 +59,7 @@
+ 
+ 		periph_cntl: syscon@10000008 {
+ 			compatible = "syscon";
+-			reg = <0x10000000 0xc>;
++			reg = <0x10000008 0x4>;
+ 			native-endian;
+ 		};
+ 
+diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
+index 52c19f40b9cca..8e87867ebc04a 100644
+--- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
+@@ -59,7 +59,7 @@
+ 
+ 		periph_cntl: syscon@100000008 {
+ 			compatible = "syscon";
+-			reg = <0x10000000 0xc>;
++			reg = <0x10000008 0x4>;
+ 			native-endian;
+ 		};
+ 
+diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
+index fc881b46d9111..bc6110fb98e0a 100644
+--- a/arch/mips/crypto/poly1305-glue.c
++++ b/arch/mips/crypto/poly1305-glue.c
+@@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
+ asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
+ asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
+ 
+-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
+ {
+ 	poly1305_init_mips(&dctx->h, key);
+ 	dctx->s[0] = get_unaligned_le32(key + 16);
+diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S
+index a7f51f97b9102..c45ad27594218 100644
+--- a/arch/mips/generic/board-boston.its.S
++++ b/arch/mips/generic/board-boston.its.S
+@@ -1,22 +1,22 @@
+ / {
+ 	images {
+-		fdt@boston {
++		fdt-boston {
+ 			description = "img,boston Device Tree";
+ 			data = /incbin/("boot/dts/img/boston.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 	};
+ 
+ 	configurations {
+-		conf@boston {
++		conf-boston {
+ 			description = "Boston Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@boston";
++			kernel = "kernel";
++			fdt = "fdt-boston";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/generic/board-jaguar2.its.S b/arch/mips/generic/board-jaguar2.its.S
+index fb0e589eeff71..c2b8d479b26cd 100644
+--- a/arch/mips/generic/board-jaguar2.its.S
++++ b/arch/mips/generic/board-jaguar2.its.S
+@@ -1,23 +1,23 @@
+ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+ / {
+ 	images {
+-		fdt@jaguar2_pcb110 {
++		fdt-jaguar2_pcb110 {
+ 			description = "MSCC Jaguar2 PCB110 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/jaguar2_pcb110.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+-		fdt@jaguar2_pcb111 {
++		fdt-jaguar2_pcb111 {
+ 			description = "MSCC Jaguar2 PCB111 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/jaguar2_pcb111.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+@@ -26,14 +26,14 @@
+ 	configurations {
+ 		pcb110 {
+ 			description = "Jaguar2 Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@jaguar2_pcb110";
++			kernel = "kernel";
++			fdt = "fdt-jaguar2_pcb110";
+ 			ramdisk = "ramdisk";
+ 		};
+ 		pcb111 {
+ 			description = "Jaguar2 Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@jaguar2_pcb111";
++			kernel = "kernel";
++			fdt = "fdt-jaguar2_pcb111";
+ 			ramdisk = "ramdisk";
+ 		};
+ 	};
+diff --git a/arch/mips/generic/board-luton.its.S b/arch/mips/generic/board-luton.its.S
+index 39a543f62f258..bd9837c9af976 100644
+--- a/arch/mips/generic/board-luton.its.S
++++ b/arch/mips/generic/board-luton.its.S
+@@ -1,13 +1,13 @@
+ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+ / {
+ 	images {
+-		fdt@luton_pcb091 {
++		fdt-luton_pcb091 {
+ 			description = "MSCC Luton PCB091 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/luton_pcb091.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+@@ -16,8 +16,8 @@
+ 	configurations {
+ 		pcb091 {
+ 			description = "Luton Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@luton_pcb091";
++			kernel = "kernel";
++			fdt = "fdt-luton_pcb091";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
+index e4cb4f95a8cc1..0a2e8f7a8526f 100644
+--- a/arch/mips/generic/board-ni169445.its.S
++++ b/arch/mips/generic/board-ni169445.its.S
+@@ -1,22 +1,22 @@
+ / {
+ 	images {
+-		fdt@ni169445 {
++		fdt-ni169445 {
+ 			description = "NI 169445 device tree";
+ 			data = /incbin/("boot/dts/ni/169445.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 	};
+ 
+ 	configurations {
+-		conf@ni169445 {
++		conf-ni169445 {
+ 			description = "NI 169445 Linux Kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@ni169445";
++			kernel = "kernel";
++			fdt = "fdt-ni169445";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S
+index 3da23988149a6..8c7e3a1b68d3d 100644
+--- a/arch/mips/generic/board-ocelot.its.S
++++ b/arch/mips/generic/board-ocelot.its.S
+@@ -1,40 +1,40 @@
+ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+ / {
+ 	images {
+-		fdt@ocelot_pcb123 {
++		fdt-ocelot_pcb123 {
+ 			description = "MSCC Ocelot PCB123 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 
+-		fdt@ocelot_pcb120 {
++		fdt-ocelot_pcb120 {
+ 			description = "MSCC Ocelot PCB120 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 	};
+ 
+ 	configurations {
+-		conf@ocelot_pcb123 {
++		conf-ocelot_pcb123 {
+ 			description = "Ocelot Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@ocelot_pcb123";
++			kernel = "kernel";
++			fdt = "fdt-ocelot_pcb123";
+ 		};
+ 
+-		conf@ocelot_pcb120 {
++		conf-ocelot_pcb120 {
+ 			description = "Ocelot Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@ocelot_pcb120";
++			kernel = "kernel";
++			fdt = "fdt-ocelot_pcb120";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/generic/board-serval.its.S b/arch/mips/generic/board-serval.its.S
+index 4ea4fc9d757f3..dde833efe980a 100644
+--- a/arch/mips/generic/board-serval.its.S
++++ b/arch/mips/generic/board-serval.its.S
+@@ -1,13 +1,13 @@
+ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+ / {
+ 	images {
+-		fdt@serval_pcb105 {
++		fdt-serval_pcb105 {
+ 			description = "MSCC Serval PCB105 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/serval_pcb105.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+@@ -16,8 +16,8 @@
+ 	configurations {
+ 		pcb105 {
+ 			description = "Serval Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@serval_pcb105";
++			kernel = "kernel";
++			fdt = "fdt-serval_pcb105";
+ 			ramdisk = "ramdisk";
+ 		};
+ 	};
+diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
+index a2e773d3f14f4..08c1e900eb4ed 100644
+--- a/arch/mips/generic/board-xilfpga.its.S
++++ b/arch/mips/generic/board-xilfpga.its.S
+@@ -1,22 +1,22 @@
+ / {
+ 	images {
+-		fdt@xilfpga {
++		fdt-xilfpga {
+ 			description = "MIPSfpga (xilfpga) Device Tree";
+ 			data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 	};
+ 
+ 	configurations {
+-		conf@xilfpga {
++		conf-xilfpga {
+ 			description = "MIPSfpga Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@xilfpga";
++			kernel = "kernel";
++			fdt = "fdt-xilfpga";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
+index 1a08438fd8930..3e254676540f4 100644
+--- a/arch/mips/generic/vmlinux.its.S
++++ b/arch/mips/generic/vmlinux.its.S
+@@ -6,7 +6,7 @@
+ 	#address-cells = <ADDR_CELLS>;
+ 
+ 	images {
+-		kernel@0 {
++		kernel {
+ 			description = KERNEL_NAME;
+ 			data = /incbin/(VMLINUX_BINARY);
+ 			type = "kernel";
+@@ -15,18 +15,18 @@
+ 			compression = VMLINUX_COMPRESSION;
+ 			load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
+ 			entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 	};
+ 
+ 	configurations {
+-		default = "conf@default";
++		default = "conf-default";
+ 
+-		conf@default {
++		conf-default {
+ 			description = "Generic Linux kernel";
+-			kernel = "kernel@0";
++			kernel = "kernel";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
+index 86f2323ebe6bc..ca83ada7015f5 100644
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -44,8 +44,7 @@
+ 	.endm
+ #endif
+ 
+-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+-    defined(CONFIG_CPU_MIPSR6)
++#ifdef CONFIG_CPU_HAS_DIEI
+ 	.macro	local_irq_enable reg=t0
+ 	ei
+ 	irq_enable_hazard
+diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
+index cfa788bca8719..1c664b23c0f90 100644
+--- a/arch/mips/loongson64/init.c
++++ b/arch/mips/loongson64/init.c
+@@ -126,7 +126,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
+ 		return -ENOMEM;
+ 
+ 	range->fwnode = fwnode;
+-	range->size = size;
++	range->size = size = round_up(size, PAGE_SIZE);
+ 	range->hw_start = hw_start;
+ 	range->flags = LOGIC_PIO_CPU_MMIO;
+ 
+diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
+index 39052de915f34..3a909194284a6 100644
+--- a/arch/mips/pci/pci-legacy.c
++++ b/arch/mips/pci/pci-legacy.c
+@@ -166,8 +166,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
+ 			res = hose->mem_resource;
+ 			break;
+ 		}
+-		if (res != NULL)
+-			of_pci_range_to_resource(&range, node, res);
++		if (res != NULL) {
++			res->name = node->full_name;
++			res->flags = range.flags;
++			res->start = range.cpu_addr;
++			res->end = range.cpu_addr + range.size - 1;
++			res->parent = res->child = res->sibling = NULL;
++		}
+ 	}
+ }
+ 
+diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
+index d360616037525..e032932348d6f 100644
+--- a/arch/mips/pci/pci-mt7620.c
++++ b/arch/mips/pci/pci-mt7620.c
+@@ -30,6 +30,7 @@
+ #define RALINK_GPIOMODE			0x60
+ 
+ #define PPLL_CFG1			0x9c
++#define PPLL_LD				BIT(23)
+ 
+ #define PPLL_DRV			0xa0
+ #define PDRV_SW_SET			BIT(31)
+@@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev)
+ 	rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
+ 	mdelay(100);
+ 
+-	if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
+-		dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
++	if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
++		dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
+ 		reset_control_assert(rstpcie0);
+ 		rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
+ 		return -1;
+diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
+index e1f12e3981363..f1538d2be89e5 100644
+--- a/arch/mips/pci/pci-rt2880.c
++++ b/arch/mips/pci/pci-rt2880.c
+@@ -180,7 +180,6 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
+ 
+ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+-	u16 cmd;
+ 	int irq = -1;
+ 
+ 	if (dev->bus->number != 0)
+@@ -188,8 +187,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ 
+ 	switch (PCI_SLOT(dev->devfn)) {
+ 	case 0x00:
+-		rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
+-		(void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
+ 		break;
+ 	case 0x11:
+ 		irq = RT288X_CPU_IRQ_PCI;
+@@ -201,16 +198,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ 		break;
+ 	}
+ 
+-	pci_write_config_byte((struct pci_dev *) dev,
+-		PCI_CACHE_LINE_SIZE, 0x14);
+-	pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
+-	pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
+-	cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+-		PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
+-		PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
+-	pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
+-	pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
+-			      dev->irq);
+ 	return irq;
+ }
+ 
+@@ -251,6 +238,30 @@ static int rt288x_pci_probe(struct platform_device *pdev)
+ 
+ int pcibios_plat_dev_init(struct pci_dev *dev)
+ {
++	static bool slot0_init;
++
++	/*
++	 * Nobody seems to initialize slot 0, but this platform requires it, so
++	 * do it once when some other slot is being enabled. The PCI subsystem
++	 * should configure other slots properly, so no need to do anything
++	 * special for those.
++	 */
++	if (!slot0_init && dev->bus->number == 0) {
++		u16 cmd;
++		u32 bar0;
++
++		slot0_init = true;
++
++		pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
++					   0x08000000);
++		pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
++					  &bar0);
++
++		pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
++		cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
++		pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 386ae12d8523b..57c0ab71d51e7 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -224,7 +224,7 @@ config PPC
+ 	select HAVE_LIVEPATCH			if HAVE_DYNAMIC_FTRACE_WITH_REGS
+ 	select HAVE_MOD_ARCH_SPECIFIC
+ 	select HAVE_NMI				if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
+-	select HAVE_HARDLOCKUP_DETECTOR_ARCH	if (PPC64 && PPC_BOOK3S)
++	select HAVE_HARDLOCKUP_DETECTOR_ARCH	if PPC64 && PPC_BOOK3S && SMP
+ 	select HAVE_OPTPROBES			if PPC64
+ 	select HAVE_PERF_EVENTS
+ 	select HAVE_PERF_EVENTS_NMI		if PPC64
+diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
+index ae084357994e8..6342f9da45455 100644
+--- a/arch/powerpc/Kconfig.debug
++++ b/arch/powerpc/Kconfig.debug
+@@ -353,6 +353,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
+ config FAIL_IOMMU
+ 	bool "Fault-injection capability for IOMMU"
+ 	depends on FAULT_INJECTION
++	depends on PCI || IBMVIO
+ 	help
+ 	  Provide fault-injection capability for IOMMU. Each device can
+ 	  be selectively enabled via the fail_iommu property.
+diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
+index 058601efbc8a3..b703330459b80 100644
+--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
+@@ -7,6 +7,7 @@
+ #ifndef __ASSEMBLY__
+ #include <linux/mmdebug.h>
+ #include <linux/bug.h>
++#include <linux/sizes.h>
+ #endif
+ 
+ /*
+@@ -323,7 +324,8 @@ extern unsigned long pci_io_base;
+ #define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
+ #define IOREMAP_BASE	(PHB_IO_END)
+ #define IOREMAP_START	(ioremap_bot)
+-#define IOREMAP_END	(KERN_IO_END)
++#define IOREMAP_END	(KERN_IO_END - FIXADDR_SIZE)
++#define FIXADDR_SIZE	SZ_32M
+ 
+ /* Advertise special mapping type for AGP */
+ #define HAVE_PAGE_AGP
+diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
+index c7813dc628fc9..59cab558e2f05 100644
+--- a/arch/powerpc/include/asm/book3s/64/radix.h
++++ b/arch/powerpc/include/asm/book3s/64/radix.h
+@@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
+ 	 * from ptesync, it should probably go into update_mmu_cache, rather
+ 	 * than set_pte_at (which is used to set ptes unrelated to faults).
+ 	 *
+-	 * Spurious faults to vmalloc region are not tolerated, so there is
+-	 * a ptesync in flush_cache_vmap.
++	 * Spurious faults from the kernel memory are not tolerated, so there
++	 * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
++	 * the pte update sequence from ISA Book III 6.10 Translation Table
++	 * Update Synchronization Requirements.
+ 	 */
+ }
+ 
+diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
+index 8d03c16a36635..947b5b9c44241 100644
+--- a/arch/powerpc/include/asm/fixmap.h
++++ b/arch/powerpc/include/asm/fixmap.h
+@@ -23,12 +23,17 @@
+ #include <asm/kmap_size.h>
+ #endif
+ 
++#ifdef CONFIG_PPC64
++#define FIXADDR_TOP	(IOREMAP_END + FIXADDR_SIZE)
++#else
++#define FIXADDR_SIZE	0
+ #ifdef CONFIG_KASAN
+ #include <asm/kasan.h>
+ #define FIXADDR_TOP	(KASAN_SHADOW_START - PAGE_SIZE)
+ #else
+ #define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
+ #endif
++#endif
+ 
+ /*
+  * Here we define all the compile-time 'special' virtual
+@@ -50,6 +55,7 @@
+  */
+ enum fixed_addresses {
+ 	FIX_HOLE,
++#ifdef CONFIG_PPC32
+ 	/* reserve the top 128K for early debugging purposes */
+ 	FIX_EARLY_DEBUG_TOP = FIX_HOLE,
+ 	FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
+@@ -72,6 +78,7 @@ enum fixed_addresses {
+ 		       FIX_IMMR_SIZE,
+ #endif
+ 	/* FIX_PCIE_MCFG, */
++#endif /* CONFIG_PPC32 */
+ 	__end_of_permanent_fixed_addresses,
+ 
+ #define NR_FIX_BTMAPS		(SZ_256K / PAGE_SIZE)
+@@ -98,6 +105,8 @@ enum fixed_addresses {
+ static inline void __set_fixmap(enum fixed_addresses idx,
+ 				phys_addr_t phys, pgprot_t flags)
+ {
++	BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
++
+ 	if (__builtin_constant_p(idx))
+ 		BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ 	else if (WARN_ON(idx >= __end_of_fixed_addresses))
+diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
+index 6cb8aa3571917..57cd3892bfe05 100644
+--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
+@@ -6,6 +6,8 @@
+  * the ppc64 non-hashed page table.
+  */
+ 
++#include <linux/sizes.h>
++
+ #include <asm/nohash/64/pgtable-4k.h>
+ #include <asm/barrier.h>
+ #include <asm/asm-const.h>
+@@ -54,7 +56,8 @@
+ #define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
+ #define IOREMAP_BASE	(PHB_IO_END)
+ #define IOREMAP_START	(ioremap_bot)
+-#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
++#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
++#define FIXADDR_SIZE	SZ_32M
+ 
+ 
+ /*
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index 7a13bc20f0a0c..47081a9e13ca4 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -121,6 +121,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
+ 	return per_cpu(cpu_sibling_map, cpu);
+ }
+ 
++static inline struct cpumask *cpu_core_mask(int cpu)
++{
++	return per_cpu(cpu_core_map, cpu);
++}
++
+ static inline struct cpumask *cpu_l2_cache_mask(int cpu)
+ {
+ 	return per_cpu(cpu_l2_cache_map, cpu);
+diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
+index 8482739d42f38..eddf362caedce 100644
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -292,7 +292,7 @@ static void fadump_show_config(void)
+  * that is required for a kernel to boot successfully.
+  *
+  */
+-static inline u64 fadump_calculate_reserve_size(void)
++static __init u64 fadump_calculate_reserve_size(void)
+ {
+ 	u64 base, size, bootmem_min;
+ 	int ret;
+diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
+index c475a229a42ac..352346e14a084 100644
+--- a/arch/powerpc/kernel/interrupt.c
++++ b/arch/powerpc/kernel/interrupt.c
+@@ -34,11 +34,11 @@ notrace long system_call_exception(long r3, long r4, long r5,
+ 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ 		BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+ 
++	trace_hardirqs_off(); /* finish reconciling */
++
+ 	CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
+ 	user_exit_irqoff();
+ 
+-	trace_hardirqs_off(); /* finish reconciling */
+-
+ 	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
+ 		BUG_ON(!(regs->msr & MSR_RI));
+ 	BUG_ON(!(regs->msr & MSR_PR));
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 9a4797d1d40d5..a8b2d6bfc1ca7 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -267,7 +267,7 @@ static struct feature_property {
+ };
+ 
+ #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
+-static inline void identical_pvr_fixup(unsigned long node)
++static __init void identical_pvr_fixup(unsigned long node)
+ {
+ 	unsigned int pvr;
+ 	const char *model = of_get_flat_dt_prop(node, "model", NULL);
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 5a4d59a1070d5..5c7ce1d506312 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -1057,17 +1057,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ 				local_memory_node(numa_cpu_lookup_table[cpu]));
+ 		}
+ #endif
+-		/*
+-		 * cpu_core_map is now more updated and exists only since
+-		 * its been exported for long. It only will have a snapshot
+-		 * of cpu_cpu_mask.
+-		 */
+-		cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
+ 	}
+ 
+ 	/* Init the cpumasks so the boot CPU is related to itself */
+ 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
+ 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
++	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+ 
+ 	if (has_coregroup_support())
+ 		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
+@@ -1408,6 +1403,9 @@ static void remove_cpu_from_masks(int cpu)
+ 			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
+ 	}
+ 
++	for_each_cpu(i, cpu_core_mask(cpu))
++		set_cpus_unrelated(cpu, i, cpu_core_mask);
++
+ 	if (has_coregroup_support()) {
+ 		for_each_cpu(i, cpu_coregroup_mask(cpu))
+ 			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
+@@ -1468,8 +1466,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
+ 
+ static void add_cpu_to_masks(int cpu)
+ {
++	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
+ 	int first_thread = cpu_first_thread_sibling(cpu);
++	int chip_id = cpu_to_chip_id(cpu);
+ 	cpumask_var_t mask;
++	bool ret;
+ 	int i;
+ 
+ 	/*
+@@ -1485,12 +1486,36 @@ static void add_cpu_to_masks(int cpu)
+ 	add_cpu_to_smallcore_masks(cpu);
+ 
+ 	/* In CPU-hotplug path, hence use GFP_ATOMIC */
+-	alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
++	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
+ 	update_mask_by_l2(cpu, &mask);
+ 
+ 	if (has_coregroup_support())
+ 		update_coregroup_mask(cpu, &mask);
+ 
++	if (chip_id == -1 || !ret) {
++		cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
++		goto out;
++	}
++
++	if (shared_caches)
++		submask_fn = cpu_l2_cache_mask;
++
++	/* Update core_mask with all the CPUs that are part of submask */
++	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
++
++	/* Skip all CPUs already part of current CPU core mask */
++	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
++
++	for_each_cpu(i, mask) {
++		if (chip_id == cpu_to_chip_id(i)) {
++			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
++			cpumask_andnot(mask, mask, submask_fn(i));
++		} else {
++			cpumask_andnot(mask, mask, cpu_core_mask(i));
++		}
++	}
++
++out:
+ 	free_cpumask_var(mask);
+ }
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 13bad6bf4c958..208a053c9adfd 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3728,7 +3728,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ 	vcpu->arch.dec_expires = dec + tb;
+ 	vcpu->cpu = -1;
+ 	vcpu->arch.thread_cpu = -1;
++	/* Save guest CTRL register, set runlatch to 1 */
+ 	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
++	if (!(vcpu->arch.ctrl & 1))
++		mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
+ 
+ 	vcpu->arch.iamr = mfspr(SPRN_IAMR);
+ 	vcpu->arch.pspb = mfspr(SPRN_PSPB);
+diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
+index 567e0c6b3978e..03819c259f0ab 100644
+--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
++++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
+@@ -428,12 +428,14 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
+ 
+ void hash__mark_rodata_ro(void)
+ {
+-	unsigned long start, end;
++	unsigned long start, end, pp;
+ 
+ 	start = (unsigned long)_stext;
+ 	end = (unsigned long)__init_begin;
+ 
+-	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
++	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);
++
++	WARN_ON(!hash__change_memory_range(start, end, pp));
+ }
+ 
+ void hash__mark_initmem_nx(void)
+diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
+index 581b20a2feaf6..7719995323c3f 100644
+--- a/arch/powerpc/mm/book3s64/hash_utils.c
++++ b/arch/powerpc/mm/book3s64/hash_utils.c
+@@ -1545,10 +1545,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
+ 	if (user_mode(regs) || (region_id == USER_REGION_ID))
+ 		access &= ~_PAGE_PRIVILEGED;
+ 
+-	if (regs->trap == 0x400)
++	if (TRAP(regs) == 0x400)
+ 		access |= _PAGE_EXEC;
+ 
+-	err = hash_page_mm(mm, ea, access, regs->trap, flags);
++	err = hash_page_mm(mm, ea, access, TRAP(regs), flags);
+ 	if (unlikely(err < 0)) {
+ 		// failed to instert a hash PTE due to an hypervisor error
+ 		if (user_mode(regs)) {
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 98f0b243c1ab2..39d488a212a04 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -108,7 +108,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
+ 
+ set_the_pte:
+ 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
+-	smp_wmb();
++	asm volatile("ptesync": : :"memory");
+ 	return 0;
+ }
+ 
+@@ -168,7 +168,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
+ 
+ set_the_pte:
+ 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
+-	smp_wmb();
++	asm volatile("ptesync": : :"memory");
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 4e8ce6d852323..7a59a5c9aa5dc 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -54,7 +54,6 @@
+ 
+ #include <mm/mmu_decl.h>
+ 
+-static DEFINE_MUTEX(linear_mapping_mutex);
+ unsigned long long memory_limit;
+ bool init_mem_is_free;
+ 
+@@ -72,6 +71,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ EXPORT_SYMBOL(phys_mem_access_prot);
+ 
+ #ifdef CONFIG_MEMORY_HOTPLUG
++static DEFINE_MUTEX(linear_mapping_mutex);
+ 
+ #ifdef CONFIG_NUMA
+ int memory_add_physaddr_to_nid(u64 start)
+diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
+index e4f577da33d8b..8b5eeb6fb2fb3 100644
+--- a/arch/powerpc/perf/isa207-common.c
++++ b/arch/powerpc/perf/isa207-common.c
+@@ -447,8 +447,8 @@ ebb_bhrb:
+ 	 * EBB events are pinned & exclusive, so this should never actually
+ 	 * hit, but we leave it as a fallback in case.
+ 	 */
+-	mask  |= CNST_EBB_VAL(ebb);
+-	value |= CNST_EBB_MASK;
++	mask  |= CNST_EBB_MASK;
++	value |= CNST_EBB_VAL(ebb);
+ 
+ 	*maskp = mask;
+ 	*valp = value;
+diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h
+index e45dafe818ed4..93be7197d2502 100644
+--- a/arch/powerpc/perf/power10-events-list.h
++++ b/arch/powerpc/perf/power10-events-list.h
+@@ -75,5 +75,5 @@ EVENT(PM_RUN_INST_CMPL_ALT,			0x00002);
+  *     thresh end (TE)
+  */
+ 
+-EVENT(MEM_LOADS,				0x34340401e0);
+-EVENT(MEM_STORES,				0x343c0401e0);
++EVENT(MEM_LOADS,				0x35340401e0);
++EVENT(MEM_STORES,				0x353c0401e0);
+diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
+index 11475c58ea431..afee8b1515a8e 100644
+--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
++++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
+@@ -181,7 +181,7 @@ sram_code:
+   udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
+ 	mullw	r12, r12, r11
+ 	mftb	r13	/* start */
+-	addi	r12, r13, r12 /* end */
++	add	r12, r13, r12 /* end */
+     1:
+ 	mftb	r13	/* current */
+ 	cmp	cr0, r13, r12
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 9fc5217f0c8e5..836cbbe0ecc56 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -1229,7 +1229,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 	if (pmem_present) {
+ 		if (query.largest_available_block >=
+ 		    (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
+-			len = MAX_PHYSMEM_BITS - page_shift;
++			len = MAX_PHYSMEM_BITS;
+ 		else
+ 			dev_info(&dev->dev, "Skipping ibm,pmemory");
+ 	}
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 3805519a64697..cd38bd421f381 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -977,11 +977,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
+ 	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
+ 	BUG_ON(slot == -1);
+ 
+-	flags = newpp & 7;
++	flags = newpp & (HPTE_R_PP | HPTE_R_N);
+ 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ 		/* Move pp0 into bit 8 (IBM 55) */
+ 		flags |= (newpp & HPTE_R_PP0) >> 55;
+ 
++	flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
++
+ 	lpar_rc = plpar_pte_protect(flags, slot, 0);
+ 
+ 	BUG_ON(lpar_rc != H_SUCCESS);
+diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
+index f9ae17e8a0f46..a8f9140a24fa3 100644
+--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
++++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
+@@ -50,6 +50,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
+ int remove_phb_dynamic(struct pci_controller *phb)
+ {
+ 	struct pci_bus *b = phb->bus;
++	struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
+ 	struct resource *res;
+ 	int rc, i;
+ 
+@@ -76,7 +77,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
+ 	/* Remove the PCI bus and unregister the bridge device from sysfs */
+ 	phb->bus = NULL;
+ 	pci_remove_bus(b);
+-	device_unregister(b->bridge);
++	host_bridge->bus = NULL;
++	device_unregister(&host_bridge->dev);
+ 
+ 	/* Now release the IO resource */
+ 	if (res->flags & IORESOURCE_IO)
+diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
+index 9cb4fc839fd5d..429053d0402ad 100644
+--- a/arch/powerpc/platforms/pseries/vio.c
++++ b/arch/powerpc/platforms/pseries/vio.c
+@@ -1285,6 +1285,10 @@ static int vio_bus_remove(struct device *dev)
+ int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
+ 			  const char *mod_name)
+ {
++	// vio_bus_type is only initialised for pseries
++	if (!machine_is(pseries))
++		return -ENODEV;
++
+ 	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
+ 
+ 	/* fill in 'struct driver' fields */
+diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
+index 595310e056f4d..5cacb632eb37a 100644
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -253,17 +253,20 @@ notrace void xmon_xive_do_dump(int cpu)
+ 	xmon_printf("\n");
+ }
+ 
++static struct irq_data *xive_get_irq_data(u32 hw_irq)
++{
++	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
++
++	return irq ? irq_get_irq_data(irq) : NULL;
++}
++
+ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
+ {
+-	struct irq_chip *chip = irq_data_get_irq_chip(d);
+ 	int rc;
+ 	u32 target;
+ 	u8 prio;
+ 	u32 lirq;
+ 
+-	if (!is_xive_irq(chip))
+-		return -EINVAL;
+-
+ 	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
+ 	if (rc) {
+ 		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
+@@ -273,6 +276,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
+ 	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
+ 		    hw_irq, target, prio, lirq);
+ 
++	if (!d)
++		d = xive_get_irq_data(hw_irq);
++
+ 	if (d) {
+ 		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ 		u64 val = xive_esb_read(xd, XIVE_ESB_GET);
+@@ -1599,6 +1605,8 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
+ 	u32 target;
+ 	u8 prio;
+ 	u32 lirq;
++	struct xive_irq_data *xd;
++	u64 val;
+ 
+ 	if (!is_xive_irq(chip))
+ 		return;
+@@ -1612,17 +1620,14 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
+ 	seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
+ 		   hw_irq, target, prio, lirq);
+ 
+-	if (d) {
+-		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+-		u64 val = xive_esb_read(xd, XIVE_ESB_GET);
+-
+-		seq_printf(m, "flags=%c%c%c PQ=%c%c",
+-			   xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
+-			   xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
+-			   xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
+-			   val & XIVE_ESB_VAL_P ? 'P' : '-',
+-			   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+-	}
++	xd = irq_data_get_irq_handler_data(d);
++	val = xive_esb_read(xd, XIVE_ESB_GET);
++	seq_printf(m, "flags=%c%c%c PQ=%c%c",
++		   xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
++		   xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
++		   xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
++		   val & XIVE_ESB_VAL_P ? 'P' : '-',
++		   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ 	seq_puts(m, "\n");
+ }
+ 
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 72134f9f6ff52..5aab59ad56881 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -937,9 +937,9 @@ static int __init setup_hwcaps(void)
+ 	if (MACHINE_HAS_VX) {
+ 		elf_hwcap |= HWCAP_S390_VXRS;
+ 		if (test_facility(134))
+-			elf_hwcap |= HWCAP_S390_VXRS_EXT;
+-		if (test_facility(135))
+ 			elf_hwcap |= HWCAP_S390_VXRS_BCD;
++		if (test_facility(135))
++			elf_hwcap |= HWCAP_S390_VXRS_EXT;
+ 		if (test_facility(148))
+ 			elf_hwcap |= HWCAP_S390_VXRS_EXT2;
+ 		if (test_facility(152))
+diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
+index 6d6b57059493e..b9f85b2dc053f 100644
+--- a/arch/s390/kvm/gaccess.c
++++ b/arch/s390/kvm/gaccess.c
+@@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
+  * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
+  * @sg: pointer to the shadow guest address space structure
+  * @saddr: faulting address in the shadow gmap
+- * @pgt: pointer to the page table address result
++ * @pgt: pointer to the beginning of the page table for the given address if
++ *	 successful (return value 0), or to the first invalid DAT entry in
++ *	 case of exceptions (return value > 0)
+  * @fake: pgt references contiguous guest memory block, not a pgtable
+  */
+ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+@@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ 			rfte.val = ptr;
+ 			goto shadow_r2t;
+ 		}
++		*pgt = ptr + vaddr.rfx * 8;
+ 		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
+ 		if (rc)
+ 			return rc;
+@@ -1060,6 +1063,7 @@ shadow_r2t:
+ 			rste.val = ptr;
+ 			goto shadow_r3t;
+ 		}
++		*pgt = ptr + vaddr.rsx * 8;
+ 		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
+ 		if (rc)
+ 			return rc;
+@@ -1087,6 +1091,7 @@ shadow_r3t:
+ 			rtte.val = ptr;
+ 			goto shadow_sgt;
+ 		}
++		*pgt = ptr + vaddr.rtx * 8;
+ 		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
+ 		if (rc)
+ 			return rc;
+@@ -1123,6 +1128,7 @@ shadow_sgt:
+ 			ste.val = ptr;
+ 			goto shadow_pgt;
+ 		}
++		*pgt = ptr + vaddr.sx * 8;
+ 		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
+ 		if (rc)
+ 			return rc;
+@@ -1157,6 +1163,8 @@ shadow_pgt:
+  * @vcpu: virtual cpu
+  * @sg: pointer to the shadow guest address space structure
+  * @saddr: faulting address in the shadow gmap
++ * @datptr: will contain the address of the faulting DAT table entry, or of
++ *	    the valid leaf, plus some flags
+  *
+  * Returns: - 0 if the shadow fault was successfully resolved
+  *	    - > 0 (pgm exception code) on exceptions while faulting
+@@ -1165,11 +1173,11 @@ shadow_pgt:
+  *	    - -ENOMEM if out of memory
+  */
+ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
+-			  unsigned long saddr)
++			  unsigned long saddr, unsigned long *datptr)
+ {
+ 	union vaddress vaddr;
+ 	union page_table_entry pte;
+-	unsigned long pgt;
++	unsigned long pgt = 0;
+ 	int dat_protection, fake;
+ 	int rc;
+ 
+@@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
+ 		pte.val = pgt + vaddr.px * PAGE_SIZE;
+ 		goto shadow_page;
+ 	}
+-	if (!rc)
+-		rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
++
++	switch (rc) {
++	case PGM_SEGMENT_TRANSLATION:
++	case PGM_REGION_THIRD_TRANS:
++	case PGM_REGION_SECOND_TRANS:
++	case PGM_REGION_FIRST_TRANS:
++		pgt |= PEI_NOT_PTE;
++		break;
++	case 0:
++		pgt += vaddr.px * 8;
++		rc = gmap_read_table(sg->parent, pgt, &pte.val);
++	}
++	if (datptr)
++		*datptr = pgt | dat_protection * PEI_DAT_PROT;
+ 	if (!rc && pte.i)
+ 		rc = PGM_PAGE_TRANSLATION;
+ 	if (!rc && pte.z)
+diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
+index f4c51756c4623..7c72a5e3449f8 100644
+--- a/arch/s390/kvm/gaccess.h
++++ b/arch/s390/kvm/gaccess.h
+@@ -18,17 +18,14 @@
+ 
+ /**
+  * kvm_s390_real_to_abs - convert guest real address to guest absolute address
+- * @vcpu - guest virtual cpu
++ * @prefix - guest prefix
+  * @gra - guest real address
+  *
+  * Returns the guest absolute address that corresponds to the passed guest real
+- * address @gra of a virtual guest cpu by applying its prefix.
++ * address @gra by applying the given prefix.
+  */
+-static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+-						 unsigned long gra)
++static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
+ {
+-	unsigned long prefix  = kvm_s390_get_prefix(vcpu);
+-
+ 	if (gra < 2 * PAGE_SIZE)
+ 		gra += prefix;
+ 	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
+@@ -36,6 +33,43 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+ 	return gra;
+ }
+ 
++/**
++ * kvm_s390_real_to_abs - convert guest real address to guest absolute address
++ * @vcpu - guest virtual cpu
++ * @gra - guest real address
++ *
++ * Returns the guest absolute address that corresponds to the passed guest real
++ * address @gra of a virtual guest cpu by applying its prefix.
++ */
++static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
++						 unsigned long gra)
++{
++	return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
++}
++
++/**
++ * _kvm_s390_logical_to_effective - convert guest logical to effective address
++ * @psw: psw of the guest
++ * @ga: guest logical address
++ *
++ * Convert a guest logical address to an effective address by applying the
++ * rules of the addressing mode defined by bits 31 and 32 of the given PSW
++ * (extended/basic addressing mode).
++ *
++ * Depending on the addressing mode, the upper 40 bits (24 bit addressing
++ * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
++ * mode) of @ga will be zeroed and the remaining bits will be returned.
++ */
++static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
++							   unsigned long ga)
++{
++	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
++		return ga;
++	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
++		return ga & ((1UL << 31) - 1);
++	return ga & ((1UL << 24) - 1);
++}
++
+ /**
+  * kvm_s390_logical_to_effective - convert guest logical to effective address
+  * @vcpu: guest virtual cpu
+@@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+ static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
+ 							  unsigned long ga)
+ {
+-	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+-
+-	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
+-		return ga;
+-	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
+-		return ga & ((1UL << 31) - 1);
+-	return ga & ((1UL << 24) - 1);
++	return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
+ }
+ 
+ /*
+@@ -359,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
+ int ipte_lock_held(struct kvm_vcpu *vcpu);
+ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
+ 
++/* MVPG PEI indication bits */
++#define PEI_DAT_PROT 2
++#define PEI_NOT_PTE 4
++
+ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
+-			  unsigned long saddr);
++			  unsigned long saddr, unsigned long *datptr);
+ 
+ #endif /* __KVM_S390_GACCESS_H */
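As an aside for readers new to s390 addressing modes: the new _kvm_s390_logical_to_effective() helper above is a pure bit-masking rule. A minimal user-space sketch of the same rule, assuming nothing beyond standard C (the enum and function names here are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

enum amode { AMODE_24, AMODE_31, AMODE_64 };

/* Same rule as the helper above: 64-bit mode keeps every bit, 31-bit
 * mode keeps the low 31 bits, 24-bit mode keeps the low 24 bits. */
static uint64_t logical_to_effective(enum amode m, uint64_t ga)
{
	if (m == AMODE_64)
		return ga;
	if (m == AMODE_31)
		return ga & ((1ULL << 31) - 1);
	return ga & ((1ULL << 24) - 1);
}

int main(void)
{
	uint64_t ga = 0xdeadbeefcafeULL;	/* arbitrary test value */

	printf("%#llx\n", (unsigned long long)logical_to_effective(AMODE_31, ga));
	return 0;
}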
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 2f09e9d7dc95a..24ad447e648c1 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -4307,16 +4307,16 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
+ 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
+ 	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
+ 	if (MACHINE_HAS_GS) {
++		preempt_disable();
+ 		__ctl_set_bit(2, 4);
+ 		if (vcpu->arch.gs_enabled)
+ 			save_gs_cb(current->thread.gs_cb);
+-		preempt_disable();
+ 		current->thread.gs_cb = vcpu->arch.host_gscb;
+ 		restore_gs_cb(vcpu->arch.host_gscb);
+-		preempt_enable();
+ 		if (!vcpu->arch.host_gscb)
+ 			__ctl_clear_bit(2, 4);
+ 		vcpu->arch.host_gscb = NULL;
++		preempt_enable();
+ 	}
+ 	/* SIE will save etoken directly into SDNX and therefore kvm_run */
+ }
+diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
+index bd803e0919187..4002a24bc43a6 100644
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -417,11 +417,6 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 		memcpy((void *)((u64)scb_o + 0xc0),
+ 		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
+ 		break;
+-	case ICPT_PARTEXEC:
+-		/* MVPG only */
+-		memcpy((void *)((u64)scb_o + 0xc0),
+-		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
+-		break;
+ 	}
+ 
+ 	if (scb_s->ihcpu != 0xffffU)
+@@ -620,10 +615,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 	/* with mso/msl, the prefix lies at offset *mso* */
+ 	prefix += scb_s->mso;
+ 
+-	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
++	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
+ 	if (!rc && (scb_s->ecb & ECB_TE))
+ 		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+-					   prefix + PAGE_SIZE);
++					   prefix + PAGE_SIZE, NULL);
+ 	/*
+ 	 * We don't have to mprotect, we will be called for all unshadows.
+ 	 * SIE will detect if protection applies and trigger a validity.
+@@ -914,7 +909,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 				    current->thread.gmap_addr, 1);
+ 
+ 	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+-				   current->thread.gmap_addr);
++				   current->thread.gmap_addr, NULL);
+ 	if (rc > 0) {
+ 		rc = inject_fault(vcpu, rc,
+ 				  current->thread.gmap_addr,
+@@ -936,7 +931,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu,
+ {
+ 	if (vsie_page->fault_addr)
+ 		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+-				      vsie_page->fault_addr);
++				      vsie_page->fault_addr, NULL);
+ 	vsie_page->fault_addr = 0;
+ }
+ 
+@@ -983,6 +978,98 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 	return 0;
+ }
+ 
++/*
++ * Get a register for a nested guest.
++ * @vcpu: the vcpu of the guest
++ * @vsie_page: the vsie_page for the nested guest
++ * @reg: the register number; the upper 4 bits are ignored
++ * returns: the value of the register
++ */
++static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
++{
++	/* no need to validate the parameter and/or perform error handling */
++	reg &= 0xf;
++	switch (reg) {
++	case 15:
++		return vsie_page->scb_s.gg15;
++	case 14:
++		return vsie_page->scb_s.gg14;
++	default:
++		return vcpu->run->s.regs.gprs[reg];
++	}
++}
++
++static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
++{
++	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
++	unsigned long pei_dest, pei_src, src, dest, mask, prefix;
++	u64 *pei_block = &vsie_page->scb_o->mcic;
++	int edat, rc_dest, rc_src;
++	union ctlreg0 cr0;
++
++	cr0.val = vcpu->arch.sie_block->gcr[0];
++	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
++	mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
++	prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
++
++	dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
++	dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
++	src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
++	src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
++
++	rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
++	rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
++	/*
++	 * Either everything went well, or something non-critical went
++	 * wrong, e.g. because of a race. In either case, simply retry.
++	 */
++	if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
++		retry_vsie_icpt(vsie_page);
++		return -EAGAIN;
++	}
++	/* Something more serious went wrong, propagate the error */
++	if (rc_dest < 0)
++		return rc_dest;
++	if (rc_src < 0)
++		return rc_src;
++
++	/* The only possible suppressing exception: just deliver it */
++	if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
++		clear_vsie_icpt(vsie_page);
++		rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
++		WARN_ON_ONCE(rc_dest);
++		return 1;
++	}
++
++	/*
++	 * Forward the PEI intercept to the guest if it was a page fault, or
++	 * also for segment and region table faults if EDAT applies.
++	 */
++	if (edat) {
++		rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
++		rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
++	} else {
++		rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
++		rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
++	}
++	if (!rc_dest && !rc_src) {
++		pei_block[0] = pei_dest;
++		pei_block[1] = pei_src;
++		return 1;
++	}
++
++	retry_vsie_icpt(vsie_page);
++
++	/*
++	 * The host has edat, and the guest does not, or it was an ASCE type
++	 * exception. The host needs to inject the appropriate DAT interrupts
++	 * into the guest.
++	 */
++	if (rc_dest)
++		return inject_fault(vcpu, rc_dest, dest, 1);
++	return inject_fault(vcpu, rc_src, src, 0);
++}
++
+ /*
+  * Run the vsie on a shadow scb and a shadow gmap, without any further
+  * sanity checks, handling SIE faults.
+@@ -1071,6 +1158,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 		if ((scb_s->ipa & 0xf000) != 0xf000)
+ 			scb_s->ipa += 0x1000;
+ 		break;
++	case ICPT_PARTEXEC:
++		if (scb_s->ipa == 0xb254)
++			rc = vsie_handle_mvpg(vcpu, vsie_page);
++		break;
+ 	}
+ 	return rc;
+ }
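One detail of vsie_handle_mvpg() above that is easy to miss: the two register numbers are 4-bit fields of the instruction's IPB word, at bits 20-23 and 16-19, and vsie_get_register() masks off everything else. A stand-alone sketch of that extraction, using a hypothetical IPB value:

#include <stdint.h>
#include <stdio.h>

/* Pull a 4-bit register field out of an IPB word; the & 0xf mirrors the
 * reg &= 0xf in vsie_get_register() above. */
static unsigned int ipb_reg(uint32_t ipb, unsigned int shift)
{
	return (ipb >> shift) & 0xf;
}

int main(void)
{
	uint32_t ipb = 0x00d50000;	/* hypothetical: r1 = 13, r2 = 5 */

	printf("r1=%u r2=%u\n", ipb_reg(ipb, 20), ipb_reg(ipb, 16));
	return 0;
}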
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 268b7d5c98354..861b1b794697b 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -571,6 +571,7 @@ config X86_UV
+ 	depends on X86_EXTENDED_PLATFORM
+ 	depends on NUMA
+ 	depends on EFI
++	depends on KEXEC_CORE
+ 	depends on X86_X2APIC
+ 	depends on PCI
+ 	help
+diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
+index 646da46e8d104..1dfb8af48a3ca 100644
+--- a/arch/x86/crypto/poly1305_glue.c
++++ b/arch/x86/crypto/poly1305_glue.c
+@@ -16,7 +16,7 @@
+ #include <asm/simd.h>
+ 
+ asmlinkage void poly1305_init_x86_64(void *ctx,
+-				     const u8 key[POLY1305_KEY_SIZE]);
++				     const u8 key[POLY1305_BLOCK_SIZE]);
+ asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
+ 				       const size_t len, const u32 padbit);
+ asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
+@@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
+ 	state->is_base2_26 = 0;
+ }
+ 
+-static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
++static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
+ {
+ 	poly1305_init_x86_64(ctx, key);
+ }
+@@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
+ 		poly1305_emit_avx(ctx, mac, nonce);
+ }
+ 
+-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
+ {
+ 	poly1305_simd_init(&dctx->h, key);
+ 	dctx->s[0] = get_unaligned_le32(&key[16]);
+diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
+index 1c7cfac7e64ac..5264daa8859f5 100644
+--- a/arch/x86/entry/vdso/vdso2c.h
++++ b/arch/x86/entry/vdso/vdso2c.h
+@@ -35,7 +35,7 @@ static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
+ 	if (offset + len > data_len)
+ 		fail("section to extract overruns input data");
+ 
+-	fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len);
++	fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len);
+ 	BITSFUNC(copy)(outfile, data + offset, len);
+ 	fprintf(outfile, "\n};\n\n");
+ }
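The vdso2c change above is a printf-format correctness fix: len is a size_t, and %zu is its portable conversion specifier, whereas %lu only happens to match on LP64 targets. A trivial demonstration:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t len = 42;

	/* %zu is correct for size_t on every platform; %lu would warn (or
	 * misbehave) wherever size_t is not unsigned long. */
	printf("static const unsigned char blob[%zu] = {};\n", len);
	return 0;
}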
+diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
+index be50ef8572cce..6a98a76516214 100644
+--- a/arch/x86/events/amd/iommu.c
++++ b/arch/x86/events/amd/iommu.c
+@@ -81,12 +81,12 @@ static struct attribute_group amd_iommu_events_group = {
+ };
+ 
+ struct amd_iommu_event_desc {
+-	struct kobj_attribute attr;
++	struct device_attribute attr;
+ 	const char *event;
+ };
+ 
+-static ssize_t _iommu_event_show(struct kobject *kobj,
+-				struct kobj_attribute *attr, char *buf)
++static ssize_t _iommu_event_show(struct device *dev,
++				struct device_attribute *attr, char *buf)
+ {
+ 	struct amd_iommu_event_desc *event =
+ 		container_of(attr, struct amd_iommu_event_desc, attr);
+diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
+index 7f014d450bc28..582c0ffb5e983 100644
+--- a/arch/x86/events/amd/uncore.c
++++ b/arch/x86/events/amd/uncore.c
+@@ -275,14 +275,14 @@ static struct attribute_group amd_uncore_attr_group = {
+ };
+ 
+ #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
+-static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
+-				struct kobj_attribute *attr,		\
++static ssize_t __uncore_##_var##_show(struct device *dev,		\
++				struct device_attribute *attr,		\
+ 				char *page)				\
+ {									\
+ 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
+ 	return sprintf(page, _format "\n");				\
+ }									\
+-static struct kobj_attribute format_attr_##_var =			\
++static struct device_attribute format_attr_##_var =			\
+ 	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
+ 
+ DEFINE_UNCORE_FORMAT_ATTR(event12,	event,		"config:0-7,32-35");
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index 52bc217ca8c32..c9ddd233e32ff 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -1671,6 +1671,9 @@ static __init int uv_system_init_hubless(void)
+ 	if (rc < 0)
+ 		return rc;
+ 
++	/* Set section block size for current node memory */
++	set_block_size();
++
+ 	/* Create user access node */
+ 	if (rc >= 0)
+ 		uv_setup_proc_files(1);
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index b935e1b5f115e..6a6318e9590c8 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev,
+ 	if (val != 1)
+ 		return size;
+ 
+-	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+-	if (tmp_ret != UCODE_NEW)
+-		return size;
+-
+ 	get_online_cpus();
+ 
+ 	ret = check_online_cpus();
+ 	if (ret)
+ 		goto put;
+ 
++	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
++	if (tmp_ret != UCODE_NEW)
++		goto put;
++
+ 	mutex_lock(&microcode_mutex);
+ 	ret = microcode_reload_late();
+ 	mutex_unlock(&microcode_mutex);
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index 22aad412f965e..629c4994f1654 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -31,8 +31,8 @@
+  *       - inform the user about the firmware's notion of memory layout
+  *         via /sys/firmware/memmap
+  *
+- *       - the hibernation code uses it to generate a kernel-independent MD5
+- *         fingerprint of the physical memory layout of a system.
++ *       - the hibernation code uses it to generate a kernel-independent CRC32
++ *         checksum of the physical memory layout of a system.
+  *
+  * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
+  *   passed to us by the bootloader - the major difference between
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index df776cdca327d..0bb9fe021bbed 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -139,6 +139,8 @@ NOKPROBE_SYMBOL(synthesize_relcall);
+ int can_boost(struct insn *insn, void *addr)
+ {
+ 	kprobe_opcode_t opcode;
++	insn_byte_t prefix;
++	int i;
+ 
+ 	if (search_exception_tables((unsigned long)addr))
+ 		return 0;	/* Page fault may occur on this address. */
+@@ -151,9 +153,14 @@ int can_boost(struct insn *insn, void *addr)
+ 	if (insn->opcode.nbytes != 1)
+ 		return 0;
+ 
+-	/* Can't boost Address-size override prefix */
+-	if (unlikely(inat_is_address_size_prefix(insn->attr)))
+-		return 0;
++	for_each_insn_prefix(insn, i, prefix) {
++		insn_attr_t attr;
++
++		attr = inat_get_opcode_attribute(prefix);
++		/* Can't boost Address-size override prefix or CS override prefix */
++		if (prefix == 0x2e || inat_is_address_size_prefix(attr))
++			return 0;
++	}
+ 
+ 	opcode = insn->opcode.bytes[0];
+ 
+@@ -178,8 +185,8 @@ int can_boost(struct insn *insn, void *addr)
+ 		/* clear and set flags are boostable */
+ 		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+ 	default:
+-		/* CS override prefix and call are not boostable */
+-		return (opcode != 0x2e && opcode != 0x9a);
++		/* call is not boostable */
++		return opcode != 0x9a;
+ 	}
+ }
+ 
+@@ -448,7 +455,11 @@ static void set_resume_flags(struct kprobe *p, struct insn *insn)
+ 		break;
+ #endif
+ 	case 0xff:
+-		opcode = insn->opcode.bytes[1];
++		/*
++		 * Since the 0xff is an extended group opcode, the instruction
++		 * is determined by the MOD/RM byte.
++		 */
++		opcode = insn->modrm.bytes[0];
+ 		if ((opcode & 0x30) == 0x10) {
+ 			/*
+ 			 * call absolute, indirect
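The key point in the kprobes hunk above is that 0xff is a group opcode: the actual operation is selected by the reg field (bits 3-5) of the ModRM byte, so reading insn->opcode.bytes[1] was wrong. The (opcode & 0x30) == 0x10 test matches reg values 2 and 3, the two indirect calls. A sketch of the field layout, with a hypothetical ModRM byte 0xd3 (ff d3 encodes an indirect call through a register):

#include <stdio.h>

int main(void)
{
	unsigned char modrm = 0xd3;	/* hypothetical: mod=3, reg=2, rm=3 */

	/* ModRM layout: mod = bits 7-6, reg/opcode extension = bits 5-3,
	 * rm = bits 2-0.  reg 2 and 3 under opcode 0xff are the calls. */
	printf("mod=%u reg=%u rm=%u match=%d\n",
	       (modrm >> 6) & 3, (modrm >> 3) & 7, modrm & 7,
	       (modrm & 0x30) == 0x10);
	return 0;
}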
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 16703c35a944f..6b08d1eb173fd 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -458,29 +458,52 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+ 	return false;
+ }
+ 
++static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
++{
++	if (c->phys_proc_id == o->phys_proc_id &&
++	    c->cpu_die_id == o->cpu_die_id)
++		return true;
++	return false;
++}
++
+ /*
+- * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
++ * Unlike the other levels, we do not enforce keeping a
++ * multicore group inside a NUMA node.  If this happens, we will
++ * discard the MC level of the topology later.
++ */
++static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
++{
++	if (c->phys_proc_id == o->phys_proc_id)
++		return true;
++	return false;
++}
++
++/*
++ * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
+  *
+- * These are Intel CPUs that enumerate an LLC that is shared by
+- * multiple NUMA nodes. The LLC on these systems is shared for
+- * off-package data access but private to the NUMA node (half
+- * of the package) for on-package access.
++ * Any Intel CPU that has multiple nodes per package and does not
++ * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
+  *
+- * CPUID (the source of the information about the LLC) can only
+- * enumerate the cache as being shared *or* unshared, but not
+- * this particular configuration. The CPU in this case enumerates
+- * the cache to be shared across the entire package (spanning both
+- * NUMA nodes).
++ * When in SNC mode, these CPUs enumerate an LLC that is shared
++ * by multiple NUMA nodes. The LLC is shared for off-package data
++ * access but private to the NUMA node (half of the package) for
++ * on-package access. CPUID (the source of the information about
++ * the LLC) can only enumerate the cache as shared or unshared,
++ * but not this particular configuration.
+  */
+ 
+-static const struct x86_cpu_id snc_cpu[] = {
+-	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
++static const struct x86_cpu_id intel_cod_cpu[] = {
++	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),	/* COD */
++	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),	/* COD */
++	X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),		/* SNC */
+ 	{}
+ };
+ 
+ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+ {
++	const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
+ 	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
++	bool intel_snc = id && id->driver_data;
+ 
+ 	/* Do not match if we do not have a valid APICID for cpu: */
+ 	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
+@@ -495,32 +518,12 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+ 	 * means 'c' does not share the LLC of 'o'. This will be
+ 	 * reflected to userspace.
+ 	 */
+-	if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
++	if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
+ 		return false;
+ 
+ 	return topology_sane(c, o, "llc");
+ }
+ 
+-/*
+- * Unlike the other levels, we do not enforce keeping a
+- * multicore group inside a NUMA node.  If this happens, we will
+- * discard the MC level of the topology later.
+- */
+-static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+-{
+-	if (c->phys_proc_id == o->phys_proc_id)
+-		return true;
+-	return false;
+-}
+-
+-static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+-{
+-	if ((c->phys_proc_id == o->phys_proc_id) &&
+-		(c->cpu_die_id == o->cpu_die_id))
+-		return true;
+-	return false;
+-}
+-
+ 
+ #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
+ static inline int x86_sched_itmt_flags(void)
+@@ -592,14 +595,23 @@ void set_cpu_sibling_map(int cpu)
+ 	for_each_cpu(i, cpu_sibling_setup_mask) {
+ 		o = &cpu_data(i);
+ 
++		if (match_pkg(c, o) && !topology_same_node(c, o))
++			x86_has_numa_in_package = true;
++
+ 		if ((i == cpu) || (has_smt && match_smt(c, o)))
+ 			link_mask(topology_sibling_cpumask, cpu, i);
+ 
+ 		if ((i == cpu) || (has_mp && match_llc(c, o)))
+ 			link_mask(cpu_llc_shared_mask, cpu, i);
+ 
++		if ((i == cpu) || (has_mp && match_die(c, o)))
++			link_mask(topology_die_cpumask, cpu, i);
+ 	}
+ 
++	threads = cpumask_weight(topology_sibling_cpumask(cpu));
++	if (threads > __max_smt_threads)
++		__max_smt_threads = threads;
++
+ 	/*
+ 	 * This needs a separate iteration over the cpus because we rely on all
+ 	 * topology_sibling_cpumask links to be set-up.
+@@ -613,8 +625,7 @@ void set_cpu_sibling_map(int cpu)
+ 			/*
+ 			 *  Does this new cpu bring up a new core?
+ 			 */
+-			if (cpumask_weight(
+-			    topology_sibling_cpumask(cpu)) == 1) {
++			if (threads == 1) {
+ 				/*
+ 				 * for each core in package, increment
+ 				 * the booted_cores for this new cpu
+@@ -631,16 +642,7 @@ void set_cpu_sibling_map(int cpu)
+ 			} else if (i != cpu && !c->booted_cores)
+ 				c->booted_cores = cpu_data(i).booted_cores;
+ 		}
+-		if (match_pkg(c, o) && !topology_same_node(c, o))
+-			x86_has_numa_in_package = true;
+-
+-		if ((i == cpu) || (has_mp && match_die(c, o)))
+-			link_mask(topology_die_cpumask, cpu, i);
+ 	}
+-
+-	threads = cpumask_weight(topology_sibling_cpumask(cpu));
+-	if (threads > __max_smt_threads)
+-		__max_smt_threads = threads;
+ }
+ 
+ /* maps the cpu to the sched domain representing multi-core */
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index f7970ba6219fc..abd9a4db11a88 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4220,7 +4220,7 @@ static bool valid_cr(int nr)
+ 	}
+ }
+ 
+-static int check_cr_read(struct x86_emulate_ctxt *ctxt)
++static int check_cr_access(struct x86_emulate_ctxt *ctxt)
+ {
+ 	if (!valid_cr(ctxt->modrm_reg))
+ 		return emulate_ud(ctxt);
+@@ -4228,80 +4228,6 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
+ 	return X86EMUL_CONTINUE;
+ }
+ 
+-static int check_cr_write(struct x86_emulate_ctxt *ctxt)
+-{
+-	u64 new_val = ctxt->src.val64;
+-	int cr = ctxt->modrm_reg;
+-	u64 efer = 0;
+-
+-	static u64 cr_reserved_bits[] = {
+-		0xffffffff00000000ULL,
+-		0, 0, 0, /* CR3 checked later */
+-		CR4_RESERVED_BITS,
+-		0, 0, 0,
+-		CR8_RESERVED_BITS,
+-	};
+-
+-	if (!valid_cr(cr))
+-		return emulate_ud(ctxt);
+-
+-	if (new_val & cr_reserved_bits[cr])
+-		return emulate_gp(ctxt, 0);
+-
+-	switch (cr) {
+-	case 0: {
+-		u64 cr4;
+-		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
+-		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
+-			return emulate_gp(ctxt, 0);
+-
+-		cr4 = ctxt->ops->get_cr(ctxt, 4);
+-		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+-
+-		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
+-		    !(cr4 & X86_CR4_PAE))
+-			return emulate_gp(ctxt, 0);
+-
+-		break;
+-		}
+-	case 3: {
+-		u64 rsvd = 0;
+-
+-		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+-		if (efer & EFER_LMA) {
+-			u64 maxphyaddr;
+-			u32 eax, ebx, ecx, edx;
+-
+-			eax = 0x80000008;
+-			ecx = 0;
+-			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
+-						 &edx, true))
+-				maxphyaddr = eax & 0xff;
+-			else
+-				maxphyaddr = 36;
+-			rsvd = rsvd_bits(maxphyaddr, 63);
+-			if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
+-				rsvd &= ~X86_CR3_PCID_NOFLUSH;
+-		}
+-
+-		if (new_val & rsvd)
+-			return emulate_gp(ctxt, 0);
+-
+-		break;
+-		}
+-	case 4: {
+-		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+-
+-		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
+-			return emulate_gp(ctxt, 0);
+-
+-		break;
+-		}
+-	}
+-
+-	return X86EMUL_CONTINUE;
+-}
+-
+ static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
+ {
+ 	unsigned long dr7;
+@@ -4841,10 +4767,10 @@ static const struct opcode twobyte_table[256] = {
+ 	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
+ 	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
+ 	/* 0x20 - 0x2F */
+-	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
++	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
+ 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
+ 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
+-						check_cr_write),
++						check_cr_access),
+ 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
+ 						check_dr_write),
+ 	N, N, N, N,
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index cc369b9ad8f11..49a839d0567a5 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -296,6 +296,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
+ 
+ 		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
+ 	}
++
++	/* Check if there are APF page ready requests pending */
++	if (enabled)
++		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
+ }
+ 
+ static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
+@@ -2261,6 +2265,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
+ 		if (value & MSR_IA32_APICBASE_ENABLE) {
+ 			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
+ 			static_branch_slow_dec_deferred(&apic_hw_disabled);
++			/* Check if there are APF page ready requests pending */
++			kvm_make_request(KVM_REQ_APF_READY, vcpu);
+ 		} else {
+ 			static_branch_inc(&apic_hw_disabled.key);
+ 			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 951dae4e71751..cd0faa1876743 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -3193,14 +3193,14 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
+ 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
+ 			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
+-		} else {
++		} else if (mmu->pae_root) {
+ 			for (i = 0; i < 4; ++i)
+ 				if (mmu->pae_root[i] != 0)
+ 					mmu_free_root_page(kvm,
+ 							   &mmu->pae_root[i],
+ 							   &invalid_list);
+-			mmu->root_hpa = INVALID_PAGE;
+ 		}
++		mmu->root_hpa = INVALID_PAGE;
+ 		mmu->root_pgd = 0;
+ 	}
+ 
+@@ -3312,9 +3312,23 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
+ 	 * the shadow page table may be a PAE or a long mode page table.
+ 	 */
+ 	pm_mask = PT_PRESENT_MASK;
+-	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
++	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+ 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
+ 
++		/*
++		 * Allocate the page for the PDPTEs when shadowing 32-bit NPT
++		 * with 64-bit only when needed.  Unlike 32-bit NPT, it doesn't
++		 * need to be in low mem.  See also lm_root below.
++		 */
++		if (!vcpu->arch.mmu->pae_root) {
++			WARN_ON_ONCE(!tdp_enabled);
++
++			vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
++			if (!vcpu->arch.mmu->pae_root)
++				return -ENOMEM;
++		}
++	}
++
+ 	for (i = 0; i < 4; ++i) {
+ 		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
+ 		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
+@@ -3337,21 +3351,19 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
+ 	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
+ 
+ 	/*
+-	 * If we shadow a 32 bit page table with a long mode page
+-	 * table we enter this path.
++	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
++	 * tables are allocated and initialized at MMU creation as there is no
++	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
++	 * on demand, as running a 32-bit L1 VMM is very rare.  The PDP is
++	 * handled above (to share logic with PAE), deal with the PML4 here.
+ 	 */
+ 	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+ 		if (vcpu->arch.mmu->lm_root == NULL) {
+-			/*
+-			 * The additional page necessary for this is only
+-			 * allocated on demand.
+-			 */
+-
+ 			u64 *lm_root;
+ 
+ 			lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+-			if (lm_root == NULL)
+-				return 1;
++			if (!lm_root)
++				return -ENOMEM;
+ 
+ 			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
+ 
+@@ -3653,6 +3665,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+ 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ 	bool async;
+ 
++	/*
++	 * Retry the page fault if the gfn hit a memslot that is being deleted
++	 * or moved.  This ensures any existing SPTEs for the old memslot will
++	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
++	 */
++	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
++		return true;
++
+ 	/* Don't expose private memslots to L2. */
+ 	if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
+ 		*pfn = KVM_PFN_NOSLOT;
+@@ -4615,12 +4635,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
+ 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
+ 	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
+ 
+-	context->shadow_root_level = new_role.base.level;
+-
+ 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
+ 
+-	if (new_role.as_u64 != context->mmu_role.as_u64)
++	if (new_role.as_u64 != context->mmu_role.as_u64) {
+ 		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
++
++		/*
++		 * Override the level set by the common init helper, nested TDP
++		 * always uses the host's TDP configuration.
++		 */
++		context->shadow_root_level = new_role.base.level;
++	}
+ }
+ EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
+ 
+@@ -5240,9 +5265,11 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+ 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
+ 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
+ 	 * x86_64.  Therefore we need to allocate the PDP table in the first
+-	 * 4GB of memory, which happens to fit the DMA32 zone.  Except for
+-	 * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
+-	 * skip allocating the PDP table.
++	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
++	 * generally doesn't use PAE paging and can skip allocating the PDP
++	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
++	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
++	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
+ 	 */
+ 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+ 		return 0;
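The mmu.c hunks above turn the pae_root page into an allocate-on-first-use resource, so the common TDP configuration never pays for it. A user-space analog of that pattern, where a zeroed table of 512 8-byte entries stands in for the kernel's zeroed page (illustrative only, not the kernel helper):

#include <stdio.h>
#include <stdlib.h>

static unsigned long *pae_root;	/* stand-in for vcpu->arch.mmu->pae_root */

/* Allocate the table only when the rare path actually needs it. */
static int ensure_pae_root(void)
{
	if (!pae_root) {
		pae_root = calloc(512, sizeof(*pae_root));
		if (!pae_root)
			return -1;	/* the kernel code returns -ENOMEM */
	}
	return 0;
}

int main(void)
{
	if (ensure_pae_root())
		return 1;
	pae_root[0] = 0x1007;	/* hypothetical present PDPTE value */
	printf("%#lx\n", pae_root[0]);
	free(pae_root);
	return 0;
}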
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 874ea309279f5..019130011d0fc 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -87,7 +87,7 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
+ 	return true;
+ }
+ 
+-static int sev_asid_new(struct kvm_sev_info *sev)
++static int sev_asid_new(bool es_active)
+ {
+ 	int pos, min_asid, max_asid;
+ 	bool retry = true;
+@@ -98,8 +98,8 @@ static int sev_asid_new(struct kvm_sev_info *sev)
+ 	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
+ 	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
+ 	 */
+-	min_asid = sev->es_active ? 0 : min_sev_asid - 1;
+-	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
++	min_asid = es_active ? 0 : min_sev_asid - 1;
++	max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;
+ again:
+ 	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
+ 	if (pos >= max_asid) {
+@@ -179,13 +179,17 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+ 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
++	bool es_active = argp->id == KVM_SEV_ES_INIT;
+ 	int asid, ret;
+ 
++	if (kvm->created_vcpus)
++		return -EINVAL;
++
+ 	ret = -EBUSY;
+ 	if (unlikely(sev->active))
+ 		return ret;
+ 
+-	asid = sev_asid_new(sev);
++	asid = sev_asid_new(es_active);
+ 	if (asid < 0)
+ 		return ret;
+ 
+@@ -194,6 +198,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 		goto e_free;
+ 
+ 	sev->active = true;
++	sev->es_active = es_active;
+ 	sev->asid = asid;
+ 	INIT_LIST_HEAD(&sev->regions_list);
+ 
+@@ -204,16 +209,6 @@ e_free:
+ 	return ret;
+ }
+ 
+-static int sev_es_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+-{
+-	if (!sev_es)
+-		return -ENOTTY;
+-
+-	to_kvm_svm(kvm)->sev_info.es_active = true;
+-
+-	return sev_guest_init(kvm, argp);
+-}
+-
+ static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
+ {
+ 	struct sev_data_activate *data;
+@@ -564,6 +559,7 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+ 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ 	struct sev_data_launch_update_vmsa *vmsa;
++	struct kvm_vcpu *vcpu;
+ 	int i, ret;
+ 
+ 	if (!sev_es_guest(kvm))
+@@ -573,8 +569,8 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 	if (!vmsa)
+ 		return -ENOMEM;
+ 
+-	for (i = 0; i < kvm->created_vcpus; i++) {
+-		struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
++	kvm_for_each_vcpu(i, vcpu, kvm) {
++		struct vcpu_svm *svm = to_svm(vcpu);
+ 
+ 		/* Perform some pre-encryption checks against the VMSA */
+ 		ret = sev_es_sync_vmsa(svm);
+@@ -1127,12 +1123,15 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ 	mutex_lock(&kvm->lock);
+ 
+ 	switch (sev_cmd.id) {
++	case KVM_SEV_ES_INIT:
++		if (!sev_es) {
++			r = -ENOTTY;
++			goto out;
++		}
++		fallthrough;
+ 	case KVM_SEV_INIT:
+ 		r = sev_guest_init(kvm, &sev_cmd);
+ 		break;
+-	case KVM_SEV_ES_INIT:
+-		r = sev_es_guest_init(kvm, &sev_cmd);
+-		break;
+ 	case KVM_SEV_LAUNCH_START:
+ 		r = sev_launch_start(kvm, &sev_cmd);
+ 		break;
+@@ -1349,8 +1348,11 @@ void __init sev_hardware_setup(void)
+ 		goto out;
+ 
+ 	sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+-	if (!sev_reclaim_asid_bitmap)
++	if (!sev_reclaim_asid_bitmap) {
++		bitmap_free(sev_asid_bitmap);
++		sev_asid_bitmap = NULL;
+ 		goto out;
++	}
+ 
+ 	pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
+ 	sev_supported = true;
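For clarity on the sev_asid_new() change above: the ASID space is split by guest type, with SEV-ES guests drawing from the low range and plain SEV guests from the high range, and the bitmap search starts past min_asid. A sketch of the split it computes, with hypothetical limits:

#include <stdio.h>

int main(void)
{
	int min_sev_asid = 16, max_sev_asid = 64;	/* hypothetical limits */

	for (int es_active = 0; es_active <= 1; es_active++) {
		/* Same selection as sev_asid_new() above. */
		int min_asid = es_active ? 0 : min_sev_asid - 1;
		int max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;

		printf("es_active=%d -> search bits [%d, %d)\n",
		       es_active, min_asid, max_asid);
	}
	return 0;
}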
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 58a45bb139f88..309725151313d 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -564,9 +564,8 @@ static int svm_cpu_init(int cpu)
+ 	clear_page(page_address(sd->save_area));
+ 
+ 	if (svm_sev_enabled()) {
+-		sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
+-					      sizeof(void *),
+-					      GFP_KERNEL);
++		sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *),
++					GFP_KERNEL);
+ 		if (!sd->sev_vmcbs)
+ 			goto free_save_area;
+ 	}
+@@ -969,21 +968,6 @@ static __init int svm_hardware_setup(void)
+ 		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
+ 	}
+ 
+-	if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) {
+-		sev_hardware_setup();
+-	} else {
+-		sev = false;
+-		sev_es = false;
+-	}
+-
+-	svm_adjust_mmio_mask();
+-
+-	for_each_possible_cpu(cpu) {
+-		r = svm_cpu_init(cpu);
+-		if (r)
+-			goto err;
+-	}
+-
+ 	/*
+ 	 * KVM's MMU doesn't support using 2-level paging for itself, and thus
+ 	 * NPT isn't supported if the host is using 2-level paging since host
+@@ -998,6 +982,21 @@ static __init int svm_hardware_setup(void)
+ 	kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
+ 	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
+ 
++	if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev && npt_enabled) {
++		sev_hardware_setup();
++	} else {
++		sev = false;
++		sev_es = false;
++	}
++
++	svm_adjust_mmio_mask();
++
++	for_each_possible_cpu(cpu) {
++		r = svm_cpu_init(cpu);
++		if (r)
++			goto err;
++	}
++
+ 	if (nrips) {
+ 		if (!boot_cpu_has(X86_FEATURE_NRIPS))
+ 			nrips = false;
+@@ -1898,7 +1897,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
+ 
+ static int pf_interception(struct vcpu_svm *svm)
+ {
+-	u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
++	u64 fault_address = svm->vmcb->control.exit_info_2;
+ 	u64 error_code = svm->vmcb->control.exit_info_1;
+ 
+ 	return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
+@@ -2738,6 +2737,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	case MSR_TSC_AUX:
+ 		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
+ 			return 1;
++		if (!msr_info->host_initiated &&
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
++			return 1;
+ 		msr_info->data = svm->tsc_aux;
+ 		break;
+ 	/*
+@@ -2946,6 +2948,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
+ 			return 1;
+ 
++		if (!msr->host_initiated &&
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
++			return 1;
++
+ 		/*
+ 		 * This is rare, so we update the MSR here instead of using
+ 		 * direct_access_msrs.  Doing that would require a rdmsr in
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index bcca0b80e0d04..1727057c53130 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -619,6 +619,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ 	}
+ 
+ 	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
++#ifdef CONFIG_X86_64
+ 	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+ 					     MSR_FS_BASE, MSR_TYPE_RW);
+ 
+@@ -627,6 +628,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ 
+ 	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+ 					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
++#endif
+ 
+ 	/*
+ 	 * Checking the L0->L1 bitmap is trying to verify two things:
+@@ -4601,9 +4603,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+ 	else if (addr_size == 0)
+ 		off = (gva_t)sign_extend64(off, 15);
+ 	if (base_is_valid)
+-		off += kvm_register_read(vcpu, base_reg);
++		off += kvm_register_readl(vcpu, base_reg);
+ 	if (index_is_valid)
+-		off += kvm_register_read(vcpu, index_reg) << scaling;
++		off += kvm_register_readl(vcpu, index_reg) << scaling;
+ 	vmx_get_segment(vcpu, &s, seg_reg);
+ 
+ 	/*
+@@ -5479,16 +5481,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
+ 		if (!nested_vmx_check_eptp(vcpu, new_eptp))
+ 			return 1;
+ 
+-		kvm_mmu_unload(vcpu);
+ 		mmu->ept_ad = accessed_dirty;
+ 		mmu->mmu_role.base.ad_disabled = !accessed_dirty;
+ 		vmcs12->ept_pointer = new_eptp;
+-		/*
+-		 * TODO: Check what's the correct approach in case
+-		 * mmu reload fails. Currently, we just let the next
+-		 * reload potentially fail
+-		 */
+-		kvm_mmu_reload(vcpu);
++
++		kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+ 	}
+ 
+ 	return 0;
+@@ -5717,7 +5714,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
+ 
+ 	/* Decode instruction info and find the field to access */
+ 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+-	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
++	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ 
+ 	/* Out-of-range fields always cause a VM exit from L2 to L1 */
+ 	if (field >> 15)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 29b40e092d13d..f705e0d9f1618 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -156,9 +156,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
+ 	MSR_IA32_SPEC_CTRL,
+ 	MSR_IA32_PRED_CMD,
+ 	MSR_IA32_TSC,
++#ifdef CONFIG_X86_64
+ 	MSR_FS_BASE,
+ 	MSR_GS_BASE,
+ 	MSR_KERNEL_GS_BASE,
++#endif
+ 	MSR_IA32_SYSENTER_CS,
+ 	MSR_IA32_SYSENTER_ESP,
+ 	MSR_IA32_SYSENTER_EIP,
+@@ -5062,12 +5064,12 @@ static int handle_cr(struct kvm_vcpu *vcpu)
+ 		case 3:
+ 			WARN_ON_ONCE(enable_unrestricted_guest);
+ 			val = kvm_read_cr3(vcpu);
+-			kvm_register_write(vcpu, reg, val);
++			kvm_register_writel(vcpu, reg, val);
+ 			trace_kvm_cr_read(cr, val);
+ 			return kvm_skip_emulated_instruction(vcpu);
+ 		case 8:
+ 			val = kvm_get_cr8(vcpu);
+-			kvm_register_write(vcpu, reg, val);
++			kvm_register_writel(vcpu, reg, val);
+ 			trace_kvm_cr_read(cr, val);
+ 			return kvm_skip_emulated_instruction(vcpu);
+ 		}
+@@ -5140,7 +5142,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
+ 		unsigned long val;
+ 
+ 		kvm_get_dr(vcpu, dr, &val);
+-		kvm_register_write(vcpu, reg, val);
++		kvm_register_writel(vcpu, reg, val);
+ 		err = 0;
+ 	} else {
+ 		err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
+@@ -5792,7 +5794,6 @@ void dump_vmcs(void)
+ 	u32 vmentry_ctl, vmexit_ctl;
+ 	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
+ 	unsigned long cr4;
+-	u64 efer;
+ 
+ 	if (!dump_invalid_vmcs) {
+ 		pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
+@@ -5804,7 +5805,6 @@ void dump_vmcs(void)
+ 	cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+ 	pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
+ 	cr4 = vmcs_readl(GUEST_CR4);
+-	efer = vmcs_read64(GUEST_IA32_EFER);
+ 	secondary_exec_control = 0;
+ 	if (cpu_has_secondary_exec_ctrls())
+ 		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+@@ -5816,9 +5816,7 @@ void dump_vmcs(void)
+ 	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
+ 	       cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
+ 	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
+-	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
+-	    (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
+-	{
++	if (cpu_has_vmx_ept()) {
+ 		pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
+ 		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
+ 		pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
+@@ -5844,7 +5842,8 @@ void dump_vmcs(void)
+ 	if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
+ 	    (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
+ 		pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
+-		       efer, vmcs_read64(GUEST_IA32_PAT));
++		       vmcs_read64(GUEST_IA32_EFER),
++		       vmcs_read64(GUEST_IA32_PAT));
+ 	pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
+ 	       vmcs_read64(GUEST_IA32_DEBUGCTL),
+ 	       vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
+@@ -6938,9 +6937,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
+ 	bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
+ 
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
++#ifdef CONFIG_X86_64
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
++#endif
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index ee0dc58ac3a51..3406ff421c1a3 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1072,10 +1072,15 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+ 		return 0;
+ 	}
+ 
+-	if (is_long_mode(vcpu) && kvm_vcpu_is_illegal_gpa(vcpu, cr3))
++	/*
++	 * Do not condition the GPA check on long mode, this helper is used to
++	 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
++	 * the current vCPU mode is accurate.
++	 */
++	if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
+ 		return 1;
+-	else if (is_pae_paging(vcpu) &&
+-		 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
++
++	if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
+ 		return 1;
+ 
+ 	kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
+@@ -11020,6 +11025,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+ 
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
++	if (vcpu->arch.guest_state_protected)
++		return true;
++
+ 	return vcpu->arch.preempted_in_kernel;
+ }
+ 
+@@ -11290,7 +11298,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
+ 	if (!kvm_pv_async_pf_enabled(vcpu))
+ 		return true;
+ 	else
+-		return apf_pageready_slot_free(vcpu);
++		return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
+ }
+ 
+ void kvm_arch_start_assignment(struct kvm *kvm)
+@@ -11539,7 +11547,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
+ 
+ 		fallthrough;
+ 	case INVPCID_TYPE_ALL_INCL_GLOBAL:
+-		kvm_mmu_unload(vcpu);
++		kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+ 		return kvm_skip_emulated_instruction(vcpu);
+ 
+ 	default:
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index ae17250e1efe0..7f27bb65a5721 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -673,7 +673,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
+ 	bool longmode;
+ 	u64 input, params[6];
+ 
+-	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
++	input = (u64)kvm_register_readl(vcpu, VCPU_REGS_RAX);
+ 
+ 	/* Hyper-V hypercalls get bit 31 set in EAX */
+ 	if ((input & 0x80000000) &&
+diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
+index cd3914fc9f3d4..e94e0050a583a 100644
+--- a/arch/x86/power/hibernate.c
++++ b/arch/x86/power/hibernate.c
+@@ -13,8 +13,8 @@
+ #include <linux/kdebug.h>
+ #include <linux/cpu.h>
+ #include <linux/pgtable.h>
+-
+-#include <crypto/hash.h>
++#include <linux/types.h>
++#include <linux/crc32.h>
+ 
+ #include <asm/e820/api.h>
+ #include <asm/init.h>
+@@ -54,95 +54,33 @@ int pfn_is_nosave(unsigned long pfn)
+ 	return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
+ }
+ 
+-
+-#define MD5_DIGEST_SIZE 16
+-
+ struct restore_data_record {
+ 	unsigned long jump_address;
+ 	unsigned long jump_address_phys;
+ 	unsigned long cr3;
+ 	unsigned long magic;
+-	u8 e820_digest[MD5_DIGEST_SIZE];
++	unsigned long e820_checksum;
+ };
+ 
+-#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
+ /**
+- * get_e820_md5 - calculate md5 according to given e820 table
++ * compute_e820_crc32 - calculate crc32 of a given e820 table
+  *
+  * @table: the e820 table to be calculated
+- * @buf: the md5 result to be stored to
++ *
++ * Return: the resulting checksum
+  */
+-static int get_e820_md5(struct e820_table *table, void *buf)
++static inline u32 compute_e820_crc32(struct e820_table *table)
+ {
+-	struct crypto_shash *tfm;
+-	struct shash_desc *desc;
+-	int size;
+-	int ret = 0;
+-
+-	tfm = crypto_alloc_shash("md5", 0, 0);
+-	if (IS_ERR(tfm))
+-		return -ENOMEM;
+-
+-	desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+-		       GFP_KERNEL);
+-	if (!desc) {
+-		ret = -ENOMEM;
+-		goto free_tfm;
+-	}
+-
+-	desc->tfm = tfm;
+-
+-	size = offsetof(struct e820_table, entries) +
++	int size = offsetof(struct e820_table, entries) +
+ 		sizeof(struct e820_entry) * table->nr_entries;
+ 
+-	if (crypto_shash_digest(desc, (u8 *)table, size, buf))
+-		ret = -EINVAL;
+-
+-	kfree_sensitive(desc);
+-
+-free_tfm:
+-	crypto_free_shash(tfm);
+-	return ret;
+-}
+-
+-static int hibernation_e820_save(void *buf)
+-{
+-	return get_e820_md5(e820_table_firmware, buf);
+-}
+-
+-static bool hibernation_e820_mismatch(void *buf)
+-{
+-	int ret;
+-	u8 result[MD5_DIGEST_SIZE];
+-
+-	memset(result, 0, MD5_DIGEST_SIZE);
+-	/* If there is no digest in suspend kernel, let it go. */
+-	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
+-		return false;
+-
+-	ret = get_e820_md5(e820_table_firmware, result);
+-	if (ret)
+-		return true;
+-
+-	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
+-}
+-#else
+-static int hibernation_e820_save(void *buf)
+-{
+-	return 0;
+-}
+-
+-static bool hibernation_e820_mismatch(void *buf)
+-{
+-	/* If md5 is not builtin for restore kernel, let it go. */
+-	return false;
++	return ~crc32_le(~0, (unsigned char const *)table, size);
+ }
+-#endif
+ 
+ #ifdef CONFIG_X86_64
+-#define RESTORE_MAGIC	0x23456789ABCDEF01UL
++#define RESTORE_MAGIC	0x23456789ABCDEF02UL
+ #else
+-#define RESTORE_MAGIC	0x12345678UL
++#define RESTORE_MAGIC	0x12345679UL
+ #endif
+ 
+ /**
+@@ -179,7 +117,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
+ 	 */
+ 	rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
+ 
+-	return hibernation_e820_save(rdr->e820_digest);
++	rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
++	return 0;
+ }
+ 
+ /**
+@@ -200,7 +139,7 @@ int arch_hibernation_header_restore(void *addr)
+ 	jump_address_phys = rdr->jump_address_phys;
+ 	restore_cr3 = rdr->cr3;
+ 
+-	if (hibernation_e820_mismatch(rdr->e820_digest)) {
++	if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
+ 		pr_crit("Hibernate inconsistent memory map detected!\n");
+ 		return -ENODEV;
+ 	}
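The hibernate hunk replaces the MD5 digest with ~crc32_le(~0, table, size) — the standard CRC-32 (reflected polynomial 0xEDB88320, all-ones initial value, final complement). For illustration, a self-contained bitwise reimplementation of that convention; this is a sketch, not the kernel's crc32_le():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise reflected CRC-32.  ~crc32_bitwise(~0, buf, len) follows the
 * same convention as the kernel's ~crc32_le(~0, ...) used above. */
static uint32_t crc32_bitwise(uint32_t crc, const unsigned char *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++) {
			uint32_t lsb = crc & 1;

			crc >>= 1;
			if (lsb)
				crc ^= 0xEDB88320u;
		}
	}
	return crc;
}

int main(void)
{
	const char *s = "123456789";
	uint32_t sum = ~crc32_bitwise(~0u, (const unsigned char *)s, strlen(s));

	printf("%#x\n", sum);	/* standard CRC-32 check value: 0xcbf43926 */
	return 0;
}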
+diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
+index a057ecb1288d2..6cd7f7025df47 100644
+--- a/crypto/async_tx/async_xor.c
++++ b/crypto/async_tx/async_xor.c
+@@ -233,6 +233,7 @@ async_xor_offs(struct page *dest, unsigned int offset,
+ 		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
+ 			src_cnt--;
+ 			src_list++;
++			src_offs++;
+ 		}
+ 
+ 		/* wait for any prerequisite operations */
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index 69057fcd2c047..a5e6fd0bafa1b 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -119,23 +119,15 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
+  */
+ #define NUM_RETRIES 500ULL
+ 
+-struct cppc_attr {
+-	struct attribute attr;
+-	ssize_t (*show)(struct kobject *kobj,
+-			struct attribute *attr, char *buf);
+-	ssize_t (*store)(struct kobject *kobj,
+-			struct attribute *attr, const char *c, ssize_t count);
+-};
+-
+ #define define_one_cppc_ro(_name)		\
+-static struct cppc_attr _name =			\
++static struct kobj_attribute _name =		\
+ __ATTR(_name, 0444, show_##_name, NULL)
+ 
+ #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
+ 
+ #define show_cppc_data(access_fn, struct_name, member_name)		\
+ 	static ssize_t show_##member_name(struct kobject *kobj,		\
+-					struct attribute *attr,	char *buf) \
++				struct kobj_attribute *attr, char *buf)	\
+ 	{								\
+ 		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
+ 		struct struct_name st_name = {0};			\
+@@ -161,7 +153,7 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
+ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
+ 
+ static ssize_t show_feedback_ctrs(struct kobject *kobj,
+-		struct attribute *attr, char *buf)
++		struct kobj_attribute *attr, char *buf)
+ {
+ 	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
+ 	struct cppc_perf_fb_ctrs fb_ctrs = {0};
+diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
+index de638dafce21e..b2f5520882918 100644
+--- a/drivers/ata/libahci_platform.c
++++ b/drivers/ata/libahci_platform.c
+@@ -582,11 +582,13 @@ int ahci_platform_init_host(struct platform_device *pdev,
+ 	int i, irq, n_ports, rc;
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq <= 0) {
++	if (irq < 0) {
+ 		if (irq != -EPROBE_DEFER)
+ 			dev_err(dev, "no irq\n");
+ 		return irq;
+ 	}
++	if (!irq)
++		return -EINVAL;
+ 
+ 	hpriv->irq = irq;
+ 
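The libahci change above separates the three possible platform_get_irq() outcomes: a negative errno (propagated, including -EPROBE_DEFER), zero (IRQ0, treated as invalid), and a positive usable IRQ number. A compact user-space model of that convention — EPROBE_DEFER has no user-space definition, so its kernel value is hard-coded here purely for illustration:

#include <stdio.h>
#include <errno.h>

#define EPROBE_DEFER 517	/* kernel-internal errno, illustration only */

static int claim_irq(int irq)
{
	if (irq < 0)
		return irq;	/* propagate errors, incl. -EPROBE_DEFER */
	if (!irq)
		return -EINVAL;	/* IRQ0 is not a valid interrupt */
	return 0;		/* success: irq is usable */
}

int main(void)
{
	printf("%d %d %d\n",
	       claim_irq(-EPROBE_DEFER), claim_irq(0), claim_irq(37));
	return 0;
}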
+diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
+index e9cf31f384506..63f39440a9b42 100644
+--- a/drivers/ata/pata_arasan_cf.c
++++ b/drivers/ata/pata_arasan_cf.c
+@@ -818,12 +818,19 @@ static int arasan_cf_probe(struct platform_device *pdev)
+ 	else
+ 		quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
+ 
+-	/* if irq is 0, support only PIO */
+-	acdev->irq = platform_get_irq(pdev, 0);
+-	if (acdev->irq)
++	/*
++	 * If there's an error getting the IRQ (or we get IRQ0),
++	 * support only PIO.
++	 */
++	ret = platform_get_irq(pdev, 0);
++	if (ret > 0) {
++		acdev->irq = ret;
+ 		irq_handler = arasan_cf_interrupt;
+-	else
++	} else if (ret == -EPROBE_DEFER) {
++		return ret;
++	} else {
+ 		quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
++	}
+ 
+ 	acdev->pbase = res->start;
+ 	acdev->vbase = devm_ioremap(&pdev->dev, res->start,
+diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
+index d1644a8ef9fa6..abc0e87ca1a8b 100644
+--- a/drivers/ata/pata_ixp4xx_cf.c
++++ b/drivers/ata/pata_ixp4xx_cf.c
+@@ -165,8 +165,12 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq)
++	if (irq > 0)
+ 		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
++	else if (irq < 0)
++		return irq;
++	else
++		return -EINVAL;
+ 
+ 	/* Setup expansion bus chip selects */
+ 	*data->cs0_cfg = data->cs0_bits;
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 664ef658a955f..b62446ea5f408 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -4097,6 +4097,10 @@ static int mv_platform_probe(struct platform_device *pdev)
+ 		n_ports = mv_platform_data->n_ports;
+ 		irq = platform_get_irq(pdev, 0);
+ 	}
++	if (irq < 0)
++		return irq;
++	if (!irq)
++		return -EINVAL;
+ 
+ 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
+index 653c8c6ac7a73..aedeb2dc1a182 100644
+--- a/drivers/base/devtmpfs.c
++++ b/drivers/base/devtmpfs.c
+@@ -419,7 +419,6 @@ static int __init devtmpfs_setup(void *p)
+ 	init_chroot(".");
+ out:
+ 	*(int *)p = err;
+-	complete(&setup_done);
+ 	return err;
+ }
+ 
+@@ -432,6 +431,7 @@ static int __ref devtmpfsd(void *p)
+ {
+ 	int err = devtmpfs_setup(p);
+ 
++	complete(&setup_done);
+ 	if (err)
+ 		return err;
+ 	devtmpfs_work_loop();
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index f449dbb2c7466..2c36f61d30bcb 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -268,21 +268,20 @@ static void node_init_cache_dev(struct node *node)
+ 	if (!dev)
+ 		return;
+ 
++	device_initialize(dev);
+ 	dev->parent = &node->dev;
+ 	dev->release = node_cache_release;
+ 	if (dev_set_name(dev, "memory_side_cache"))
+-		goto free_dev;
++		goto put_device;
+ 
+-	if (device_register(dev))
+-		goto free_name;
++	if (device_add(dev))
++		goto put_device;
+ 
+ 	pm_runtime_no_callbacks(dev);
+ 	node->cache_dev = dev;
+ 	return;
+-free_name:
+-	kfree_const(dev->kobj.name);
+-free_dev:
+-	kfree(dev);
++put_device:
++	put_device(dev);
+ }
+ 
+ /**
+@@ -319,25 +318,24 @@ void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
+ 		return;
+ 
+ 	dev = &info->dev;
++	device_initialize(dev);
+ 	dev->parent = node->cache_dev;
+ 	dev->release = node_cacheinfo_release;
+ 	dev->groups = cache_groups;
+ 	if (dev_set_name(dev, "index%d", cache_attrs->level))
+-		goto free_cache;
++		goto put_device;
+ 
+ 	info->cache_attrs = *cache_attrs;
+-	if (device_register(dev)) {
++	if (device_add(dev)) {
+ 		dev_warn(&node->dev, "failed to add cache level:%d\n",
+ 			 cache_attrs->level);
+-		goto free_name;
++		goto put_device;
+ 	}
+ 	pm_runtime_no_callbacks(dev);
+ 	list_add_tail(&info->node, &node->cache_attrs);
+ 	return;
+-free_name:
+-	kfree_const(dev->kobj.name);
+-free_cache:
+-	kfree(info);
++put_device:
++	put_device(dev);
+ }
+ 
+ static void node_remove_caches(struct node *node)
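The node.c fix switches from device_register() to the device_initialize()/device_add() split so that every error path can funnel through put_device(), letting the release callback free both the name and the containing structure instead of hand-rolled kfree() labels. A user-space analog of that refcounted-teardown idea (illustrative; obj_init/obj_put are made-up names, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
	char *name;
};

static void obj_init(struct obj *o)	/* like device_initialize() */
{
	o->refs = 1;
	o->name = NULL;
}

static void obj_put(struct obj *o)	/* like put_device() */
{
	if (--o->refs == 0) {
		free(o->name);	/* the release callback owns cleanup */
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	obj_init(o);
	o->name = malloc(16);
	if (!o->name) {		/* error path: put, never plain free() */
		obj_put(o);
		return 1;
	}
	snprintf(o->name, 16, "index%d", 1);
	printf("%s\n", o->name);
	obj_put(o);
	return 0;
}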
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index ff2ee87987c7e..211a335a608d7 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -660,6 +660,7 @@ void regmap_debugfs_exit(struct regmap *map)
+ 		regmap_debugfs_free_dump_cache(map);
+ 		mutex_unlock(&map->cache_lock);
+ 		kfree(map->debugfs_name);
++		map->debugfs_name = NULL;
+ 	} else {
+ 		struct regmap_debugfs_node *node, *tmp;
+ 
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index fa3719ef80e4d..88310ac9ce906 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -1032,6 +1032,7 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
+ 	}
+ 
+ 	set_secondary_fwnode(dev, &swnode->fwnode);
++	software_node_notify(dev, KOBJ_ADD);
+ 
+ 	return 0;
+ }
+@@ -1105,8 +1106,8 @@ int software_node_notify(struct device *dev, unsigned long action)
+ 
+ 	switch (action) {
+ 	case KOBJ_ADD:
+-		ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
+-					"software_node");
++		ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
++					       "software_node");
+ 		if (ret)
+ 			break;
+ 
+diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
+index 104b713f4055a..d601e49f80e07 100644
+--- a/drivers/block/ataflop.c
++++ b/drivers/block/ataflop.c
+@@ -729,8 +729,12 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
+ 	unsigned long	flags;
+ 	int ret;
+ 
+-	if (type)
++	if (type) {
+ 		type--;
++		if (type >= NUM_DISK_MINORS ||
++		    minor2disktype[type].drive_types > DriveType)
++			return -EINVAL;
++	}
+ 
+ 	q = unit[drive].disk[type]->queue;
+ 	blk_mq_freeze_queue(q);
+@@ -742,11 +746,6 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
+ 	local_irq_restore(flags);
+ 
+ 	if (type) {
+-		if (type >= NUM_DISK_MINORS ||
+-		    minor2disktype[type].drive_types > DriveType) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+ 		type = minor2disktype[type].index;
+ 		UDT = &atari_disk_type[type];
+ 	}
+@@ -2002,7 +2001,10 @@ static void ataflop_probe(dev_t dev)
+ 	int drive = MINOR(dev) & 3;
+ 	int type  = MINOR(dev) >> 2;
+ 
+-	if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
++	if (type)
++		type--;
++
++	if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS)
+ 		return;
+ 	mutex_lock(&ataflop_probe_lock);
+ 	if (!unit[drive].disk[type]) {
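[ataflop_probe() packs the drive into the low two bits of the minor and a
1-based type into the remaining bits; the fix decrements type before the
bounds test, matching do_format(). A stand-alone model of the corrected
check; the two constants are stand-ins, not the driver's real values.]

#include <stdio.h>

#define FD_MAX_UNITS    4	/* hypothetical value for the demo */
#define NUM_DISK_MINORS 22	/* hypothetical value for the demo */

/* Mirrors the fixed probe check: a non-zero type in the minor is
 * 1-based, so it is decremented before the bounds test. */
static int probe_ok(int minor)
{
	int drive = minor & 3;
	int type  = minor >> 2;

	if (type)
		type--;

	return drive < FD_MAX_UNITS && type < NUM_DISK_MINORS;
}

int main(void)
{
	printf("minor 88 -> %s\n", probe_ok(88) ? "ok" : "rejected");
	printf("minor 92 -> %s\n", probe_ok(92) ? "ok" : "rejected");
	return 0;
}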
+diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
+index bfcab1c782b53..dae54dd1aeac3 100644
+--- a/drivers/block/null_blk/zoned.c
++++ b/drivers/block/null_blk/zoned.c
+@@ -180,6 +180,7 @@ int null_register_zoned_dev(struct nullb *nullb)
+ void null_free_zoned_dev(struct nullb_device *dev)
+ {
+ 	kvfree(dev->zones);
++	dev->zones = NULL;
+ }
+ 
+ int null_report_zones(struct gendisk *disk, sector_t sector,
+diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
+index 526c77cd7a506..49ad400a52255 100644
+--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
++++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
+@@ -483,11 +483,7 @@ static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
+ 	while ((s = strchr(pathname, '/')))
+ 		s[0] = '!';
+ 
+-	ret = snprintf(buf, len, "%s", pathname);
+-	if (ret >= len)
+-		return -ENAMETOOLONG;
+-
+-	ret = snprintf(buf, len, "%s@%s", buf, dev->sess->sessname);
++	ret = snprintf(buf, len, "%s@%s", pathname, dev->sess->sessname);
+ 	if (ret >= len)
+ 		return -ENAMETOOLONG;
+ 
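[The rnbd fix above replaces a two-step snprintf() that passed buf as both
destination and source argument, which is undefined behavior in C, with a
single call that builds the whole string at once. A small stand-alone
demonstration of the corrected pattern; the path and session names are
made up.]

#include <stdio.h>

int main(void)
{
	char buf[32];
	const char *path = "!dev!nvme0n1", *sess = "session1";

	/* Build "path@session" in one snprintf() call; buf never
	 * appears on both sides of the copy. */
	int ret = snprintf(buf, sizeof(buf), "%s@%s", path, sess);
	if (ret >= (int)sizeof(buf))
		return 1;	/* truncated: the driver returns -ENAMETOOLONG */

	puts(buf);
	return 0;
}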
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index b0c71d3a81a02..bda5c815e4415 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -313,6 +313,7 @@ struct xen_blkif {
+ 
+ 	struct work_struct	free_work;
+ 	unsigned int 		nr_ring_pages;
++	bool			multi_ref;
+ 	/* All rings for this device. */
+ 	struct xen_blkif_ring	*rings;
+ 	unsigned int		nr_rings;
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index c2aaf690352c7..125b22205d383 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
+ 	for (i = 0; i < nr_grefs; i++) {
+ 		char ring_ref_name[RINGREF_NAME_LEN];
+ 
+-		snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
++		if (blkif->multi_ref)
++			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
++		else {
++			WARN_ON(i != 0);
++			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
++		}
++
+ 		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
+ 				   "%u", &ring_ref[i]);
+ 
+ 		if (err != 1) {
+-			if (nr_grefs == 1)
+-				break;
+-
+ 			err = -EINVAL;
+ 			xenbus_dev_fatal(dev, err, "reading %s/%s",
+ 					 dir, ring_ref_name);
+@@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
+ 		}
+ 	}
+ 
+-	if (err != 1) {
+-		WARN_ON(nr_grefs != 1);
+-
+-		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
+-				   &ring_ref[0]);
+-		if (err != 1) {
+-			err = -EINVAL;
+-			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
+-			return err;
+-		}
+-	}
+-
+ 	err = -ENOMEM;
+ 	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
+ 		req = kzalloc(sizeof(*req), GFP_KERNEL);
+@@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be)
+ 		 blkif->nr_rings, blkif->blk_protocol, protocol,
+ 		 blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
+ 
+-	ring_page_order = xenbus_read_unsigned(dev->otherend,
+-					       "ring-page-order", 0);
+-
+-	if (ring_page_order > xen_blkif_max_ring_order) {
++	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
++			   &ring_page_order);
++	if (err != 1) {
++		blkif->nr_ring_pages = 1;
++		blkif->multi_ref = false;
++	} else if (ring_page_order <= xen_blkif_max_ring_order) {
++		blkif->nr_ring_pages = 1 << ring_page_order;
++		blkif->multi_ref = true;
++	} else {
+ 		err = -EINVAL;
+ 		xenbus_dev_fatal(dev, err,
+ 				 "requested ring page order %d exceed max:%d",
+@@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be)
+ 		return err;
+ 	}
+ 
+-	blkif->nr_ring_pages = 1 << ring_page_order;
+-
+ 	if (blkif->nr_rings == 1)
+ 		return read_per_ring_refs(&blkif->rings[0], dev->otherend);
+ 	else {
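[The connect_ring()/read_per_ring_refs() rework above makes one decision up
front: whether the frontend published "ring-page-order" determines both the
page count and which xenstore key names to read, instead of falling back
after a failed read. A stand-alone model of that negotiation; the printed
key names mirror the patch, everything else is illustrative.]

#include <stdbool.h>
#include <stdio.h>

static void ring_ref_keys(bool have_order, unsigned int order,
			  unsigned int max_order)
{
	unsigned int nr_pages, i;
	bool multi_ref;

	if (!have_order) {
		nr_pages = 1;
		multi_ref = false;	/* legacy single "ring-ref" key */
	} else if (order <= max_order) {
		nr_pages = 1u << order;
		multi_ref = true;	/* "ring-ref0", "ring-ref1", ... */
	} else {
		printf("order %u exceeds max %u\n", order, max_order);
		return;
	}

	for (i = 0; i < nr_pages; i++) {
		if (multi_ref)
			printf("ring-ref%u\n", i);
		else
			printf("ring-ref\n");
	}
}

int main(void)
{
	ring_ref_keys(false, 0, 4);	/* legacy frontend */
	ring_ref_keys(true, 2, 4);	/* multi-page frontend */
	return 0;
}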
+diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
+index 03ddcf426887b..0b8f53a688b8a 100644
+--- a/drivers/bus/qcom-ebi2.c
++++ b/drivers/bus/qcom-ebi2.c
+@@ -353,8 +353,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev)
+ 
+ 		/* Figure out the chipselect */
+ 		ret = of_property_read_u32(child, "reg", &csindex);
+-		if (ret)
++		if (ret) {
++			of_node_put(child);
+ 			return ret;
++		}
+ 
+ 		if (csindex > 5) {
+ 			dev_err(dev,
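[The qcom-ebi2 change applies a standard device-tree rule:
for_each_available_child_of_node() holds a reference on "child", so every
early exit from the loop body must drop it explicitly. A hedged fragment of
the pattern; np, child, ret and do_setup() are placeholders, not names from
the patch.]

	for_each_available_child_of_node(np, child) {
		ret = do_setup(child);
		if (ret) {
			of_node_put(child);	/* balance the iterator's get */
			return ret;
		}
	}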
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 9e535336689fd..68145e326eb90 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -901,9 +901,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
+ 	struct device_node *np = ddata->dev->of_node;
+ 	int error;
+ 
+-	if (!of_get_property(np, "reg", NULL))
+-		return 0;
+-
+ 	error = sysc_parse_and_check_child_range(ddata);
+ 	if (error)
+ 		return error;
+@@ -914,6 +911,9 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
+ 
+ 	sysc_check_children(ddata);
+ 
++	if (!of_get_property(np, "reg", NULL))
++		return 0;
++
+ 	error = sysc_parse_registers(ddata);
+ 	if (error)
+ 		return error;
+diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
+index ec9a65e7887dd..f19c227d20f48 100644
+--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
++++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
+@@ -483,6 +483,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
+ 	expected = be32_to_cpup((__be32 *)(buf + 2));
+ 	if (expected > buf_len) {
+ 		dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
++		rc = -E2BIG;
+ 		goto out_err;
+ 	}
+ 
+diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
+index 6a0059e508e38..93f5d11c830b7 100644
+--- a/drivers/char/ttyprintk.c
++++ b/drivers/char/ttyprintk.c
+@@ -158,12 +158,23 @@ static int tpk_ioctl(struct tty_struct *tty,
+ 	return 0;
+ }
+ 
++/*
++ * TTY operations hangup function.
++ */
++static void tpk_hangup(struct tty_struct *tty)
++{
++	struct ttyprintk_port *tpkp = tty->driver_data;
++
++	tty_port_hangup(&tpkp->port);
++}
++
+ static const struct tty_operations ttyprintk_ops = {
+ 	.open = tpk_open,
+ 	.close = tpk_close,
+ 	.write = tpk_write,
+ 	.write_room = tpk_write_room,
+ 	.ioctl = tpk_ioctl,
++	.hangup = tpk_hangup,
+ };
+ 
+ static const struct tty_port_operations null_ops = { };
+diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
+index a55b37fc2c8bd..bc3be5f3eae15 100644
+--- a/drivers/clk/clk-ast2600.c
++++ b/drivers/clk/clk-ast2600.c
+@@ -61,10 +61,10 @@ static void __iomem *scu_g6_base;
+ static const struct aspeed_gate_data aspeed_g6_gates[] = {
+ 	/*				    clk rst  name		parent	 flags */
+ 	[ASPEED_CLK_GATE_MCLK]		= {  0, -1, "mclk-gate",	"mpll",	 CLK_IS_CRITICAL }, /* SDRAM */
+-	[ASPEED_CLK_GATE_ECLK]		= {  1, -1, "eclk-gate",	"eclk",	 0 },	/* Video Engine */
++	[ASPEED_CLK_GATE_ECLK]		= {  1,  6, "eclk-gate",	"eclk",	 0 },	/* Video Engine */
+ 	[ASPEED_CLK_GATE_GCLK]		= {  2,  7, "gclk-gate",	NULL,	 0 },	/* 2D engine */
+ 	/* vclk parent - dclk/d1clk/hclk/mclk */
+-	[ASPEED_CLK_GATE_VCLK]		= {  3,  6, "vclk-gate",	NULL,	 0 },	/* Video Capture */
++	[ASPEED_CLK_GATE_VCLK]		= {  3, -1, "vclk-gate",	NULL,	 0 },	/* Video Capture */
+ 	[ASPEED_CLK_GATE_BCLK]		= {  4,  8, "bclk-gate",	"bclk",	 0 }, /* PCIe/PCI */
+ 	/* From dpll */
+ 	[ASPEED_CLK_GATE_DCLK]		= {  5, -1, "dclk-gate",	NULL,	 CLK_IS_CRITICAL }, /* DAC */
+diff --git a/drivers/clk/imx/clk-imx25.c b/drivers/clk/imx/clk-imx25.c
+index a66cabfbf94f1..66192fe0a898c 100644
+--- a/drivers/clk/imx/clk-imx25.c
++++ b/drivers/clk/imx/clk-imx25.c
+@@ -73,16 +73,6 @@ enum mx25_clks {
+ 
+ static struct clk *clk[clk_max];
+ 
+-static struct clk ** const uart_clks[] __initconst = {
+-	&clk[uart_ipg_per],
+-	&clk[uart1_ipg],
+-	&clk[uart2_ipg],
+-	&clk[uart3_ipg],
+-	&clk[uart4_ipg],
+-	&clk[uart5_ipg],
+-	NULL
+-};
+-
+ static int __init __mx25_clocks_init(void __iomem *ccm_base)
+ {
+ 	BUG_ON(!ccm_base);
+@@ -228,7 +218,7 @@ static int __init __mx25_clocks_init(void __iomem *ccm_base)
+ 	 */
+ 	clk_set_parent(clk[cko_sel], clk[ipg]);
+ 
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(6);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
+index 5585ded8b8c6f..56a5fc402b10c 100644
+--- a/drivers/clk/imx/clk-imx27.c
++++ b/drivers/clk/imx/clk-imx27.c
+@@ -49,17 +49,6 @@ static const char *ssi_sel_clks[] = { "spll_gate", "mpll", };
+ static struct clk *clk[IMX27_CLK_MAX];
+ static struct clk_onecell_data clk_data;
+ 
+-static struct clk ** const uart_clks[] __initconst = {
+-	&clk[IMX27_CLK_PER1_GATE],
+-	&clk[IMX27_CLK_UART1_IPG_GATE],
+-	&clk[IMX27_CLK_UART2_IPG_GATE],
+-	&clk[IMX27_CLK_UART3_IPG_GATE],
+-	&clk[IMX27_CLK_UART4_IPG_GATE],
+-	&clk[IMX27_CLK_UART5_IPG_GATE],
+-	&clk[IMX27_CLK_UART6_IPG_GATE],
+-	NULL
+-};
+-
+ static void __init _mx27_clocks_init(unsigned long fref)
+ {
+ 	BUG_ON(!ccm);
+@@ -176,7 +165,7 @@ static void __init _mx27_clocks_init(unsigned long fref)
+ 
+ 	clk_prepare_enable(clk[IMX27_CLK_EMI_AHB_GATE]);
+ 
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(7);
+ 
+ 	imx_print_silicon_rev("i.MX27", mx27_revision());
+ }
+diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
+index c1df03665c09a..0fe5ac2101566 100644
+--- a/drivers/clk/imx/clk-imx35.c
++++ b/drivers/clk/imx/clk-imx35.c
+@@ -82,14 +82,6 @@ enum mx35_clks {
+ 
+ static struct clk *clk[clk_max];
+ 
+-static struct clk ** const uart_clks[] __initconst = {
+-	&clk[ipg],
+-	&clk[uart1_gate],
+-	&clk[uart2_gate],
+-	&clk[uart3_gate],
+-	NULL
+-};
+-
+ static void __init _mx35_clocks_init(void)
+ {
+ 	void __iomem *base;
+@@ -243,7 +235,7 @@ static void __init _mx35_clocks_init(void)
+ 	 */
+ 	clk_prepare_enable(clk[scc_gate]);
+ 
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(4);
+ 
+ 	imx_print_silicon_rev("i.MX35", mx35_revision());
+ }
+diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
+index 01e079b810261..e4493846454dd 100644
+--- a/drivers/clk/imx/clk-imx5.c
++++ b/drivers/clk/imx/clk-imx5.c
+@@ -128,30 +128,6 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
+ static struct clk *clk[IMX5_CLK_END];
+ static struct clk_onecell_data clk_data;
+ 
+-static struct clk ** const uart_clks_mx51[] __initconst = {
+-	&clk[IMX5_CLK_UART1_IPG_GATE],
+-	&clk[IMX5_CLK_UART1_PER_GATE],
+-	&clk[IMX5_CLK_UART2_IPG_GATE],
+-	&clk[IMX5_CLK_UART2_PER_GATE],
+-	&clk[IMX5_CLK_UART3_IPG_GATE],
+-	&clk[IMX5_CLK_UART3_PER_GATE],
+-	NULL
+-};
+-
+-static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
+-	&clk[IMX5_CLK_UART1_IPG_GATE],
+-	&clk[IMX5_CLK_UART1_PER_GATE],
+-	&clk[IMX5_CLK_UART2_IPG_GATE],
+-	&clk[IMX5_CLK_UART2_PER_GATE],
+-	&clk[IMX5_CLK_UART3_IPG_GATE],
+-	&clk[IMX5_CLK_UART3_PER_GATE],
+-	&clk[IMX5_CLK_UART4_IPG_GATE],
+-	&clk[IMX5_CLK_UART4_PER_GATE],
+-	&clk[IMX5_CLK_UART5_IPG_GATE],
+-	&clk[IMX5_CLK_UART5_PER_GATE],
+-	NULL
+-};
+-
+ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
+ {
+ 	clk[IMX5_CLK_DUMMY]		= imx_clk_fixed("dummy", 0);
+@@ -382,7 +358,7 @@ static void __init mx50_clocks_init(struct device_node *np)
+ 	r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
+ 	clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
+ 
+-	imx_register_uart_clocks(uart_clks_mx50_mx53);
++	imx_register_uart_clocks(5);
+ }
+ CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
+ 
+@@ -488,7 +464,7 @@ static void __init mx51_clocks_init(struct device_node *np)
+ 	val |= 1 << 23;
+ 	writel(val, MXC_CCM_CLPCR);
+ 
+-	imx_register_uart_clocks(uart_clks_mx51);
++	imx_register_uart_clocks(3);
+ }
+ CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
+ 
+@@ -633,6 +609,6 @@ static void __init mx53_clocks_init(struct device_node *np)
+ 	r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
+ 	clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
+ 
+-	imx_register_uart_clocks(uart_clks_mx50_mx53);
++	imx_register_uart_clocks(5);
+ }
+ CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
+diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
+index 521d6136d22c5..496900de0b0bb 100644
+--- a/drivers/clk/imx/clk-imx6q.c
++++ b/drivers/clk/imx/clk-imx6q.c
+@@ -140,13 +140,6 @@ static inline int clk_on_imx6dl(void)
+ 	return of_machine_is_compatible("fsl,imx6dl");
+ }
+ 
+-static const int uart_clk_ids[] __initconst = {
+-	IMX6QDL_CLK_UART_IPG,
+-	IMX6QDL_CLK_UART_SERIAL,
+-};
+-
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
+-
+ static int ldb_di_sel_by_clock_id(int clock_id)
+ {
+ 	switch (clock_id) {
+@@ -440,7 +433,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 	struct device_node *np;
+ 	void __iomem *anatop_base, *base;
+ 	int ret;
+-	int i;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX6QDL_CLK_END), GFP_KERNEL);
+@@ -982,12 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 			       hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(1);
+ }
+ CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
+diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
+index 29eab05c90689..2773659703202 100644
+--- a/drivers/clk/imx/clk-imx6sl.c
++++ b/drivers/clk/imx/clk-imx6sl.c
+@@ -179,19 +179,11 @@ void imx6sl_set_wait_clk(bool enter)
+ 		imx6sl_enable_pll_arm(false);
+ }
+ 
+-static const int uart_clk_ids[] __initconst = {
+-	IMX6SL_CLK_UART,
+-	IMX6SL_CLK_UART_SERIAL,
+-};
+-
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
+-
+ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
+ {
+ 	struct device_node *np;
+ 	void __iomem *base;
+ 	int ret;
+-	int i;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX6SL_CLK_END), GFP_KERNEL);
+@@ -448,12 +440,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
+ 	clk_set_parent(hws[IMX6SL_CLK_LCDIF_AXI_SEL]->clk,
+ 		       hws[IMX6SL_CLK_PLL2_PFD2]->clk);
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(2);
+ }
+ CLK_OF_DECLARE(imx6sl, "fsl,imx6sl-ccm", imx6sl_clocks_init);
+diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
+index 8e8288bda4d0b..31d777f300395 100644
+--- a/drivers/clk/imx/clk-imx6sll.c
++++ b/drivers/clk/imx/clk-imx6sll.c
+@@ -76,26 +76,10 @@ static u32 share_count_ssi1;
+ static u32 share_count_ssi2;
+ static u32 share_count_ssi3;
+ 
+-static const int uart_clk_ids[] __initconst = {
+-	IMX6SLL_CLK_UART1_IPG,
+-	IMX6SLL_CLK_UART1_SERIAL,
+-	IMX6SLL_CLK_UART2_IPG,
+-	IMX6SLL_CLK_UART2_SERIAL,
+-	IMX6SLL_CLK_UART3_IPG,
+-	IMX6SLL_CLK_UART3_SERIAL,
+-	IMX6SLL_CLK_UART4_IPG,
+-	IMX6SLL_CLK_UART4_SERIAL,
+-	IMX6SLL_CLK_UART5_IPG,
+-	IMX6SLL_CLK_UART5_SERIAL,
+-};
+-
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
+-
+ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
+ {
+ 	struct device_node *np;
+ 	void __iomem *base;
+-	int i;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX6SLL_CLK_END), GFP_KERNEL);
+@@ -356,13 +340,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
+ 
+ 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(5);
+ 
+ 	/* Lower the AHB clock rate before changing the clock source. */
+ 	clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 99000000);
+diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
+index 20dcce526d072..fc1bd23d45834 100644
+--- a/drivers/clk/imx/clk-imx6sx.c
++++ b/drivers/clk/imx/clk-imx6sx.c
+@@ -117,18 +117,10 @@ static u32 share_count_ssi3;
+ static u32 share_count_sai1;
+ static u32 share_count_sai2;
+ 
+-static const int uart_clk_ids[] __initconst = {
+-	IMX6SX_CLK_UART_IPG,
+-	IMX6SX_CLK_UART_SERIAL,
+-};
+-
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
+-
+ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
+ {
+ 	struct device_node *np;
+ 	void __iomem *base;
+-	int i;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX6SX_CLK_CLK_END), GFP_KERNEL);
+@@ -556,12 +548,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
+ 	clk_set_parent(hws[IMX6SX_CLK_QSPI1_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
+ 	clk_set_parent(hws[IMX6SX_CLK_QSPI2_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(2);
+ }
+ CLK_OF_DECLARE(imx6sx, "fsl,imx6sx-ccm", imx6sx_clocks_init);
+diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
+index 22d24a6a05e70..c4e0f1c07192f 100644
+--- a/drivers/clk/imx/clk-imx7d.c
++++ b/drivers/clk/imx/clk-imx7d.c
+@@ -377,23 +377,10 @@ static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_
+ static struct clk_hw **hws;
+ static struct clk_hw_onecell_data *clk_hw_data;
+ 
+-static const int uart_clk_ids[] __initconst = {
+-	IMX7D_UART1_ROOT_CLK,
+-	IMX7D_UART2_ROOT_CLK,
+-	IMX7D_UART3_ROOT_CLK,
+-	IMX7D_UART4_ROOT_CLK,
+-	IMX7D_UART5_ROOT_CLK,
+-	IMX7D_UART6_ROOT_CLK,
+-	IMX7D_UART7_ROOT_CLK,
+-};
+-
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
+-
+ static void __init imx7d_clocks_init(struct device_node *ccm_node)
+ {
+ 	struct device_node *np;
+ 	void __iomem *base;
+-	int i;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX7D_CLK_END), GFP_KERNEL);
+@@ -897,14 +884,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
+ 	hws[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
+ 	hws[IMX7D_USB_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(7);
+ 
+ }
+ CLK_OF_DECLARE(imx7d, "fsl,imx7d-ccm", imx7d_clocks_init);
+diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
+index 634c0b6636b0e..779e09105da7d 100644
+--- a/drivers/clk/imx/clk-imx7ulp.c
++++ b/drivers/clk/imx/clk-imx7ulp.c
+@@ -43,19 +43,6 @@ static const struct clk_div_table ulp_div_table[] = {
+ 	{ /* sentinel */ },
+ };
+ 
+-static const int pcc2_uart_clk_ids[] __initconst = {
+-	IMX7ULP_CLK_LPUART4,
+-	IMX7ULP_CLK_LPUART5,
+-};
+-
+-static const int pcc3_uart_clk_ids[] __initconst = {
+-	IMX7ULP_CLK_LPUART6,
+-	IMX7ULP_CLK_LPUART7,
+-};
+-
+-static struct clk **pcc2_uart_clks[ARRAY_SIZE(pcc2_uart_clk_ids) + 1] __initdata;
+-static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata;
+-
+ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
+ {
+ 	struct clk_hw_onecell_data *clk_data;
+@@ -150,7 +137,6 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
+ 	struct clk_hw_onecell_data *clk_data;
+ 	struct clk_hw **hws;
+ 	void __iomem *base;
+-	int i;
+ 
+ 	clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
+ 			   GFP_KERNEL);
+@@ -190,13 +176,7 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
+ 
+ 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+ 
+-	for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) {
+-		int index = pcc2_uart_clk_ids[i];
+-
+-		pcc2_uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(pcc2_uart_clks);
++	imx_register_uart_clocks(2);
+ }
+ CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
+ 
+@@ -205,7 +185,6 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
+ 	struct clk_hw_onecell_data *clk_data;
+ 	struct clk_hw **hws;
+ 	void __iomem *base;
+-	int i;
+ 
+ 	clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
+ 			   GFP_KERNEL);
+@@ -244,13 +223,7 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
+ 
+ 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+ 
+-	for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) {
+-		int index = pcc3_uart_clk_ids[i];
+-
+-		pcc3_uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(pcc3_uart_clks);
++	imx_register_uart_clocks(7);
+ }
+ CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
+ 
+diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
+index 6a01eec36dd04..f1919fafb1247 100644
+--- a/drivers/clk/imx/clk-imx8mm.c
++++ b/drivers/clk/imx/clk-imx8mm.c
+@@ -296,20 +296,12 @@ static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "
+ static struct clk_hw_onecell_data *clk_hw_data;
+ static struct clk_hw **hws;
+ 
+-static const int uart_clk_ids[] = {
+-	IMX8MM_CLK_UART1_ROOT,
+-	IMX8MM_CLK_UART2_ROOT,
+-	IMX8MM_CLK_UART3_ROOT,
+-	IMX8MM_CLK_UART4_ROOT,
+-};
+-static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
+-
+ static int imx8mm_clocks_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np = dev->of_node;
+ 	void __iomem *base;
+-	int ret, i;
++	int ret;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX8MM_CLK_END), GFP_KERNEL);
+@@ -634,13 +626,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
+ 		goto unregister_hws;
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_hws[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_hws);
++	imx_register_uart_clocks(4);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
+index 324c5fd0aa04f..88f6630cd472f 100644
+--- a/drivers/clk/imx/clk-imx8mn.c
++++ b/drivers/clk/imx/clk-imx8mn.c
+@@ -289,20 +289,12 @@ static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "
+ static struct clk_hw_onecell_data *clk_hw_data;
+ static struct clk_hw **hws;
+ 
+-static const int uart_clk_ids[] = {
+-	IMX8MN_CLK_UART1_ROOT,
+-	IMX8MN_CLK_UART2_ROOT,
+-	IMX8MN_CLK_UART3_ROOT,
+-	IMX8MN_CLK_UART4_ROOT,
+-};
+-static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
+-
+ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np = dev->of_node;
+ 	void __iomem *base;
+-	int ret, i;
++	int ret;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX8MN_CLK_END), GFP_KERNEL);
+@@ -585,13 +577,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ 		goto unregister_hws;
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_hws[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_hws);
++	imx_register_uart_clocks(4);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 2f4e1d674e1c1..3e6557e7d559b 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -414,20 +414,11 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
+ static struct clk_hw **hws;
+ static struct clk_hw_onecell_data *clk_hw_data;
+ 
+-static const int uart_clk_ids[] = {
+-	IMX8MP_CLK_UART1_ROOT,
+-	IMX8MP_CLK_UART2_ROOT,
+-	IMX8MP_CLK_UART3_ROOT,
+-	IMX8MP_CLK_UART4_ROOT,
+-};
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
+-
+ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np;
+ 	void __iomem *anatop_base, *ccm_base;
+-	int i;
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
+ 	anatop_base = of_iomap(np, 0);
+@@ -737,13 +728,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ 
+ 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(4);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index 4dd4ae9d022ba..3e1a10d3f55cc 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -281,20 +281,12 @@ static const char * const pllout_monitor_sels[] = {"osc_25m", "osc_27m", "dummy"
+ static struct clk_hw_onecell_data *clk_hw_data;
+ static struct clk_hw **hws;
+ 
+-static const int uart_clk_ids[] = {
+-	IMX8MQ_CLK_UART1_ROOT,
+-	IMX8MQ_CLK_UART2_ROOT,
+-	IMX8MQ_CLK_UART3_ROOT,
+-	IMX8MQ_CLK_UART4_ROOT,
+-};
+-static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
+-
+ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np = dev->of_node;
+ 	void __iomem *base;
+-	int err, i;
++	int err;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX8MQ_CLK_END), GFP_KERNEL);
+@@ -629,13 +621,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ 		goto unregister_hws;
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_hws[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_hws);
++	imx_register_uart_clocks(4);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
+index 47882c51cb853..7cc669934253a 100644
+--- a/drivers/clk/imx/clk.c
++++ b/drivers/clk/imx/clk.c
+@@ -147,8 +147,10 @@ void imx_cscmr1_fixup(u32 *val)
+ }
+ 
+ #ifndef MODULE
+-static int imx_keep_uart_clocks;
+-static struct clk ** const *imx_uart_clocks;
++
++static bool imx_keep_uart_clocks;
++static int imx_enabled_uart_clocks;
++static struct clk **imx_uart_clocks;
+ 
+ static int __init imx_keep_uart_clocks_param(char *str)
+ {
+@@ -161,24 +163,45 @@ __setup_param("earlycon", imx_keep_uart_earlycon,
+ __setup_param("earlyprintk", imx_keep_uart_earlyprintk,
+ 	      imx_keep_uart_clocks_param, 0);
+ 
+-void imx_register_uart_clocks(struct clk ** const clks[])
++void imx_register_uart_clocks(unsigned int clk_count)
+ {
++	imx_enabled_uart_clocks = 0;
++
++/* i.MX boards use device trees now.  For build tests without CONFIG_OF, do nothing */
++#ifdef CONFIG_OF
+ 	if (imx_keep_uart_clocks) {
+ 		int i;
+ 
+-		imx_uart_clocks = clks;
+-		for (i = 0; imx_uart_clocks[i]; i++)
+-			clk_prepare_enable(*imx_uart_clocks[i]);
++		imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL);
++
++		if (!of_stdout)
++			return;
++
++		for (i = 0; i < clk_count; i++) {
++			imx_uart_clocks[imx_enabled_uart_clocks] = of_clk_get(of_stdout, i);
++
++			/* Stop if there are no more of_stdout references */
++			if (IS_ERR(imx_uart_clocks[imx_enabled_uart_clocks]))
++				return;
++
++			/* Only enable the clock if it's not NULL */
++			if (imx_uart_clocks[imx_enabled_uart_clocks])
++				clk_prepare_enable(imx_uart_clocks[imx_enabled_uart_clocks++]);
++		}
+ 	}
++#endif
+ }
+ 
+ static int __init imx_clk_disable_uart(void)
+ {
+-	if (imx_keep_uart_clocks && imx_uart_clocks) {
++	if (imx_keep_uart_clocks && imx_enabled_uart_clocks) {
+ 		int i;
+ 
+-		for (i = 0; imx_uart_clocks[i]; i++)
+-			clk_disable_unprepare(*imx_uart_clocks[i]);
++		for (i = 0; i < imx_enabled_uart_clocks; i++) {
++			clk_disable_unprepare(imx_uart_clocks[i]);
++			clk_put(imx_uart_clocks[i]);
++		}
++		kfree(imx_uart_clocks);
+ 	}
+ 
+ 	return 0;
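[With the reworked clk.c above, each SoC file now passes only the number of
stdout clock references and the core walks the device tree itself via
of_stdout, the kernel's global handle on the /chosen stdout node. A hedged
sketch of that enumeration; the array bound and names are illustrative and
error handling is trimmed.]

static struct clk *kept_clks[8];
static int nr_kept;

static void keep_stdout_clocks(unsigned int count)
{
	unsigned int i;

	if (!of_stdout)
		return;		/* no console node: nothing to keep */

	for (i = 0; i < count && i < ARRAY_SIZE(kept_clks); i++) {
		struct clk *clk = of_clk_get(of_stdout, i);

		if (IS_ERR(clk))
			break;	/* ran out of clock references */

		clk_prepare_enable(clk);
		kept_clks[nr_kept++] = clk;
	}
}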
+diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
+index 4f04c8287286f..7571603bee23b 100644
+--- a/drivers/clk/imx/clk.h
++++ b/drivers/clk/imx/clk.h
+@@ -11,9 +11,9 @@ extern spinlock_t imx_ccm_lock;
+ void imx_check_clocks(struct clk *clks[], unsigned int count);
+ void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
+ #ifndef MODULE
+-void imx_register_uart_clocks(struct clk ** const clks[]);
++void imx_register_uart_clocks(unsigned int clk_count);
+ #else
+-static inline void imx_register_uart_clocks(struct clk ** const clks[])
++static inline void imx_register_uart_clocks(unsigned int clk_count)
+ {
+ }
+ #endif
+diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
+index f5746f9ea929f..32ac6b6b75306 100644
+--- a/drivers/clk/mvebu/armada-37xx-periph.c
++++ b/drivers/clk/mvebu/armada-37xx-periph.c
+@@ -84,6 +84,7 @@ struct clk_pm_cpu {
+ 	void __iomem *reg_div;
+ 	u8 shift_div;
+ 	struct regmap *nb_pm_base;
++	unsigned long l1_expiration;
+ };
+ 
+ #define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
+@@ -440,33 +441,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
+ 	return val;
+ }
+ 
+-static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
+-{
+-	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
+-	struct regmap *base = pm_cpu->nb_pm_base;
+-	int load_level;
+-
+-	/*
+-	 * We set the clock parent only if the DVFS is available but
+-	 * not enabled.
+-	 */
+-	if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
+-		return -EINVAL;
+-
+-	/* Set the parent clock for all the load level */
+-	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
+-		unsigned int reg, mask,  val,
+-			offset = ARMADA_37XX_NB_TBG_SEL_OFF;
+-
+-		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
+-
+-		val = index << offset;
+-		mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
+-		regmap_update_bits(base, reg, mask, val);
+-	}
+-	return 0;
+-}
+-
+ static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
+ 					    unsigned long parent_rate)
+ {
+@@ -514,8 +488,10 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
+ }
+ 
+ /*
+- * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
+- * respectively) to L0 frequency (1.2 Ghz) requires a significant
++ * Workaround when base CPU frequency is 1000 or 1200 MHz
++ *
++ * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz
++ * respectively) to L0 frequency (1/1.2 GHz) requires a significant
+  * amount of time to let VDD stabilize to the appropriate
+  * voltage. This amount of time is large enough that it cannot be
+  * covered by the hardware countdown register. Due to this, the CPU
+@@ -525,26 +501,56 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
+  * To work around this problem, we prevent switching directly from the
+  * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+  * frequency in-between. The sequence therefore becomes:
+- * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
++ * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz)
+  * 2. Sleep 20ms for stabling VDD voltage
+ * 2. Sleep 20ms for stabilizing VDD voltage
++ * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
+  */
+-static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
++static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
++				   unsigned int new_level, unsigned long rate,
++				   struct regmap *base)
+ {
+ 	unsigned int cur_level;
+ 
+-	if (rate != 1200 * 1000 * 1000)
+-		return;
+-
+ 	regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+ 	cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+-	if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
++
++	if (cur_level == new_level)
++		return;
++
++	/*
++	 * System wants to go to L1 on its own. If we are going from L2/L3,
++	 * remember when 20ms will expire. If from L0, set the value so that
++	 * next switch to L0 won't have to wait.
++	 */
++	if (new_level == ARMADA_37XX_DVFS_LOAD_1) {
++		if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
++			pm_cpu->l1_expiration = jiffies;
++		else
++			pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20);
+ 		return;
++	}
++
++	/*
++	 * If we are switching to L2/L3, just invalidate the L1 expiration
++	 * time; sleeping is not needed.
++	 */
++	if (rate < 1000*1000*1000)
++		goto invalidate_l1_exp;
++
++	/*
++	 * We are going to L0 with rate >= 1GHz. Check whether we have been at
++	 * L1 for long enough time. If not, go to L1 for 20ms.
++	 */
++	if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration)
++		goto invalidate_l1_exp;
+ 
+ 	regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+ 			   ARMADA_37XX_NB_CPU_LOAD_MASK,
+ 			   ARMADA_37XX_DVFS_LOAD_1);
+ 	msleep(20);
++
++invalidate_l1_exp:
++	pm_cpu->l1_expiration = 0;
+ }
+ 
+ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
+@@ -578,7 +584,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
+ 			reg = ARMADA_37XX_NB_CPU_LOAD;
+ 			mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+ 
+-			clk_pm_cpu_set_rate_wa(rate, base);
++			/* Apply workaround when base CPU frequency is 1000 or 1200 MHz */
++			if (parent_rate >= 1000*1000*1000)
++				clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);
+ 
+ 			regmap_update_bits(base, reg, mask, load_level);
+ 
+@@ -592,7 +600,6 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
+ 
+ static const struct clk_ops clk_pm_cpu_ops = {
+ 	.get_parent = clk_pm_cpu_get_parent,
+-	.set_parent = clk_pm_cpu_set_parent,
+ 	.round_rate = clk_pm_cpu_round_rate,
+ 	.set_rate = clk_pm_cpu_set_rate,
+ 	.recalc_rate = clk_pm_cpu_recalc_rate,
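[The rework above replaces the unconditional "am I at or below L1?" test
with a timestamp: entering L1 records when 20 ms of dwell time will have
elapsed, and a later switch to L0 may skip the L1 detour if that moment has
passed. A stand-alone model of the bookkeeping; plain integers stand in for
jiffies and msecs_to_jiffies(20).]

#include <stdio.h>

static unsigned long l1_expiration;

static const char *go_to_l0(unsigned long now)
{
	const char *action;

	if (l1_expiration && now >= l1_expiration)
		action = "switch straight to L0";	/* dwelled >= 20ms in L1 */
	else
		action = "switch to L1, sleep 20ms, then L0";

	l1_expiration = 0;	/* invalidated in both cases */
	return action;
}

int main(void)
{
	l1_expiration = 100 + 20;	/* entered L1 at t=100 from L2/L3 */
	printf("t=110: %s\n", go_to_l0(110));

	l1_expiration = 100 + 20;
	printf("t=130: %s\n", go_to_l0(130));
	return 0;
}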
+diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
+index 45cfc57bff924..af6ac17c7daeb 100644
+--- a/drivers/clk/qcom/a53-pll.c
++++ b/drivers/clk/qcom/a53-pll.c
+@@ -93,6 +93,7 @@ static const struct of_device_id qcom_a53pll_match_table[] = {
+ 	{ .compatible = "qcom,msm8916-a53pll" },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, qcom_a53pll_match_table);
+ 
+ static struct platform_driver qcom_a53pll_driver = {
+ 	.probe = qcom_a53pll_probe,
+diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
+index e171d3caf2cf3..c4a53e5db229f 100644
+--- a/drivers/clk/qcom/a7-pll.c
++++ b/drivers/clk/qcom/a7-pll.c
+@@ -86,6 +86,7 @@ static const struct of_device_id qcom_a7pll_match_table[] = {
+ 	{ .compatible = "qcom,sdx55-a7pll" },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, qcom_a7pll_match_table);
+ 
+ static struct platform_driver qcom_a7pll_driver = {
+ 	.probe = qcom_a7pll_probe,
+diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
+index 30be87fb222aa..bef7899ad0d66 100644
+--- a/drivers/clk/qcom/apss-ipq-pll.c
++++ b/drivers/clk/qcom/apss-ipq-pll.c
+@@ -81,6 +81,7 @@ static const struct of_device_id apss_ipq_pll_match_table[] = {
+ 	{ .compatible = "qcom,ipq6018-a53pll" },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
+ 
+ static struct platform_driver apss_ipq_pll_driver = {
+ 	.probe = apss_ipq_pll_probe,
+diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
+index 462c84321b2d2..1998e9d4cfc02 100644
+--- a/drivers/clk/uniphier/clk-uniphier-mux.c
++++ b/drivers/clk/uniphier/clk-uniphier-mux.c
+@@ -31,10 +31,10 @@ static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
+ {
+ 	struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
+-	int num_parents = clk_hw_get_num_parents(hw);
++	unsigned int num_parents = clk_hw_get_num_parents(hw);
+ 	int ret;
+ 	unsigned int val;
+-	u8 i;
++	unsigned int i;
+ 
+ 	ret = regmap_read(mux->regmap, mux->reg, &val);
+ 	if (ret)
+diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
+index 92f449ed38e51..abe6afbf3407b 100644
+--- a/drivers/clk/zynqmp/pll.c
++++ b/drivers/clk/zynqmp/pll.c
+@@ -14,10 +14,12 @@
+  * struct zynqmp_pll - PLL clock
+  * @hw:		Handle between common and hardware-specific interfaces
+  * @clk_id:	PLL clock ID
++ * @set_pll_mode:	Whether an IOCTL_SET_PLL_FRAC_MODE request has been sent to ATF
+  */
+ struct zynqmp_pll {
+ 	struct clk_hw hw;
+ 	u32 clk_id;
++	bool set_pll_mode;
+ };
+ 
+ #define to_zynqmp_pll(_hw)	container_of(_hw, struct zynqmp_pll, hw)
+@@ -81,6 +83,8 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
+ 	if (ret)
+ 		pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
+ 			     __func__, clk_name, ret);
++	else
++		clk->set_pll_mode = true;
+ }
+ 
+ /**
+@@ -100,9 +104,7 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ 	/* Enable the fractional mode if needed */
+ 	rate_div = (rate * FRAC_DIV) / *prate;
+ 	f = rate_div % FRAC_DIV;
+-	zynqmp_pll_set_mode(hw, !!f);
+-
+-	if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
++	if (f) {
+ 		if (rate > PS_PLL_VCO_MAX) {
+ 			fbdiv = rate / PS_PLL_VCO_MAX;
+ 			rate = rate / (fbdiv + 1);
+@@ -173,10 +175,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	long rate_div, frac, m, f;
+ 	int ret;
+ 
+-	if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
+-		rate_div = (rate * FRAC_DIV) / parent_rate;
++	rate_div = (rate * FRAC_DIV) / parent_rate;
++	f = rate_div % FRAC_DIV;
++	zynqmp_pll_set_mode(hw, !!f);
++
++	if (f) {
+ 		m = rate_div / FRAC_DIV;
+-		f = rate_div % FRAC_DIV;
+ 		m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX));
+ 		rate = parent_rate * m;
+ 		frac = (parent_rate * f) / FRAC_DIV;
+@@ -240,9 +244,15 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
+ 	u32 clk_id = clk->clk_id;
+ 	int ret;
+ 
+-	if (zynqmp_pll_is_enabled(hw))
++	/*
++	 * Don't skip enabling the clock if an IOCTL_SET_PLL_FRAC_MODE
++	 * request has been sent to ATF.
++	 */
++	if (zynqmp_pll_is_enabled(hw) && (!clk->set_pll_mode))
+ 		return 0;
+ 
++	clk->set_pll_mode = false;
++
+ 	ret = zynqmp_pm_clock_enable(clk_id);
+ 	if (ret)
+ 		pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
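[The reworked zynqmp_pll_set_rate() now decides fractional mode from the
requested ratio itself: a rate needs FRAC mode exactly when rate/parent_rate
has a non-zero remainder in units of 2^-16. The arithmetic is easy to check
in isolation; this stand-alone program mirrors it, with a made-up 50 MHz
parent and sample rates.]

#include <stdio.h>

#define FRAC_DIV 0x10000	/* 2^16, as used by the driver */

int main(void)
{
	unsigned long long parent = 50000000;
	unsigned long long rates[] = { 1100000000, 1125000000 };

	for (int i = 0; i < 2; i++) {
		unsigned long long rate_div = rates[i] * FRAC_DIV / parent;
		unsigned long long m = rate_div / FRAC_DIV;	/* integer part */
		unsigned long long f = rate_div % FRAC_DIV;	/* fraction */

		printf("rate %llu: fbdiv=%llu frac=%llu -> %s mode\n",
		       rates[i], m, f, f ? "FRAC" : "INT");
	}
	return 0;
}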
+diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
+index 029efc2731b49..6af2470136bd2 100644
+--- a/drivers/clocksource/ingenic-ost.c
++++ b/drivers/clocksource/ingenic-ost.c
+@@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
+ 		return PTR_ERR(ost->regs);
+ 
+ 	map = device_node_to_regmap(dev->parent->of_node);
+-	if (!map) {
++	if (IS_ERR(map)) {
+ 		dev_err(dev, "regmap not found");
+-		return -EINVAL;
++		return PTR_ERR(map);
+ 	}
+ 
+ 	ost->clk = devm_clk_get(dev, "ost");
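[device_node_to_regmap(), like the other regmap lookup helpers, reports
failure as an ERR_PTR-encoded errno rather than NULL, which is why the old
"!map" test could never fire. A small userspace model of that convention;
the macros mirror the kernel's include/linux/err.h.]

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *map = ERR_PTR(-22);	/* a failed lookup, e.g. -EINVAL */

	printf("!map        -> %d  (the old check never fires)\n", !map);
	printf("IS_ERR(map) -> %d, PTR_ERR = %ld\n", IS_ERR(map), PTR_ERR(map));
	return 0;
}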
+diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
+index 33b3e8aa2cc50..3fae9ebb58b83 100644
+--- a/drivers/clocksource/timer-ti-dm-systimer.c
++++ b/drivers/clocksource/timer-ti-dm-systimer.c
+@@ -449,13 +449,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
+ 	struct dmtimer_systimer *t = &clkevt->t;
+ 	void __iomem *pend = t->base + t->pend;
+ 
+-	writel_relaxed(0xffffffff - cycles, t->base + t->counter);
+ 	while (readl_relaxed(pend) & WP_TCRR)
+ 		cpu_relax();
++	writel_relaxed(0xffffffff - cycles, t->base + t->counter);
+ 
+-	writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
+ 	while (readl_relaxed(pend) & WP_TCLR)
+ 		cpu_relax();
++	writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
+ 
+ 	return 0;
+ }
+@@ -490,18 +490,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
+ 	dmtimer_clockevent_shutdown(evt);
+ 
+ 	/* Looks like we need to first set the load value separately */
+-	writel_relaxed(clkevt->period, t->base + t->load);
+ 	while (readl_relaxed(pend) & WP_TLDR)
+ 		cpu_relax();
++	writel_relaxed(clkevt->period, t->base + t->load);
+ 
+-	writel_relaxed(clkevt->period, t->base + t->counter);
+ 	while (readl_relaxed(pend) & WP_TCRR)
+ 		cpu_relax();
++	writel_relaxed(clkevt->period, t->base + t->counter);
+ 
+-	writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+-		       t->base + t->ctrl);
+ 	while (readl_relaxed(pend) & WP_TCLR)
+ 		cpu_relax();
++	writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
++		       t->base + t->ctrl);
+ 
+ 	return 0;
+ }
+@@ -554,6 +554,7 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
+ 	dev->set_state_shutdown = dmtimer_clockevent_shutdown;
+ 	dev->set_state_periodic = dmtimer_set_periodic;
+ 	dev->set_state_oneshot = dmtimer_clockevent_shutdown;
++	dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
+ 	dev->tick_resume = dmtimer_clockevent_shutdown;
+ 	dev->cpumask = cpu_possible_mask;
+ 
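[The timer-ti-dm hunks reorder every posted-write sequence: the per-register
write-pending bit is now polled before the write it guards rather than after
it, so a new write can never be issued while the previous one is still in
flight. A hedged helper expressing the pattern; the function name and the
wp_bit/reg parameters are illustrative.]

static void dmtimer_write_posted(struct dmtimer_systimer *t,
				 u32 wp_bit, unsigned int reg, u32 val)
{
	void __iomem *pend = t->base + t->pend;

	/* Wait until the previous posted write to this register has
	 * retired, then issue the new one. */
	while (readl_relaxed(pend) & wp_bit)
		cpu_relax();
	writel_relaxed(val, t->base + reg);
}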
+diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
+index b4af4094309b0..e4782f562e7a9 100644
+--- a/drivers/cpufreq/armada-37xx-cpufreq.c
++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
+@@ -25,6 +25,10 @@
+ 
+ #include "cpufreq-dt.h"
+ 
++/* Clk register set */
++#define ARMADA_37XX_CLK_TBG_SEL		0
++#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF	22
++
+ /* Power management in North Bridge register set */
+ #define ARMADA_37XX_NB_L0L1	0x18
+ #define ARMADA_37XX_NB_L2L3	0x1C
+@@ -69,6 +73,8 @@
+ #define LOAD_LEVEL_NR	4
+ 
+ #define MIN_VOLT_MV 1000
++#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
++#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
+ 
+ /*  AVS value for the corresponding voltage (in mV) */
+ static int avs_map[] = {
+@@ -120,10 +126,15 @@ static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
+  * will be configured then the DVFS will be enabled.
+  */
+ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+-						 struct clk *clk, u8 *divider)
++						 struct regmap *clk_base, u8 *divider)
+ {
++	u32 cpu_tbg_sel;
+ 	int load_lvl;
+-	struct clk *parent;
++
++	/* Determine which TBG clock the CPU is connected to */
++	regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
++	cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
++	cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
+ 
+ 	for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
+ 		unsigned int reg, mask, val, offset = 0;
+@@ -142,6 +153,11 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+ 		mask = (ARMADA_37XX_NB_CLK_SEL_MASK
+ 			<< ARMADA_37XX_NB_CLK_SEL_OFF);
+ 
++		/* Set TBG index, for all levels we use the same TBG */
++		val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
++		mask = (ARMADA_37XX_NB_TBG_SEL_MASK
++			<< ARMADA_37XX_NB_TBG_SEL_OFF);
++
+ 		/*
+ 		 * Set cpu divider based on the pre-computed array in
+ 		 * order to have balanced step.
+@@ -160,14 +176,6 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+ 
+ 		regmap_update_bits(base, reg, mask, val);
+ 	}
+-
+-	/*
+-	 * Set cpu clock source, for all the level we keep the same
+-	 * clock source that the one already configured. For this one
+-	 * we need to use the clock framework
+-	 */
+-	parent = clk_get_parent(clk);
+-	clk_set_parent(clk, parent);
+ }
+ 
+ /*
+@@ -202,6 +210,8 @@ static u32 armada_37xx_avs_val_match(int target_vm)
+  * - L2 & L3 voltage should be about 150mv smaller than L0 voltage.
+  * This function calculates L1 & L2 & L3 AVS values dynamically based
+  * on L0 voltage and fill all AVS values to the AVS value table.
++ * When the base CPU frequency is 1000 or 1200 MHz, there is an additional
++ * minimal AVS value for load L1.
+  */
+ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
+ 						struct armada_37xx_dvfs *dvfs)
+@@ -233,6 +243,19 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
+ 		for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
+ 			dvfs->avs[load_level] = avs_min;
+ 
++		/*
++		 * Set the avs values for load L0 and L1 when base CPU frequency
++		 * is 1000/1200 MHz to their typical initial values according to
++		 * the Armada 3700 Hardware Specifications.
++		 */
++		if (dvfs->cpu_freq_max >= 1000*1000*1000) {
++			if (dvfs->cpu_freq_max >= 1200*1000*1000)
++				avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
++			else
++				avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
++			dvfs->avs[0] = dvfs->avs[1] = avs_min;
++		}
++
+ 		return;
+ 	}
+ 
+@@ -252,6 +275,26 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
+ 	target_vm = avs_map[l0_vdd_min] - 150;
+ 	target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ 	dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
++
++	/*
++	 * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz,
++	 * otherwise the CPU gets stuck when switching from load L1 to load L0.
++	 * Also ensure that avs value for load L1 is not higher than for L0.
++	 */
++	if (dvfs->cpu_freq_max >= 1000*1000*1000) {
++		u32 avs_min_l1;
++
++		if (dvfs->cpu_freq_max >= 1200*1000*1000)
++			avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
++		else
++			avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
++
++		if (avs_min_l1 > dvfs->avs[0])
++			avs_min_l1 = dvfs->avs[0];
++
++		if (dvfs->avs[1] < avs_min_l1)
++			dvfs->avs[1] = avs_min_l1;
++	}
+ }
+ 
+ static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
+@@ -358,11 +401,16 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ 	struct platform_device *pdev;
+ 	unsigned long freq;
+ 	unsigned int cur_frequency, base_frequency;
+-	struct regmap *nb_pm_base, *avs_base;
++	struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
+ 	struct device *cpu_dev;
+ 	int load_lvl, ret;
+ 	struct clk *clk, *parent;
+ 
++	nb_clk_base =
++		syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
++	if (IS_ERR(nb_clk_base))
++		return -ENODEV;
++
+ 	nb_pm_base =
+ 		syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
+ 
+@@ -421,7 +469,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ 		return -EINVAL;
+ 	}
+ 
+-	dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
++	dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
+ 	if (!dvfs) {
+ 		clk_put(clk);
+ 		return -EINVAL;
+@@ -439,7 +487,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ 	armada37xx_cpufreq_avs_configure(avs_base, dvfs);
+ 	armada37xx_cpufreq_avs_setup(avs_base, dvfs);
+ 
+-	armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
++	armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
+ 	clk_put(clk);
+ 
+ 	for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
+@@ -473,7 +521,7 @@ disable_dvfs:
+ remove_opp:
+ 	/* clean-up the already added opp before leaving */
+ 	while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
+-		freq = cur_frequency / dvfs->divider[load_lvl];
++		freq = base_frequency / dvfs->divider[load_lvl];
+ 		dev_pm_opp_remove(cpu_dev, freq);
+ 	}
+ 
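[The cpufreq hunks above raise the L1 AVS level on 1 GHz/1.2 GHz parts to a
safe millivolt floor while never letting it exceed the L0 level. A
stand-alone model of that clamping; the 1108/1155 mV floors are the
driver's, the sample voltages are made up, and the AVS register encoding is
skipped for clarity.]

#include <stdio.h>

int main(void)
{
	unsigned long cpu_freq_max = 1200UL * 1000 * 1000;
	int l0_mv = 1202, l1_mv = 1050;
	int min_l1 = (cpu_freq_max >= 1200UL * 1000 * 1000) ? 1155 : 1108;

	if (min_l1 > l0_mv)
		min_l1 = l0_mv;		/* L1 must never exceed L0 */
	if (l1_mv < min_l1)
		l1_mv = min_l1;		/* raise L1 to the safe floor */

	printf("L1 raised from 1050 to %d mV\n", l1_mv);
	return 0;
}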
+diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
+index 0844fadc4be85..334f83e56120c 100644
+--- a/drivers/cpuidle/Kconfig.arm
++++ b/drivers/cpuidle/Kconfig.arm
+@@ -107,7 +107,7 @@ config ARM_TEGRA_CPUIDLE
+ 
+ config ARM_QCOM_SPM_CPUIDLE
+ 	bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
+-	depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64
++	depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
+ 	select ARM_CPU_SUSPEND
+ 	select CPU_IDLE_MULTIPLE_DRIVERS
+ 	select DT_IDLE_STATES
+diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
+index 856fb20456566..b8e75210a0e31 100644
+--- a/drivers/crypto/allwinner/Kconfig
++++ b/drivers/crypto/allwinner/Kconfig
+@@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
+ config CRYPTO_DEV_SUN8I_CE_HASH
+ 	bool "Enable support for hash on sun8i-ce"
+ 	depends on CRYPTO_DEV_SUN8I_CE
+-	select MD5
+-	select SHA1
+-	select SHA256
+-	select SHA512
++	select CRYPTO_MD5
++	select CRYPTO_SHA1
++	select CRYPTO_SHA256
++	select CRYPTO_SHA512
+ 	help
+ 	  Say y to enable support for hash algorithms.
+ 
+@@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
+ config CRYPTO_DEV_SUN8I_SS_HASH
+ 	bool "Enable support for hash on sun8i-ss"
+ 	depends on CRYPTO_DEV_SUN8I_SS
+-	select MD5
+-	select SHA1
+-	select SHA256
++	select CRYPTO_MD5
++	select CRYPTO_SHA1
++	select CRYPTO_SHA256
+ 	help
+ 	  Say y to enable support for hash algorithms.
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+index 11cbcbc83a7b6..64446b86c927f 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+@@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
+ 	bf = (__le32 *)pad;
+ 
+ 	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
+-	if (!result)
++	if (!result) {
++		kfree(pad);
+ 		return -ENOMEM;
++	}
+ 
+ 	for (i = 0; i < MAX_SG; i++) {
+ 		rctx->t_dst[i].addr = 0;
+@@ -435,11 +437,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
+ 	dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ 	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+ 
+-	kfree(pad);
+-
+ 	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+-	kfree(result);
+ theend:
++	kfree(pad);
++	kfree(result);
+ 	crypto_finalize_hash_request(engine, breq, err);
+ 	return 0;
+ }
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+index 08a1473b21457..3191527928e41 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+@@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ 	dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
+ 	if (dma_mapping_error(ss->dev, dma_iv)) {
+ 		dev_err(ss->dev, "Cannot DMA MAP IV\n");
+-		return -EFAULT;
++		err = -EFAULT;
++		goto err_free;
+ 	}
+ 
+ 	dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
+@@ -167,6 +168,7 @@ err_iv:
+ 		memcpy(ctx->seed, d + dlen, ctx->slen);
+ 	}
+ 	memzero_explicit(d, todo);
++err_free:
+ 	kfree(d);
+ 
+ 	return err;
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index cb9b4c4e371ed..8fd43c1acac13 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -150,6 +150,9 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+ 
+ 	sev = psp->sev_data;
+ 
++	if (data && WARN_ON_ONCE(!virt_addr_valid(data)))
++		return -EINVAL;
++
+ 	/* Get the physical address of the command buffer */
+ 	phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+ 	phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
+index 5e697a90ea7f4..bcb81fef42118 100644
+--- a/drivers/crypto/ccp/tee-dev.c
++++ b/drivers/crypto/ccp/tee-dev.c
+@@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
+ 	if (!start_addr)
+ 		return -ENOMEM;
+ 
++	memset(start_addr, 0x0, ring_size);
+ 	rb_mgr->ring_start = start_addr;
+ 	rb_mgr->ring_size = ring_size;
+ 	rb_mgr->ring_pa = __psp_pa(start_addr);
+@@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
+ 			  void *buf, size_t len, struct tee_ring_cmd **resp)
+ {
+ 	struct tee_ring_cmd *cmd;
+-	u32 rptr, wptr;
+ 	int nloop = 1000, ret = 0;
++	u32 rptr;
+ 
+ 	*resp = NULL;
+ 
+ 	mutex_lock(&tee->rb_mgr.mutex);
+ 
+-	wptr = tee->rb_mgr.wptr;
+-
+-	/* Check if ring buffer is full */
++	/* Loop until empty entry found in ring buffer */
+ 	do {
++		/* Get pointer to ring buffer command entry */
++		cmd = (struct tee_ring_cmd *)
++			(tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
++
+ 		rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
+ 
+-		if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
++		/* Check if ring buffer is full or command entry is waiting
++		 * for response from TEE
++		 */
++		if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
++		      cmd->flag == CMD_WAITING_FOR_RESPONSE))
+ 			break;
+ 
+-		dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+-			 rptr, wptr);
++		dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
++			rptr, tee->rb_mgr.wptr);
+ 
+-		/* Wait if ring buffer is full */
++		/* Wait if ring buffer is full or TEE is processing data */
+ 		mutex_unlock(&tee->rb_mgr.mutex);
+ 		schedule_timeout_interruptible(msecs_to_jiffies(10));
+ 		mutex_lock(&tee->rb_mgr.mutex);
+ 
+ 	} while (--nloop);
+ 
+-	if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
+-		dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+-			rptr, wptr);
++	if (!nloop &&
++	    (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
++	     cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
++		dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
++			rptr, tee->rb_mgr.wptr, cmd->flag);
+ 		ret = -EBUSY;
+ 		goto unlock;
+ 	}
+ 
+-	/* Pointer to empty data entry in ring buffer */
+-	cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
++	/* Do not submit command if PSP got disabled while processing any
++	 * command in another thread
++	 */
++	if (psp_dead) {
++		ret = -EBUSY;
++		goto unlock;
++	}
+ 
+ 	/* Write command data into ring buffer */
+ 	cmd->cmd_id = cmd_id;
+@@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
+ 	memset(&cmd->buf[0], 0, sizeof(cmd->buf));
+ 	memcpy(&cmd->buf[0], buf, len);
+ 
++	/* Indicate driver is waiting for response */
++	cmd->flag = CMD_WAITING_FOR_RESPONSE;
++
+ 	/* Update local copy of write pointer */
+ 	tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
+ 	if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
+@@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ 		return ret;
+ 
+ 	ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
+-	if (ret)
++	if (ret) {
++		resp->flag = CMD_RESPONSE_TIMEDOUT;
+ 		return ret;
++	}
+ 
+ 	memcpy(buf, &resp->buf[0], len);
+ 	*status = resp->status;
+ 
++	resp->flag = CMD_RESPONSE_COPIED;
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(psp_tee_process_cmd);
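[The new flag field gives each ring entry a small state machine so a slot is
not reused while the TEE may still write a response into it: INVALID on
allocation, WAITING on submit, then COPIED or TIMEDOUT on completion. A
stand-alone model of the states and of the busy test tee_submit_cmd() now
applies to each entry.]

#include <stdio.h>

enum cmd_resp_state {
	CMD_RESPONSE_INVALID,
	CMD_WAITING_FOR_RESPONSE,
	CMD_RESPONSE_TIMEDOUT,
	CMD_RESPONSE_COPIED,
};

/* As in the patch, only an entry still waiting for the TEE blocks reuse. */
static int slot_free(enum cmd_resp_state flag)
{
	return flag != CMD_WAITING_FOR_RESPONSE;
}

int main(void)
{
	enum cmd_resp_state flag = CMD_RESPONSE_INVALID;

	flag = CMD_WAITING_FOR_RESPONSE;	/* command submitted */
	printf("while waiting, free? %d\n", slot_free(flag));

	flag = CMD_RESPONSE_COPIED;		/* response consumed */
	printf("after copy,   free? %d\n", slot_free(flag));
	return 0;
}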
+diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
+index f099601121150..49d26158b71e3 100644
+--- a/drivers/crypto/ccp/tee-dev.h
++++ b/drivers/crypto/ccp/tee-dev.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: MIT */
+ /*
+- * Copyright 2019 Advanced Micro Devices, Inc.
++ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
+  *
+  * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+  * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+@@ -18,7 +18,7 @@
+ #include <linux/mutex.h>
+ 
+ #define TEE_DEFAULT_TIMEOUT		10
+-#define MAX_BUFFER_SIZE			992
++#define MAX_BUFFER_SIZE			988
+ 
+ /**
+  * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
+@@ -81,6 +81,20 @@ enum tee_cmd_state {
+ 	TEE_CMD_STATE_COMPLETED,
+ };
+ 
++/**
++ * enum cmd_resp_state - TEE command's response status maintained by driver
++ * @CMD_RESPONSE_INVALID:      initial state when no command is written to ring
++ * @CMD_WAITING_FOR_RESPONSE:  driver waiting for response from TEE
++ * @CMD_RESPONSE_TIMEDOUT:     failed to get response from TEE
++ * @CMD_RESPONSE_COPIED:       driver has copied response from TEE
++ */
++enum cmd_resp_state {
++	CMD_RESPONSE_INVALID,
++	CMD_WAITING_FOR_RESPONSE,
++	CMD_RESPONSE_TIMEDOUT,
++	CMD_RESPONSE_COPIED,
++};
++
+ /**
+  * struct tee_ring_cmd - Structure of the command buffer in TEE ring
+  * @cmd_id:      refers to &enum tee_cmd_id. Command id for the ring buffer
+@@ -91,6 +105,7 @@ enum tee_cmd_state {
+  * @pdata:       private data (currently unused)
+  * @res1:        reserved region
+  * @buf:         TEE command specific buffer
++ * @flag:	 refers to &enum cmd_resp_state
+  */
+ struct tee_ring_cmd {
+ 	u32 cmd_id;
+@@ -100,6 +115,7 @@ struct tee_ring_cmd {
+ 	u64 pdata;
+ 	u32 res1[2];
+ 	u8 buf[MAX_BUFFER_SIZE];
++	u32 flag;
+ 
+ 	/* Total size: 1024 bytes */
+ } __packed;
+diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
+index f5a336634daa6..405ff957b8370 100644
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -769,13 +769,14 @@ static inline void create_wreq(struct chcr_context *ctx,
+ 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	unsigned int tx_channel_id, rx_channel_id;
+ 	unsigned int txqidx = 0, rxqidx = 0;
+-	unsigned int qid, fid;
++	unsigned int qid, fid, portno;
+ 
+ 	get_qidxs(req, &txqidx, &rxqidx);
+ 	qid = u_ctx->lldi.rxq_ids[rxqidx];
+ 	fid = u_ctx->lldi.rxq_ids[0];
++	portno = rxqidx / ctx->rxq_perchan;
+ 	tx_channel_id = txqidx / ctx->txq_perchan;
+-	rx_channel_id = rxqidx / ctx->rxq_perchan;
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
+ 
+ 
+ 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
+@@ -806,6 +807,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
+ 	struct chcr_context *ctx = c_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ 	struct sk_buff *skb = NULL;
+ 	struct chcr_wr *chcr_req;
+@@ -822,6 +824,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
+ 	struct adapter *adap = padap(ctx->dev);
+ 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
+ 			      reqctx->dst_ofst);
+ 	dst_size = get_space_for_phys_dsgl(nents);
+@@ -1580,6 +1583,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
+ 	int error = 0;
+ 	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
+ 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
+ 				param->sg_len) <= SGE_MAX_WR_LEN;
+@@ -2438,6 +2442,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ {
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	struct chcr_context *ctx = a_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+@@ -2457,6 +2462,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ 	struct adapter *adap = padap(ctx->dev);
+ 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	if (req->cryptlen == 0)
+ 		return NULL;
+ 
+@@ -2710,9 +2716,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
+ 	struct dsgl_walk dsgl_walk;
+ 	unsigned int authsize = crypto_aead_authsize(tfm);
+ 	struct chcr_context *ctx = a_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	u32 temp;
+ 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	dsgl_walk_init(&dsgl_walk, phys_cpl);
+ 	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
+ 	temp = req->assoclen + req->cryptlen +
+@@ -2752,9 +2760,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
+ 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
+ 	struct chcr_context *ctx = c_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	struct dsgl_walk dsgl_walk;
+ 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	dsgl_walk_init(&dsgl_walk, phys_cpl);
+ 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
+ 			 reqctx->dst_ofst);
+@@ -2958,6 +2968,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ {
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	struct chcr_context *ctx = a_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+@@ -2967,6 +2978,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ 	unsigned int tag_offset = 0, auth_offset = 0;
+ 	unsigned int assoclen;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
++
+ 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ 		assoclen = req->assoclen - 8;
+ 	else
+@@ -3127,6 +3140,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ {
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	struct chcr_context *ctx = a_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+ 	struct sk_buff *skb = NULL;
+@@ -3143,6 +3157,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ 	struct adapter *adap = padap(ctx->dev);
+ 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ 		assoclen = req->assoclen - 8;
+ 
+diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
+index b6b25d994af38..2ef312866338f 100644
+--- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
++++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
+@@ -1649,8 +1649,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
+ 
+ 	/* Initialize crypto engine */
+ 	aes_dev->engine = crypto_engine_alloc_init(dev, true);
+-	if (!aes_dev->engine)
++	if (!aes_dev->engine) {
++		rc = -ENOMEM;
+ 		goto list_del;
++	}
+ 
+ 	rc = crypto_engine_start(aes_dev->engine);
+ 	if (rc) {
+diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
+index c4b97b4160e9b..322c51a6936f3 100644
+--- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
++++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
+@@ -1220,8 +1220,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
+ 
+ 	/* Initialize crypto engine */
+ 	hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
+-	if (!hcu_dev->engine)
++	if (!hcu_dev->engine) {
++		rc = -ENOMEM;
+ 		goto list_del;
++	}
+ 
+ 	rc = crypto_engine_start(hcu_dev->engine);
+ 	if (rc) {
+diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+index 1d1532e8fb6d9..067ca5e17d387 100644
+--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (ret)
+ 		goto out_err_free_reg;
+ 
+-	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+-
+ 	ret = adf_dev_init(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_shutdown;
+ 
++	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
++
+ 	ret = adf_dev_start(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_stop;
+diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+index 04742a6d91cae..51ea88c0b17d7 100644
+--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (ret)
+ 		goto out_err_free_reg;
+ 
+-	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+-
+ 	ret = adf_dev_init(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_shutdown;
+ 
++	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
++
+ 	ret = adf_dev_start(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_stop;
+diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
+index c458534635306..e3ad5587be49e 100644
+--- a/drivers/crypto/qat/qat_common/adf_isr.c
++++ b/drivers/crypto/qat/qat_common/adf_isr.c
+@@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+ 
+ 	ret = adf_isr_alloc_msix_entry_table(accel_dev);
+ 	if (ret)
+-		return ret;
+-	if (adf_enable_msix(accel_dev))
+ 		goto err_out;
+ 
+-	if (adf_setup_bh(accel_dev))
+-		goto err_out;
++	ret = adf_enable_msix(accel_dev);
++	if (ret)
++		goto err_free_msix_table;
+ 
+-	if (adf_request_irqs(accel_dev))
+-		goto err_out;
++	ret = adf_setup_bh(accel_dev);
++	if (ret)
++		goto err_disable_msix;
++
++	ret = adf_request_irqs(accel_dev);
++	if (ret)
++		goto err_cleanup_bh;
+ 
+ 	return 0;
++
++err_cleanup_bh:
++	adf_cleanup_bh(accel_dev);
++
++err_disable_msix:
++	adf_disable_msix(&accel_dev->accel_pci_dev);
++
++err_free_msix_table:
++	adf_isr_free_msix_entry_table(accel_dev);
++
+ err_out:
+-	adf_isr_resource_free(accel_dev);
+-	return -EFAULT;
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
+diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
+index 888c1e0472952..8ba28409fb74b 100644
+--- a/drivers/crypto/qat/qat_common/adf_transport.c
++++ b/drivers/crypto/qat/qat_common/adf_transport.c
+@@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
+ 		dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
+ 		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+ 				  ring->base_addr, ring->dma_addr);
++		ring->base_addr = NULL;
+ 		return -EFAULT;
+ 	}
+ 
+diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
+index 38d316a42ba6f..888388acb6bd3 100644
+--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
++++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
+@@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+ 		goto err_out;
+ 
+ 	if (adf_setup_pf2vf_bh(accel_dev))
+-		goto err_out;
++		goto err_disable_msi;
+ 
+ 	if (adf_setup_bh(accel_dev))
+-		goto err_out;
++		goto err_cleanup_pf2vf_bh;
+ 
+ 	if (adf_request_msi_irq(accel_dev))
+-		goto err_out;
++		goto err_cleanup_bh;
+ 
+ 	return 0;
++
++err_cleanup_bh:
++	adf_cleanup_bh(accel_dev);
++
++err_cleanup_pf2vf_bh:
++	adf_cleanup_pf2vf_bh(accel_dev);
++
++err_disable_msi:
++	adf_disable_msi(accel_dev);
++
+ err_out:
+-	adf_vf_isr_resource_free(accel_dev);
+ 	return -EFAULT;
+ }
+ EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+index c972554a755e7..29999da716cc9 100644
+--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (ret)
+ 		goto out_err_free_reg;
+ 
+-	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+-
+ 	ret = adf_dev_init(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_shutdown;
+ 
++	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
++
+ 	ret = adf_dev_start(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_stop;
+diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
+index d7b1628fb4848..b0f0502a5bb0f 100644
+--- a/drivers/crypto/sa2ul.c
++++ b/drivers/crypto/sa2ul.c
+@@ -1146,8 +1146,10 @@ static int sa_run(struct sa_req *req)
+ 		mapped_sg->sgt.sgl = src;
+ 		mapped_sg->sgt.orig_nents = src_nents;
+ 		ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
+-		if (ret)
++		if (ret) {
++			kfree(rxd);
+ 			return ret;
++		}
+ 
+ 		mapped_sg->dir = dir_src;
+ 		mapped_sg->mapped = true;
+@@ -1155,8 +1157,10 @@ static int sa_run(struct sa_req *req)
+ 		mapped_sg->sgt.sgl = req->src;
+ 		mapped_sg->sgt.orig_nents = sg_nents;
+ 		ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
+-		if (ret)
++		if (ret) {
++			kfree(rxd);
+ 			return ret;
++		}
+ 
+ 		mapped_sg->dir = dir_src;
+ 		mapped_sg->mapped = true;
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index bf3047896e41a..59ba59bea0f54 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -387,7 +387,7 @@ static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
+ 	devfreq->previous_freq = new_freq;
+ 
+ 	if (devfreq->suspend_freq)
+-		devfreq->resume_freq = cur_freq;
++		devfreq->resume_freq = new_freq;
+ 
+ 	return err;
+ }
+@@ -821,7 +821,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 
+ 	if (devfreq->profile->timer < 0
+ 		|| devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
+-		goto err_out;
++		mutex_unlock(&devfreq->lock);
++		goto err_dev;
+ 	}
+ 
+ 	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index 3f14dffb96696..5dd19dbd67a3b 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -237,6 +237,7 @@ config INTEL_STRATIX10_RSU
+ config QCOM_SCM
+ 	bool
+ 	depends on ARM || ARM64
++	depends on HAVE_ARM_SMCCC
+ 	select RESET_CONTROLLER
+ 
+ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
+diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
+index 497c13ba98d67..d111833364ba4 100644
+--- a/drivers/firmware/qcom_scm-smc.c
++++ b/drivers/firmware/qcom_scm-smc.c
+@@ -77,8 +77,10 @@ static void __scm_smc_do(const struct arm_smccc_args *smc,
+ 	}  while (res->a0 == QCOM_SCM_V2_EBUSY);
+ }
+ 
+-int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+-		 struct qcom_scm_res *res, bool atomic)
++
++int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
++		   enum qcom_scm_convention qcom_convention,
++		   struct qcom_scm_res *res, bool atomic)
+ {
+ 	int arglen = desc->arginfo & 0xf;
+ 	int i;
+@@ -87,9 +89,8 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ 	size_t alloc_len;
+ 	gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
+ 	u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
+-	u32 qcom_smccc_convention =
+-			(qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
+-			ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
++	u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
++				    ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
+ 	struct arm_smccc_res smc_res;
+ 	struct arm_smccc_args smc = {0};
+ 
+@@ -148,4 +149,5 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ 	}
+ 
+ 	return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
++
+ }
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index f57779fc7ee93..9ac84b5d6ce00 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -113,14 +113,10 @@ static void qcom_scm_clk_disable(void)
+ 	clk_disable_unprepare(__scm->bus_clk);
+ }
+ 
+-static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+-					u32 cmd_id);
++enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
++static DEFINE_SPINLOCK(scm_query_lock);
+ 
+-enum qcom_scm_convention qcom_scm_convention;
+-static bool has_queried __read_mostly;
+-static DEFINE_SPINLOCK(query_lock);
+-
+-static void __query_convention(void)
++static enum qcom_scm_convention __get_convention(void)
+ {
+ 	unsigned long flags;
+ 	struct qcom_scm_desc desc = {
+@@ -133,36 +129,50 @@ static void __query_convention(void)
+ 		.owner = ARM_SMCCC_OWNER_SIP,
+ 	};
+ 	struct qcom_scm_res res;
++	enum qcom_scm_convention probed_convention;
+ 	int ret;
++	bool forced = false;
+ 
+-	spin_lock_irqsave(&query_lock, flags);
+-	if (has_queried)
+-		goto out;
++	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
++		return qcom_scm_convention;
+ 
+-	qcom_scm_convention = SMC_CONVENTION_ARM_64;
+-	// Device isn't required as there is only one argument - no device
+-	// needed to dma_map_single to secure world
+-	ret = scm_smc_call(NULL, &desc, &res, true);
++	/*
++	 * Device isn't required as there is only one argument - no device
++	 * needed to dma_map_single to secure world
++	 */
++	probed_convention = SMC_CONVENTION_ARM_64;
++	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+ 	if (!ret && res.result[0] == 1)
+-		goto out;
++		goto found;
++
++	/*
++	 * Some SC7180 firmware versions didn't implement the
++	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
++	 * ARM_64 calling convention on them. Luckily we don't make any
++	 * early calls into the firmware on these SoCs, so the device pointer
++	 * will be valid here to check if the compatible matches.
++	 */
++	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
++		forced = true;
++		goto found;
++	}
+ 
+-	qcom_scm_convention = SMC_CONVENTION_ARM_32;
+-	ret = scm_smc_call(NULL, &desc, &res, true);
++	probed_convention = SMC_CONVENTION_ARM_32;
++	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+ 	if (!ret && res.result[0] == 1)
+-		goto out;
+-
+-	qcom_scm_convention = SMC_CONVENTION_LEGACY;
+-out:
+-	has_queried = true;
+-	spin_unlock_irqrestore(&query_lock, flags);
+-	pr_info("qcom_scm: convention: %s\n",
+-		qcom_scm_convention_names[qcom_scm_convention]);
+-}
++		goto found;
++
++	probed_convention = SMC_CONVENTION_LEGACY;
++found:
++	spin_lock_irqsave(&scm_query_lock, flags);
++	if (probed_convention != qcom_scm_convention) {
++		qcom_scm_convention = probed_convention;
++		pr_info("qcom_scm: convention: %s%s\n",
++			qcom_scm_convention_names[qcom_scm_convention],
++			forced ? " (forced)" : "");
++	}
++	spin_unlock_irqrestore(&scm_query_lock, flags);
+ 
+-static inline enum qcom_scm_convention __get_convention(void)
+-{
+-	if (unlikely(!has_queried))
+-		__query_convention();
+ 	return qcom_scm_convention;
+ }
+ 
+@@ -219,8 +229,8 @@ static int qcom_scm_call_atomic(struct device *dev,
+ 	}
+ }
+ 
+-static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+-					u32 cmd_id)
++static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
++					 u32 cmd_id)
+ {
+ 	int ret;
+ 	struct qcom_scm_desc desc = {
+@@ -247,7 +257,7 @@ static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ 
+ 	ret = qcom_scm_call(dev, &desc, &res);
+ 
+-	return ret ? : res.result[0];
++	return ret ? false : !!res.result[0];
+ }
+ 
+ /**
+@@ -585,9 +595,8 @@ bool qcom_scm_pas_supported(u32 peripheral)
+ 	};
+ 	struct qcom_scm_res res;
+ 
+-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+-					   QCOM_SCM_PIL_PAS_IS_SUPPORTED);
+-	if (ret <= 0)
++	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
++					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
+ 		return false;
+ 
+ 	ret = qcom_scm_call(__scm->dev, &desc, &res);
+@@ -1060,17 +1069,18 @@ EXPORT_SYMBOL(qcom_scm_ice_set_key);
+  */
+ bool qcom_scm_hdcp_available(void)
+ {
++	bool avail;
+ 	int ret = qcom_scm_clk_enable();
+ 
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
++	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+ 						QCOM_SCM_HDCP_INVOKE);
+ 
+ 	qcom_scm_clk_disable();
+ 
+-	return ret > 0;
++	return avail;
+ }
+ EXPORT_SYMBOL(qcom_scm_hdcp_available);
+ 
+@@ -1242,7 +1252,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 	__scm = scm;
+ 	__scm->dev = &pdev->dev;
+ 
+-	__query_convention();
++	__get_convention();
+ 
+ 	/*
+ 	 * If requested enable "download mode", from this point on warmboot
+diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
+index 95cd1ac30ab0b..632fe31424621 100644
+--- a/drivers/firmware/qcom_scm.h
++++ b/drivers/firmware/qcom_scm.h
+@@ -61,8 +61,11 @@ struct qcom_scm_res {
+ };
+ 
+ #define SCM_SMC_FNID(s, c)	((((s) & 0xFF) << 8) | ((c) & 0xFF))
+-extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+-			struct qcom_scm_res *res, bool atomic);
++extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
++			  enum qcom_scm_convention qcom_convention,
++			  struct qcom_scm_res *res, bool atomic);
++#define scm_smc_call(dev, desc, res, atomic) \
++	__scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
+ 
+ #define SCM_LEGACY_FNID(s, c)	(((s) << 10) | ((c) & 0x3ff))
+ extern int scm_legacy_call_atomic(struct device *dev,
+diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
+index 7eb9958662ddd..83082e2f2e441 100644
+--- a/drivers/firmware/xilinx/zynqmp.c
++++ b/drivers/firmware/xilinx/zynqmp.c
+@@ -2,7 +2,7 @@
+ /*
+  * Xilinx Zynq MPSoC Firmware layer
+  *
+- *  Copyright (C) 2014-2020 Xilinx, Inc.
++ *  Copyright (C) 2014-2021 Xilinx, Inc.
+  *
+  *  Michal Simek <michal.simek@xilinx.com>
+  *  Davorin Mista <davorin.mista@aggios.com>
+@@ -1280,12 +1280,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
+ static int zynqmp_firmware_remove(struct platform_device *pdev)
+ {
+ 	struct pm_api_feature_data *feature_data;
++	struct hlist_node *tmp;
+ 	int i;
+ 
+ 	mfd_remove_devices(&pdev->dev);
+ 	zynqmp_pm_api_debugfs_exit();
+ 
+-	hash_for_each(pm_api_features_map, i, feature_data, hentry) {
++	hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
+ 		hash_del(&feature_data->hentry);
+ 		kfree(feature_data);
+ 	}
+diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
+index 27defa98092dd..fee4d0abf6bfe 100644
+--- a/drivers/fpga/xilinx-spi.c
++++ b/drivers/fpga/xilinx-spi.c
+@@ -233,25 +233,19 @@ static int xilinx_spi_probe(struct spi_device *spi)
+ 
+ 	/* PROGRAM_B is active low */
+ 	conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
+-	if (IS_ERR(conf->prog_b)) {
+-		dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
+-			PTR_ERR(conf->prog_b));
+-		return PTR_ERR(conf->prog_b);
+-	}
++	if (IS_ERR(conf->prog_b))
++		return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
++				     "Failed to get PROGRAM_B gpio\n");
+ 
+ 	conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
+-	if (IS_ERR(conf->init_b)) {
+-		dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
+-			PTR_ERR(conf->init_b));
+-		return PTR_ERR(conf->init_b);
+-	}
++	if (IS_ERR(conf->init_b))
++		return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
++				     "Failed to get INIT_B gpio\n");
+ 
+ 	conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
+-	if (IS_ERR(conf->done)) {
+-		dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
+-			PTR_ERR(conf->done));
+-		return PTR_ERR(conf->done);
+-	}
++	if (IS_ERR(conf->done))
++		return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
++				     "Failed to get DONE gpio\n");
+ 
+ 	mgr = devm_fpga_mgr_create(&spi->dev,
+ 				   "Xilinx Slave Serial FPGA Manager",
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index f753e04fee99a..a2ac44cc2a6da 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -1355,7 +1355,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+ 			}
+ 		}
+ 	}
+-	return r;
++	return 0;
+ }
+ 
+ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+index 94b069630db36..b4971e90b98cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+@@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
+ 	/* Check if we have an idle VMID */
+ 	i = 0;
+ 	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+-		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
++		/* Don't use per-engine and per-process VMIDs at the same time */
++		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
++			NULL : ring;
++
++		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
+ 		if (!fences[i])
+ 			break;
+ 		++i;
+@@ -281,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
+ 	if (updates && (*id)->flushed_updates &&
+ 	    updates->context == (*id)->flushed_updates->context &&
+ 	    !dma_fence_is_later(updates, (*id)->flushed_updates))
+-	    updates = NULL;
++		updates = NULL;
+ 
+ 	if ((*id)->owner != vm->immediate.fence_context ||
+ 	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
+@@ -290,6 +294,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
+ 	     !dma_fence_is_signaled((*id)->last_flush))) {
+ 		struct dma_fence *tmp;
+ 
++		/* Don't use per-engine and per-process VMIDs at the same time */
++		if (adev->vm_manager.concurrent_flush)
++			ring = NULL;
++
+ 		/* to prevent one context starved by another context */
+ 		(*id)->pd_gpu_addr = 0;
+ 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
+@@ -365,12 +373,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
+ 		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+ 			needs_flush = true;
+ 
+-		/* Concurrent flushes are only possible starting with Vega10 and
+-		 * are broken on Navi10 and Navi14.
+-		 */
+-		if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
+-				    adev->asic_type == CHIP_NAVI10 ||
+-				    adev->asic_type == CHIP_NAVI14))
++		if (needs_flush && !adev->vm_manager.concurrent_flush)
+ 			continue;
+ 
+ 		/* Good, we can use this VMID. Remember this submission as
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+index 19c0a3655228f..82e9ecf843523 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+@@ -519,8 +519,10 @@ static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
+ 	pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
+ 								GFP_KERNEL);
+ 
+-	if (!pmu_entry->pmu.attr_groups)
++	if (!pmu_entry->pmu.attr_groups) {
++		ret = -ENOMEM;
+ 		goto err_attr_group;
++	}
+ 
+ 	snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
+ 				adev_to_drm(pmu_entry->adev)->primary->index);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 326dae31b675d..a566bbe26bdd8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -92,13 +92,13 @@ struct amdgpu_prt_cb {
+ static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
+ {
+ 	mutex_lock(&vm->eviction_lock);
+-	vm->saved_flags = memalloc_nofs_save();
++	vm->saved_flags = memalloc_noreclaim_save();
+ }
+ 
+ static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+ {
+ 	if (mutex_trylock(&vm->eviction_lock)) {
+-		vm->saved_flags = memalloc_nofs_save();
++		vm->saved_flags = memalloc_noreclaim_save();
+ 		return 1;
+ 	}
+ 	return 0;
+@@ -106,7 +106,7 @@ static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+ 
+ static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
+ {
+-	memalloc_nofs_restore(vm->saved_flags);
++	memalloc_noreclaim_restore(vm->saved_flags);
+ 	mutex_unlock(&vm->eviction_lock);
+ }
+ 
+@@ -3147,6 +3147,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+ {
+ 	unsigned i;
+ 
++	/* Concurrent flushes are only possible starting with Vega10 and
++	 * are broken on Navi10 and Navi14.
++	 */
++	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
++					      adev->asic_type == CHIP_NAVI10 ||
++					      adev->asic_type == CHIP_NAVI14);
+ 	amdgpu_vmid_mgr_init(adev);
+ 
+ 	adev->vm_manager.fence_context =
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 976a12e5a8b92..4e140288159cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -331,6 +331,7 @@ struct amdgpu_vm_manager {
+ 	/* Handling of VMIDs */
+ 	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
+ 	unsigned int				first_kfd_vmid;
++	bool					concurrent_flush;
+ 
+ 	/* Handling of VM fences */
+ 	u64					fence_context;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 2d832fc231191..421d6069c5096 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
++MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
+@@ -243,10 +244,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
+ 			chip_name = "polaris10";
+ 		break;
+ 	case CHIP_POLARIS12:
+-		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
++		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
+ 			chip_name = "polaris12_k";
+-		else
+-			chip_name = "polaris12";
++		} else {
++			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
++			/* Polaris12 32-bit ASICs need a special MC firmware */
++			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
++				chip_name = "polaris12_32";
++			else
++				chip_name = "polaris12";
++		}
+ 		break;
+ 	case CHIP_FIJI:
+ 	case CHIP_CARRIZO:
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index def583916294d..9b844e9fb16ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -584,6 +584,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
+ 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ 			VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
+ 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
++
++	/* VCN global tiling registers */
++	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
++		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ }
+ 
+ static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+index 66bbca61e3ef5..9318936aa8054 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+@@ -20,6 +20,10 @@
+  * OTHER DEALINGS IN THE SOFTWARE.
+  */
+ 
++#include <linux/kconfig.h>
++
++#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
++
+ #include <linux/printk.h>
+ #include <linux/device.h>
+ #include <linux/slab.h>
+@@ -355,3 +359,5 @@ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
+ 
+ 	return 0;
+ }
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
+index dd23d9fdf6a82..afd420b01a0c2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
+@@ -23,7 +23,9 @@
+ #ifndef __KFD_IOMMU_H__
+ #define __KFD_IOMMU_H__
+ 
+-#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
++#include <linux/kconfig.h>
++
++#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
+ 
+ #define KFD_SUPPORT_IOMMU_V2
+ 
+@@ -46,6 +48,9 @@ static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
+ }
+ static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
+ {
++#if IS_MODULE(CONFIG_AMD_IOMMU_V2)
++	WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD");
++#endif
+ 	return 0;
+ }
+ 
+@@ -73,6 +78,6 @@ static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
+ 	return 0;
+ }
+ 
+-#endif /* defined(CONFIG_AMD_IOMMU_V2) */
++#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */
+ 
+ #endif /* __KFD_IOMMU_H__ */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 29ca1708458c1..71e07ebc8f88a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3850,6 +3850,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
+ 	scaling_info->src_rect.x = state->src_x >> 16;
+ 	scaling_info->src_rect.y = state->src_y >> 16;
+ 
++	/*
++	 * For reasons we don't (yet) fully understand, a non-zero
++	 * src_y coordinate into an NV12 buffer can cause a
++	 * system hang. To avoid hangs (and maybe be overly cautious),
++	 * let's reject both non-zero src_x and src_y.
++	 *
++	 * We currently know of only one use-case to reproduce a
++	 * scenario with non-zero src_x and src_y for NV12, which
++	 * is to gesture the YouTube Android app into full screen
++	 * on ChromeOS.
++	 */
++	if (state->fb &&
++	    state->fb->format->format == DRM_FORMAT_NV12 &&
++	    (scaling_info->src_rect.x != 0 ||
++	     scaling_info->src_rect.y != 0))
++		return -EINVAL;
++
+ 	scaling_info->src_rect.width = state->src_w >> 16;
+ 	if (scaling_info->src_rect.width == 0)
+ 		return -EINVAL;
+@@ -9278,7 +9295,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ 
+ 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
+ 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
+-	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
++	if (!new_cursor_state || !new_primary_state ||
++	    !new_cursor_state->fb || !new_primary_state->fb) {
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index bd0101013ec89..440bf0a0e12ae 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1603,6 +1603,7 @@ static bool dc_link_construct(struct dc_link *link,
+ 	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ 
+ 	DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
++	kfree(info);
+ 	return true;
+ device_tag_fail:
+ 	link->link_enc->funcs->destroy(&link->link_enc);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+index 4e87e70237e3d..874b132fe1d78 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
+ 	const struct dce_abm_shift *abm_shift,
+ 	const struct dce_abm_mask *abm_mask)
+ {
+-	struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
++	struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
+ 
+ 	if (abm_dce == NULL) {
+ 		BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+index ddc789daf3b13..09d4cb5c97b67 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+@@ -1049,7 +1049,7 @@ struct dmcu *dcn10_dmcu_create(
+ 	const struct dce_dmcu_shift *dmcu_shift,
+ 	const struct dce_dmcu_mask *dmcu_mask)
+ {
+-	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
++	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ 
+ 	if (dmcu_dce == NULL) {
+ 		BREAK_TO_DEBUGGER();
+@@ -1070,7 +1070,7 @@ struct dmcu *dcn20_dmcu_create(
+ 	const struct dce_dmcu_shift *dmcu_shift,
+ 	const struct dce_dmcu_mask *dmcu_mask)
+ {
+-	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
++	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ 
+ 	if (dmcu_dce == NULL) {
+ 		BREAK_TO_DEBUGGER();
+@@ -1091,7 +1091,7 @@ struct dmcu *dcn21_dmcu_create(
+ 	const struct dce_dmcu_shift *dmcu_shift,
+ 	const struct dce_dmcu_mask *dmcu_mask)
+ {
+-	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
++	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ 
+ 	if (dmcu_dce == NULL) {
+ 		BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+index 62cc2651e00c1..8774406120fc1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+@@ -112,7 +112,7 @@ struct dccg *dccg2_create(
+ 	const struct dccg_shift *dccg_shift,
+ 	const struct dccg_mask *dccg_mask)
+ {
+-	struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
++	struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
+ 	struct dccg *base;
+ 
+ 	if (dccg_dcn == NULL) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index 2c2dbfcd89571..bfbc23b76cd5a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -1104,7 +1104,7 @@ struct dpp *dcn20_dpp_create(
+ 	uint32_t inst)
+ {
+ 	struct dcn20_dpp *dpp =
+-		kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
+ 
+ 	if (!dpp)
+ 		return NULL;
+@@ -1122,7 +1122,7 @@ struct input_pixel_processor *dcn20_ipp_create(
+ 	struct dc_context *ctx, uint32_t inst)
+ {
+ 	struct dcn10_ipp *ipp =
+-		kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+ 
+ 	if (!ipp) {
+ 		BREAK_TO_DEBUGGER();
+@@ -1139,7 +1139,7 @@ struct output_pixel_processor *dcn20_opp_create(
+ 	struct dc_context *ctx, uint32_t inst)
+ {
+ 	struct dcn20_opp *opp =
+-		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
+ 
+ 	if (!opp) {
+ 		BREAK_TO_DEBUGGER();
+@@ -1156,7 +1156,7 @@ struct dce_aux *dcn20_aux_engine_create(
+ 	uint32_t inst)
+ {
+ 	struct aux_engine_dce110 *aux_engine =
+-		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
++		kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+ 
+ 	if (!aux_engine)
+ 		return NULL;
+@@ -1194,7 +1194,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
+ 	uint32_t inst)
+ {
+ 	struct dce_i2c_hw *dce_i2c_hw =
+-		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
++		kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+ 
+ 	if (!dce_i2c_hw)
+ 		return NULL;
+@@ -1207,7 +1207,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
+ struct mpc *dcn20_mpc_create(struct dc_context *ctx)
+ {
+ 	struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
+-					  GFP_KERNEL);
++					  GFP_ATOMIC);
+ 
+ 	if (!mpc20)
+ 		return NULL;
+@@ -1225,7 +1225,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
+ {
+ 	int i;
+ 	struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
+-					  GFP_KERNEL);
++					  GFP_ATOMIC);
+ 
+ 	if (!hubbub)
+ 		return NULL;
+@@ -1253,7 +1253,7 @@ struct timing_generator *dcn20_timing_generator_create(
+ 		uint32_t instance)
+ {
+ 	struct optc *tgn10 =
+-		kzalloc(sizeof(struct optc), GFP_KERNEL);
++		kzalloc(sizeof(struct optc), GFP_ATOMIC);
+ 
+ 	if (!tgn10)
+ 		return NULL;
+@@ -1332,7 +1332,7 @@ static struct clock_source *dcn20_clock_source_create(
+ 	bool dp_clk_src)
+ {
+ 	struct dce110_clk_src *clk_src =
+-		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
++		kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+ 
+ 	if (!clk_src)
+ 		return NULL;
+@@ -1438,7 +1438,7 @@ struct display_stream_compressor *dcn20_dsc_create(
+ 	struct dc_context *ctx, uint32_t inst)
+ {
+ 	struct dcn20_dsc *dsc =
+-		kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
+ 
+ 	if (!dsc) {
+ 		BREAK_TO_DEBUGGER();
+@@ -1572,7 +1572,7 @@ struct hubp *dcn20_hubp_create(
+ 	uint32_t inst)
+ {
+ 	struct dcn20_hubp *hubp2 =
+-		kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
+ 
+ 	if (!hubp2)
+ 		return NULL;
+@@ -3390,7 +3390,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+ 
+ static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
+ {
+-	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
++	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
+ 
+ 	if (!pp_smu)
+ 		return pp_smu;
+@@ -4034,7 +4034,7 @@ struct resource_pool *dcn20_create_resource_pool(
+ 		struct dc *dc)
+ {
+ 	struct dcn20_resource_pool *pool =
+-		kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
+ 
+ 	if (!pool)
+ 		return NULL;
+diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+index 5e384a8a83dc2..51855a2624cf4 100644
+--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
++++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+@@ -39,7 +39,7 @@
+ #define HDCP14_KSV_SIZE 5
+ #define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE
+ 
+-static const bool hdcp_cmd_is_read[] = {
++static const bool hdcp_cmd_is_read[HDCP_MESSAGE_ID_MAX] = {
+ 	[HDCP_MESSAGE_ID_READ_BKSV] = true,
+ 	[HDCP_MESSAGE_ID_READ_RI_R0] = true,
+ 	[HDCP_MESSAGE_ID_READ_PJ] = true,
+@@ -75,7 +75,7 @@ static const bool hdcp_cmd_is_read[] = {
+ 	[HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
+ };
+ 
+-static const uint8_t hdcp_i2c_offsets[] = {
++static const uint8_t hdcp_i2c_offsets[HDCP_MESSAGE_ID_MAX] = {
+ 	[HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ 	[HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ 	[HDCP_MESSAGE_ID_READ_PJ] = 0xA,
+@@ -106,7 +106,8 @@ static const uint8_t hdcp_i2c_offsets[] = {
+ 	[HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
+ 	[HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
+ 	[HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
+-	[HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
++	[HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
++	[HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0,
+ };
+ 
+ struct protection_properties {
+@@ -184,7 +185,7 @@ static const struct protection_properties hdmi_14_protection = {
+ 	.process_transaction = hdmi_14_process_transaction
+ };
+ 
+-static const uint32_t hdcp_dpcd_addrs[] = {
++static const uint32_t hdcp_dpcd_addrs[HDCP_MESSAGE_ID_MAX] = {
+ 	[HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ 	[HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ 	[HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 42c4dbe3e3624..ec0037a213314 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -2086,6 +2086,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+ 		dev_err(smu->adev->dev,
+ 			"New power limit (%d) is over the max allowed %d\n",
+ 			limit, smu->max_power_limit);
++		ret = -EINVAL;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index e4110d6ca7b3c..bc60fc4728d70 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -67,6 +67,7 @@ config DRM_LONTIUM_LT9611UXC
+ 	depends on OF
+ 	select DRM_PANEL_BRIDGE
+ 	select DRM_KMS_HELPER
++	select DRM_MIPI_DSI
+ 	select REGMAP_I2C
+ 	help
+ 	  Driver for Lontium LT9611UXC DSI to HDMI bridge
+@@ -151,6 +152,7 @@ config DRM_SII902X
+ 	tristate "Silicon Image sii902x RGB/HDMI bridge"
+ 	depends on OF
+ 	select DRM_KMS_HELPER
++	select DRM_MIPI_DSI
+ 	select REGMAP_I2C
+ 	select I2C_MUX
+ 	select SND_SOC_HDMI_CODEC if SND_SOC
+@@ -200,6 +202,7 @@ config DRM_TOSHIBA_TC358767
+ 	tristate "Toshiba TC358767 eDP bridge"
+ 	depends on OF
+ 	select DRM_KMS_HELPER
++	select DRM_MIPI_DSI
+ 	select REGMAP_I2C
+ 	select DRM_PANEL
+ 	help
+diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
+index 024ea2a570e74..9160fd80dd704 100644
+--- a/drivers/gpu/drm/bridge/analogix/Kconfig
++++ b/drivers/gpu/drm/bridge/analogix/Kconfig
+@@ -30,6 +30,7 @@ config DRM_ANALOGIX_ANX7625
+ 	tristate "Analogix Anx7625 MIPI to DP interface support"
+ 	depends on DRM
+ 	depends on OF
++	select DRM_MIPI_DSI
+ 	help
+ 	  ANX7625 is an ultra-low power 4K mobile HD transmitter
+ 	  designed for portable devices. It converts MIPI/DPI to
+diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
+index 0ddc37551194e..c916f4b8907ef 100644
+--- a/drivers/gpu/drm/bridge/panel.c
++++ b/drivers/gpu/drm/bridge/panel.c
+@@ -87,6 +87,18 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
+ 
+ static void panel_bridge_detach(struct drm_bridge *bridge)
+ {
++	struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
++	struct drm_connector *connector = &panel_bridge->connector;
++
++	/*
++	 * Cleanup the connector if we know it was initialized.
++	 *
++	 * FIXME: This wouldn't be needed if the panel_bridge structure was
++	 * allocated with drmm_kzalloc(). This might be tricky since the
++	 * drm_device pointer can only be retrieved when the bridge is attached.
++	 */
++	if (connector->dev)
++		drm_connector_cleanup(connector);
+ }
+ 
+ static void panel_bridge_pre_enable(struct drm_bridge *bridge)
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 309afe61afdd5..9c75c88150569 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1154,6 +1154,7 @@ static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+ 
+ 	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
+ 	drm_dp_encode_sideband_req(&req, msg);
++	msg->path_msg = true;
+ }
+ 
+ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
+@@ -2824,15 +2825,21 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ 
+ 	req_type = txmsg->msg[0] & 0x7f;
+ 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
+-		req_type == DP_RESOURCE_STATUS_NOTIFY)
++		req_type == DP_RESOURCE_STATUS_NOTIFY ||
++		req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
+ 		hdr->broadcast = 1;
+ 	else
+ 		hdr->broadcast = 0;
+ 	hdr->path_msg = txmsg->path_msg;
+-	hdr->lct = mstb->lct;
+-	hdr->lcr = mstb->lct - 1;
+-	if (mstb->lct > 1)
+-		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
++	if (hdr->broadcast) {
++		hdr->lct = 1;
++		hdr->lcr = 6;
++	} else {
++		hdr->lct = mstb->lct;
++		hdr->lcr = mstb->lct - 1;
++	}
++
++	memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index ad59a51eab6d8..e7e1ee2aa3529 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -624,6 +624,7 @@ static void output_poll_execute(struct work_struct *work)
+ 	struct drm_connector_list_iter conn_iter;
+ 	enum drm_connector_status old_status;
+ 	bool repoll = false, changed;
++	u64 old_epoch_counter;
+ 
+ 	if (!dev->mode_config.poll_enabled)
+ 		return;
+@@ -660,8 +661,9 @@ static void output_poll_execute(struct work_struct *work)
+ 
+ 		repoll = true;
+ 
++		old_epoch_counter = connector->epoch_counter;
+ 		connector->status = drm_helper_probe_detect(connector, NULL, false);
+-		if (old_status != connector->status) {
++		if (old_epoch_counter != connector->epoch_counter) {
+ 			const char *old, *new;
+ 
+ 			/*
+@@ -690,6 +692,9 @@ static void output_poll_execute(struct work_struct *work)
+ 				      connector->base.id,
+ 				      connector->name,
+ 				      old, new);
++			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
++				      connector->base.id, connector->name,
++				      old_epoch_counter, connector->epoch_counter);
+ 
+ 			changed = true;
+ 		}
+diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
+index d1d8ee4a5f16a..57578bf28d774 100644
+--- a/drivers/gpu/drm/i915/gvt/gvt.c
++++ b/drivers/gpu/drm/i915/gvt/gvt.c
+@@ -126,7 +126,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
+ 	return true;
+ }
+ 
+-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
++static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+ {
+ 	int i, j;
+ 	struct intel_vgpu_type *type;
+@@ -144,7 +144,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+ 		gvt_vgpu_type_groups[i] = group;
+ 	}
+ 
+-	return true;
++	return 0;
+ 
+ unwind:
+ 	for (j = 0; j < i; j++) {
+@@ -152,7 +152,7 @@ unwind:
+ 		kfree(group);
+ 	}
+ 
+-	return false;
++	return -ENOMEM;
+ }
+ 
+ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+@@ -360,7 +360,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
+ 		goto out_clean_thread;
+ 
+ 	ret = intel_gvt_init_vgpu_type_groups(gvt);
+-	if (ret == false) {
++	if (ret) {
+ 		gvt_err("failed to init vgpu type groups: %d\n", ret);
+ 		goto out_clean_types;
+ 	}
+diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+index 7bb31fbee29da..fd8870edde0ed 100644
+--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
++++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+@@ -554,7 +554,7 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
+ 		height = state->src_h >> 16;
+ 		cpp = state->fb->format->cpp[0];
+ 
+-		if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
++		if (!priv->soc_info->has_osd || plane->type == DRM_PLANE_TYPE_OVERLAY)
+ 			hwdesc = &priv->dma_hwdescs->hwdesc_f0;
+ 		else
+ 			hwdesc = &priv->dma_hwdescs->hwdesc_f1;
+@@ -826,6 +826,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
+ 	const struct jz_soc_info *soc_info;
+ 	struct ingenic_drm *priv;
+ 	struct clk *parent_clk;
++	struct drm_plane *primary;
+ 	struct drm_bridge *bridge;
+ 	struct drm_panel *panel;
+ 	struct drm_encoder *encoder;
+@@ -940,9 +941,11 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
+ 	if (soc_info->has_osd)
+ 		priv->ipu_plane = drm_plane_from_index(drm, 0);
+ 
+-	drm_plane_helper_add(&priv->f1, &ingenic_drm_plane_helper_funcs);
++	primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0;
+ 
+-	ret = drm_universal_plane_init(drm, &priv->f1, 1,
++	drm_plane_helper_add(primary, &ingenic_drm_plane_helper_funcs);
++
++	ret = drm_universal_plane_init(drm, primary, 1,
+ 				       &ingenic_drm_primary_plane_funcs,
+ 				       priv->soc_info->formats_f1,
+ 				       priv->soc_info->num_formats_f1,
+@@ -954,7 +957,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
+ 
+ 	drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
+ 
+-	ret = drm_crtc_init_with_planes(drm, &priv->crtc, &priv->f1,
++	ret = drm_crtc_init_with_planes(drm, &priv->crtc, primary,
+ 					NULL, &ingenic_drm_crtc_funcs, NULL);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to init CRTC: %i\n", ret);
+diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
+index 2314c81229920..b3fd3501c4127 100644
+--- a/drivers/gpu/drm/mcde/mcde_dsi.c
++++ b/drivers/gpu/drm/mcde/mcde_dsi.c
+@@ -760,7 +760,7 @@ static void mcde_dsi_start(struct mcde_dsi *d)
+ 		DSI_MCTL_MAIN_DATA_CTL_BTA_EN |
+ 		DSI_MCTL_MAIN_DATA_CTL_READ_EN |
+ 		DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN;
+-	if (d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
++	if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+ 		val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN;
+ 	writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+index 8ee55f9e29541..7fb358167f8d1 100644
+--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
++++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+@@ -153,7 +153,7 @@ struct mtk_hdmi_conf {
+ struct mtk_hdmi {
+ 	struct drm_bridge bridge;
+ 	struct drm_bridge *next_bridge;
+-	struct drm_connector conn;
++	struct drm_connector *curr_conn; /* current connector (only valid when 'enabled') */
+ 	struct device *dev;
+ 	const struct mtk_hdmi_conf *conf;
+ 	struct phy *phy;
+@@ -186,11 +186,6 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
+ 	return container_of(b, struct mtk_hdmi, bridge);
+ }
+ 
+-static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c)
+-{
+-	return container_of(c, struct mtk_hdmi, conn);
+-}
+-
+ static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
+ {
+ 	return readl(hdmi->regs + offset);
+@@ -974,7 +969,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
+ 	ssize_t err;
+ 
+ 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+-						       &hdmi->conn, mode);
++						       hdmi->curr_conn, mode);
+ 	if (err < 0) {
+ 		dev_err(hdmi->dev,
+ 			"Failed to get AVI infoframe from mode: %zd\n", err);
+@@ -1054,7 +1049,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
+ 	ssize_t err;
+ 
+ 	err = drm_hdmi_vendor_infoframe_from_display_mode(&frame,
+-							  &hdmi->conn, mode);
++							  hdmi->curr_conn, mode);
+ 	if (err) {
+ 		dev_err(hdmi->dev,
+ 			"Failed to get vendor infoframe from mode: %zd\n", err);
+@@ -1201,48 +1196,16 @@ mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi)
+ 	       connector_status_connected : connector_status_disconnected;
+ }
+ 
+-static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
+-						  bool force)
++static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi)
+ {
+-	struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+ 	return mtk_hdmi_update_plugged_status(hdmi);
+ }
+ 
+-static void hdmi_conn_destroy(struct drm_connector *conn)
+-{
+-	struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+-
+-	mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL);
+-
+-	drm_connector_cleanup(conn);
+-}
+-
+-static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
+-{
+-	struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+-	struct edid *edid;
+-	int ret;
+-
+-	if (!hdmi->ddc_adpt)
+-		return -ENODEV;
+-
+-	edid = drm_get_edid(conn, hdmi->ddc_adpt);
+-	if (!edid)
+-		return -ENODEV;
+-
+-	hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
+-
+-	drm_connector_update_edid_property(conn, edid);
+-
+-	ret = drm_add_edid_modes(conn, edid);
+-	kfree(edid);
+-	return ret;
+-}
+-
+-static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
+-				    struct drm_display_mode *mode)
++static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
++				      const struct drm_display_info *info,
++				      const struct drm_display_mode *mode)
+ {
+-	struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
++	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ 	struct drm_bridge *next_bridge;
+ 
+ 	dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
+@@ -1267,74 +1230,57 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
+ 	return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
+ }
+ 
+-static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
+-{
+-	struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+-
+-	return hdmi->bridge.encoder;
+-}
+-
+-static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
+-	.detect = hdmi_conn_detect,
+-	.fill_modes = drm_helper_probe_single_connector_modes,
+-	.destroy = hdmi_conn_destroy,
+-	.reset = drm_atomic_helper_connector_reset,
+-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+-	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+-};
+-
+-static const struct drm_connector_helper_funcs
+-		mtk_hdmi_connector_helper_funcs = {
+-	.get_modes = mtk_hdmi_conn_get_modes,
+-	.mode_valid = mtk_hdmi_conn_mode_valid,
+-	.best_encoder = mtk_hdmi_conn_best_enc,
+-};
+-
+ static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
+ {
+ 	struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ 
+-	if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev)
++	if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev) {
++		static enum drm_connector_status status;
++
++		status = mtk_hdmi_detect(hdmi);
+ 		drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
++		drm_bridge_hpd_notify(&hdmi->bridge, status);
++	}
+ }
+ 
+ /*
+  * Bridge callbacks
+  */
+ 
++static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge)
++{
++	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
++
++	return mtk_hdmi_detect(hdmi);
++}
++
++static struct edid *mtk_hdmi_bridge_get_edid(struct drm_bridge *bridge,
++					     struct drm_connector *connector)
++{
++	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
++	struct edid *edid;
++
++	if (!hdmi->ddc_adpt)
++		return NULL;
++	edid = drm_get_edid(connector, hdmi->ddc_adpt);
++	if (!edid)
++		return NULL;
++	hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
++	return edid;
++}
++
+ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+ 				  enum drm_bridge_attach_flags flags)
+ {
+ 	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ 	int ret;
+ 
+-	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+-		DRM_ERROR("Fix bridge driver to make connector optional!");
++	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
++		DRM_ERROR("%s: The flag DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied\n",
++			  __func__);
+ 		return -EINVAL;
+ 	}
+ 
+-	ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
+-					  &mtk_hdmi_connector_funcs,
+-					  DRM_MODE_CONNECTOR_HDMIA,
+-					  hdmi->ddc_adpt);
+-	if (ret) {
+-		dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
+-		return ret;
+-	}
+-	drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs);
+-
+-	hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD;
+-	hdmi->conn.interlace_allowed = true;
+-	hdmi->conn.doublescan_allowed = false;
+-
+-	ret = drm_connector_attach_encoder(&hdmi->conn,
+-						bridge->encoder);
+-	if (ret) {
+-		dev_err(hdmi->dev,
+-			"Failed to attach connector to encoder: %d\n", ret);
+-		return ret;
+-	}
+-
+ 	if (hdmi->next_bridge) {
+ 		ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ 					bridge, flags);
+@@ -1357,7 +1303,8 @@ static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+ 	return true;
+ }
+ 
+-static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
++static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
++					   struct drm_bridge_state *old_bridge_state)
+ {
+ 	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ 
+@@ -1368,10 +1315,13 @@ static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
+ 	clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
+ 	clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+ 
++	hdmi->curr_conn = NULL;
++
+ 	hdmi->enabled = false;
+ }
+ 
+-static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
++static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
++						struct drm_bridge_state *old_state)
+ {
+ 	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ 
+@@ -1406,7 +1356,8 @@ static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ 	drm_mode_copy(&hdmi->mode, adjusted_mode);
+ }
+ 
+-static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
++static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
++					      struct drm_bridge_state *old_state)
+ {
+ 	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ 
+@@ -1426,10 +1377,16 @@ static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
+ 		mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
+ }
+ 
+-static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
++static void mtk_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
++					  struct drm_bridge_state *old_state)
+ {
++	struct drm_atomic_state *state = old_state->base.state;
+ 	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ 
++	/* Retrieve the connector through the atomic state. */
++	hdmi->curr_conn = drm_atomic_get_new_connector_for_encoder(state,
++								   bridge->encoder);
++
+ 	mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
+ 	clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+ 	clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
+@@ -1440,13 +1397,19 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
+ }
+ 
+ static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
++	.mode_valid = mtk_hdmi_bridge_mode_valid,
++	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
++	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
++	.atomic_reset = drm_atomic_helper_bridge_reset,
+ 	.attach = mtk_hdmi_bridge_attach,
+ 	.mode_fixup = mtk_hdmi_bridge_mode_fixup,
+-	.disable = mtk_hdmi_bridge_disable,
+-	.post_disable = mtk_hdmi_bridge_post_disable,
++	.atomic_disable = mtk_hdmi_bridge_atomic_disable,
++	.atomic_post_disable = mtk_hdmi_bridge_atomic_post_disable,
+ 	.mode_set = mtk_hdmi_bridge_mode_set,
+-	.pre_enable = mtk_hdmi_bridge_pre_enable,
+-	.enable = mtk_hdmi_bridge_enable,
++	.atomic_pre_enable = mtk_hdmi_bridge_atomic_pre_enable,
++	.atomic_enable = mtk_hdmi_bridge_atomic_enable,
++	.detect = mtk_hdmi_bridge_detect,
++	.get_edid = mtk_hdmi_bridge_get_edid,
+ };
+ 
+ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+@@ -1662,8 +1625,10 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
+ {
+ 	struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ 
+-	memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
+-
++	if (hdmi->enabled)
++		memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len));
++	else
++		memset(buf, 0, len);
+ 	return 0;
+ }
+ 
+@@ -1755,6 +1720,9 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
+ 
+ 	hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
+ 	hdmi->bridge.of_node = pdev->dev.of_node;
++	hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
++			 | DRM_BRIDGE_OP_HPD;
++	hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ 	drm_bridge_add(&hdmi->bridge);
+ 
+ 	ret = mtk_hdmi_clk_enable_audio(hdmi);
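For context on the DRM_BRIDGE_ATTACH_NO_CONNECTOR flow introduced above: once the bridge stops creating its own connector, the display driver is expected to build one from the bridge chain, typically via drm_bridge_connector_init(). A minimal sketch of that pattern follows; example_attach_bridge() is a made-up helper name, while the DRM symbols are the 5.12-era API this patch targets.

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>

static int example_attach_bridge(struct drm_device *drm,
				 struct drm_encoder *encoder,
				 struct drm_bridge *bridge)
{
	struct drm_connector *connector;
	int ret;

	/* Attach without a bridge-created connector. */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/*
	 * The helper synthesizes a connector from the chain's advertised
	 * bridge->ops (detect/EDID/HPD, as set in the probe hunk above).
	 */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}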
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index 189f3533525c5..e4444452759c9 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -22,7 +22,7 @@
+ 	(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
+ 
+ #define VIG_SM8250_MASK \
+-	(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))
++	(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
+ 
+ #define DMA_SDM845_MASK \
+ 	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
+index 85ad0babc3260..d611cc8e54a45 100644
+--- a/drivers/gpu/drm/msm/msm_debugfs.c
++++ b/drivers/gpu/drm/msm/msm_debugfs.c
+@@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
+ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
+ {
+ 	struct msm_drm_private *priv = dev->dev_private;
+-	struct msm_gpu *gpu = priv->gpu;
+ 	int ret;
+ 
+-	ret = mutex_lock_interruptible(&priv->mm_lock);
++	ret = mutex_lock_interruptible(&priv->obj_lock);
+ 	if (ret)
+ 		return ret;
+ 
+-	if (gpu) {
+-		seq_printf(m, "Active Objects (%s):\n", gpu->name);
+-		msm_gem_describe_objects(&gpu->active_list, m);
+-	}
+-
+-	seq_printf(m, "Inactive Objects:\n");
+-	msm_gem_describe_objects(&priv->inactive_dontneed, m);
+-	msm_gem_describe_objects(&priv->inactive_willneed, m);
++	msm_gem_describe_objects(&priv->objects, m);
+ 
+-	mutex_unlock(&priv->mm_lock);
++	mutex_unlock(&priv->obj_lock);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 196907689c82e..18ea1c66de718 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -446,6 +446,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 
+ 	priv->wq = alloc_ordered_workqueue("msm", 0);
+ 
++	INIT_LIST_HEAD(&priv->objects);
++	mutex_init(&priv->obj_lock);
++
+ 	INIT_LIST_HEAD(&priv->inactive_willneed);
+ 	INIT_LIST_HEAD(&priv->inactive_dontneed);
+ 	mutex_init(&priv->mm_lock);
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index 591c47a654e83..6b58e49754cbc 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -174,7 +174,14 @@ struct msm_drm_private {
+ 	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
+ 	struct msm_perf_state *perf;
+ 
+-	/*
++	/**
++	 * List of all GEM objects (mainly for debugfs), protected by obj_lock
++	 * (acquire it before any per-GEM-object lock)
++	 */
++	struct list_head objects;
++	struct mutex obj_lock;
++
++	/**
+ 	 * Lists of inactive GEM objects.  Every bo is either in one of the
+ 	 * inactive lists (depending on whether or not it is shrinkable) or
+ 	 * gpu->active_list (for the gpu it is active on[1])
+diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
+index f091c1e164fa8..aeba3eb8ce468 100644
+--- a/drivers/gpu/drm/msm/msm_gem.c
++++ b/drivers/gpu/drm/msm/msm_gem.c
+@@ -951,7 +951,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
+ 	size_t size = 0;
+ 
+ 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
+-	list_for_each_entry(msm_obj, list, mm_list) {
++	list_for_each_entry(msm_obj, list, node) {
+ 		struct drm_gem_object *obj = &msm_obj->base;
+ 		seq_puts(m, "   ");
+ 		msm_gem_describe(obj, m);
+@@ -970,6 +970,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
+ 	struct drm_device *dev = obj->dev;
+ 	struct msm_drm_private *priv = dev->dev_private;
+ 
++	mutex_lock(&priv->obj_lock);
++	list_del(&msm_obj->node);
++	mutex_unlock(&priv->obj_lock);
++
+ 	mutex_lock(&priv->mm_lock);
+ 	list_del(&msm_obj->mm_list);
+ 	mutex_unlock(&priv->mm_lock);
+@@ -1157,6 +1161,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+ 	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+ 	mutex_unlock(&priv->mm_lock);
+ 
++	mutex_lock(&priv->obj_lock);
++	list_add_tail(&msm_obj->node, &priv->objects);
++	mutex_unlock(&priv->obj_lock);
++
+ 	return obj;
+ 
+ fail:
+@@ -1227,6 +1235,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+ 	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+ 	mutex_unlock(&priv->mm_lock);
+ 
++	mutex_lock(&priv->obj_lock);
++	list_add_tail(&msm_obj->node, &priv->objects);
++	mutex_unlock(&priv->obj_lock);
++
+ 	return obj;
+ 
+ fail:
+diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
+index b3a0a880cbabe..99d4c0e9465ec 100644
+--- a/drivers/gpu/drm/msm/msm_gem.h
++++ b/drivers/gpu/drm/msm/msm_gem.h
+@@ -55,8 +55,16 @@ struct msm_gem_object {
+ 	 */
+ 	uint8_t vmap_count;
+ 
+-	/* And object is either:
+-	 *  inactive - on priv->inactive_list
++	/**
++	 * Node in list of all objects (mainly for debugfs), protected by
++	 * priv->obj_lock
++	 */
++	struct list_head node;
++
++	/**
++	 * An object is either:
++	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
++	 *     (depending on purgability status)
+ 	 *  active   - on one of the gpus' active_lists..  well, at
+ 	 *     least for now we don't have (I don't think) hw sync between
+ 	 *     2d and 3d on devices which have both, meaning we need to
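The msm hunks above add a global, mutex-protected list of all GEM objects so the debugfs dump can walk every live object instead of the per-state inactive/active lists. A standalone userspace sketch of the same pattern, with a hand-rolled doubly-linked list standing in for <linux/list.h> and a pthread mutex for the kernel mutex:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int id;
	struct obj *prev, *next;	/* stands in for the list_head node */
};

/* Empty circular list, like a kernel LIST_HEAD. */
static struct obj objects = { .prev = &objects, .next = &objects };
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *obj_new(int id)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->id = id;
	pthread_mutex_lock(&obj_lock);	/* list_add_tail(&o->node, &objects) */
	o->prev = objects.prev;
	o->next = &objects;
	objects.prev->next = o;
	objects.prev = o;
	pthread_mutex_unlock(&obj_lock);
	return o;
}

static void obj_free(struct obj *o)
{
	pthread_mutex_lock(&obj_lock);	/* list_del(&o->node) */
	o->prev->next = o->next;
	o->next->prev = o->prev;
	pthread_mutex_unlock(&obj_lock);
	free(o);
}

static void describe_objects(void)	/* the debugfs walk */
{
	pthread_mutex_lock(&obj_lock);
	for (struct obj *o = objects.next; o != &objects; o = o->next)
		printf("obj %d\n", o->id);
	pthread_mutex_unlock(&obj_lock);
}

int main(void)
{
	struct obj *a = obj_new(1), *b = obj_new(2);

	describe_objects();
	obj_free(a);
	obj_free(b);
	return 0;
}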
+diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
+index b31d750c425ac..5f1722b040f46 100644
+--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
++++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
+@@ -4327,7 +4327,8 @@ static int omap_dsi_register_te_irq(struct dsi_data *dsi,
+ 	irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
+ 
+ 	err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
+-				   IRQF_TRIGGER_RISING, "TE", dsi);
++				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
++				   "TE", dsi);
+ 	if (err) {
+ 		dev_err(dsi->dev, "request irq failed with %d\n", err);
+ 		gpiod_put(dsi->te_gpio);
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+index b9a0e56f33e24..ef70140c5b09d 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+@@ -898,8 +898,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
+ 	 */
+ 	dsi->hs_rate = 349440000;
+ 	dsi->lp_rate = 9600000;
+-	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+-		MIPI_DSI_MODE_EOT_PACKET;
++	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ 
+ 	/*
+ 	 * Every new incarnation of this display must have a unique
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+index 4aac0d1573dd0..70560cac53a99 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+@@ -184,9 +184,7 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
+ 	 * As we only send commands we do not need to be continuously
+ 	 * clocked.
+ 	 */
+-	dsi->mode_flags =
+-		MIPI_DSI_CLOCK_NON_CONTINUOUS |
+-		MIPI_DSI_MODE_EOT_PACKET;
++	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ 
+ 	s6->supply = devm_regulator_get(dev, "vdd1");
+ 	if (IS_ERR(s6->supply))
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
+index eec74c10dddaf..9c3563c61e8cc 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
+@@ -97,7 +97,6 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
+ 	dsi->hs_rate = 349440000;
+ 	dsi->lp_rate = 9600000;
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+-		MIPI_DSI_MODE_EOT_PACKET |
+ 		MIPI_DSI_MODE_VIDEO_BURST;
+ 
+ 	ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write,
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 4e2dad314c795..e8b1a0e873eaa 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -406,7 +406,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
+ 		if (IS_ERR(p->hpd_gpio)) {
+ 			err = panel_simple_get_hpd_gpio(panel->dev, p, false);
+ 			if (err)
+-				return err;
++				goto error;
+ 		}
+ 
+ 		err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
+@@ -418,13 +418,20 @@ static int panel_simple_prepare(struct drm_panel *panel)
+ 		if (err) {
+ 			dev_err(panel->dev,
+ 				"error waiting for hpd GPIO: %d\n", err);
+-			return err;
++			goto error;
+ 		}
+ 	}
+ 
+ 	p->prepared_time = ktime_get();
+ 
+ 	return 0;
++
++error:
++	gpiod_set_value_cansleep(p->enable_gpio, 0);
++	regulator_disable(p->supply);
++	p->unprepared_time = ktime_get();
++
++	return err;
+ }
+ 
+ static int panel_simple_enable(struct drm_panel *panel)
+diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+index 065efae213f5b..95659a4d15e97 100644
+--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
++++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+@@ -449,8 +449,7 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi)
+ 			MIPI_DSI_MODE_VIDEO_BURST;
+ 	else
+ 		dsi->mode_flags =
+-			MIPI_DSI_CLOCK_NON_CONTINUOUS |
+-			MIPI_DSI_MODE_EOT_PACKET;
++			MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ 
+ 	acx->supply = devm_regulator_get(dev, "vddi");
+ 	if (IS_ERR(acx->supply))
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 7c1b3481b7850..21e552d1ac71a 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -488,8 +488,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ 		}
+ 		bo->base.pages = pages;
+ 		bo->base.pages_use_count = 1;
+-	} else
++	} else {
+ 		pages = bo->base.pages;
++		if (pages[page_offset]) {
++			/* Pages are already mapped, bail out. */
++			mutex_unlock(&bo->base.pages_lock);
++			goto out;
++		}
++	}
+ 
+ 	mapping = bo->base.base.filp->f_mapping;
+ 	mapping_set_unevictable(mapping);
+@@ -522,6 +528,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ 
+ 	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+ 
++out:
+ 	panfrost_gem_mapping_put(bomapping);
+ 
+ 	return 0;
+@@ -593,6 +600,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
+ 		access_type = (fault_status >> 8) & 0x3;
+ 		source_id = (fault_status >> 16);
+ 
++		mmu_write(pfdev, MMU_INT_CLEAR, mask);
++
+ 		/* Page fault only */
+ 		ret = -1;
+ 		if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
+@@ -616,8 +625,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
+ 				access_type, access_type_name(pfdev, fault_status),
+ 				source_id);
+ 
+-		mmu_write(pfdev, MMU_INT_CLEAR, mask);
+-
+ 		status &= ~mask;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
+index 54e3c3a974407..741cc983daf1c 100644
+--- a/drivers/gpu/drm/qxl/qxl_cmd.c
++++ b/drivers/gpu/drm/qxl/qxl_cmd.c
+@@ -268,7 +268,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+ 	int ret;
+ 
+ 	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
+-			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
++			    false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+ 	if (ret) {
+ 		DRM_ERROR("failed to allocate VRAM BO\n");
+ 		return ret;
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 56e0c6c625e9a..3f432ec8e771c 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -798,8 +798,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
+ 				qdev->dumb_shadow_bo = NULL;
+ 			}
+ 			qxl_bo_create(qdev, surf.height * surf.stride,
+-				      true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
+-				      &qdev->dumb_shadow_bo);
++				      true, true, QXL_GEM_DOMAIN_SURFACE, 0,
++				      &surf, &qdev->dumb_shadow_bo);
+ 		}
+ 		if (user_bo->shadow != qdev->dumb_shadow_bo) {
+ 			if (user_bo->shadow) {
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
+index 1b09bbe980551..1864467f10639 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.c
++++ b/drivers/gpu/drm/qxl/qxl_drv.c
+@@ -144,8 +144,6 @@ static void qxl_drm_release(struct drm_device *dev)
+ 	 * reordering qxl_modeset_fini() + qxl_device_fini() calls is
+ 	 * non-trivial though.
+ 	 */
+-	if (!dev->registered)
+-		return;
+ 	qxl_modeset_fini(qdev);
+ 	qxl_device_fini(qdev);
+ }
+diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
+index 48e096285b4c6..a08da0bd9098b 100644
+--- a/drivers/gpu/drm/qxl/qxl_gem.c
++++ b/drivers/gpu/drm/qxl/qxl_gem.c
+@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
+ 	/* At least align on page size */
+ 	if (alignment < PAGE_SIZE)
+ 		alignment = PAGE_SIZE;
+-	r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
++	r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
+ 	if (r) {
+ 		if (r != -ERESTARTSYS)
+ 			DRM_ERROR(
+diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
+index ceebc5881f68d..a5806667697aa 100644
+--- a/drivers/gpu/drm/qxl/qxl_object.c
++++ b/drivers/gpu/drm/qxl/qxl_object.c
+@@ -103,8 +103,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
+ 	.print_info = drm_gem_ttm_print_info,
+ };
+ 
+-int qxl_bo_create(struct qxl_device *qdev,
+-		  unsigned long size, bool kernel, bool pinned, u32 domain,
++int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
++		  bool kernel, bool pinned, u32 domain, u32 priority,
+ 		  struct qxl_surface *surf,
+ 		  struct qxl_bo **bo_ptr)
+ {
+@@ -137,6 +137,7 @@ int qxl_bo_create(struct qxl_device *qdev,
+ 
+ 	qxl_ttm_placement_from_domain(bo, domain);
+ 
++	bo->tbo.priority = priority;
+ 	r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
+ 				 &bo->placement, 0, &ctx, size,
+ 				 NULL, NULL, &qxl_ttm_bo_destroy);
+diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
+index e60a8f88e2262..dc1659e717f14 100644
+--- a/drivers/gpu/drm/qxl/qxl_object.h
++++ b/drivers/gpu/drm/qxl/qxl_object.h
+@@ -61,6 +61,7 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
+ extern int qxl_bo_create(struct qxl_device *qdev,
+ 			 unsigned long size,
+ 			 bool kernel, bool pinned, u32 domain,
++			 u32 priority,
+ 			 struct qxl_surface *surf,
+ 			 struct qxl_bo **bo_ptr);
+ extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map);
+diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
+index b372455e27298..801ce77b1dac8 100644
+--- a/drivers/gpu/drm/qxl/qxl_release.c
++++ b/drivers/gpu/drm/qxl/qxl_release.c
+@@ -199,11 +199,12 @@ qxl_release_free(struct qxl_device *qdev,
+ }
+ 
+ static int qxl_release_bo_alloc(struct qxl_device *qdev,
+-				struct qxl_bo **bo)
++				struct qxl_bo **bo,
++				u32 priority)
+ {
+ 	/* pin release BOs; they are too messy to evict */
+ 	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
+-			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
++			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
+ }
+ 
+ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
+@@ -326,13 +327,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+ 	int ret = 0;
+ 	union qxl_release_info *info;
+ 	int cur_idx;
++	u32 priority;
+ 
+-	if (type == QXL_RELEASE_DRAWABLE)
++	if (type == QXL_RELEASE_DRAWABLE) {
+ 		cur_idx = 0;
+-	else if (type == QXL_RELEASE_SURFACE_CMD)
++		priority = 0;
++	} else if (type == QXL_RELEASE_SURFACE_CMD) {
+ 		cur_idx = 1;
+-	else if (type == QXL_RELEASE_CURSOR_CMD)
++		priority = 1;
++	} else if (type == QXL_RELEASE_CURSOR_CMD) {
+ 		cur_idx = 2;
++		priority = 1;
++	}
+ 	else {
+ 		DRM_ERROR("got illegal type: %d\n", type);
+ 		return -EINVAL;
+@@ -352,7 +358,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+ 		qdev->current_release_bo[cur_idx] = NULL;
+ 	}
+ 	if (!qdev->current_release_bo[cur_idx]) {
+-		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
++		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
+ 		if (ret) {
+ 			mutex_unlock(&qdev->release_mutex);
+ 			if (free_bo) {
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+index 2c32186c4acd9..4e4c937c36c62 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -242,6 +242,9 @@ radeon_dp_mst_detect(struct drm_connector *connector,
+ 		to_radeon_connector(connector);
+ 	struct radeon_connector *master = radeon_connector->mst_port;
+ 
++	if (drm_connector_is_unregistered(connector))
++		return connector_status_disconnected;
++
+ 	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ 				      radeon_connector->port);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index 2479d6ab7a368..58876bb4ef2a1 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -518,6 +518,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 			*value = rdev->config.si.backend_enable_mask;
+ 		} else {
+ 			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
++			return -EINVAL;
+ 		}
+ 		break;
+ 	case RADEON_INFO_MAX_SCLK:
+diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
+index 7812094f93d6b..6f3b523e16e8c 100644
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -525,13 +525,42 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+ {
+ 	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ 	struct drm_device *ddev = crtc->dev;
++	struct drm_connector_list_iter iter;
++	struct drm_connector *connector = NULL;
++	struct drm_encoder *encoder = NULL;
++	struct drm_bridge *bridge = NULL;
+ 	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ 	struct videomode vm;
+ 	u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
+ 	u32 total_width, total_height;
++	u32 bus_flags = 0;
+ 	u32 val;
+ 	int ret;
+ 
++	/* get encoder from crtc */
++	drm_for_each_encoder(encoder, ddev)
++		if (encoder->crtc == crtc)
++			break;
++
++	if (encoder) {
++		/* get bridge from encoder */
++		list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
++			if (bridge->encoder == encoder)
++				break;
++
++		/* Get the connector from encoder */
++		drm_connector_list_iter_begin(ddev, &iter);
++		drm_for_each_connector_iter(connector, &iter)
++			if (connector->encoder == encoder)
++				break;
++		drm_connector_list_iter_end(&iter);
++	}
++
++	if (bridge && bridge->timings)
++		bus_flags = bridge->timings->input_bus_flags;
++	else if (connector)
++		bus_flags = connector->display_info.bus_flags;
++
+ 	if (!pm_runtime_active(ddev->dev)) {
+ 		ret = pm_runtime_get_sync(ddev->dev);
+ 		if (ret) {
+@@ -567,10 +596,10 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+ 	if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ 		val |= GCR_VSPOL;
+ 
+-	if (vm.flags & DISPLAY_FLAGS_DE_LOW)
++	if (bus_flags & DRM_BUS_FLAG_DE_LOW)
+ 		val |= GCR_DEPOL;
+ 
+-	if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
++	if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ 		val |= GCR_PCPOL;
+ 
+ 	reg_update_bits(ldev->regs, LTDC_GCR,
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+index 30213708fc990..d99afd19ca083 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+@@ -515,6 +515,15 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
+ 
+ 	drm_crtc_vblank_off(crtc);
+ 
++	spin_lock_irq(&crtc->dev->event_lock);
++
++	if (crtc->state->event) {
++		drm_crtc_send_vblank_event(crtc, crtc->state->event);
++		crtc->state->event = NULL;
++	}
++
++	spin_unlock_irq(&crtc->dev->event_lock);
++
+ 	tilcdc_crtc_disable_irqs(dev);
+ 
+ 	pm_runtime_put_sync(dev->dev);
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
+index 99158ee67d02b..59d1fb017da01 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
+@@ -866,7 +866,7 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp)
+ 		return ret;
+ 
+ 	zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1);
+-	memset(dp->train_set, 0, 4);
++	memset(dp->train_set, 0, sizeof(dp->train_set));
+ 	ret = zynqmp_dp_link_train_cr(dp);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 67fd8a2f5aba3..ba338973e968d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -946,6 +946,7 @@
+ #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S	0x8003
+ 
+ #define USB_VENDOR_ID_PLANTRONICS	0x047f
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES	0xc056
+ 
+ #define USB_VENDOR_ID_PANASONIC		0x04da
+ #define USB_DEVICE_ID_PANABOARD_UBT780	0x1044
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index c6c8e20f3e8d5..0ff03fed97709 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -33,6 +33,9 @@
+ 
+ #include "hid-ids.h"
+ 
++/* Userspace expects F20 for mic-mute; KEY_MICMUTE does not work */
++#define LENOVO_KEY_MICMUTE KEY_F20
++
+ struct lenovo_drvdata {
+ 	u8 led_report[3]; /* Must be first for proper alignment */
+ 	int led_state;
+@@ -62,8 +65,8 @@ struct lenovo_drvdata {
+ #define TP10UBKBD_LED_OFF		1
+ #define TP10UBKBD_LED_ON		2
+ 
+-static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
+-				     enum led_brightness value)
++static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
++				    enum led_brightness value)
+ {
+ 	struct lenovo_drvdata *data = hid_get_drvdata(hdev);
+ 	int ret;
+@@ -75,10 +78,18 @@ static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
+ 	data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF;
+ 	ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3,
+ 				 HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+-	if (ret)
+-		hid_err(hdev, "Set LED output report error: %d\n", ret);
++	if (ret != 3) {
++		if (ret != -ENODEV)
++			hid_err(hdev, "Set LED output report error: %d\n", ret);
++
++		ret = ret < 0 ? ret : -EIO;
++	} else {
++		ret = 0;
++	}
+ 
+ 	mutex_unlock(&data->led_report_mutex);
++
++	return ret;
+ }
+ 
+ static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
+@@ -126,7 +137,7 @@ static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
+ 	if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
+ 		/* This sub-device contains trackpoint, mark it */
+ 		hid_set_drvdata(hdev, (void *)1);
+-		map_key_clear(KEY_MICMUTE);
++		map_key_clear(LENOVO_KEY_MICMUTE);
+ 		return 1;
+ 	}
+ 	return 0;
+@@ -141,7 +152,7 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
+ 	    (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
+ 		switch (usage->hid & HID_USAGE) {
+ 		case 0x00f1: /* Fn-F4: Mic mute */
+-			map_key_clear(KEY_MICMUTE);
++			map_key_clear(LENOVO_KEY_MICMUTE);
+ 			return 1;
+ 		case 0x00f2: /* Fn-F5: Brightness down */
+ 			map_key_clear(KEY_BRIGHTNESSDOWN);
+@@ -231,7 +242,7 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
+ 			map_key_clear(KEY_FN_ESC);
+ 			return 1;
+ 		case 9: /* Fn-F4: Mic mute */
+-			map_key_clear(KEY_MICMUTE);
++			map_key_clear(LENOVO_KEY_MICMUTE);
+ 			return 1;
+ 		case 10: /* Fn-F7: Control panel */
+ 			map_key_clear(KEY_CONFIG);
+@@ -349,7 +360,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
+ {
+ 	struct hid_device *hdev = to_hid_device(dev);
+ 	struct lenovo_drvdata *data = hid_get_drvdata(hdev);
+-	int value;
++	int value, ret;
+ 
+ 	if (kstrtoint(buf, 10, &value))
+ 		return -EINVAL;
+@@ -364,7 +375,9 @@ static ssize_t attr_fn_lock_store(struct device *dev,
+ 		lenovo_features_set_cptkbd(hdev);
+ 		break;
+ 	case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+-		lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
++		ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
++		if (ret)
++			return ret;
+ 		break;
+ 	}
+ 
+@@ -498,6 +511,9 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
+ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
+ 		struct hid_usage *usage, __s32 value)
+ {
++	if (!hid_get_drvdata(hdev))
++		return 0;
++
+ 	switch (hdev->product) {
+ 	case USB_DEVICE_ID_LENOVO_CUSBKBD:
+ 	case USB_DEVICE_ID_LENOVO_CBTKBD:
+@@ -777,7 +793,7 @@ static enum led_brightness lenovo_led_brightness_get(
+ 				: LED_OFF;
+ }
+ 
+-static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
++static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
+ 			enum led_brightness value)
+ {
+ 	struct device *dev = led_cdev->dev->parent;
+@@ -785,6 +801,7 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
+ 	struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
+ 	u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
+ 	int led_nr = 0;
++	int ret = 0;
+ 
+ 	if (led_cdev == &data_pointer->led_micmute)
+ 		led_nr = 1;
+@@ -799,9 +816,11 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
+ 		lenovo_led_set_tpkbd(hdev);
+ 		break;
+ 	case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+-		lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
++		ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
+ 		break;
+ 	}
++
++	return ret;
+ }
+ 
+ static int lenovo_register_leds(struct hid_device *hdev)
+@@ -822,7 +841,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
+ 
+ 	data->led_mute.name = name_mute;
+ 	data->led_mute.brightness_get = lenovo_led_brightness_get;
+-	data->led_mute.brightness_set = lenovo_led_brightness_set;
++	data->led_mute.brightness_set_blocking = lenovo_led_brightness_set;
++	data->led_mute.flags = LED_HW_PLUGGABLE;
+ 	data->led_mute.dev = &hdev->dev;
+ 	ret = led_classdev_register(&hdev->dev, &data->led_mute);
+ 	if (ret < 0)
+@@ -830,7 +850,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
+ 
+ 	data->led_micmute.name = name_micm;
+ 	data->led_micmute.brightness_get = lenovo_led_brightness_get;
+-	data->led_micmute.brightness_set = lenovo_led_brightness_set;
++	data->led_micmute.brightness_set_blocking = lenovo_led_brightness_set;
++	data->led_micmute.flags = LED_HW_PLUGGABLE;
+ 	data->led_micmute.dev = &hdev->dev;
+ 	ret = led_classdev_register(&hdev->dev, &data->led_micmute);
+ 	if (ret < 0) {
+diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
+index 85b685efc12f3..e81b7cec2d124 100644
+--- a/drivers/hid/hid-plantronics.c
++++ b/drivers/hid/hid-plantronics.c
+@@ -13,6 +13,7 @@
+ 
+ #include <linux/hid.h>
+ #include <linux/module.h>
++#include <linux/jiffies.h>
+ 
+ #define PLT_HID_1_0_PAGE	0xffa00000
+ #define PLT_HID_2_0_PAGE	0xffa20000
+@@ -36,6 +37,16 @@
+ #define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
+ 			    (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
+ 
++#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
++
++#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
++
++struct plt_drv_data {
++	unsigned long device_type;
++	unsigned long last_volume_key_ts;
++	u32 quirks;
++};
++
+ static int plantronics_input_mapping(struct hid_device *hdev,
+ 				     struct hid_input *hi,
+ 				     struct hid_field *field,
+@@ -43,7 +54,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
+ 				     unsigned long **bit, int *max)
+ {
+ 	unsigned short mapped_key;
+-	unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
++	struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
++	unsigned long plt_type = drv_data->device_type;
+ 
+ 	/* special case for PTT products */
+ 	if (field->application == HID_GD_JOYSTICK)
+@@ -105,6 +117,30 @@ mapped:
+ 	return 1;
+ }
+ 
++static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
++			     struct hid_usage *usage, __s32 value)
++{
++	struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
++
++	if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
++		unsigned long prev_ts, cur_ts;
++
++		/* Usages are filtered in plantronics_usages. */
++
++		if (!value) /* Handle key presses only. */
++			return 0;
++
++		prev_ts = drv_data->last_volume_key_ts;
++		cur_ts = jiffies;
++		if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
++			return 1; /* Ignore the repeated key. */
++
++		drv_data->last_volume_key_ts = cur_ts;
++	}
++
++	return 0;
++}
++
+ static unsigned long plantronics_device_type(struct hid_device *hdev)
+ {
+ 	unsigned i, col_page;
+@@ -133,15 +169,24 @@ exit:
+ static int plantronics_probe(struct hid_device *hdev,
+ 			     const struct hid_device_id *id)
+ {
++	struct plt_drv_data *drv_data;
+ 	int ret;
+ 
++	drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
++	if (!drv_data)
++		return -ENOMEM;
++
+ 	ret = hid_parse(hdev);
+ 	if (ret) {
+ 		hid_err(hdev, "parse failed\n");
+ 		goto err;
+ 	}
+ 
+-	hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
++	drv_data->device_type = plantronics_device_type(hdev);
++	drv_data->quirks = id->driver_data;
++	drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
++
++	hid_set_drvdata(hdev, drv_data);
+ 
+ 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
+ 		HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
+@@ -153,15 +198,26 @@ err:
+ }
+ 
+ static const struct hid_device_id plantronics_devices[] = {
++	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
++		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, plantronics_devices);
+ 
++static const struct hid_usage_id plantronics_usages[] = {
++	{ HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
++	{ HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
++	{ HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
++};
++
+ static struct hid_driver plantronics_driver = {
+ 	.name = "plantronics",
+ 	.id_table = plantronics_devices,
++	.usage_table = plantronics_usages,
+ 	.input_mapping = plantronics_input_mapping,
++	.event = plantronics_event,
+ 	.probe = plantronics_probe,
+ };
+ module_hid_driver(plantronics_driver);
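The quirk above suppresses the duplicated volume-key reports some Plantronics headsets send by comparing jiffies timestamps against a 5 ms window. A minimal userspace sketch of that debounce, with clock_gettime(CLOCK_MONOTONIC) standing in for jiffies; the helper names and the initial timestamp are illustrative, not from the driver.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define DOUBLE_KEY_TIMEOUT_MS 5		/* matches PLT_DOUBLE_KEY_TIMEOUT */

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/*
 * Returns true when a press lands inside the window and should be
 * swallowed as the duplicate half of a doubled report.
 */
static bool is_duplicate_press(long long *last_ts)
{
	long long now = now_ms();

	if (now - *last_ts <= DOUBLE_KEY_TIMEOUT_MS)
		return true;
	*last_ts = now;
	return false;
}

int main(void)
{
	long long last = now_ms() - 1000;	/* pretend the last press was long ago */

	printf("first press swallowed?    %d\n", is_duplicate_press(&last));	/* 0 */
	printf("instant repeat swallowed? %d\n", is_duplicate_press(&last));	/* 1 */
	return 0;
}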
+diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
+index c3fb5beb846e2..ec90713564e32 100644
+--- a/drivers/hsi/hsi_core.c
++++ b/drivers/hsi/hsi_core.c
+@@ -210,8 +210,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
+ 	if (err)
+ 		goto err;
+ 
+-	dev_set_name(&cl->device, "%s", name);
+-
+ 	err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
+ 	if (err) {
+ 		err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
+@@ -293,6 +291,7 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
+ 	cl->device.release = hsi_client_release;
+ 	cl->device.of_node = client;
+ 
++	dev_set_name(&cl->device, "%s", name);
+ 	if (device_register(&cl->device) < 0) {
+ 		pr_err("hsi: failed to register client: %s\n", name);
+ 		put_device(&cl->device);
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 0bd202de79600..945e41f5e3a82 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -653,7 +653,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
+ 
+ 	if (newchannel->rescind) {
+ 		err = -ENODEV;
+-		goto error_free_info;
++		goto error_clean_msglist;
+ 	}
+ 
+ 	err = vmbus_post_msg(open_msg,
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index f0ed730e2e4e4..ecebf1235fd50 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -756,6 +756,12 @@ static void init_vp_index(struct vmbus_channel *channel)
+ 	free_cpumask_var(available_mask);
+ }
+ 
++#define UNLOAD_DELAY_UNIT_MS	10		/* 10 milliseconds */
++#define UNLOAD_WAIT_MS		(100*1000)	/* 100 seconds */
++#define UNLOAD_WAIT_LOOPS	(UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
++#define UNLOAD_MSG_MS		(5*1000)	/* Every 5 seconds */
++#define UNLOAD_MSG_LOOPS	(UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
++
+ static void vmbus_wait_for_unload(void)
+ {
+ 	int cpu;
+@@ -773,12 +779,17 @@ static void vmbus_wait_for_unload(void)
+ 	 * vmbus_connection.unload_event. If not, the last thing we can do is
+ 	 * read message pages for all CPUs directly.
+ 	 *
+-	 * Wait no more than 10 seconds so that the panic path can't get
+-	 * hung forever in case the response message isn't seen.
++	 * Wait up to 100 seconds since an Azure host must write back any dirty
++	 * data in its disk cache before the VMbus UNLOAD request will
++	 * complete. This flushing has been empirically observed to take up
++	 * to 50 seconds in cases with a lot of dirty data, so allow additional
++	 * leeway and for inaccuracies in mdelay(). But eventually time out so
++	 * that the panic path can't get hung forever in case the response
++	 * message isn't seen.
+ 	 */
+-	for (i = 0; i < 1000; i++) {
++	for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
+ 		if (completion_done(&vmbus_connection.unload_event))
+-			break;
++			goto completed;
+ 
+ 		for_each_online_cpu(cpu) {
+ 			struct hv_per_cpu_context *hv_cpu
+@@ -801,9 +812,18 @@ static void vmbus_wait_for_unload(void)
+ 			vmbus_signal_eom(msg, message_type);
+ 		}
+ 
+-		mdelay(10);
++		/*
++		 * Give a notice periodically so someone watching the
++		 * serial output won't think it is completely hung.
++		 */
++		if (!(i % UNLOAD_MSG_LOOPS))
++			pr_notice("Waiting for VMBus UNLOAD to complete\n");
++
++		mdelay(UNLOAD_DELAY_UNIT_MS);
+ 	}
++	pr_err("Continuing even though VMBus UNLOAD did not complete\n");
+ 
++completed:
+ 	/*
+ 	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
+ 	 * maybe-pending messages on all CPUs to be able to receive new
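The rewritten wait loop above bounds the panic-path wait at 100 seconds in 10 ms steps and emits a notice every 5 seconds so a watcher on the serial console can tell progress from a hang. A standalone sketch of that bounded-wait-with-periodic-notice shape, using the same constants, with usleep()/printf() standing in for mdelay()/pr_notice() and work_done() as a placeholder for completion_done():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define UNLOAD_DELAY_UNIT_MS	10
#define UNLOAD_WAIT_MS		(100 * 1000)
#define UNLOAD_WAIT_LOOPS	(UNLOAD_WAIT_MS / UNLOAD_DELAY_UNIT_MS)	/* 10000 */
#define UNLOAD_MSG_MS		(5 * 1000)
#define UNLOAD_MSG_LOOPS	(UNLOAD_MSG_MS / UNLOAD_DELAY_UNIT_MS)	/*   500 */

static bool work_done(void)
{
	return false;	/* placeholder; never completes in this sketch */
}

int main(void)
{
	int i;

	for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
		if (work_done())
			goto completed;

		/* Periodic notice so a watcher doesn't assume a hang. */
		if (!(i % UNLOAD_MSG_LOOPS))
			printf("Waiting for VMBus UNLOAD to complete\n");

		usleep(UNLOAD_DELAY_UNIT_MS * 1000);
	}
	printf("Continuing even though VMBus UNLOAD did not complete\n");

completed:
	return 0;
}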
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 35833d4d1a1dc..ecd82ebfd5bc4 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -313,7 +313,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
+ 		rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
+ 		if (rqst_id == VMBUS_RQST_ERROR) {
+ 			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+-			pr_err("No request id available\n");
+ 			return -EAGAIN;
+ 		}
+ 	}
+diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
+index da27ce34ee3fd..eb4a06003b7f9 100644
+--- a/drivers/hwmon/pmbus/pxe1610.c
++++ b/drivers/hwmon/pmbus/pxe1610.c
+@@ -41,6 +41,15 @@ static int pxe1610_identify(struct i2c_client *client,
+ 				info->vrm_version[i] = vr13;
+ 				break;
+ 			default:
++				/*
++				 * If prior pages are available, limit operation
++				 * to them.
++				 */
++				if (i != 0) {
++					info->pages = i;
++					return 0;
++				}
++
+ 				return -ENODEV;
+ 			}
+ 		}
+diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
+index 0f603b4094f22..a706ba11b93e6 100644
+--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
+@@ -52,7 +52,7 @@ static ssize_t format_attr_contextid_show(struct device *dev,
+ {
+ 	int pid_fmt = ETM_OPT_CTXTID;
+ 
+-#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
++#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
+ 	pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
+ #endif
+ 	return sprintf(page, "config:%d\n", pid_fmt);
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index e4b7f2a951ad5..c1bbc4caeb5c9 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -789,7 +789,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ 	bool change_role = false;
+ #endif
+ 
+-	ret = pm_runtime_get_sync(id->dev);
++	ret = pm_runtime_resume_and_get(id->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -911,7 +911,7 @@ static int cdns_reg_slave(struct i2c_client *slave)
+ 	if (slave->flags & I2C_CLIENT_TEN)
+ 		return -EAFNOSUPPORT;
+ 
+-	ret = pm_runtime_get_sync(id->dev);
++	ret = pm_runtime_resume_and_get(id->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1200,7 +1200,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
+ 	if (IS_ERR(id->membase))
+ 		return PTR_ERR(id->membase);
+ 
+-	id->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		return ret;
++	id->irq = ret;
+ 
+ 	id->adap.owner = THIS_MODULE;
+ 	id->adap.dev.of_node = pdev->dev.of_node;
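This and the following i2c conversions swap pm_runtime_get_sync() for pm_runtime_resume_and_get() because the former bumps the device usage counter even when the resume fails, leaving each error path responsible for a put; the latter balances the counter internally on failure. A hedged kernel-context sketch of the difference (old_style/new_style are illustrative names, not driver functions):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int old_style(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* required, and easy to forget */
		return ret;
	}
	/* ... talk to the hardware ... */
	pm_runtime_put(dev);
	return 0;
}

static int new_style(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret < 0)	/* usage counter already balanced */
		return ret;
	/* ... talk to the hardware ... */
	pm_runtime_put(dev);
	return 0;
}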
+diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
+index a08554c1a5704..bdff0e6345d9a 100644
+--- a/drivers/i2c/busses/i2c-emev2.c
++++ b/drivers/i2c/busses/i2c-emev2.c
+@@ -395,7 +395,10 @@ static int em_i2c_probe(struct platform_device *pdev)
+ 
+ 	em_i2c_reset(&priv->adap);
+ 
+-	priv->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		goto err_clk;
++	priv->irq = ret;
+ 	ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
+ 				"em_i2c", priv);
+ 	if (ret)
+diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
+index 98a89301ed2a6..8e987945ed450 100644
+--- a/drivers/i2c/busses/i2c-img-scb.c
++++ b/drivers/i2c/busses/i2c-img-scb.c
+@@ -1057,7 +1057,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ 			atomic = true;
+ 	}
+ 
+-	ret = pm_runtime_get_sync(adap->dev.parent);
++	ret = pm_runtime_resume_and_get(adap->dev.parent);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1158,7 +1158,7 @@ static int img_i2c_init(struct img_i2c *i2c)
+ 	u32 rev;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(i2c->adap.dev.parent);
++	ret = pm_runtime_resume_and_get(i2c->adap.dev.parent);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
+index 9db6ccded5e9e..8b9ba055c4186 100644
+--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
+@@ -259,7 +259,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
+ 	unsigned int temp;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent);
++	ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index b80fdc1f0092f..dc9c4b4cc25a1 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -1253,7 +1253,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
+ 	struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
+ 	int result;
+ 
+-	result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
++	result = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
+ 	if (result < 0)
+ 		return result;
+ 
+@@ -1496,7 +1496,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
+ 	struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
+ 	int irq, ret;
+ 
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
+index 55177eb21d7b1..baa7319eee539 100644
+--- a/drivers/i2c/busses/i2c-jz4780.c
++++ b/drivers/i2c/busses/i2c-jz4780.c
+@@ -825,7 +825,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
+ 
+ 	jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
+ 
+-	i2c->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		goto err;
++	i2c->irq = ret;
+ 	ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
+ 			       dev_name(&pdev->dev), i2c);
+ 	if (ret)
+diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
+index 2fb0532d8a161..ab261d762dea3 100644
+--- a/drivers/i2c/busses/i2c-mlxbf.c
++++ b/drivers/i2c/busses/i2c-mlxbf.c
+@@ -2376,6 +2376,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
+ 	mlxbf_i2c_init_slave(pdev, priv);
+ 
+ 	irq = platform_get_irq(pdev, 0);
++	if (irq < 0)
++		return irq;
+ 	ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
+ 			       IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
+ 			       dev_name(dev), priv);
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index 2ffd2f354d0ae..86f70c7513192 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -479,7 +479,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
+ {
+ 	u16 control_reg;
+ 
+-	if (i2c->dev_comp->dma_sync) {
++	if (i2c->dev_comp->apdma_sync) {
+ 		writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+ 		udelay(10);
+ 		writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 12ac4212aded8..d4f6c6d60683a 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -1404,9 +1404,9 @@ omap_i2c_probe(struct platform_device *pdev)
+ 	pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
+ 	pm_runtime_use_autosuspend(omap->dev);
+ 
+-	r = pm_runtime_get_sync(omap->dev);
++	r = pm_runtime_resume_and_get(omap->dev);
+ 	if (r < 0)
+-		goto err_free_mem;
++		goto err_disable_pm;
+ 
+ 	/*
+ 	 * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
+@@ -1513,8 +1513,8 @@ err_unuse_clocks:
+ 	omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
+ 	pm_runtime_dont_use_autosuspend(omap->dev);
+ 	pm_runtime_put_sync(omap->dev);
++err_disable_pm:
+ 	pm_runtime_disable(&pdev->dev);
+-err_free_mem:
+ 
+ 	return r;
+ }
+@@ -1525,7 +1525,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
+ 	int ret;
+ 
+ 	i2c_del_adapter(&omap->adapter);
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index 12f6d452c0f70..8722ca23f889b 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -1027,7 +1027,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 	if (of_property_read_bool(dev->of_node, "smbus"))
+ 		priv->flags |= ID_P_HOST_NOTIFY;
+ 
+-	priv->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		goto out_pm_disable;
++	priv->irq = ret;
+ 	ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
+ 	if (ret < 0) {
+ 		dev_err(dev, "cannot get irq %d\n", priv->irq);
+diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
+index c2005c789d2b0..319d1fa617c88 100644
+--- a/drivers/i2c/busses/i2c-sh7760.c
++++ b/drivers/i2c/busses/i2c-sh7760.c
+@@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
+ 		goto out2;
+ 	}
+ 
+-	id->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		goto out3;
++	id->irq = ret;
+ 
+ 	id->adap.nr = pdev->id;
+ 	id->adap.algo = &sh7760_i2c_algo;
+diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
+index 2917fecf6c80d..8ead7e021008c 100644
+--- a/drivers/i2c/busses/i2c-sprd.c
++++ b/drivers/i2c/busses/i2c-sprd.c
+@@ -290,7 +290,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
+ 	struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+ 	int im, ret;
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -576,7 +576,7 @@ static int sprd_i2c_remove(struct platform_device *pdev)
+ 	struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
+index c62c815b88eb6..318abfa7926b2 100644
+--- a/drivers/i2c/busses/i2c-stm32f7.c
++++ b/drivers/i2c/busses/i2c-stm32f7.c
+@@ -1652,7 +1652,7 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+ 	i2c_dev->msg_id = 0;
+ 	f7_msg->smbus = false;
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1698,7 +1698,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ 	f7_msg->read_write = read_write;
+ 	f7_msg->smbus = true;
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1799,7 +1799,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1880,7 +1880,7 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
+ 
+ 	WARN_ON(!i2c_dev->slave[id]);
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -2273,7 +2273,7 @@ static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
+ 	int ret;
+ 	struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -2295,7 +2295,7 @@ static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
+ 	int ret;
+ 	struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index 087b2951942eb..2a8568b97c14d 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -706,7 +706,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 	dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
+ 		xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
+ 
+-	err = pm_runtime_get_sync(i2c->dev);
++	err = pm_runtime_resume_and_get(i2c->dev);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -873,7 +873,7 @@ static int xiic_i2c_remove(struct platform_device *pdev)
+ 	/* remove adapter & data */
+ 	i2c_del_adapter(&i2c->adap);
+ 
+-	ret = pm_runtime_get_sync(i2c->dev);
++	ret = pm_runtime_resume_and_get(i2c->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index f8e9b7305c133..e2e12a5585e51 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -2535,7 +2535,7 @@ int i3c_master_register(struct i3c_master_controller *master,
+ 
+ 	ret = i3c_master_bus_init(master);
+ 	if (ret)
+-		goto err_destroy_wq;
++		goto err_put_dev;
+ 
+ 	ret = device_add(&master->dev);
+ 	if (ret)
+@@ -2566,9 +2566,6 @@ err_del_dev:
+ err_cleanup_bus:
+ 	i3c_master_bus_cleanup(master);
+ 
+-err_destroy_wq:
+-	destroy_workqueue(master->wq);
+-
+ err_put_dev:
+ 	put_device(&master->dev);
+ 
+diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
+index 3633a4e302c68..fe225990de24b 100644
+--- a/drivers/iio/accel/adis16201.c
++++ b/drivers/iio/accel/adis16201.c
+@@ -215,7 +215,7 @@ static const struct iio_chan_spec adis16201_channels[] = {
+ 	ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
+ 	ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
+ 			BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+-	ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
++	ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
+ 			BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ 	IIO_CHAN_SOFT_TIMESTAMP(7)
+ };
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index e0667c4b3c08a..dda0f1e37ec16 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -249,7 +249,7 @@ config AD799X
+ config AD9467
+ 	tristate "Analog Devices AD9467 High Speed ADC driver"
+ 	depends on SPI
+-	select ADI_AXI_ADC
++	depends on ADI_AXI_ADC
+ 	help
+ 	  Say yes here to build support for Analog Devices:
+ 	  * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
+diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
+index 17402714b3876..9e9ff07cf972b 100644
+--- a/drivers/iio/adc/ad7476.c
++++ b/drivers/iio/adc/ad7476.c
+@@ -321,25 +321,15 @@ static int ad7476_probe(struct spi_device *spi)
+ 	spi_message_init(&st->msg);
+ 	spi_message_add_tail(&st->xfer, &st->msg);
+ 
+-	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+-			&ad7476_trigger_handler, NULL);
++	ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
++					      &ad7476_trigger_handler, NULL);
+ 	if (ret)
+-		goto error_disable_reg;
++		return ret;
+ 
+ 	if (st->chip_info->reset)
+ 		st->chip_info->reset(st);
+ 
+-	ret = iio_device_register(indio_dev);
+-	if (ret)
+-		goto error_ring_unregister;
+-	return 0;
+-
+-error_ring_unregister:
+-	iio_triggered_buffer_cleanup(indio_dev);
+-error_disable_reg:
+-	regulator_disable(st->reg);
+-
+-	return ret;
++	return devm_iio_device_register(&spi->dev, indio_dev);
+ }
+ 
+ static const struct spi_device_id ad7476_id[] = {
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index dfe86c5893254..c41b8ef1e2509 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -10,6 +10,7 @@
+ #include <linux/of_irq.h>
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
++#include <linux/math.h>
+ #include <linux/mutex.h>
+ #include <linux/device.h>
+ #include <linux/kernel.h>
+@@ -17,6 +18,7 @@
+ #include <linux/slab.h>
+ #include <linux/sysfs.h>
+ #include <linux/module.h>
++#include <linux/lcm.h>
+ 
+ #include <linux/iio/iio.h>
+ #include <linux/iio/sysfs.h>
+@@ -170,6 +172,11 @@ static const char * const adis16480_int_pin_names[4] = {
+ 	[ADIS16480_PIN_DIO4] = "DIO4",
+ };
+ 
++static bool low_rate_allow;
++module_param(low_rate_allow, bool, 0444);
++MODULE_PARM_DESC(low_rate_allow,
++		 "Allow IMU rates below the minimum advisable when external clk is used in PPS mode (default: N)");
++
+ #ifdef CONFIG_DEBUG_FS
+ 
+ static ssize_t adis16480_show_firmware_revision(struct file *file,
+@@ -312,7 +319,8 @@ static int adis16480_debugfs_init(struct iio_dev *indio_dev)
+ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
+ {
+ 	struct adis16480 *st = iio_priv(indio_dev);
+-	unsigned int t, reg;
++	unsigned int t, sample_rate = st->clk_freq;
++	int ret;
+ 
+ 	if (val < 0 || val2 < 0)
+ 		return -EINVAL;
+@@ -321,28 +329,65 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
+ 	if (t == 0)
+ 		return -EINVAL;
+ 
++	mutex_lock(&st->adis.state_lock);
+ 	/*
+-	 * When using PPS mode, the rate of data collection is equal to the
+-	 * product of the external clock frequency and the scale factor in the
+-	 * SYNC_SCALE register.
+-	 * When using sync mode, or internal clock, the output data rate is
+-	 * equal with  the clock frequency divided by DEC_RATE + 1.
++	 * When using PPS mode, the input clock needs to be scaled so that we have an IMU
++	 * sample rate between (optimally) 4000 and 4250. After this, we can use the
++	 * decimation filter to lower the sampling rate in order to get what the user wants.
++	 * Optimally, the user sample rate is a multiple of both the IMU sample rate and
++	 * the input clock. Hence, calculating the sync_scale dynamically gives us better
++	 * chances of achieving a perfect/integer value for DEC_RATE. The math here is:
++	 *	1. lcm of the input clock and the desired output rate.
++	 *	2. get the highest multiple of the previous result lower than the adis max rate.
++	 *	3. The last result becomes the IMU sample rate. Use that to calculate SYNC_SCALE
++	 *	   and DEC_RATE (to get the user output rate)
+ 	 */
+ 	if (st->clk_mode == ADIS16480_CLK_PPS) {
+-		t = t / st->clk_freq;
+-		reg = ADIS16495_REG_SYNC_SCALE;
+-	} else {
+-		t = st->clk_freq / t;
+-		reg = ADIS16480_REG_DEC_RATE;
++		unsigned long scaled_rate = lcm(st->clk_freq, t);
++		int sync_scale;
++
++		/*
++		 * If lcm is bigger than the IMU maximum sampling rate there's no perfect
++		 * solution. In this case, we get the highest multiple of the input clock
++		 * lower than the IMU max sample rate.
++		 */
++		if (scaled_rate > st->chip_info->int_clk)
++			scaled_rate = st->chip_info->int_clk / st->clk_freq * st->clk_freq;
++		else
++			scaled_rate = st->chip_info->int_clk / scaled_rate * scaled_rate;
++
++		/*
++		 * This is not a hard requirement, but it's not advised to run the IMU
++		 * with a sample rate lower than 4000Hz due to possible undersampling
++		 * issues. However, there are users that might really want to take the risk.
++		 * Hence, we provide a module parameter for them. If set, we allow sample
++		 * rates lower than 4KHz. By default, we won't allow this and we just roundup
++		 * the rate to the next multiple of the input clock bigger than 4KHz. This
++		 * is done like this as in some cases (when DEC_RATE is 0) might give
++		 * us the closest value to the one desired by the user...
++		 */
++		if (scaled_rate < 4000000 && !low_rate_allow)
++			scaled_rate = roundup(4000000, st->clk_freq);
++
++		sync_scale = scaled_rate / st->clk_freq;
++		ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
++		if (ret)
++			goto error;
++
++		sample_rate = scaled_rate;
+ 	}
+ 
++	t = DIV_ROUND_CLOSEST(sample_rate, t);
++	if (t)
++		t--;
++
+ 	if (t > st->chip_info->max_dec_rate)
+ 		t = st->chip_info->max_dec_rate;
+ 
+-	if ((t != 0) && (st->clk_mode != ADIS16480_CLK_PPS))
+-		t--;
+-
+-	return adis_write_reg_16(&st->adis, reg, t);
++	ret = __adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
++error:
++	mutex_unlock(&st->adis.state_lock);
++	return ret;
+ }
+ 
+ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
+@@ -350,34 +395,35 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
+ 	struct adis16480 *st = iio_priv(indio_dev);
+ 	uint16_t t;
+ 	int ret;
+-	unsigned int freq;
+-	unsigned int reg;
++	unsigned int freq, sample_rate = st->clk_freq;
+ 
+-	if (st->clk_mode == ADIS16480_CLK_PPS)
+-		reg = ADIS16495_REG_SYNC_SCALE;
+-	else
+-		reg = ADIS16480_REG_DEC_RATE;
++	mutex_lock(&st->adis.state_lock);
++
++	if (st->clk_mode == ADIS16480_CLK_PPS) {
++		u16 sync_scale;
++
++		ret = __adis_read_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, &sync_scale);
++		if (ret)
++			goto error;
+ 
+-	ret = adis_read_reg_16(&st->adis, reg, &t);
++		sample_rate = st->clk_freq * sync_scale;
++	}
++
++	ret = __adis_read_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, &t);
+ 	if (ret)
+-		return ret;
++		goto error;
+ 
+-	/*
+-	 * When using PPS mode, the rate of data collection is equal to the
+-	 * product of the external clock frequency and the scale factor in the
+-	 * SYNC_SCALE register.
+-	 * When using sync mode, or internal clock, the output data rate is
+-	 * equal with  the clock frequency divided by DEC_RATE + 1.
+-	 */
+-	if (st->clk_mode == ADIS16480_CLK_PPS)
+-		freq = st->clk_freq * t;
+-	else
+-		freq = st->clk_freq / (t + 1);
++	mutex_unlock(&st->adis.state_lock);
++
++	freq = DIV_ROUND_CLOSEST(sample_rate, (t + 1));
+ 
+ 	*val = freq / 1000;
+ 	*val2 = (freq % 1000) * 1000;
+ 
+ 	return IIO_VAL_INT_PLUS_MICRO;
++error:
++	mutex_unlock(&st->adis.state_lock);
++	return ret;
+ }
+ 
+ enum {
+@@ -1278,6 +1324,20 @@ static int adis16480_probe(struct spi_device *spi)
+ 
+ 		st->clk_freq = clk_get_rate(st->ext_clk);
+ 		st->clk_freq *= 1000; /* micro */
++		if (st->clk_mode == ADIS16480_CLK_PPS) {
++			u16 sync_scale;
++
++			/*
++			 * In PPS mode, the IMU sample rate is the clk_freq * sync_scale. Hence,
++			 * default the IMU sample rate to the highest multiple of the input clock
++			 * lower than the IMU max sample rate. The internal sample rate is the
++			 * max...
++			 */
++			sync_scale = st->chip_info->int_clk / st->clk_freq;
++			ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
++			if (ret)
++				return ret;
++		}
+ 	} else {
+ 		st->clk_freq = st->chip_info->int_clk;
+ 	}
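
The comment block in adis16480_set_freq() above compresses the whole PPS-mode derivation into three steps. Below is a standalone sketch of that math with gcd()/lcm() written out; the sample values are purely illustrative, and the driver itself works in milli-Hz after the clk_freq *= 1000 scaling in probe:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
	return a / gcd(a, b) * b;
}

int main(void)
{
	unsigned long clk_freq = 1000;	/* external PPS clock in Hz (illustrative) */
	unsigned long t = 400;		/* rate requested by the user, Hz */
	unsigned long imu_max = 4250;	/* stand-in for chip_info->int_clk */
	unsigned long scaled = lcm(clk_freq, t);

	/* No perfect solution past the IMU max: fall back to the clock. */
	if (scaled > imu_max)
		scaled = imu_max / clk_freq * clk_freq;
	else
		scaled = imu_max / scaled * scaled;

	/* SYNC_SCALE, then DEC_RATE as DIV_ROUND_CLOSEST(scaled, t) - 1 */
	printf("sync_scale=%lu dec_rate=%lu\n",
	       scaled / clk_freq, (scaled + t / 2) / t - 1);
	return 0;
}

With these inputs the IMU runs at 4000Hz (sync_scale 4) and DEC_RATE 9 yields exactly the requested 400Hz, which is the integer-friendly outcome the lcm() step is after.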
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+index 453c51c796555..69ab94ab72975 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+@@ -731,12 +731,16 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+ 	}
+ }
+ 
+-static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
++static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val,
++					int val2)
+ {
+ 	int result, i;
+ 
++	if (val != 0)
++		return -EINVAL;
++
+ 	for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
+-		if (gyro_scale_6050[i] == val) {
++		if (gyro_scale_6050[i] == val2) {
+ 			result = inv_mpu6050_set_gyro_fsr(st, i);
+ 			if (result)
+ 				return result;
+@@ -767,13 +771,17 @@ static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
+ 	return -EINVAL;
+ }
+ 
+-static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
++static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val,
++					 int val2)
+ {
+ 	int result, i;
+ 	u8 d;
+ 
++	if (val != 0)
++		return -EINVAL;
++
+ 	for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
+-		if (accel_scale[i] == val) {
++		if (accel_scale[i] == val2) {
+ 			d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+ 			result = regmap_write(st->map, st->reg->accl_config, d);
+ 			if (result)
+@@ -814,10 +822,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
+ 	case IIO_CHAN_INFO_SCALE:
+ 		switch (chan->type) {
+ 		case IIO_ANGL_VEL:
+-			result = inv_mpu6050_write_gyro_scale(st, val2);
++			result = inv_mpu6050_write_gyro_scale(st, val, val2);
+ 			break;
+ 		case IIO_ACCEL:
+-			result = inv_mpu6050_write_accel_scale(st, val2);
++			result = inv_mpu6050_write_accel_scale(st, val, val2);
+ 			break;
+ 		default:
+ 			result = -EINVAL;
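
The root cause fixed here is how IIO splits a fixed-point number across write_raw(): val carries the integer part and val2 the fractional part (micro units for IIO_VAL_INT_PLUS_MICRO), so passing only val2 into the scale helpers silently dropped the integer digits. A hedged sketch of the split; the scale constant is illustrative, not an entry from the driver's tables:

#include <stdio.h>

int main(void)
{
	double scale = 0.000598;	/* e.g. an accel scale, illustrative */
	int val = (int)scale;		/* integer part: 0 */
	int val2 = (int)((scale - val) * 1000000.0 + 0.5);	/* micro part: 598 */

	/* The fix rejects val != 0 up front, since every valid scale in
	 * these tables is a pure sub-unit (fractional) value. */
	printf("val=%d val2=%d accepted=%s\n", val, val2, val ? "no" : "yes");
	return 0;
}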
+diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
+index d46f23d82b3da..2f2f8cb3c26cd 100644
+--- a/drivers/iio/magnetometer/yamaha-yas530.c
++++ b/drivers/iio/magnetometer/yamaha-yas530.c
+@@ -32,13 +32,14 @@
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/random.h>
+-#include <linux/unaligned/be_byteshift.h>
+ 
+ #include <linux/iio/buffer.h>
+ #include <linux/iio/iio.h>
+ #include <linux/iio/trigger_consumer.h>
+ #include <linux/iio/triggered_buffer.h>
+ 
++#include <asm/unaligned.h>
++
+ /* This register map covers YAS530 and YAS532 but differs in YAS 537 and YAS539 */
+ #define YAS5XX_DEVICE_ID		0x80
+ #define YAS5XX_ACTUATE_INIT_COIL	0x81
+@@ -887,6 +888,7 @@ static int yas5xx_probe(struct i2c_client *i2c,
+ 		strncpy(yas5xx->name, "yas532", sizeof(yas5xx->name));
+ 		break;
+ 	default:
++		ret = -ENODEV;
+ 		dev_err(dev, "unhandled device ID %02x\n", yas5xx->devid);
+ 		goto assert_reset;
+ 	}
+diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
+index 18e4ef0600963..c087d8f72a546 100644
+--- a/drivers/iio/orientation/hid-sensor-rotation.c
++++ b/drivers/iio/orientation/hid-sensor-rotation.c
+@@ -21,7 +21,7 @@ struct dev_rot_state {
+ 	struct hid_sensor_common common_attributes;
+ 	struct hid_sensor_hub_attribute_info quaternion;
+ 	struct {
+-		u32 sampled_vals[4] __aligned(16);
++		s32 sampled_vals[4] __aligned(16);
+ 		u64 timestamp __aligned(8);
+ 	} scan;
+ 	int scale_pre_decml;
+@@ -170,8 +170,15 @@ static int dev_rot_capture_sample(struct hid_sensor_hub_device *hsdev,
+ 	struct dev_rot_state *rot_state = iio_priv(indio_dev);
+ 
+ 	if (usage_id == HID_USAGE_SENSOR_ORIENT_QUATERNION) {
+-		memcpy(&rot_state->scan.sampled_vals, raw_data,
+-		       sizeof(rot_state->scan.sampled_vals));
++		if (raw_len / 4 == sizeof(s16)) {
++			rot_state->scan.sampled_vals[0] = ((s16 *)raw_data)[0];
++			rot_state->scan.sampled_vals[1] = ((s16 *)raw_data)[1];
++			rot_state->scan.sampled_vals[2] = ((s16 *)raw_data)[2];
++			rot_state->scan.sampled_vals[3] = ((s16 *)raw_data)[3];
++		} else {
++			memcpy(&rot_state->scan.sampled_vals, raw_data,
++			       sizeof(rot_state->scan.sampled_vals));
++		}
+ 
+ 		dev_dbg(&indio_dev->dev, "Recd Quat len:%zu::%zu\n", raw_len,
+ 			sizeof(rot_state->scan.sampled_vals));
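
The new branch matters because the scan buffer is now s32 while some sensor hubs report 16-bit quaternion samples: assigning through an s16 pointer sign-extends each value, which the old blind memcpy() into 32-bit slots could not do. A userspace sketch of that widening, with byte values chosen to exercise the sign bit; a little-endian host and suitable alignment are assumed, as they are for a HID report buffer:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 8-byte report: four little-endian 16-bit quaternion components */
	uint8_t raw_data[8] = { 0xff, 0x7f, 0x00, 0x80, 0x01, 0x00, 0xff, 0xff };
	size_t raw_len = sizeof(raw_data);
	int32_t sampled_vals[4] = { 0 };

	if (raw_len / 4 == sizeof(int16_t)) {	/* 16 bits per component */
		for (int i = 0; i < 4; i++)
			sampled_vals[i] = ((int16_t *)raw_data)[i];
	}

	/* Prints 32767 -32768 1 -1: negative samples survive the widening. */
	for (int i = 0; i < 4; i++)
		printf("%d ", sampled_vals[i]);
	printf("\n");
	return 0;
}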
+diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
+index 37fd0b65a0140..ea82cfaf7f427 100644
+--- a/drivers/iio/proximity/sx9310.c
++++ b/drivers/iio/proximity/sx9310.c
+@@ -763,7 +763,11 @@ static int sx9310_write_far_debounce(struct sx9310_data *data, int val)
+ 	int ret;
+ 	unsigned int regval;
+ 
+-	val = ilog2(val);
++	if (val > 0)
++		val = ilog2(val);
++	if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val))
++		return -EINVAL;
++
+ 	regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val);
+ 
+ 	mutex_lock(&data->mutex);
+@@ -780,7 +784,11 @@ static int sx9310_write_close_debounce(struct sx9310_data *data, int val)
+ 	int ret;
+ 	unsigned int regval;
+ 
+-	val = ilog2(val);
++	if (val > 0)
++		val = ilog2(val);
++	if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val))
++		return -EINVAL;
++
+ 	regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val);
+ 
+ 	mutex_lock(&data->mutex);
+@@ -1213,17 +1221,17 @@ static int sx9310_init_compensation(struct iio_dev *indio_dev)
+ }
+ 
+ static const struct sx9310_reg_default *
+-sx9310_get_default_reg(struct sx9310_data *data, int i,
++sx9310_get_default_reg(struct sx9310_data *data, int idx,
+ 		       struct sx9310_reg_default *reg_def)
+ {
+-	int ret;
+ 	const struct device_node *np = data->client->dev.of_node;
+-	u32 combined[SX9310_NUM_CHANNELS] = { 4, 4, 4, 4 };
++	u32 combined[SX9310_NUM_CHANNELS];
++	u32 start = 0, raw = 0, pos = 0;
+ 	unsigned long comb_mask = 0;
++	int ret, i, count;
+ 	const char *res;
+-	u32 start = 0, raw = 0, pos = 0;
+ 
+-	memcpy(reg_def, &sx9310_default_regs[i], sizeof(*reg_def));
++	memcpy(reg_def, &sx9310_default_regs[idx], sizeof(*reg_def));
+ 	if (!np)
+ 		return reg_def;
+ 
+@@ -1234,15 +1242,31 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
+ 			reg_def->def |= SX9310_REG_PROX_CTRL2_SHIELDEN_GROUND;
+ 		}
+ 
+-		reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
+-		of_property_read_u32_array(np, "semtech,combined-sensors",
+-					   combined, ARRAY_SIZE(combined));
+-		for (i = 0; i < ARRAY_SIZE(combined); i++) {
+-			if (combined[i] <= SX9310_NUM_CHANNELS)
+-				comb_mask |= BIT(combined[i]);
++		count = of_property_count_elems_of_size(np, "semtech,combined-sensors",
++							sizeof(u32));
++		if (count > 0 && count <= ARRAY_SIZE(combined)) {
++			ret = of_property_read_u32_array(np, "semtech,combined-sensors",
++							 combined, count);
++			if (ret)
++				break;
++		} else {
++			/*
++			 * Either the property does not exist in the DT or the
++			 * number of entries is incorrect.
++			 */
++			break;
+ 		}
++		for (i = 0; i < count; i++) {
++			if (combined[i] >= SX9310_NUM_CHANNELS) {
++				/* Invalid sensor (invalid DT). */
++				break;
++			}
++			comb_mask |= BIT(combined[i]);
++		}
++		if (i < count)
++			break;
+ 
+-		comb_mask &= 0xf;
++		reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
+ 		if (comb_mask == (BIT(3) | BIT(2) | BIT(1) | BIT(0)))
+ 			reg_def->def |= SX9310_REG_PROX_CTRL2_COMBMODE_CS0_CS1_CS2_CS3;
+ 		else if (comb_mask == (BIT(1) | BIT(2)))
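
The debounce fix near the top of this hunk guards two hazards: ilog2(0) is undefined, and the computed exponent may not fit the register field. A minimal userspace model of the same checks; ilog2_u() and DEBOUNCE_MASK stand in for the kernel's ilog2()/FIELD_FIT() and the real SX9310 mask:

#include <stdio.h>

#define DEBOUNCE_MASK 0xf	/* illustrative, not the real field width */

static int ilog2_u(unsigned int v)
{
	int l = -1;

	while (v) {		/* floor(log2(v)); -1 signals v == 0 */
		v >>= 1;
		l++;
	}
	return l;
}

static int write_debounce(int val)
{
	if (val > 0)
		val = ilog2_u(val);	/* hardware stores the exponent */
	if (val < 0 || val > DEBOUNCE_MASK)
		return -1;		/* -EINVAL: does not fit the field */
	return val;			/* value to FIELD_PREP() into the register */
}

int main(void)
{
	printf("%d %d %d\n",
	       write_debounce(0),	/* 0: debounce off, stored as-is */
	       write_debounce(8),	/* 8 samples -> exponent 3 */
	       write_debounce(1 << 20));/* too large for the 4-bit field */
	return 0;
}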
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 3d194bb608405..6adbaea358aeb 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -2138,7 +2138,8 @@ static int cm_req_handler(struct cm_work *work)
+ 		goto destroy;
+ 	}
+ 
+-	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
++	if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
++		cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
+ 
+ 	memset(&work->path[0], 0, sizeof(work->path[0]));
+ 	if (cm_req_has_alt_path(req_msg))
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 94096511599f0..6ac07911a17bd 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -463,7 +463,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
+ 	id_priv->id.route.addr.dev_addr.transport =
+ 		rdma_node_get_transport(cma_dev->device->node_type);
+ 	list_add_tail(&id_priv->list, &cma_dev->id_list);
+-	rdma_restrack_add(&id_priv->res);
+ 
+ 	trace_cm_id_attach(id_priv, cma_dev->device);
+ }
+@@ -700,6 +699,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
+ 	mutex_lock(&lock);
+ 	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
+ 	mutex_unlock(&lock);
++	rdma_restrack_add(&id_priv->res);
+ 	return 0;
+ }
+ 
+@@ -754,8 +754,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
+ 	}
+ 
+ out:
+-	if (!ret)
++	if (!ret) {
+ 		cma_attach_to_dev(id_priv, cma_dev);
++		rdma_restrack_add(&id_priv->res);
++	}
+ 
+ 	mutex_unlock(&lock);
+ 	return ret;
+@@ -816,6 +818,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
+ 
+ found:
+ 	cma_attach_to_dev(id_priv, cma_dev);
++	rdma_restrack_add(&id_priv->res);
+ 	mutex_unlock(&lock);
+ 	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+ 	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
+@@ -2529,6 +2532,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
+ 	       rdma_addr_size(cma_src_addr(id_priv)));
+ 
+ 	_cma_attach_to_dev(dev_id_priv, cma_dev);
++	rdma_restrack_add(&dev_id_priv->res);
+ 	cma_id_get(id_priv);
+ 	dev_id_priv->internal_id = 1;
+ 	dev_id_priv->afonly = id_priv->afonly;
+@@ -3169,6 +3173,7 @@ port_found:
+ 	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
+ 	id_priv->id.port_num = p;
+ 	cma_attach_to_dev(id_priv, cma_dev);
++	rdma_restrack_add(&id_priv->res);
+ 	cma_set_loopback(cma_src_addr(id_priv));
+ out:
+ 	mutex_unlock(&lock);
+@@ -3201,6 +3206,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
+ 		if (status)
+ 			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
+ 					     status);
++		rdma_restrack_add(&id_priv->res);
+ 	} else if (status) {
+ 		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
+ 	}
+@@ -3812,6 +3818,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+ 	if (ret)
+ 		goto err2;
+ 
++	if (!cma_any_addr(addr))
++		rdma_restrack_add(&id_priv->res);
+ 	return 0;
+ err2:
+ 	if (id_priv->cma_dev)
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 995d4633b0a1c..d4d4959c2434c 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -2784,6 +2784,7 @@ do_rq:
+ 		dev_err(&cq->hwq.pdev->dev,
+ 			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
+ 			cqe_cons, rq->max_wqe);
++		rc = -EINVAL;
+ 		goto done;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index fa7878336100a..3ca47004b7527 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -854,6 +854,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
+ 
+ unmap_io:
+ 	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
++	dpit->dbr_bar_reg_iomem = NULL;
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
+index 5c95c789f302d..e800e8e8bed5a 100644
+--- a/drivers/infiniband/hw/cxgb4/resource.c
++++ b/drivers/infiniband/hw/cxgb4/resource.c
+@@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+ 			goto out;
+ 		entry->qid = qid;
+ 		list_add_tail(&entry->entry, &uctx->cqids);
+-		for (i = qid; i & rdev->qpmask; i++) {
++		for (i = qid + 1; i & rdev->qpmask; i++) {
+ 			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ 			if (!entry)
+ 				goto out;
+diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
+index 0e83d4b61e463..2cf102b5abd44 100644
+--- a/drivers/infiniband/hw/hfi1/firmware.c
++++ b/drivers/infiniband/hw/hfi1/firmware.c
+@@ -1916,6 +1916,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ 			dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
+ 				   __func__, (ptr -
+ 				   (u32 *)dd->platform_config.data));
++			ret = -EINVAL;
+ 			goto bail;
+ 		}
+ 		/* Jump the CRC DWORD */
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
+index f3fb28e3d5d74..d213f65d4cdd0 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
+@@ -89,7 +89,7 @@ int hfi1_mmu_rb_register(void *ops_arg,
+ 	struct mmu_rb_handler *h;
+ 	int ret;
+ 
+-	h = kmalloc(sizeof(*h), GFP_KERNEL);
++	h = kzalloc(sizeof(*h), GFP_KERNEL);
+ 	if (!h)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index ce26f97b2ca26..ad3cee54140e1 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -5068,6 +5068,7 @@ done:
+ 	qp_attr->cur_qp_state = qp_attr->qp_state;
+ 	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+ 	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
++	qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
+ 
+ 	if (!ibqp->uobject) {
+ 		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
+index 53e5cd1a2bd6e..146a4148219ba 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
+@@ -393,12 +393,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+ 	i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
+ 		    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
+ 	pble_rsrc->unallocated_pble -= (chunk->size >> 3);
+-	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+ 	sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
+ 			sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
+-	if (sd_entry->valid)
+-		return 0;
+-	if (dev->is_pf) {
++	if (dev->is_pf && !sd_entry->valid) {
+ 		ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
+ 					    sd_reg_val, idx->sd_idx,
+ 					    sd_entry->entry_type, true);
+@@ -409,6 +406,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+ 	}
+ 
+ 	sd_entry->valid = true;
++	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+ 	return 0;
+  error:
+ 	kfree(chunk);
+diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
+index 25da0b05b4e2f..f0af3f1ae0398 100644
+--- a/drivers/infiniband/hw/mlx5/fs.c
++++ b/drivers/infiniband/hw/mlx5/fs.c
+@@ -1528,8 +1528,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
+ 		dst_num++;
+ 	}
+ 
+-	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
+-					flow_context, flow_act,
++	handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
++					fs_matcher, flow_context, flow_act,
+ 					cmd_in, inlen, dst_num);
+ 
+ 	if (IS_ERR(handler)) {
+@@ -1885,8 +1885,9 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
+ 		else
+ 			*dest_id = mqp->raw_packet_qp.rq.tirn;
+ 		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+-	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
+-		   fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
++	} else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
++		    fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
++		   !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
+ 		*dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 0d69a697d75f3..4be7bccefaa40 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -499,7 +499,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
+ 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
+ 				 &props->active_width, ext);
+ 
+-	if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
++	if (!dev->is_rep && dev->mdev->roce.roce_en) {
+ 		u16 qkey_viol_cntr;
+ 
+ 		props->port_cap_flags |= IB_PORT_CM_SUP;
+@@ -4174,7 +4174,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
+ 
+ 		/* Register only for native ports */
+ 		err = mlx5_add_netdev_notifier(dev, port_num);
+-		if (err || dev->is_rep || !mlx5_is_roce_enabled(mdev))
++		if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
+ 			/*
+ 			 * We don't enable ETH interface for
+ 			 * 1. IB representors
+@@ -4711,7 +4711,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
+ 	dev->mdev = mdev;
+ 	dev->num_ports = num_ports;
+ 
+-	if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
++	if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
+ 		profile = &raw_eth_profile;
+ 	else
+ 		profile = &pf_profile;
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 88cc26e008fce..b085c02b53d0e 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -547,11 +547,6 @@ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
+ 	return container_of(wr, struct mlx5_umr_wr, wr);
+ }
+ 
+-struct mlx5_shared_mr_info {
+-	int mr_id;
+-	struct ib_umem		*umem;
+-};
+-
+ enum mlx5_ib_cq_pr_flags {
+ 	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
+ };
+@@ -654,47 +649,69 @@ struct mlx5_ib_dm {
+ 	atomic64_add(value, &((mr)->odp_stats.counter_name))
+ 
+ struct mlx5_ib_mr {
+-	struct ib_mr		ibmr;
+-	void			*descs;
+-	dma_addr_t		desc_map;
+-	int			ndescs;
+-	int			data_length;
+-	int			meta_ndescs;
+-	int			meta_length;
+-	int			max_descs;
+-	int			desc_size;
+-	int			access_mode;
+-	unsigned int		page_shift;
+-	struct mlx5_core_mkey	mmkey;
+-	struct ib_umem	       *umem;
+-	struct mlx5_shared_mr_info	*smr_info;
+-	struct list_head	list;
+-	struct mlx5_cache_ent  *cache_ent;
+-	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
+-	struct mlx5_core_sig_ctx    *sig;
+-	void			*descs_alloc;
+-	int			access_flags; /* Needed for rereg MR */
+-
+-	struct mlx5_ib_mr      *parent;
+-	/* Needed for IB_MR_TYPE_INTEGRITY */
+-	struct mlx5_ib_mr      *pi_mr;
+-	struct mlx5_ib_mr      *klm_mr;
+-	struct mlx5_ib_mr      *mtt_mr;
+-	u64			data_iova;
+-	u64			pi_iova;
+-
+-	/* For ODP and implicit */
+-	struct xarray		implicit_children;
+-	union {
+-		struct list_head elm;
+-		struct work_struct work;
+-	} odp_destroy;
+-	struct ib_odp_counters	odp_stats;
+-	bool			is_odp_implicit;
++	struct ib_mr ibmr;
++	struct mlx5_core_mkey mmkey;
+ 
+-	struct mlx5_async_work  cb_work;
++	/* User MR data */
++	struct mlx5_cache_ent *cache_ent;
++	struct ib_umem *umem;
++
++	/* This is zero'd when the MR is allocated */
++	struct {
++		/* Used only while the MR is in the cache */
++		struct {
++			u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
++			struct mlx5_async_work cb_work;
++			/* Cache list element */
++			struct list_head list;
++		};
++
++		/* Used only by kernel MRs (umem == NULL) */
++		struct {
++			void *descs;
++			void *descs_alloc;
++			dma_addr_t desc_map;
++			int max_descs;
++			int ndescs;
++			int desc_size;
++			int access_mode;
++
++			/* For Kernel IB_MR_TYPE_INTEGRITY */
++			struct mlx5_core_sig_ctx *sig;
++			struct mlx5_ib_mr *pi_mr;
++			struct mlx5_ib_mr *klm_mr;
++			struct mlx5_ib_mr *mtt_mr;
++			u64 data_iova;
++			u64 pi_iova;
++			int meta_ndescs;
++			int meta_length;
++			int data_length;
++		};
++
++		/* Used only by User MRs (umem != NULL) */
++		struct {
++			unsigned int page_shift;
++			/* Current access_flags */
++			int access_flags;
++
++			/* For User ODP */
++			struct mlx5_ib_mr *parent;
++			struct xarray implicit_children;
++			union {
++				struct work_struct work;
++			} odp_destroy;
++			struct ib_odp_counters odp_stats;
++			bool is_odp_implicit;
++		};
++	};
+ };
+ 
++/* Zero the fields in the mr that vary depending on usage */
++static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
++{
++	memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
++}
++
+ static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
+ {
+ 	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
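
mlx5_clear_mr() works only because of the reordering above: every reuse-variant field now sits at or after 'out', so a single memset() from offsetof(..., out) to the end of the struct wipes exactly that region while the stable header (ibmr, mmkey, cache_ent, umem) is preserved. A toy model of the pattern, with illustrative field names:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct mr {
	int ibmr;	/* stable across reuse */
	int mmkey;	/* stable across reuse */
	int out;	/* first variant field */
	int ndescs;
	void *descs;
};

static void clear_mr(struct mr *mr)
{
	/* Same shape as mlx5_clear_mr(): one memset from the first
	 * variant member to the end of the struct. */
	memset(&mr->out, 0, sizeof(*mr) - offsetof(struct mr, out));
}

int main(void)
{
	struct mr mr = { .ibmr = 1, .mmkey = 2, .out = 3, .ndescs = 4 };

	clear_mr(&mr);
	printf("ibmr=%d mmkey=%d out=%d ndescs=%d\n",
	       mr.ibmr, mr.mmkey, mr.out, mr.ndescs);
	return 0;
}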
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index db05b0e0a8d7a..ea8f068a6da3e 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -590,6 +590,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ 		ent->available_mrs--;
+ 		queue_adjust_cache_locked(ent);
+ 		spin_unlock_irq(&ent->lock);
++
++		mlx5_clear_mr(mr);
+ 	}
+ 	mr->access_flags = access_flags;
+ 	return mr;
+@@ -615,16 +617,14 @@ static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
+ 			ent->available_mrs--;
+ 			queue_adjust_cache_locked(ent);
+ 			spin_unlock_irq(&ent->lock);
+-			break;
++			mlx5_clear_mr(mr);
++			return mr;
+ 		}
+ 		queue_adjust_cache_locked(ent);
+ 		spin_unlock_irq(&ent->lock);
+ 	}
+-
+-	if (!mr)
+-		req_ent->miss++;
+-
+-	return mr;
++	req_ent->miss++;
++	return NULL;
+ }
+ 
+ static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
+@@ -993,8 +993,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
+ 
+ 	mr->ibmr.pd = pd;
+ 	mr->umem = umem;
+-	mr->access_flags = access_flags;
+-	mr->desc_size = sizeof(struct mlx5_mtt);
+ 	mr->mmkey.iova = iova;
+ 	mr->mmkey.size = umem->length;
+ 	mr->mmkey.pd = to_mpd(pd)->pdn;
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index b103555b1f5d4..d98755e78362f 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -227,7 +227,6 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
+ 
+ 	dma_fence_odp_mr(mr);
+ 
+-	mr->parent = NULL;
+ 	mlx5_mr_cache_free(mr_to_mdev(mr), mr);
+ 	ib_umem_odp_release(odp);
+ }
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index f5a52a6fae437..843f9e7fe96ff 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3146,6 +3146,19 @@ enum {
+ 	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
+ };
+ 
++static int mlx5_to_ib_rate_map(u8 rate)
++{
++	static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
++				     IB_RATE_25_GBPS,	   IB_RATE_100_GBPS,
++				     IB_RATE_200_GBPS,	   IB_RATE_50_GBPS,
++				     IB_RATE_400_GBPS };
++
++	if (rate < ARRAY_SIZE(rates))
++		return rates[rate];
++
++	return rate - MLX5_STAT_RATE_OFFSET;
++}
++
+ static int ib_to_mlx5_rate_map(u8 rate)
+ {
+ 	switch (rate) {
+@@ -4485,7 +4498,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
+ 	rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
+ 
+ 	static_rate = MLX5_GET(ads, path, stat_rate);
+-	rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
++	rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
+ 	if (MLX5_GET(ads, path, grh) ||
+ 	    ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ 		rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
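
mlx5_to_ib_rate_map() replaces the old "static_rate ? static_rate - 5 : 0" guess with an explicit table for the low hardware codes, keeping the offset arithmetic only as the fallback for codes past the table. A compilable sketch of that decode; the enum values are stand-ins, not the real IB_RATE_* constants:

#include <stdio.h>

enum { RATE_PORT_CURRENT = 0, RATE_56_GBPS = 17, RATE_25_GBPS = 15,
       RATE_100_GBPS = 16, RATE_200_GBPS = 18, RATE_50_GBPS = 14,
       RATE_400_GBPS = 19 };	/* illustrative numbering */
#define STAT_RATE_OFFSET 5

static int to_ib_rate(unsigned char rate)
{
	static const int rates[] = { RATE_PORT_CURRENT, RATE_56_GBPS,
				     RATE_25_GBPS, RATE_100_GBPS,
				     RATE_200_GBPS, RATE_50_GBPS,
				     RATE_400_GBPS };

	if (rate < sizeof(rates) / sizeof(rates[0]))
		return rates[rate];
	return rate - STAT_RATE_OFFSET;	/* legacy linear encoding */
}

int main(void)
{
	/* 0 and 3 hit the table; 7 falls through to the offset path. */
	printf("%d %d %d\n", to_ib_rate(0), to_ib_rate(3), to_ib_rate(7));
	return 0;
}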
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index c4bc58736e489..1715fbe0719d8 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -636,8 +636,10 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
+ 
+ 	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+-			     &qp->iwarp_cm_flags))
++			     &qp->iwarp_cm_flags)) {
++		rc = -ENODEV;
+ 		goto err; /* QP already being destroyed */
++	}
+ 
+ 	rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
+ 	if (rc) {
+diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
+index df0d173d6acba..da2e867a1ed93 100644
+--- a/drivers/infiniband/sw/rxe/rxe_av.c
++++ b/drivers/infiniband/sw/rxe/rxe_av.c
+@@ -88,7 +88,7 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
+ 		type = RXE_NETWORK_TYPE_IPV4;
+ 		break;
+ 	case RDMA_NETWORK_IPV6:
+-		type = RXE_NETWORK_TYPE_IPV4;
++		type = RXE_NETWORK_TYPE_IPV6;
+ 		break;
+ 	default:
+ 		/* not reached - checked in rxe_av_chk_attr */
+diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
+index 34a910cf0edbd..61c17db70d658 100644
+--- a/drivers/infiniband/sw/siw/siw_mem.c
++++ b/drivers/infiniband/sw/siw/siw_mem.c
+@@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
+ 	mem->perms = rights & IWARP_ACCESS_MASK;
+ 	kref_init(&mem->ref);
+ 
+-	mr->mem = mem;
+-
+ 	get_random_bytes(&next, 4);
+ 	next &= 0x00ffffff;
+ 
+@@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
+ 		kfree(mem);
+ 		return -ENOMEM;
+ 	}
++
++	mr->mem = mem;
+ 	/* Set the STag index part */
+ 	mem->stag = id << 8;
+ 	mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 7305ed8976c24..18266f07c58d9 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -438,23 +438,23 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	isert_init_conn(isert_conn);
+ 	isert_conn->cm_id = cma_id;
+ 
+-	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
+-	if (ret)
+-		goto out;
+-
+ 	device = isert_device_get(cma_id);
+ 	if (IS_ERR(device)) {
+ 		ret = PTR_ERR(device);
+-		goto out_rsp_dma_map;
++		goto out;
+ 	}
+ 	isert_conn->device = device;
+ 
++	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
++	if (ret)
++		goto out_conn_dev;
++
+ 	isert_set_nego_params(isert_conn, &event->param.conn);
+ 
+ 	isert_conn->qp = isert_create_qp(isert_conn, cma_id);
+ 	if (IS_ERR(isert_conn->qp)) {
+ 		ret = PTR_ERR(isert_conn->qp);
+-		goto out_conn_dev;
++		goto out_rsp_dma_map;
+ 	}
+ 
+ 	ret = isert_login_post_recv(isert_conn);
+@@ -473,10 +473,10 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 
+ out_destroy_qp:
+ 	isert_destroy_qp(isert_conn);
+-out_conn_dev:
+-	isert_device_put(device);
+ out_rsp_dma_map:
+ 	isert_free_login_buf(isert_conn);
++out_conn_dev:
++	isert_device_put(device);
+ out:
+ 	kfree(isert_conn);
+ 	rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 6734329cca332..959ba0462ef07 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -2784,8 +2784,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+ 	} while (!changed && old_state != RTRS_CLT_DEAD);
+ 
+ 	if (likely(changed)) {
+-		rtrs_clt_destroy_sess_files(sess, sysfs_self);
+ 		rtrs_clt_remove_path_from_arr(sess);
++		rtrs_clt_destroy_sess_files(sess, sysfs_self);
+ 		kobject_put(&sess->kobj);
+ 	}
+ 
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 6be60aa5ffe21..7f0420ad90575 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -2378,6 +2378,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ 		pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
+ 			dev_name(&sdev->device->dev), port_num);
+ 		mutex_unlock(&sport->mutex);
++		ret = -EINVAL;
+ 		goto reject;
+ 	}
+ 
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 321f5906e6ed3..f7e31018cd0b9 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -1837,7 +1837,7 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
+ 	 * IVHD and MMIO conflict.
+ 	 */
+ 	if (features != iommu->features)
+-		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).",
++		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
+ 			features, iommu->features);
+ }
+ 
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+index f985817c967a2..230b6f6b39016 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+@@ -115,7 +115,7 @@
+ #define GERROR_PRIQ_ABT_ERR		(1 << 3)
+ #define GERROR_EVTQ_ABT_ERR		(1 << 2)
+ #define GERROR_CMDQ_ERR			(1 << 0)
+-#define GERROR_ERR_MASK			0xfd
++#define GERROR_ERR_MASK			0x1fd
+ 
+ #define ARM_SMMU_GERRORN		0x64
+ 
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index af765c813cc84..fdd095e1fa521 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -52,6 +52,17 @@ struct iommu_dma_cookie {
+ };
+ 
+ static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
++bool iommu_dma_forcedac __read_mostly;
++
++static int __init iommu_dma_forcedac_setup(char *str)
++{
++	int ret = kstrtobool(str, &iommu_dma_forcedac);
++
++	if (!ret && iommu_dma_forcedac)
++		pr_info("Forcing DAC for PCI devices\n");
++	return ret;
++}
++early_param("iommu.forcedac", iommu_dma_forcedac_setup);
+ 
+ void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+ 		struct iommu_domain *domain)
+@@ -444,7 +455,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
+ 		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
+ 
+ 	/* Try to get PCI devices a SAC address */
+-	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
++	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
+ 		iova = alloc_iova_fast(iovad, iova_len,
+ 				       DMA_BIT_MASK(32) >> shift, false);
+ 
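
iommu.forcedac generalizes what used to be the Intel-only intel_iommu=forcedac knob: a boolean early_param that disables the "try a 32-bit SAC address first" heuristic in iommu_dma_alloc_iova(). A userspace sketch of the switch; parse_bool() only approximates kstrtobool(), and the mask values are illustrative:

#include <stdio.h>
#include <string.h>

static int parse_bool(const char *s, int *res)
{
	if (!strcmp(s, "1") || !strcmp(s, "y") || !strcmp(s, "on"))
		*res = 1;
	else if (!strcmp(s, "0") || !strcmp(s, "n") || !strcmp(s, "off"))
		*res = 0;
	else
		return -1;
	return 0;
}

int main(void)
{
	int forcedac = 0;
	unsigned long long dma_limit = (1ULL << 48) - 1;	/* device mask */

	if (parse_bool("1", &forcedac) == 0 && forcedac)
		puts("Forcing DAC for PCI devices");

	/* Allocation policy mirroring iommu_dma_alloc_iova(): */
	if (dma_limit > 0xffffffffULL && !forcedac)
		puts("first try an IOVA below 4GiB (SAC)");
	else
		puts("go straight to the full mask (DAC)");
	return 0;
}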
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 881c9f2a5c7d9..7e551da6c1fbd 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -360,7 +360,6 @@ int intel_iommu_enabled = 0;
+ EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+ 
+ static int dmar_map_gfx = 1;
+-static int dmar_forcedac;
+ static int intel_iommu_strict;
+ static int intel_iommu_superpage = 1;
+ static int iommu_identity_mapping;
+@@ -451,8 +450,8 @@ static int __init intel_iommu_setup(char *str)
+ 			dmar_map_gfx = 0;
+ 			pr_info("Disable GFX device mapping\n");
+ 		} else if (!strncmp(str, "forcedac", 8)) {
+-			pr_info("Forcing DAC for PCI devices\n");
+-			dmar_forcedac = 1;
++			pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
++			iommu_dma_forcedac = true;
+ 		} else if (!strncmp(str, "strict", 6)) {
+ 			pr_info("Disable batched IOTLB flush\n");
+ 			intel_iommu_strict = 1;
+@@ -658,7 +657,14 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
+ 	rcu_read_lock();
+ 	for_each_active_iommu(iommu, drhd) {
+ 		if (iommu != skip) {
+-			if (!ecap_sc_support(iommu->ecap)) {
++			/*
++			 * If the hardware is operating in the scalable mode,
++			 * the snooping control is always supported since we
++			 * always set the PASID-table-entry.PGSNP bit if the
++			 * domain is managed externally (UNMANAGED).
++			 */
++			if (!sm_supported(iommu) &&
++			    !ecap_sc_support(iommu->ecap)) {
+ 				ret = 0;
+ 				break;
+ 			}
+@@ -1340,6 +1346,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
+ 		      readl, (sts & DMA_GSTS_RTPS), sts);
+ 
+ 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
++
++	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
++	if (sm_supported(iommu))
++		qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
++	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ }
+ 
+ void iommu_flush_write_buffer(struct intel_iommu *iommu)
+@@ -2340,8 +2351,9 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 		return -EINVAL;
+ 
+ 	attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
++	attr |= DMA_FL_PTE_PRESENT;
+ 	if (domain_use_first_level(domain)) {
+-		attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
++		attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ 
+ 		if (domain->domain.type == IOMMU_DOMAIN_DMA) {
+ 			attr |= DMA_FL_PTE_ACCESS;
+@@ -2446,6 +2458,10 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
+ 				   (((u16)bus) << 8) | devfn,
+ 				   DMA_CCMD_MASK_NOBIT,
+ 				   DMA_CCMD_DEVICE_INVL);
++
++	if (sm_supported(iommu))
++		qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
++
+ 	iommu->flush.flush_iotlb(iommu,
+ 				 did_old,
+ 				 0,
+@@ -2529,6 +2545,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
+ 
+ 	flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+ 
++	if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
++		flags |= PASID_FLAG_PAGE_SNOOP;
++
+ 	return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
+ 					     domain->iommu_did[iommu->seq_id],
+ 					     flags);
+@@ -3291,8 +3310,6 @@ static int __init init_dmars(void)
+ 		register_pasid_allocator(iommu);
+ #endif
+ 		iommu_set_root_entry(iommu);
+-		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+-		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ 	}
+ 
+ #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
+@@ -3482,12 +3499,7 @@ static int init_iommu_hw(void)
+ 		}
+ 
+ 		iommu_flush_write_buffer(iommu);
+-
+ 		iommu_set_root_entry(iommu);
+-
+-		iommu->flush.flush_context(iommu, 0, 0, 0,
+-					   DMA_CCMD_GLOBAL_INVL);
+-		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ 		iommu_enable_translation(iommu);
+ 		iommu_disable_protect_mem_regions(iommu);
+ 	}
+@@ -3870,8 +3882,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
+ 		goto disable_iommu;
+ 
+ 	iommu_set_root_entry(iommu);
+-	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+-	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ 	iommu_enable_translation(iommu);
+ 
+ 	iommu_disable_protect_mem_regions(iommu);
+diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
+index 611ef5243cb63..5c16ebe037a14 100644
+--- a/drivers/iommu/intel/irq_remapping.c
++++ b/drivers/iommu/intel/irq_remapping.c
+@@ -736,7 +736,7 @@ static int __init intel_prepare_irq_remapping(void)
+ 		return -ENODEV;
+ 
+ 	if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
+-		goto error;
++		return -ENODEV;
+ 
+ 	if (!dmar_ir_support())
+ 		return -ENODEV;
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index f26cb6195b2c4..5093d317ff1a2 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -411,6 +411,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
+ 	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
+ }
+ 
++/*
++ * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
++ * PASID entry.
++ */
++static inline void
++pasid_set_pgsnp(struct pasid_entry *pe)
++{
++	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
++}
++
+ /*
+  * Setup the First Level Page table Pointer field (Bit 140~191)
+  * of a scalable mode PASID entry.
+@@ -565,6 +575,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
+ 		}
+ 	}
+ 
++	if (flags & PASID_FLAG_PAGE_SNOOP)
++		pasid_set_pgsnp(pte);
++
+ 	pasid_set_domain_id(pte, did);
+ 	pasid_set_address_width(pte, iommu->agaw);
+ 	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+@@ -643,6 +656,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
+ 	pasid_set_fault_enable(pte);
+ 	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ 
++	if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
++		pasid_set_pgsnp(pte);
++
+ 	/*
+ 	 * Since it is a second level only translation setup, we should
+ 	 * set SRE bit as well (addresses are expected to be GPAs).
+diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
+index 444c0bec221a4..086ebd6973199 100644
+--- a/drivers/iommu/intel/pasid.h
++++ b/drivers/iommu/intel/pasid.h
+@@ -48,6 +48,7 @@
+  */
+ #define PASID_FLAG_SUPERVISOR_MODE	BIT(0)
+ #define PASID_FLAG_NESTED		BIT(1)
++#define PASID_FLAG_PAGE_SNOOP		BIT(2)
+ 
+ /*
+  * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
+diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
+index 574a7e657a9af..ecb6314fdd5cd 100644
+--- a/drivers/iommu/intel/svm.c
++++ b/drivers/iommu/intel/svm.c
+@@ -862,7 +862,7 @@ intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
+ 	/* Fill in event data for device specific processing */
+ 	memset(&event, 0, sizeof(struct iommu_fault_event));
+ 	event.fault.type = IOMMU_FAULT_PAGE_REQ;
+-	event.fault.prm.addr = desc->addr;
++	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
+ 	event.fault.prm.pasid = desc->pasid;
+ 	event.fault.prm.grpid = desc->prg_index;
+ 	event.fault.prm.perm = prq_to_iommu_prot(desc);
+@@ -920,7 +920,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
+ 			       ((unsigned long long *)req)[1]);
+ 			goto no_pasid;
+ 		}
+-
++		/* We shall not receive page requests for supervisor SVM */
++		if (req->pm_req && (req->rd_req | req->wr_req)) {
++			pr_err("Unexpected page request in Privilege Mode\n");
++			/* No need to find the matching sdev as for bad_req */
++			goto no_pasid;
++		}
++		/* DMA read with exec request is not supported. */
++		if (req->exe_req && req->rd_req) {
++			pr_err("Execution request not supported\n");
++			goto no_pasid;
++		}
+ 		if (!svm || svm->pasid != req->pasid) {
+ 			rcu_read_lock();
+ 			svm = ioasid_find(NULL, req->pasid, NULL);
+@@ -1021,12 +1031,12 @@ no_pasid:
+ 				QI_PGRP_RESP_TYPE;
+ 			resp.qw1 = QI_PGRP_IDX(req->prg_index) |
+ 				QI_PGRP_LPIG(req->lpig);
++			resp.qw2 = 0;
++			resp.qw3 = 0;
+ 
+ 			if (req->priv_data_present)
+ 				memcpy(&resp.qw2, req->priv_data,
+ 				       sizeof(req->priv_data));
+-			resp.qw2 = 0;
+-			resp.qw3 = 0;
+ 			qi_submit_sync(iommu, &resp, 1, 0);
+ 		}
+ prq_advance:
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index d0b0a15dba841..e10cfa99057ce 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -2878,10 +2878,12 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
+  */
+ int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+ {
+-	const struct iommu_ops *ops = dev->bus->iommu_ops;
++	if (dev->iommu && dev->iommu->iommu_dev) {
++		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+ 
+-	if (ops && ops->dev_enable_feat)
+-		return ops->dev_enable_feat(dev, feat);
++		if (ops->dev_enable_feat)
++			return ops->dev_enable_feat(dev, feat);
++	}
+ 
+ 	return -ENODEV;
+ }
+@@ -2894,10 +2896,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
+  */
+ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+ {
+-	const struct iommu_ops *ops = dev->bus->iommu_ops;
++	if (dev->iommu && dev->iommu->iommu_dev) {
++		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+ 
+-	if (ops && ops->dev_disable_feat)
+-		return ops->dev_disable_feat(dev, feat);
++		if (ops->dev_disable_feat)
++			return ops->dev_disable_feat(dev, feat);
++	}
+ 
+ 	return -EBUSY;
+ }
+@@ -2905,10 +2909,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
+ 
+ bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+ {
+-	const struct iommu_ops *ops = dev->bus->iommu_ops;
++	if (dev->iommu && dev->iommu->iommu_dev) {
++		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+ 
+-	if (ops && ops->dev_feat_enabled)
+-		return ops->dev_feat_enabled(dev, feat);
++		if (ops->dev_feat_enabled)
++			return ops->dev_feat_enabled(dev, feat);
++	}
+ 
+ 	return false;
+ }
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 6ecc007f07cd5..e168a682806a9 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -688,13 +688,6 @@ static const struct iommu_ops mtk_iommu_ops = {
+ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+ {
+ 	u32 regval;
+-	int ret;
+-
+-	ret = clk_prepare_enable(data->bclk);
+-	if (ret) {
+-		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
+-		return ret;
+-	}
+ 
+ 	if (data->plat_data->m4u_plat == M4U_MT8173) {
+ 		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
+@@ -760,7 +753,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+ 	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
+ 			     dev_name(data->dev), (void *)data)) {
+ 		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
+-		clk_disable_unprepare(data->bclk);
+ 		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
+ 		return -ENODEV;
+ 	}
+@@ -977,14 +969,19 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
+ 	void __iomem *base = data->base;
+ 	int ret;
+ 
+-	/* Avoid first resume to affect the default value of registers below. */
+-	if (!m4u_dom)
+-		return 0;
+ 	ret = clk_prepare_enable(data->bclk);
+ 	if (ret) {
+ 		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
+ 		return ret;
+ 	}
++
++	/*
++	 * Upon first resume, only enable the clk and return, since the values of the
++	 * registers are not yet set.
++	 */
++	if (!m4u_dom)
++		return 0;
++
+ 	writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
+ 	writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
+ 	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
+diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
+index 563a9b3662941..e81e89a81cb5b 100644
+--- a/drivers/irqchip/irq-gic-v3-mbi.c
++++ b/drivers/irqchip/irq-gic-v3-mbi.c
+@@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
+ 	reg = of_get_property(np, "mbi-alias", NULL);
+ 	if (reg) {
+ 		mbi_phys_base = of_translate_address(np, reg);
+-		if (mbi_phys_base == OF_BAD_ADDR) {
++		if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
+ 			ret = -ENXIO;
+ 			goto err_free_mbi;
+ 		}
+diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
+index 4c325301a2fe8..94d9067dc8d09 100644
+--- a/drivers/mailbox/sprd-mailbox.c
++++ b/drivers/mailbox/sprd-mailbox.c
+@@ -60,6 +60,8 @@ struct sprd_mbox_priv {
+ 	struct clk		*clk;
+ 	u32			outbox_fifo_depth;
+ 
++	struct mutex		lock;
++	u32			refcnt;
+ 	struct mbox_chan	chan[SPRD_MBOX_CHAN_MAX];
+ };
+ 
+@@ -115,7 +117,11 @@ static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
+ 		id = readl(priv->outbox_base + SPRD_MBOX_ID);
+ 
+ 		chan = &priv->chan[id];
+-		mbox_chan_received_data(chan, (void *)msg);
++		if (chan->cl)
++			mbox_chan_received_data(chan, (void *)msg);
++		else
++			dev_warn_ratelimited(priv->dev,
++				    "message's been dropped at ch[%d]\n", id);
+ 
+ 		/* Trigger to update outbox FIFO pointer */
+ 		writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
+@@ -215,18 +221,22 @@ static int sprd_mbox_startup(struct mbox_chan *chan)
+ 	struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ 	u32 val;
+ 
+-	/* Select outbox FIFO mode and reset the outbox FIFO status */
+-	writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
++	mutex_lock(&priv->lock);
++	if (priv->refcnt++ == 0) {
++		/* Select outbox FIFO mode and reset the outbox FIFO status */
++		writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
+ 
+-	/* Enable inbox FIFO overflow and delivery interrupt */
+-	val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+-	val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
+-	writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
++		/* Enable inbox FIFO overflow and delivery interrupt */
++		val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
++		val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
++		writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ 
+-	/* Enable outbox FIFO not empty interrupt */
+-	val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+-	val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+-	writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
++		/* Enable outbox FIFO not empty interrupt */
++		val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
++		val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
++		writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
++	}
++	mutex_unlock(&priv->lock);
+ 
+ 	return 0;
+ }
+@@ -235,9 +245,13 @@ static void sprd_mbox_shutdown(struct mbox_chan *chan)
+ {
+ 	struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ 
+-	/* Disable inbox & outbox interrupt */
+-	writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+-	writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
++	mutex_lock(&priv->lock);
++	if (--priv->refcnt == 0) {
++		/* Disable inbox & outbox interrupt */
++		writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
++		writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
++	}
++	mutex_unlock(&priv->lock);
+ }
+ 
+ static const struct mbox_chan_ops sprd_mbox_ops = {
+@@ -266,6 +280,7 @@ static int sprd_mbox_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	priv->dev = dev;
++	mutex_init(&priv->lock);
+ 
+ 	/*
+ 	 * The Spreadtrum mailbox uses an inbox to send messages to the target
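
The sprd_mbox changes above turn a per-channel startup/shutdown into a refcounted one: with multiple channels sharing one FIFO and IRQ mask, only the first startup may reset the hardware and only the last shutdown may mask the interrupts, or a late-starting channel would wipe state out from under an active one. A minimal pthread sketch of the pattern, where hw_enable()/hw_disable() stand in for the register writes:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int refcnt;

static void hw_enable(void)  { puts("reset FIFO, unmask IRQs"); }
static void hw_disable(void) { puts("mask IRQs"); }

static void chan_startup(void)
{
	pthread_mutex_lock(&lock);
	if (refcnt++ == 0)	/* only the first user touches hardware */
		hw_enable();
	pthread_mutex_unlock(&lock);
}

static void chan_shutdown(void)
{
	pthread_mutex_lock(&lock);
	if (--refcnt == 0)	/* only the last user undoes it */
		hw_disable();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	chan_startup();		/* resets hardware */
	chan_startup();		/* no-op: already enabled */
	chan_shutdown();	/* no-op: still one user */
	chan_shutdown();	/* masks IRQs */
	return 0;
}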
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 82d4e0880a994..4fb635c0baa07 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -110,13 +110,13 @@ static void __update_writeback_rate(struct cached_dev *dc)
+ 		int64_t fps;
+ 
+ 		if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
+-			fp_term = dc->writeback_rate_fp_term_low *
++			fp_term = (int64_t)dc->writeback_rate_fp_term_low *
+ 			(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
+ 		} else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
+-			fp_term = dc->writeback_rate_fp_term_mid *
++			fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
+ 			(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
+ 		} else {
+-			fp_term = dc->writeback_rate_fp_term_high *
++			fp_term = (int64_t)dc->writeback_rate_fp_term_high *
+ 			(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
+ 		}
+ 		fps = div_s64(dirty, dirty_buckets) * fp_term;
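
The three bcache casts fix a silent 32-bit multiply: both operands of fp_term's product are 32-bit, so the arithmetic wraps before the result is ever widened to int64_t. Casting one operand first makes the multiply itself 64-bit. A small demonstration with illustrative magnitudes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fp_term = 200000;	/* writeback_rate_fp_term_* scale, illustrative */
	uint32_t delta = 30000;		/* in_use minus threshold, illustrative */

	int64_t wrong = fp_term * delta;		/* 32-bit product wraps mod 2^32 */
	int64_t right = (int64_t)fp_term * delta;	/* widened before multiplying */

	printf("wrong=%lld right=%lld\n", (long long)wrong, (long long)right);
	return 0;
}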
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 200c5d0f08bf5..ea3130e116801 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1722,6 +1722,8 @@ void md_bitmap_flush(struct mddev *mddev)
+ 	md_bitmap_daemon_work(mddev);
+ 	bitmap->daemon_lastrun -= sleep;
+ 	md_bitmap_daemon_work(mddev);
++	if (mddev->bitmap_info.external)
++		md_super_wait(mddev);
+ 	md_bitmap_update_sb(bitmap);
+ }
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 21da0c48f6c21..2a9553efc2d1b 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -734,7 +734,34 @@ void mddev_init(struct mddev *mddev)
+ }
+ EXPORT_SYMBOL_GPL(mddev_init);
+ 
++static struct mddev *mddev_find_locked(dev_t unit)
++{
++	struct mddev *mddev;
++
++	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
++		if (mddev->unit == unit)
++			return mddev;
++
++	return NULL;
++}
++
+ static struct mddev *mddev_find(dev_t unit)
++{
++	struct mddev *mddev;
++
++	if (MAJOR(unit) != MD_MAJOR)
++		unit &= ~((1 << MdpMinorShift) - 1);
++
++	spin_lock(&all_mddevs_lock);
++	mddev = mddev_find_locked(unit);
++	if (mddev)
++		mddev_get(mddev);
++	spin_unlock(&all_mddevs_lock);
++
++	return mddev;
++}
++
++static struct mddev *mddev_find_or_alloc(dev_t unit)
+ {
+ 	struct mddev *mddev, *new = NULL;
+ 
+@@ -745,13 +772,13 @@ static struct mddev *mddev_find(dev_t unit)
+ 	spin_lock(&all_mddevs_lock);
+ 
+ 	if (unit) {
+-		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+-			if (mddev->unit == unit) {
+-				mddev_get(mddev);
+-				spin_unlock(&all_mddevs_lock);
+-				kfree(new);
+-				return mddev;
+-			}
++		mddev = mddev_find_locked(unit);
++		if (mddev) {
++			mddev_get(mddev);
++			spin_unlock(&all_mddevs_lock);
++			kfree(new);
++			return mddev;
++		}
+ 
+ 		if (new) {
+ 			list_add(&new->all_mddevs, &all_mddevs);
+@@ -777,12 +804,7 @@ static struct mddev *mddev_find(dev_t unit)
+ 				return NULL;
+ 			}
+ 
+-			is_free = 1;
+-			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+-				if (mddev->unit == dev) {
+-					is_free = 0;
+-					break;
+-				}
++			is_free = !mddev_find_locked(dev);
+ 		}
+ 		new->unit = dev;
+ 		new->md_minor = MINOR(dev);
+@@ -5644,7 +5666,7 @@ static int md_alloc(dev_t dev, char *name)
+ 	 * writing to /sys/module/md_mod/parameters/new_array.
+ 	 */
+ 	static DEFINE_MUTEX(disks_mutex);
+-	struct mddev *mddev = mddev_find(dev);
++	struct mddev *mddev = mddev_find_or_alloc(dev);
+ 	struct gendisk *disk;
+ 	int partitioned;
+ 	int shift;
+@@ -6524,11 +6546,9 @@ static void autorun_devices(int part)
+ 
+ 		md_probe(dev);
+ 		mddev = mddev_find(dev);
+-		if (!mddev || !mddev->gendisk) {
+-			if (mddev)
+-				mddev_put(mddev);
++		if (!mddev)
+ 			break;
+-		}
++
+ 		if (mddev_lock(mddev))
+ 			pr_warn("md: %s locked, cannot run\n", mdname(mddev));
+ 		else if (mddev->raid_disks || mddev->major_version
+@@ -7821,8 +7841,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
+ 		/* Wait until bdev->bd_disk is definitely gone */
+ 		if (work_pending(&mddev->del_work))
+ 			flush_workqueue(md_misc_wq);
+-		/* Then retry the open from the top */
+-		return -ERESTARTSYS;
++		return -EBUSY;
+ 	}
+ 	BUG_ON(mddev != bdev->bd_disk->private_data);
+ 
+@@ -8153,7 +8172,11 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
+ 	loff_t l = *pos;
+ 	struct mddev *mddev;
+ 
+-	if (l >= 0x10000)
++	if (l == 0x10000) {
++		++*pos;
++		return (void *)2;
++	}
++	if (l > 0x10000)
+ 		return NULL;
+ 	if (!l--)
+ 		/* header */
+@@ -9251,11 +9274,11 @@ void md_check_recovery(struct mddev *mddev)
+ 		}
+ 
+ 		if (mddev_is_clustered(mddev)) {
+-			struct md_rdev *rdev;
++			struct md_rdev *rdev, *tmp;
+ 			/* kick the device if another node issued a
+ 			 * remove disk.
+ 			 */
+-			rdev_for_each(rdev, mddev) {
++			rdev_for_each_safe(rdev, tmp, mddev) {
+ 				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
+ 						rdev->raid_disk < 0)
+ 					md_kick_rdev_from_array(rdev);
+@@ -9569,7 +9592,7 @@ err_wq:
+ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
+ {
+ 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+-	struct md_rdev *rdev2;
++	struct md_rdev *rdev2, *tmp;
+ 	int role, ret;
+ 	char b[BDEVNAME_SIZE];
+ 
+@@ -9586,7 +9609,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
+ 	}
+ 
+ 	/* Check for change of roles in the active devices */
+-	rdev_for_each(rdev2, mddev) {
++	rdev_for_each_safe(rdev2, tmp, mddev) {
+ 		if (test_bit(Faulty, &rdev2->flags))
+ 			continue;
+ 
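
The switch to rdev_for_each_safe() in both md hunks above exists because md_kick_rdev_from_array() unlinks the current rdev mid-walk; the _safe variants cache the next pointer before the loop body runs. The same idea in a self-contained singly-linked list (the kernel uses list_for_each_entry_safe(); this is only a sketch):

#include <stdio.h>
#include <stdlib.h>

struct node { int faulty; struct node *next; };

static void kick_faulty(struct node **head)
{
	struct node **pp = head;

	while (*pp) {
		struct node *n = *pp;	/* grab the node before it can vanish */
		if (n->faulty) {
			*pp = n->next;	/* unlink first, then free safely */
			free(n);
		} else {
			pp = &n->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->faulty = (i == 1);	/* mark the middle node for removal */
		n->next = head;
		head = n;
	}
	kick_faulty(&head);
	for (struct node *n = head; n; n = n->next)
		printf("kept node, faulty=%d\n", n->faulty);
	return 0;
}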
+diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
+index f2d13b71416cb..e50fa0ff7c5d5 100644
+--- a/drivers/media/common/saa7146/saa7146_core.c
++++ b/drivers/media/common/saa7146/saa7146_core.c
+@@ -253,7 +253,7 @@ int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt
+ 			 i, sg_dma_address(list), sg_dma_len(list),
+ 			 list->offset);
+ */
+-		for (p = 0; p * 4096 < list->length; p++, ptr++) {
++		for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++) {
+ 			*ptr = cpu_to_le32(sg_dma_address(list) + p * 4096);
+ 			nr_pages++;
+ 		}
+diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
+index 7b8795eca5893..66215d9106a42 100644
+--- a/drivers/media/common/saa7146/saa7146_video.c
++++ b/drivers/media/common/saa7146/saa7146_video.c
+@@ -247,9 +247,8 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
+ 
+ 		/* walk all pages, copy all page addresses to ptr1 */
+ 		for (i = 0; i < length; i++, list++) {
+-			for (p = 0; p * 4096 < list->length; p++, ptr1++) {
++			for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr1++)
+ 				*ptr1 = cpu_to_le32(sg_dma_address(list) - list->offset);
+-			}
+ 		}
+ /*
+ 		ptr1 = pt1->cpu;
+diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
+index cfa4cdde99d8a..02e8aa11e36e7 100644
+--- a/drivers/media/dvb-frontends/m88ds3103.c
++++ b/drivers/media/dvb-frontends/m88ds3103.c
+@@ -1904,8 +1904,8 @@ static int m88ds3103_probe(struct i2c_client *client,
+ 
+ 		dev->dt_client = i2c_new_dummy_device(client->adapter,
+ 						      dev->dt_addr);
+-		if (!dev->dt_client) {
+-			ret = -ENODEV;
++		if (IS_ERR(dev->dt_client)) {
++			ret = PTR_ERR(dev->dt_client);
+ 			goto err_kfree;
+ 		}
+ 	}
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index 15afbb4f5b317..4505594996bd8 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3522,11 +3522,11 @@ static int ccs_probe(struct i2c_client *client)
+ 	sensor->pll.scale_n = CCS_LIM(sensor, SCALER_N_MIN);
+ 
+ 	ccs_create_subdev(sensor, sensor->scaler, " scaler", 2,
+-			  MEDIA_ENT_F_CAM_SENSOR);
++			  MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ 	ccs_create_subdev(sensor, sensor->binner, " binner", 2,
+ 			  MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ 	ccs_create_subdev(sensor, sensor->pixel_array, " pixel_array", 1,
+-			  MEDIA_ENT_F_PROC_VIDEO_SCALER);
++			  MEDIA_ENT_F_CAM_SENSOR);
+ 
+ 	rval = ccs_init_controls(sensor);
+ 	if (rval < 0)
+diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
+index 6e3382b85a90d..49ba39418360a 100644
+--- a/drivers/media/i2c/imx219.c
++++ b/drivers/media/i2c/imx219.c
+@@ -1035,29 +1035,47 @@ static int imx219_start_streaming(struct imx219 *imx219)
+ 	const struct imx219_reg_list *reg_list;
+ 	int ret;
+ 
++	ret = pm_runtime_get_sync(&client->dev);
++	if (ret < 0) {
++		pm_runtime_put_noidle(&client->dev);
++		return ret;
++	}
++
+ 	/* Apply default values of current mode */
+ 	reg_list = &imx219->mode->reg_list;
+ 	ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
+ 	if (ret) {
+ 		dev_err(&client->dev, "%s failed to set mode\n", __func__);
+-		return ret;
++		goto err_rpm_put;
+ 	}
+ 
+ 	ret = imx219_set_framefmt(imx219);
+ 	if (ret) {
+ 		dev_err(&client->dev, "%s failed to set frame format: %d\n",
+ 			__func__, ret);
+-		return ret;
++		goto err_rpm_put;
+ 	}
+ 
+ 	/* Apply customized values from user */
+ 	ret =  __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
+ 	if (ret)
+-		return ret;
++		goto err_rpm_put;
+ 
+ 	/* set stream on register */
+-	return imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+-				IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
++	ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
++			       IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
++	if (ret)
++		goto err_rpm_put;
++
++	/* vflip and hflip cannot change during streaming */
++	__v4l2_ctrl_grab(imx219->vflip, true);
++	__v4l2_ctrl_grab(imx219->hflip, true);
++
++	return 0;
++
++err_rpm_put:
++	pm_runtime_put(&client->dev);
++	return ret;
+ }
+ 
+ static void imx219_stop_streaming(struct imx219 *imx219)
+@@ -1070,12 +1088,16 @@ static void imx219_stop_streaming(struct imx219 *imx219)
+ 			       IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
+ 	if (ret)
+ 		dev_err(&client->dev, "%s failed to set stream\n", __func__);
++
++	__v4l2_ctrl_grab(imx219->vflip, false);
++	__v4l2_ctrl_grab(imx219->hflip, false);
++
++	pm_runtime_put(&client->dev);
+ }
+ 
+ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
+ {
+ 	struct imx219 *imx219 = to_imx219(sd);
+-	struct i2c_client *client = v4l2_get_subdevdata(sd);
+ 	int ret = 0;
+ 
+ 	mutex_lock(&imx219->mutex);
+@@ -1085,36 +1107,23 @@ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
+ 	}
+ 
+ 	if (enable) {
+-		ret = pm_runtime_get_sync(&client->dev);
+-		if (ret < 0) {
+-			pm_runtime_put_noidle(&client->dev);
+-			goto err_unlock;
+-		}
+-
+ 		/*
+ 		 * Apply default & customized values
+ 		 * and then start streaming.
+ 		 */
+ 		ret = imx219_start_streaming(imx219);
+ 		if (ret)
+-			goto err_rpm_put;
++			goto err_unlock;
+ 	} else {
+ 		imx219_stop_streaming(imx219);
+-		pm_runtime_put(&client->dev);
+ 	}
+ 
+ 	imx219->streaming = enable;
+ 
+-	/* vflip and hflip cannot change during streaming */
+-	__v4l2_ctrl_grab(imx219->vflip, enable);
+-	__v4l2_ctrl_grab(imx219->hflip, enable);
+-
+ 	mutex_unlock(&imx219->mutex);
+ 
+ 	return ret;
+ 
+-err_rpm_put:
+-	pm_runtime_put(&client->dev);
+ err_unlock:
+ 	mutex_unlock(&imx219->mutex);
+ 
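The imx219 hunks above move the runtime-PM reference handling into imx219_start_streaming()/imx219_stop_streaming() so that every failure path drops the reference it took. A minimal sketch of that pattern (my_start and do_hw_setup are illustrative names, not part of the driver):

    #include <linux/pm_runtime.h>

    static int do_hw_setup(struct device *dev);  /* stands in for the real setup */

    static int my_start(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    /* get_sync raises the usage count even on failure */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            ret = do_hw_setup(dev);
            if (ret)
                    goto err_rpm_put;

            return 0;

    err_rpm_put:
            pm_runtime_put(dev);
            return ret;
    }

The same discipline is why the error label in the hunk is err_rpm_put: once the reference is held, no return may skip the put.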
+diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
+index dcc21515e5a43..179d107f494ca 100644
+--- a/drivers/media/i2c/rdacm21.c
++++ b/drivers/media/i2c/rdacm21.c
+@@ -345,7 +345,7 @@ static int ov10640_initialize(struct rdacm21_device *dev)
+ 	/* Read OV10640 ID to test communications. */
+ 	ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR, OV490_SCCB_SLAVE_READ);
+ 	ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH, OV10640_CHIP_ID >> 8);
+-	ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, (u8)OV10640_CHIP_ID);
++	ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, OV10640_CHIP_ID & 0xff);
+ 
+ 	/* Trigger SCCB slave transaction and give it some time to complete. */
+ 	ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+index 6e8c0c230e111..fecef85bd62eb 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+@@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
+ 	if (!q->sensor)
+ 		return -ENODEV;
+ 
+-	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
++	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
+ 	if (freq < 0) {
+ 		dev_err(dev, "error %lld, invalid link_freq\n", freq);
+ 		return freq;
+diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
+index 391572a6ec76a..efb757d5168a6 100644
+--- a/drivers/media/pci/saa7134/saa7134-core.c
++++ b/drivers/media/pci/saa7134/saa7134-core.c
+@@ -243,7 +243,7 @@ int saa7134_pgtable_build(struct pci_dev *pci, struct saa7134_pgtable *pt,
+ 
+ 	ptr = pt->cpu + startpage;
+ 	for (i = 0; i < length; i++, list = sg_next(list)) {
+-		for (p = 0; p * 4096 < list->length; p++, ptr++)
++		for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++)
+ 			*ptr = cpu_to_le32(sg_dma_address(list) +
+ 						list->offset + p * 4096);
+ 	}
+diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
+index fd1831e97b22f..1ddb5d6354cf3 100644
+--- a/drivers/media/platform/Kconfig
++++ b/drivers/media/platform/Kconfig
+@@ -244,6 +244,7 @@ config VIDEO_MEDIATEK_JPEG
+ 	depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
+ 	depends on VIDEO_DEV && VIDEO_V4L2
+ 	depends on ARCH_MEDIATEK || COMPILE_TEST
++	depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
+ 	select VIDEOBUF2_DMA_CONTIG
+ 	select V4L2_MEM2MEM_DEV
+ 	help
+@@ -271,6 +272,7 @@ config VIDEO_MEDIATEK_MDP
+ 	depends on MTK_IOMMU || COMPILE_TEST
+ 	depends on VIDEO_DEV && VIDEO_V4L2
+ 	depends on ARCH_MEDIATEK || COMPILE_TEST
++	depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
+ 	select VIDEOBUF2_DMA_CONTIG
+ 	select V4L2_MEM2MEM_DEV
+ 	select VIDEO_MEDIATEK_VPU
+@@ -291,6 +293,7 @@ config VIDEO_MEDIATEK_VCODEC
+ 	# our dependencies, to avoid missing symbols during link.
+ 	depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
+ 	depends on MTK_SCP || !MTK_SCP
++	depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
+ 	select VIDEOBUF2_DMA_CONTIG
+ 	select V4L2_MEM2MEM_DEV
+ 	select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
+diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
+index f2c4dadd6a0eb..7bb6babdcade0 100644
+--- a/drivers/media/platform/aspeed-video.c
++++ b/drivers/media/platform/aspeed-video.c
+@@ -514,8 +514,8 @@ static void aspeed_video_off(struct aspeed_video *video)
+ 	aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff);
+ 
+ 	/* Turn off the relevant clocks */
+-	clk_disable(video->vclk);
+ 	clk_disable(video->eclk);
++	clk_disable(video->vclk);
+ 
+ 	clear_bit(VIDEO_CLOCKS_ON, &video->flags);
+ }
+@@ -526,8 +526,8 @@ static void aspeed_video_on(struct aspeed_video *video)
+ 		return;
+ 
+ 	/* Turn on the relevant clocks */
+-	clk_enable(video->eclk);
+ 	clk_enable(video->vclk);
++	clk_enable(video->eclk);
+ 
+ 	set_bit(VIDEO_CLOCKS_ON, &video->flags);
+ }
+@@ -1719,8 +1719,11 @@ static int aspeed_video_probe(struct platform_device *pdev)
+ 		return rc;
+ 
+ 	rc = aspeed_video_setup_video(video);
+-	if (rc)
++	if (rc) {
++		clk_unprepare(video->vclk);
++		clk_unprepare(video->eclk);
+ 		return rc;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
+index 153612ca96fc1..a1393fefa8aea 100644
+--- a/drivers/media/platform/meson/ge2d/ge2d.c
++++ b/drivers/media/platform/meson/ge2d/ge2d.c
+@@ -757,7 +757,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
+ 
+ 		if (ctrl->val == 90) {
+ 			ctx->hflip = 0;
+-			ctx->vflip = 0;
++			ctx->vflip = 1;
+ 			ctx->xy_swap = 1;
+ 		} else if (ctrl->val == 180) {
+ 			ctx->hflip = 1;
+@@ -765,7 +765,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
+ 			ctx->xy_swap = 0;
+ 		} else if (ctrl->val == 270) {
+ 			ctx->hflip = 1;
+-			ctx->vflip = 1;
++			ctx->vflip = 0;
+ 			ctx->xy_swap = 1;
+ 		} else {
+ 			ctx->hflip = 0;
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index d2842f496b474..ae374bb2a48f0 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -224,11 +224,11 @@ static int venus_probe(struct platform_device *pdev)
+ 	if (IS_ERR(core->base))
+ 		return PTR_ERR(core->base);
+ 
+-	core->video_path = of_icc_get(dev, "video-mem");
++	core->video_path = devm_of_icc_get(dev, "video-mem");
+ 	if (IS_ERR(core->video_path))
+ 		return PTR_ERR(core->video_path);
+ 
+-	core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
++	core->cpucfg_path = devm_of_icc_get(dev, "cpu-cfg");
+ 	if (IS_ERR(core->cpucfg_path))
+ 		return PTR_ERR(core->cpucfg_path);
+ 
+@@ -367,9 +367,6 @@ static int venus_remove(struct platform_device *pdev)
+ 
+ 	hfi_destroy(core);
+ 
+-	icc_put(core->video_path);
+-	icc_put(core->cpucfg_path);
+-
+ 	v4l2_device_unregister(&core->v4l2_dev);
+ 
+ 	mutex_destroy(&core->pm_lock);
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+index 813670ed9577b..79deed8adceab 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+@@ -520,14 +520,15 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
+ 				   struct v4l2_mbus_framefmt *format,
+ 				   unsigned int which)
+ {
+-	const struct rkisp1_isp_mbus_info *mbus_info;
+-	struct v4l2_mbus_framefmt *src_fmt;
++	const struct rkisp1_isp_mbus_info *sink_mbus_info;
++	struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
+ 
++	sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
+ 	src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
+-	mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
++	sink_mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ 
+ 	/* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
+-	if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
++	if (sink_mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
+ 	    rkisp1_rsz_get_yuv_mbus_info(format->code))
+ 		src_fmt->code = format->code;
+ 
+diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+index b55de9ab64d8b..3181d0781b613 100644
+--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
++++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+@@ -151,8 +151,10 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+ 	}
+ 
+ 	subdev = sun6i_video_remote_subdev(video, NULL);
+-	if (!subdev)
++	if (!subdev) {
++		ret = -EINVAL;
+ 		goto stop_media_pipeline;
++	}
+ 
+ 	config.pixelformat = video->fmt.fmt.pix.pixelformat;
+ 	config.code = video->mbus_code;
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+index ac1e981e83420..9f731f085179e 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+@@ -1021,7 +1021,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh,
+ 		return -EINVAL;
+ 	}
+ 	dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags);
+-	dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags);
++	dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
+index b3505f4024764..8647c50b66e50 100644
+--- a/drivers/media/tuners/m88rs6000t.c
++++ b/drivers/media/tuners/m88rs6000t.c
+@@ -525,7 +525,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+ 	PGA2_cri = PGA2_GC >> 2;
+ 	PGA2_crf = PGA2_GC & 0x03;
+ 
+-	for (i = 0; i <= RF_GC; i++)
++	for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++)
+ 		RFG += RFGS[i];
+ 
+ 	if (RF_GC == 0)
+@@ -537,12 +537,12 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+ 	if (RF_GC == 3)
+ 		RFG += 100;
+ 
+-	for (i = 0; i <= IF_GC; i++)
++	for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++)
+ 		IFG += IFGS[i];
+ 
+ 	TIAG = TIA_GC * TIA_GS;
+ 
+-	for (i = 0; i <= BB_GC; i++)
++	for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++)
+ 		BBG += BBGS[i];
+ 
+ 	PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS;
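The m88rs6000t change above clamps loop bounds that come straight from hardware registers. The idiom, sketched here with a made-up gain table:

    #include <linux/kernel.h>       /* ARRAY_SIZE */

    static const int gains[] = { 0, 15, 13, 14, 14, 14, 19, 20 };

    /* reg_val is read from the tuner; never trust it as an array bound */
    static int sum_gains(unsigned int reg_val)
    {
            unsigned int i;
            int sum = 0;

            for (i = 0; i <= reg_val && i < ARRAY_SIZE(gains); i++)
                    sum += gains[i];
            return sum;
    }

Without the ARRAY_SIZE() term, a register value larger than the table would walk past its end.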
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
+index 4f0209695f132..6219c81857827 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
+@@ -2552,7 +2552,15 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
+ 	if (hdl == NULL || hdl->buckets == NULL)
+ 		return;
+ 
+-	if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
++	/*
++	 * If the main handler is freed and it is used by handler objects in
++	 * outstanding requests, then unbind and put those objects before
++	 * freeing the main handler.
++	 *
++	 * The main handler can be identified by having a NULL ops pointer in
++	 * the request object.
++	 */
++	if (!hdl->req_obj.ops && !list_empty(&hdl->requests)) {
+ 		struct v4l2_ctrl_handler *req, *next_req;
+ 
+ 		list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
+@@ -3595,8 +3603,8 @@ static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
+ 		container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ 	struct v4l2_ctrl_handler *main_hdl = obj->priv;
+ 
+-	list_del_init(&hdl->requests);
+ 	mutex_lock(main_hdl->lock);
++	list_del_init(&hdl->requests);
+ 	if (hdl->request_is_queued) {
+ 		list_del_init(&hdl->requests_queued);
+ 		hdl->request_is_queued = false;
+@@ -3655,8 +3663,11 @@ static int v4l2_ctrl_request_bind(struct media_request *req,
+ 	if (!ret) {
+ 		ret = media_request_object_bind(req, &req_ops,
+ 						from, false, &hdl->req_obj);
+-		if (!ret)
++		if (!ret) {
++			mutex_lock(from->lock);
+ 			list_add_tail(&hdl->requests, &from->requests);
++			mutex_unlock(from->lock);
++		}
+ 	}
+ 	return ret;
+ }
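Both v4l2-ctrls hunks above enforce the same rule: the requests list belongs to the main handler, so additions and removals happen under main_hdl->lock. A generic sketch of that ownership pattern (the types are illustrative):

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct owner {
            struct mutex lock;              /* guards @requests */
            struct list_head requests;
    };

    static void owner_add(struct owner *o, struct list_head *node)
    {
            mutex_lock(&o->lock);
            list_add_tail(node, &o->requests);
            mutex_unlock(&o->lock);
    }

    static void owner_del(struct owner *o, struct list_head *node)
    {
            mutex_lock(&o->lock);
            list_del_init(node);            /* node stays safely re-deletable */
            mutex_unlock(&o->lock);
    }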
+diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
+index cfa730cfd1453..f80c2ea39ca4c 100644
+--- a/drivers/memory/omap-gpmc.c
++++ b/drivers/memory/omap-gpmc.c
+@@ -1009,8 +1009,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
+ 
+ void gpmc_cs_free(int cs)
+ {
+-	struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+-	struct resource *res = &gpmc->mem;
++	struct gpmc_cs_data *gpmc;
++	struct resource *res;
+ 
+ 	spin_lock(&gpmc_mem_lock);
+ 	if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
+@@ -1018,6 +1018,9 @@ void gpmc_cs_free(int cs)
+ 		spin_unlock(&gpmc_mem_lock);
+ 		return;
+ 	}
++	gpmc = &gpmc_cs[cs];
++	res = &gpmc->mem;
++
+ 	gpmc_cs_disable_mem(cs);
+ 	if (res->flags)
+ 		release_resource(res);
+diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
+index 3b5b1045edd90..9c0a284167773 100644
+--- a/drivers/memory/pl353-smc.c
++++ b/drivers/memory/pl353-smc.c
+@@ -63,7 +63,7 @@
+ /* ECC memory config register specific constants */
+ #define PL353_SMC_ECC_MEMCFG_MODE_MASK	0xC
+ #define PL353_SMC_ECC_MEMCFG_MODE_SHIFT	2
+-#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK	0xC
++#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK	0x3
+ 
+ #define PL353_SMC_DC_UPT_NAND_REGS	((4 << 23) |	/* CS: NAND chip */ \
+ 				 (2 << 21))	/* UpdateRegs operation */
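The PL353 mask fix above follows from the layout implied by the neighbouring constants: the ECC mode field sits at bits 3:2 (mask 0xC, shift 2), so a page-size field at bits 1:0 needs mask 0x3 — 0xC was evidently copied from the mode mask. Field extraction with the corrected masks, as a sketch (field positions assumed from the constants, not independently verified):

    #define ECC_MEMCFG_MODE_MASK    0xC
    #define ECC_MEMCFG_MODE_SHIFT   2
    #define ECC_MEMCFG_PGSIZE_MASK  0x3

    static unsigned int ecc_mode(unsigned int memcfg)
    {
            return (memcfg & ECC_MEMCFG_MODE_MASK) >> ECC_MEMCFG_MODE_SHIFT;
    }

    static unsigned int ecc_pgsize(unsigned int memcfg)
    {
            return memcfg & ECC_MEMCFG_PGSIZE_MASK; /* field starts at bit 0 */
    }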
+diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
+index 8d36e221def14..45eed659b0c6d 100644
+--- a/drivers/memory/renesas-rpc-if.c
++++ b/drivers/memory/renesas-rpc-if.c
+@@ -192,10 +192,10 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
+ 	}
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
+-	rpc->size = resource_size(res);
+ 	rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
+ 	if (IS_ERR(rpc->dirmap))
+ 		rpc->dirmap = NULL;
++	rpc->size = resource_size(res);
+ 
+ 	rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ 
+diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
+index 1dabb509dec3a..dee503640e125 100644
+--- a/drivers/memory/samsung/exynos5422-dmc.c
++++ b/drivers/memory/samsung/exynos5422-dmc.c
+@@ -1298,7 +1298,9 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
+ 
+ 	dmc->curr_volt = target_volt;
+ 
+-	clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
++	ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
++	if (ret)
++		return ret;
+ 
+ 	clk_prepare_enable(dmc->fout_bpll);
+ 	clk_prepare_enable(dmc->mout_bpll);
+diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c
+index 744b230cdccaa..65da2b17a2040 100644
+--- a/drivers/mfd/intel_pmt.c
++++ b/drivers/mfd/intel_pmt.c
+@@ -79,19 +79,18 @@ static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
+ 	case DVSEC_INTEL_ID_WATCHER:
+ 		if (quirks & PMT_QUIRK_NO_WATCHER) {
+ 			dev_info(dev, "Watcher not supported\n");
+-			return 0;
++			return -EINVAL;
+ 		}
+ 		name = "pmt_watcher";
+ 		break;
+ 	case DVSEC_INTEL_ID_CRASHLOG:
+ 		if (quirks & PMT_QUIRK_NO_CRASHLOG) {
+ 			dev_info(dev, "Crashlog not supported\n");
+-			return 0;
++			return -EINVAL;
+ 		}
+ 		name = "pmt_crashlog";
+ 		break;
+ 	default:
+-		dev_err(dev, "Unrecognized PMT capability: %d\n", id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -174,12 +173,8 @@ static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+ 
+ 		ret = pmt_add_dev(pdev, &header, quirks);
+-		if (ret) {
+-			dev_warn(&pdev->dev,
+-				 "Failed to add device for DVSEC id %d\n",
+-				 header.id);
++		if (ret)
+ 			continue;
+-		}
+ 
+ 		found_devices = true;
+ 	} while (true);
+diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
+index add6033591242..44ed2fce03196 100644
+--- a/drivers/mfd/stm32-timers.c
++++ b/drivers/mfd/stm32-timers.c
+@@ -158,13 +158,18 @@ static const struct regmap_config stm32_timers_regmap_cfg = {
+ 
+ static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
+ {
++	u32 arr;
++
++	/* Back up ARR to restore it after getting the maximum value */
++	regmap_read(ddata->regmap, TIM_ARR, &arr);
++
+ 	/*
+ 	 * Only the available bits will be written, so on readback
+ 	 * we get the maximum value of the auto-reload register
+ 	 */
+ 	regmap_write(ddata->regmap, TIM_ARR, ~0L);
+ 	regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
+-	regmap_write(ddata->regmap, TIM_ARR, 0x0);
++	regmap_write(ddata->regmap, TIM_ARR, arr);
+ }
+ 
+ static int stm32_timers_dma_probe(struct device *dev,
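The stm32-timers hunk above converts a destructive probe (write ~0, read back, write 0) into a save/probe/restore sequence. The same shape, as a standalone sketch over a regmap:

    #include <linux/regmap.h>

    /* Probe the writable ceiling of @reg without disturbing its value. */
    static unsigned int probe_max_value(struct regmap *map, unsigned int reg)
    {
            unsigned int saved, max;

            regmap_read(map, reg, &saved);  /* back up the live value */
            regmap_write(map, reg, ~0U);    /* only writable bits stick */
            regmap_read(map, reg, &max);    /* read back the ceiling */
            regmap_write(map, reg, saved);  /* restore instead of zeroing */

            return max;
    }

Restoring the saved value matters because ARR may already be in use (for example by a PWM consumer) when the MFD core probes.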
+diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
+index 90f3292230c9c..1dd39483e7c14 100644
+--- a/drivers/mfd/stmpe.c
++++ b/drivers/mfd/stmpe.c
+@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(stmpe_set_altfunc);
+  * GPIO (all variants)
+  */
+ 
+-static const struct resource stmpe_gpio_resources[] = {
++static struct resource stmpe_gpio_resources[] = {
+ 	/* Start and end filled dynamically */
+ 	{
+ 		.flags	= IORESOURCE_IRQ,
+@@ -336,7 +336,8 @@ static const struct mfd_cell stmpe_gpio_cell_noirq = {
+  * Keypad (1601, 2401, 2403)
+  */
+ 
+-static const struct resource stmpe_keypad_resources[] = {
++static struct resource stmpe_keypad_resources[] = {
++	/* Start and end filled dynamically */
+ 	{
+ 		.name	= "KEYPAD",
+ 		.flags	= IORESOURCE_IRQ,
+@@ -357,7 +358,8 @@ static const struct mfd_cell stmpe_keypad_cell = {
+ /*
+  * PWM (1601, 2401, 2403)
+  */
+-static const struct resource stmpe_pwm_resources[] = {
++static struct resource stmpe_pwm_resources[] = {
++	/* Start and end filled dynamically */
+ 	{
+ 		.name	= "PWM0",
+ 		.flags	= IORESOURCE_IRQ,
+@@ -445,7 +447,8 @@ static struct stmpe_variant_info stmpe801_noirq = {
+  * Touchscreen (STMPE811 or STMPE610)
+  */
+ 
+-static const struct resource stmpe_ts_resources[] = {
++static struct resource stmpe_ts_resources[] = {
++	/* Start and end filled dynamically */
+ 	{
+ 		.name	= "TOUCH_DET",
+ 		.flags	= IORESOURCE_IRQ,
+@@ -467,7 +470,8 @@ static const struct mfd_cell stmpe_ts_cell = {
+  * ADC (STMPE811)
+  */
+ 
+-static const struct resource stmpe_adc_resources[] = {
++static struct resource stmpe_adc_resources[] = {
++	/* Start and end filled dynamically */
+ 	{
+ 		.name	= "STMPE_TEMP_SENS",
+ 		.flags	= IORESOURCE_IRQ,
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
+index dd65cedf3b125..9d14bf444481b 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d.c
+@@ -208,7 +208,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
+ static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
+ 
+ /* ODR is Output Data Rate */
+-static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
++static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
+ {
+ 	u8 ctrl;
+ 	int shift;
+@@ -216,15 +216,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
+ 	lis3->read(lis3, CTRL_REG1, &ctrl);
+ 	ctrl &= lis3->odr_mask;
+ 	shift = ffs(lis3->odr_mask) - 1;
+-	return lis3->odrs[(ctrl >> shift)];
++	return (ctrl >> shift);
+ }
+ 
+ static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
+ {
+-	int div = lis3lv02d_get_odr(lis3);
++	int odr_idx = lis3lv02d_get_odr_index(lis3);
++	int div = lis3->odrs[odr_idx];
+ 
+-	if (WARN_ONCE(div == 0, "device returned spurious data"))
++	if (div == 0) {
++		if (odr_idx == 0) {
++			/* Power-down mode, not sampling, no need to sleep */
++			return 0;
++		}
++
++		dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
+ 		return -ENXIO;
++	}
+ 
+ 	/* LIS3 power on delay is quite long */
+ 	msleep(lis3->pwron_delay / div);
+@@ -816,9 +824,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
+ 			struct device_attribute *attr, char *buf)
+ {
+ 	struct lis3lv02d *lis3 = dev_get_drvdata(dev);
++	int odr_idx;
+ 
+ 	lis3lv02d_sysfs_poweron(lis3);
+-	return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
++
++	odr_idx = lis3lv02d_get_odr_index(lis3);
++	return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
+ }
+ 
+ static ssize_t lis3lv02d_rate_set(struct device *dev,
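The lis3lv02d refactor above splits "read the ODR index" from "look up the rate" so that index 0 (power-down) can be treated as valid rather than spurious. The index extraction itself is the usual mask-then-shift-by-ffs idiom:

    #include <linux/bitops.h>       /* ffs() */

    /* Extract a bit-field from @reg given its @mask (sketch). */
    static unsigned int field_index(u8 reg, u8 mask)
    {
            return (reg & mask) >> (ffs(mask) - 1);
    }

For an odr_mask of 0xf0, ffs() returns 5, so the field is shifted down by 4.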
+diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
+index 345addd9306de..fa8a7fce4481b 100644
+--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
++++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
+@@ -326,7 +326,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
+ bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
+ {
+ 	int result;
+-	struct vmci_notify_bm_set_msg bitmap_set_msg;
++	struct vmci_notify_bm_set_msg bitmap_set_msg = { };
+ 
+ 	bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ 						  VMCI_SET_NOTIFY_BITMAP);
+diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
+index cc8eeb361fcdb..1018dc77269d4 100644
+--- a/drivers/misc/vmw_vmci/vmci_guest.c
++++ b/drivers/misc/vmw_vmci/vmci_guest.c
+@@ -168,7 +168,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
+ 				VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
+ 	struct vmci_datagram *check_msg;
+ 
+-	check_msg = kmalloc(msg_size, GFP_KERNEL);
++	check_msg = kzalloc(msg_size, GFP_KERNEL);
+ 	if (!check_msg) {
+ 		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
+ 		return -ENOMEM;
+diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
+index 001ed5deb622a..4f63b8430c710 100644
+--- a/drivers/mtd/maps/physmap-core.c
++++ b/drivers/mtd/maps/physmap-core.c
+@@ -69,8 +69,10 @@ static int physmap_flash_remove(struct platform_device *dev)
+ 	int i, err = 0;
+ 
+ 	info = platform_get_drvdata(dev);
+-	if (!info)
++	if (!info) {
++		err = -EINVAL;
+ 		goto out;
++	}
+ 
+ 	if (info->cmtd) {
+ 		err = mtd_device_unregister(info->cmtd);
+diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
+index 323035d4f2d01..688de663cabf6 100644
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -651,16 +651,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
+ 	case MEMGETINFO:
+ 	case MEMREADOOB:
+ 	case MEMREADOOB64:
+-	case MEMLOCK:
+-	case MEMUNLOCK:
+ 	case MEMISLOCKED:
+ 	case MEMGETOOBSEL:
+ 	case MEMGETBADBLOCK:
+-	case MEMSETBADBLOCK:
+ 	case OTPSELECT:
+ 	case OTPGETREGIONCOUNT:
+ 	case OTPGETREGIONINFO:
+-	case OTPLOCK:
+ 	case ECCGETLAYOUT:
+ 	case ECCGETSTATS:
+ 	case MTDFILEMODE:
+@@ -671,9 +667,13 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
+ 	/* "dangerous" commands */
+ 	case MEMERASE:
+ 	case MEMERASE64:
++	case MEMLOCK:
++	case MEMUNLOCK:
++	case MEMSETBADBLOCK:
+ 	case MEMWRITEOOB:
+ 	case MEMWRITEOOB64:
+ 	case MEMWRITE:
++	case OTPLOCK:
+ 		if (!(file->f_mode & FMODE_WRITE))
+ 			return -EPERM;
+ 		break;
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
+index 2d6423d89a175..d97ddc65b5d43 100644
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -820,6 +820,9 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+ 
+ 	/* Prefer parsed partitions over driver-provided fallback */
+ 	ret = parse_mtd_partitions(mtd, types, parser_data);
++	if (ret == -EPROBE_DEFER)
++		goto out;
++
+ 	if (ret > 0)
+ 		ret = 0;
+ 	else if (nr_parts)
+diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
+index 12ca4f19cb14a..665fd9020b764 100644
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -331,7 +331,7 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
+ 
+ 	list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+ 		if (mtd_has_partitions(child))
+-			del_mtd_partitions(child);
++			__del_mtd_partitions(child);
+ 
+ 		pr_info("Deleting %s MTD partition\n", child->name);
+ 		ret = del_mtd_device(child);
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index 659eaa6f0980c..5ff4291380c53 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -2688,6 +2688,12 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
+ 
+ 	ret = brcmstb_choose_ecc_layout(host);
+ 
++	/* If OOB is written with ECC enabled it will cause ECC errors */
++	if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
++		chip->ecc.write_oob = brcmnand_write_oob_raw;
++		chip->ecc.read_oob = brcmnand_read_oob_raw;
++	}
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
+index 0101c0fab50ad..a24e2f57fa68a 100644
+--- a/drivers/mtd/nand/raw/fsmc_nand.c
++++ b/drivers/mtd/nand/raw/fsmc_nand.c
+@@ -1077,11 +1077,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
+ 		host->read_dma_chan = dma_request_channel(mask, filter, NULL);
+ 		if (!host->read_dma_chan) {
+ 			dev_err(&pdev->dev, "Unable to get read dma channel\n");
++			ret = -ENODEV;
+ 			goto disable_clk;
+ 		}
+ 		host->write_dma_chan = dma_request_channel(mask, filter, NULL);
+ 		if (!host->write_dma_chan) {
+ 			dev_err(&pdev->dev, "Unable to get write dma channel\n");
++			ret = -ENODEV;
+ 			goto release_dma_read_chan;
+ 		}
+ 	}
+diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+index 3fa8c22d3f36a..4d08e4ab5c1b6 100644
+--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+@@ -2449,7 +2449,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
+ 	this->bch_geometry.auxiliary_size = 128;
+ 	ret = gpmi_alloc_dma_buffer(this);
+ 	if (ret)
+-		goto err_out;
++		return ret;
+ 
+ 	nand_controller_init(&this->base);
+ 	this->base.ops = &gpmi_nand_controller_ops;
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index fd4c318b520f3..87c23bb320bf5 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -2898,7 +2898,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
+ 	struct device *dev = nandc->dev;
+ 	struct device_node *dn = dev->of_node, *child;
+ 	struct qcom_nand_host *host;
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	for_each_available_child_of_node(dn, child) {
+ 		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+@@ -2916,10 +2916,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
+ 		list_add_tail(&host->node, &nandc->host_list);
+ 	}
+ 
+-	if (list_empty(&nandc->host_list))
+-		return -ENODEV;
+-
+-	return 0;
++	return ret;
+ }
+ 
+ /* parse custom DT properties here */
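The qcom_nandc rewrite above folds the empty-list check into the return value: ret starts at -ENODEV, so a loop that finds no children still reports failure. A reduced sketch of that shape (simplified — here any successful child clears the error, which approximates the driver's flow rather than mirroring it exactly):

    #include <linux/errno.h>

    static int probe_children(int nchildren, int (*init_child)(int idx))
    {
            int ret = -ENODEV;      /* stays -ENODEV if no child comes up */
            int i;

            for (i = 0; i < nchildren; i++) {
                    if (init_child(i))
                            continue;       /* skip failures, keep going */
                    ret = 0;                /* at least one child succeeded */
            }
            return ret;
    }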
+diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
+index 808cb33d71f8e..d9083308f6ba6 100644
+--- a/drivers/mtd/parsers/qcomsmempart.c
++++ b/drivers/mtd/parsers/qcomsmempart.c
+@@ -65,6 +65,13 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
+ 	int ret, i, numparts;
+ 	char *name, *c;
+ 
++	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
++			&& mtd->type == MTD_NORFLASH) {
++		pr_err("%s: SMEM partition parser is incompatible with 4K sectors\n",
++				mtd->name);
++		return -EINVAL;
++	}
++
+ 	pr_debug("Parsing partition table info from SMEM\n");
+ 	ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
+ 	if (IS_ERR(ptable)) {
+@@ -104,7 +111,7 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
+ 	 * complete partition table
+ 	 */
+ 	ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
+-	if (IS_ERR_OR_NULL(ptable)) {
++	if (IS_ERR(ptable)) {
+ 		pr_err("Error reading partition table\n");
+ 		return PTR_ERR(ptable);
+ 	}
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index ba5d546d06aa3..9c86cacc4a727 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -32,6 +32,36 @@
+ #include "b53/b53_priv.h"
+ #include "b53/b53_regs.h"
+ 
++static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
++{
++	switch (priv->type) {
++	case BCM4908_DEVICE_ID:
++		switch (port) {
++		case 7:
++			return REG_RGMII_11_CNTRL;
++		default:
++			break;
++		}
++		break;
++	default:
++		switch (port) {
++		case 0:
++			return REG_RGMII_0_CNTRL;
++		case 1:
++			return REG_RGMII_1_CNTRL;
++		case 2:
++			return REG_RGMII_2_CNTRL;
++		default:
++			break;
++		}
++	}
++
++	WARN_ONCE(1, "Unsupported port %d\n", port);
++
++	/* RO fallback reg */
++	return REG_SWITCH_STATUS;
++}
++
+ /* Return the number of active ports, not counting the IMP (CPU) port */
+ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
+ {
+@@ -647,6 +677,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
+ {
+ 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ 	u32 id_mode_dis = 0, port_mode;
++	u32 reg_rgmii_ctrl;
+ 	u32 reg;
+ 
+ 	if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+@@ -670,10 +701,12 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
+ 		return;
+ 	}
+ 
++	reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
++
+ 	/* Clear id_mode_dis bit, and the existing port mode, let
+ 	 * RGMII_MODE_EN be set by mac_link_{up,down}
+ 	 */
+-	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
++	reg = reg_readl(priv, reg_rgmii_ctrl);
+ 	reg &= ~ID_MODE_DIS;
+ 	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
+ 
+@@ -681,13 +714,14 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
+ 	if (id_mode_dis)
+ 		reg |= ID_MODE_DIS;
+ 
+-	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
++	reg_writel(priv, reg, reg_rgmii_ctrl);
+ }
+ 
+ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
+ 				    phy_interface_t interface, bool link)
+ {
+ 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
++	u32 reg_rgmii_ctrl;
+ 	u32 reg;
+ 
+ 	if (!phy_interface_mode_is_rgmii(interface) &&
+@@ -695,13 +729,15 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
+ 	    interface != PHY_INTERFACE_MODE_REVMII)
+ 		return;
+ 
++	reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
++
+ 	/* If the link is down, just disable the interface to conserve power */
+-	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
++	reg = reg_readl(priv, reg_rgmii_ctrl);
+ 	if (link)
+ 		reg |= RGMII_MODE_EN;
+ 	else
+ 		reg &= ~RGMII_MODE_EN;
+-	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
++	reg_writel(priv, reg, reg_rgmii_ctrl);
+ }
+ 
+ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
+@@ -735,11 +771,15 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+ {
+ 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ 	struct ethtool_eee *p = &priv->dev->ports[port].eee;
+-	u32 reg, offset;
+ 
+ 	bcm_sf2_sw_mac_link_set(ds, port, interface, true);
+ 
+ 	if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
++		u32 reg_rgmii_ctrl;
++		u32 reg, offset;
++
++		reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
++
+ 		if (priv->type == BCM4908_DEVICE_ID ||
+ 		    priv->type == BCM7445_DEVICE_ID)
+ 			offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+@@ -750,7 +790,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+ 		    interface == PHY_INTERFACE_MODE_RGMII_TXID ||
+ 		    interface == PHY_INTERFACE_MODE_MII ||
+ 		    interface == PHY_INTERFACE_MODE_REVMII) {
+-			reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
++			reg = reg_readl(priv, reg_rgmii_ctrl);
+ 			reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
+ 
+ 			if (tx_pause)
+@@ -758,7 +798,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+ 			if (rx_pause)
+ 				reg |= RX_PAUSE_EN;
+ 
+-			reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
++			reg_writel(priv, reg, reg_rgmii_ctrl);
+ 		}
+ 
+ 		reg = SW_OVERRIDE | LINK_STS;
+@@ -1144,9 +1184,7 @@ static const u16 bcm_sf2_4908_reg_offsets[] = {
+ 	[REG_PHY_REVISION]	= 0x14,
+ 	[REG_SPHY_CNTRL]	= 0x24,
+ 	[REG_CROSSBAR]		= 0xc8,
+-	[REG_RGMII_0_CNTRL]	= 0xe0,
+-	[REG_RGMII_1_CNTRL]	= 0xec,
+-	[REG_RGMII_2_CNTRL]	= 0xf8,
++	[REG_RGMII_11_CNTRL]	= 0x014c,
+ 	[REG_LED_0_CNTRL]	= 0x40,
+ 	[REG_LED_1_CNTRL]	= 0x4c,
+ 	[REG_LED_2_CNTRL]	= 0x58,
+diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
+index 1d2d55c9f8aad..9e141d1a0b07c 100644
+--- a/drivers/net/dsa/bcm_sf2_regs.h
++++ b/drivers/net/dsa/bcm_sf2_regs.h
+@@ -21,6 +21,7 @@ enum bcm_sf2_reg_offs {
+ 	REG_RGMII_0_CNTRL,
+ 	REG_RGMII_1_CNTRL,
+ 	REG_RGMII_2_CNTRL,
++	REG_RGMII_11_CNTRL,
+ 	REG_LED_0_CNTRL,
+ 	REG_LED_1_CNTRL,
+ 	REG_LED_2_CNTRL,
+@@ -48,8 +49,6 @@ enum bcm_sf2_reg_offs {
+ #define  PHY_PHYAD_SHIFT		8
+ #define  PHY_PHYAD_MASK			0x1F
+ 
+-#define REG_RGMII_CNTRL_P(x)		(REG_RGMII_0_CNTRL + (x))
+-
+ /* Relative to REG_RGMII_CNTRL */
+ #define  RGMII_MODE_EN			(1 << 0)
+ #define  ID_MODE_DIS			(1 << 1)
+diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
+index 21953d6d484c5..ada7a38d4d313 100644
+--- a/drivers/net/dsa/mv88e6xxx/devlink.c
++++ b/drivers/net/dsa/mv88e6xxx/devlink.c
+@@ -678,7 +678,7 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
+ 				sizeof(struct mv88e6xxx_devlink_atu_entry);
+ 			break;
+ 		case MV88E6XXX_REGION_VTU:
+-			size = mv88e6xxx_max_vid(chip) *
++			size = (mv88e6xxx_max_vid(chip) + 1) *
+ 				sizeof(struct mv88e6xxx_devlink_vtu_entry);
+ 			break;
+ 		}
+diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
+index 3195936dc5be7..2ce04fef698de 100644
+--- a/drivers/net/dsa/mv88e6xxx/serdes.c
++++ b/drivers/net/dsa/mv88e6xxx/serdes.c
+@@ -443,15 +443,15 @@ int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+ {
+ 	/* There are no configurable serdes lanes on this switch chip but we
+-	 * need to return non-zero so that callers of
++	 * need to return a non-negative lane number so that callers of
+ 	 * mv88e6xxx_serdes_get_lane() know this is a serdes port.
+ 	 */
+ 	switch (chip->ports[port].cmode) {
+ 	case MV88E6185_PORT_STS_CMODE_SERDES:
+ 	case MV88E6185_PORT_STS_CMODE_1000BASE_X:
+-		return 0xff;
+-	default:
+ 		return 0;
++	default:
++		return -ENODEV;
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index b53a0d87371a2..73239d3eaca15 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1732,14 +1732,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 
+ 	cons = rxcmp->rx_cmp_opaque;
+ 	if (unlikely(cons != rxr->rx_next_cons)) {
+-		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
++		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
+ 
+ 		/* 0xffff is forced error, don't print it */
+ 		if (rxr->rx_next_cons != 0xffff)
+ 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+ 				    cons, rxr->rx_next_cons);
+ 		bnxt_sched_reset(bp, rxr);
+-		return rc1;
++		if (rc1)
++			return rc1;
++		goto next_rx_no_prod_no_len;
+ 	}
+ 	rx_buf = &rxr->rx_buf_ring[cons];
+ 	data = rx_buf->data;
+@@ -9736,7 +9738,9 @@ static ssize_t bnxt_show_temp(struct device *dev,
+ 	if (!rc)
+ 		len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
+ 	mutex_unlock(&bp->hwrm_cmd_lock);
+-	return rc ?: len;
++	if (rc)
++		return rc;
++	return len;
+ }
+ static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
+ 
+diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+index e6d4ad99cc387..3f1c189646f4e 100644
+--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
++++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+@@ -521,7 +521,7 @@
+ #define    CN23XX_BAR1_INDEX_OFFSET                3
+ 
+ #define    CN23XX_PEM_BAR1_INDEX_REG(port, idx)		\
+-		(CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
++		(CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \
+ 		 ((idx) << CN23XX_BAR1_INDEX_OFFSET))
+ 
+ /*############################ DPI #########################*/
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+index f782e6af45e93..50bbe79fb93df 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -776,7 +776,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+ 	mbx.rq.qs_num = qs->vnic_id;
+ 	mbx.rq.rq_num = qidx;
+-	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
++	mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
+ 			  (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+ 			  (rq->cont_qs_rbdr_idx << 8) |
+ 			  (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 83b46440408ba..bde8494215c41 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -174,31 +174,31 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ 				      WORD_MASK, f->fs.nat_lip[15] |
+ 				      f->fs.nat_lip[14] << 8 |
+ 				      f->fs.nat_lip[13] << 16 |
+-				      f->fs.nat_lip[12] << 24, 1);
++				      (u64)f->fs.nat_lip[12] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
+ 				      WORD_MASK, f->fs.nat_lip[11] |
+ 				      f->fs.nat_lip[10] << 8 |
+ 				      f->fs.nat_lip[9] << 16 |
+-				      f->fs.nat_lip[8] << 24, 1);
++				      (u64)f->fs.nat_lip[8] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
+ 				      WORD_MASK, f->fs.nat_lip[7] |
+ 				      f->fs.nat_lip[6] << 8 |
+ 				      f->fs.nat_lip[5] << 16 |
+-				      f->fs.nat_lip[4] << 24, 1);
++				      (u64)f->fs.nat_lip[4] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
+ 				      WORD_MASK, f->fs.nat_lip[3] |
+ 				      f->fs.nat_lip[2] << 8 |
+ 				      f->fs.nat_lip[1] << 16 |
+-				      f->fs.nat_lip[0] << 24, 1);
++				      (u64)f->fs.nat_lip[0] << 24, 1);
+ 		} else {
+ 			set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
+ 				      WORD_MASK, f->fs.nat_lip[3] |
+ 				      f->fs.nat_lip[2] << 8 |
+ 				      f->fs.nat_lip[1] << 16 |
+-				      f->fs.nat_lip[0] << 24, 1);
++				      (u64)f->fs.nat_lip[0] << 24, 1);
+ 		}
+ 	}
+ 
+@@ -208,25 +208,25 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ 				      WORD_MASK, f->fs.nat_fip[15] |
+ 				      f->fs.nat_fip[14] << 8 |
+ 				      f->fs.nat_fip[13] << 16 |
+-				      f->fs.nat_fip[12] << 24, 1);
++				      (u64)f->fs.nat_fip[12] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
+ 				      WORD_MASK, f->fs.nat_fip[11] |
+ 				      f->fs.nat_fip[10] << 8 |
+ 				      f->fs.nat_fip[9] << 16 |
+-				      f->fs.nat_fip[8] << 24, 1);
++				      (u64)f->fs.nat_fip[8] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
+ 				      WORD_MASK, f->fs.nat_fip[7] |
+ 				      f->fs.nat_fip[6] << 8 |
+ 				      f->fs.nat_fip[5] << 16 |
+-				      f->fs.nat_fip[4] << 24, 1);
++				      (u64)f->fs.nat_fip[4] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
+ 				      WORD_MASK, f->fs.nat_fip[3] |
+ 				      f->fs.nat_fip[2] << 8 |
+ 				      f->fs.nat_fip[1] << 16 |
+-				      f->fs.nat_fip[0] << 24, 1);
++				      (u64)f->fs.nat_fip[0] << 24, 1);
+ 
+ 		} else {
+ 			set_tcb_field(adap, f, tid,
+@@ -234,13 +234,13 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ 				      WORD_MASK, f->fs.nat_fip[3] |
+ 				      f->fs.nat_fip[2] << 8 |
+ 				      f->fs.nat_fip[1] << 16 |
+-				      f->fs.nat_fip[0] << 24, 1);
++				      (u64)f->fs.nat_fip[0] << 24, 1);
+ 		}
+ 	}
+ 
+ 	set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
+ 		      (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
+-		      (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
++		      (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
+ 		      1);
+ }
+ 
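The (u64) casts added throughout the cxgb4 hunks above are not cosmetic: an 8-bit value is promoted to signed int before shifting, so byte values >= 0x80 shifted left by 24 overflow into the sign bit and are then sign-extended when widened. A self-contained demonstration in plain userspace C:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t b = 0x80;
            uint64_t bad  = b << 24;                /* promoted to int: overflows */
            uint64_t good = (uint64_t)b << 24;      /* widened first: well-defined */

            /* Typically prints ffffffff80000000 vs 80000000 */
            printf("%llx vs %llx\n",
                   (unsigned long long)bad, (unsigned long long)good);
            return 0;
    }

Strictly, `b << 24` with b >= 0x80 is signed-overflow undefined behaviour; the sign-extended value shown is merely what common compilers produce.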
+diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
+index 67c436400352f..de7b318422330 100644
+--- a/drivers/net/ethernet/freescale/Makefile
++++ b/drivers/net/ethernet/freescale/Makefile
+@@ -24,6 +24,4 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
+ 
+ obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
+ 
+-obj-$(CONFIG_FSL_ENETC) += enetc/
+-obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
+-obj-$(CONFIG_FSL_ENETC_VF) += enetc/
++obj-y += enetc/
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index bf4302a5cf956..65752f363f431 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3704,7 +3704,6 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
+ 
+ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+ {
+-	struct hnae3_ring_chain_node vector_ring_chain;
+ 	struct hnae3_handle *h = priv->ae_handle;
+ 	struct hns3_enet_tqp_vector *tqp_vector;
+ 	int ret;
+@@ -3736,6 +3735,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+ 	}
+ 
+ 	for (i = 0; i < priv->vector_num; i++) {
++		struct hnae3_ring_chain_node vector_ring_chain;
++
+ 		tqp_vector = &priv->tqp_vector[i];
+ 
+ 		tqp_vector->rx_group.total_bytes = 0;
+diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
+index 25dd903a3e92c..d849b0f65de2d 100644
+--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
++++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
+@@ -431,7 +431,8 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
+ 			netif_carrier_on(port->dev);
+ 			if (!delayed_work_pending(caching_dw))
+ 				queue_delayed_work(prestera_wq, caching_dw, 0);
+-		} else {
++		} else if (netif_running(port->dev) &&
++			   netif_carrier_ok(port->dev)) {
+ 			netif_carrier_off(port->dev);
+ 			if (delayed_work_pending(caching_dw))
+ 				cancel_delayed_work(caching_dw);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+index 22bee49902327..bb61f52d782d9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+@@ -850,7 +850,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
+ 		return;
+ 	}
+ 
+-	if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
++	if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
+ 	    MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ 		ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+index 9143ec326ebfb..f146c618a78e7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+@@ -1532,6 +1532,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *val
+ 
+ 	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
+ 	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
++	misc_mask->source_eswitch_owner_vhca_id = 0;
+ }
+ 
+ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+index 713ee3041d491..bea978df77138 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+@@ -364,6 +364,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
+ 
+ 	attrs.split = eth_port.is_split;
+ 	attrs.splittable = !attrs.split;
++	attrs.lanes = eth_port.port_lanes;
+ 	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ 	attrs.phys.port_number = eth_port.label_port;
+ 	attrs.phys.split_subport_number = eth_port.label_subport;
+diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+index 117188e3c7de2..87b8c032195d0 100644
+--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+@@ -1437,6 +1437,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ {
+ 	struct emac_tpd tpd;
+ 	u32 prod_idx;
++	int len;
+ 
+ 	memset(&tpd, 0, sizeof(tpd));
+ 
+@@ -1456,9 +1457,10 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ 	if (skb_network_offset(skb) != ETH_HLEN)
+ 		TPD_TYP_SET(&tpd, 1);
+ 
++	len = skb->len;
+ 	emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
+ 
+-	netdev_sent_queue(adpt->netdev, skb->len);
++	netdev_sent_queue(adpt->netdev, len);
+ 
+ 	/* Make sure there are enough free descriptors to hold one
+ 	 * maximum-sized SKB.  We need one desc for each fragment,
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index eb0c03bdb12d5..cad57d58d764b 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -911,31 +911,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ 	int q = napi - priv->napi;
+ 	int mask = BIT(q);
+ 	int quota = budget;
+-	u32 ris0, tis;
+ 
+-	for (;;) {
+-		tis = ravb_read(ndev, TIS);
+-		ris0 = ravb_read(ndev, RIS0);
+-		if (!((ris0 & mask) || (tis & mask)))
+-			break;
++	/* Processing RX Descriptor Ring */
++	/* Clear RX interrupt */
++	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
++	if (ravb_rx(ndev, &quota, q))
++		goto out;
+ 
+-		/* Processing RX Descriptor Ring */
+-		if (ris0 & mask) {
+-			/* Clear RX interrupt */
+-			ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+-			if (ravb_rx(ndev, &quota, q))
+-				goto out;
+-		}
+-		/* Processing TX Descriptor Ring */
+-		if (tis & mask) {
+-			spin_lock_irqsave(&priv->lock, flags);
+-			/* Clear TX interrupt */
+-			ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
+-			ravb_tx_free(ndev, q, true);
+-			netif_wake_subqueue(ndev, q);
+-			spin_unlock_irqrestore(&priv->lock, flags);
+-		}
+-	}
++	/* Processing RX Descriptor Ring */
++	spin_lock_irqsave(&priv->lock, flags);
++	/* Clear TX interrupt */
++	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
++	ravb_tx_free(ndev, q, true);
++	netif_wake_subqueue(ndev, q);
++	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+ 	napi_complete(napi);
+ 
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index da6886dcac37c..4fa72b573c172 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -2928,8 +2928,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+ 
+ 	/* Get the transmit queue */
+ 	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
+-	tx_queue = efx_channel_get_tx_queue(channel,
+-					    tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
++	tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+ 
+ 	if (!tx_queue->timestamping) {
+ 		/* Transmit completion */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4749bd0af1607..c6f24abf64328 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2757,8 +2757,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+ 
+ 	/* Enable TSO */
+ 	if (priv->tso) {
+-		for (chan = 0; chan < tx_cnt; chan++)
++		for (chan = 0; chan < tx_cnt; chan++) {
++			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
++
++			/* TSO and TBS cannot co-exist */
++			if (tx_q->tbs & STMMAC_TBS_AVAIL)
++				continue;
++
+ 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
++		}
+ 	}
+ 
+ 	/* Enable Split Header */
+@@ -2850,9 +2857,8 @@ static int stmmac_open(struct net_device *dev)
+ 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+ 
++		/* Setup per-TXQ tbs flag before TX descriptor alloc */
+ 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+-		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
+-			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
+ 	}
+ 
+ 	ret = alloc_dma_desc_resources(priv);
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index c7031e1960d4a..03055c96f0760 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
+ /* EMAC mac_status register */
+ #define EMAC_MACSTATUS_TXERRCODE_MASK	(0xF00000)
+ #define EMAC_MACSTATUS_TXERRCODE_SHIFT	(20)
+-#define EMAC_MACSTATUS_TXERRCH_MASK	(0x7)
++#define EMAC_MACSTATUS_TXERRCH_MASK	(0x70000)
+ #define EMAC_MACSTATUS_TXERRCH_SHIFT	(16)
+ #define EMAC_MACSTATUS_RXERRCODE_MASK	(0xF000)
+ #define EMAC_MACSTATUS_RXERRCODE_SHIFT	(12)
+-#define EMAC_MACSTATUS_RXERRCH_MASK	(0x7)
++#define EMAC_MACSTATUS_RXERRCH_MASK	(0x700)
+ #define EMAC_MACSTATUS_RXERRCH_SHIFT	(8)
+ 
+ /* EMAC RX register masks */
+diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
+index c6eb7f2368aa3..911b5ef9e6803 100644
+--- a/drivers/net/ethernet/xilinx/Kconfig
++++ b/drivers/net/ethernet/xilinx/Kconfig
+@@ -18,12 +18,14 @@ if NET_VENDOR_XILINX
+ 
+ config XILINX_EMACLITE
+ 	tristate "Xilinx 10/100 Ethernet Lite support"
++	depends on HAS_IOMEM
+ 	select PHYLIB
+ 	help
+ 	  This driver supports the 10/100 Ethernet Lite from Xilinx.
+ 
+ config XILINX_AXI_EMAC
+ 	tristate "Xilinx 10/100/1000 AXI Ethernet support"
++	depends on HAS_IOMEM
+ 	select PHYLINK
+ 	help
+ 	  This driver supports the 10/100/1000 Ethernet from Xilinx for the
+@@ -31,6 +33,7 @@ config XILINX_AXI_EMAC
+ 
+ config XILINX_LL_TEMAC
+ 	tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
++	depends on HAS_IOMEM
+ 	select PHYLIB
+ 	help
+ 	  This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
+diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
+index 0152f1e707834..9defaa21a1a9e 100644
+--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
++++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
+@@ -1085,7 +1085,7 @@ static int init_queues(struct port *port)
+ 	int i;
+ 
+ 	if (!ports_open) {
+-		dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
++		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+ 					   POOL_ALLOC_SIZE, 32, 0);
+ 		if (!dma_pool)
+ 			return -ENOMEM;
+@@ -1435,6 +1435,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
+ 	ndev->netdev_ops = &ixp4xx_netdev_ops;
+ 	ndev->ethtool_ops = &ixp4xx_ethtool_ops;
+ 	ndev->tx_queue_len = 100;
++	/* Inherit the DMA masks from the platform device */
++	ndev->dev.dma_mask = dev->dma_mask;
++	ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
+ 
+ 	netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
+ 
+diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
+index f722079dfb6ae..f99c1048c97e3 100644
+--- a/drivers/net/fddi/Kconfig
++++ b/drivers/net/fddi/Kconfig
+@@ -40,17 +40,20 @@ config DEFXX
+ 
+ config DEFXX_MMIO
+ 	bool
+-	prompt "Use MMIO instead of PIO" if PCI || EISA
++	prompt "Use MMIO instead of IOP" if PCI || EISA
+ 	depends on DEFXX
+-	default n if PCI || EISA
++	default n if EISA
+ 	default y
+ 	help
+ 	  This instructs the driver to use EISA or PCI memory-mapped I/O
+-	  (MMIO) as appropriate instead of programmed I/O ports (PIO).
++	  (MMIO) as appropriate instead of programmed I/O ports (IOP).
+ 	  Enabling this gives an improvement in processing time in parts
+-	  of the driver, but it may cause problems with EISA (DEFEA)
+-	  adapters.  TURBOchannel does not have the concept of I/O ports,
+-	  so MMIO is always used for these (DEFTA) adapters.
++	  of the driver, but it requires a memory window to be configured
++	  for EISA (DEFEA) adapters that may not always be available.
++	  Conversely, some PCIe host bridges do not support IOP, so MMIO
++	  may be required to access PCI (DEFPA) adapters on downstream PCI
++	  buses on some systems.  TURBOchannel does not have the concept
++	  of I/O ports, so MMIO is always used for these (DEFTA) adapters.
+ 
+ 	  If unsure, say N.
+ 
+diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
+index 077c68498f048..c7ce6d5491afc 100644
+--- a/drivers/net/fddi/defxx.c
++++ b/drivers/net/fddi/defxx.c
+@@ -495,6 +495,25 @@ static const struct net_device_ops dfx_netdev_ops = {
+ 	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
+ };
+ 
++static void dfx_register_res_alloc_err(const char *print_name, bool mmio,
++				       bool eisa)
++{
++	pr_err("%s: Cannot use %s, no address set, aborting\n",
++	       print_name, mmio ? "MMIO" : "I/O");
++	pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n",
++	       print_name, mmio ? 'n' : 'y');
++	if (eisa && mmio)
++		pr_err("%s: Or run ECU and set adapter's MMIO location\n",
++		       print_name);
++}
++
++static void dfx_register_res_err(const char *print_name, bool mmio,
++				 unsigned long start, unsigned long len)
++{
++	pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
++	       print_name, mmio ? "MMIO" : "I/O", len, start);
++}
++
+ /*
+  * ================
+  * = dfx_register =
+@@ -568,15 +587,12 @@ static int dfx_register(struct device *bdev)
+ 	dev_set_drvdata(bdev, dev);
+ 
+ 	dfx_get_bars(bdev, bar_start, bar_len);
+-	if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
+-		pr_err("%s: Cannot use MMIO, no address set, aborting\n",
+-		       print_name);
+-		pr_err("%s: Run ECU and set adapter's MMIO location\n",
+-		       print_name);
+-		pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
+-		       "\n", print_name);
++	if (bar_len[0] == 0 ||
++	    (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
++		dfx_register_res_alloc_err(print_name, dfx_use_mmio,
++					   dfx_bus_eisa);
+ 		err = -ENXIO;
+-		goto err_out;
++		goto err_out_disable;
+ 	}
+ 
+ 	if (dfx_use_mmio)
+@@ -585,18 +601,16 @@ static int dfx_register(struct device *bdev)
+ 	else
+ 		region = request_region(bar_start[0], bar_len[0], print_name);
+ 	if (!region) {
+-		pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
+-		       "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
+-		       (long)bar_len[0], (long)bar_start[0]);
++		dfx_register_res_err(print_name, dfx_use_mmio,
++				     bar_start[0], bar_len[0]);
+ 		err = -EBUSY;
+ 		goto err_out_disable;
+ 	}
+ 	if (bar_start[1] != 0) {
+ 		region = request_region(bar_start[1], bar_len[1], print_name);
+ 		if (!region) {
+-			pr_err("%s: Cannot reserve I/O resource "
+-			       "0x%lx @ 0x%lx, aborting\n", print_name,
+-			       (long)bar_len[1], (long)bar_start[1]);
++			dfx_register_res_err(print_name, 0,
++					     bar_start[1], bar_len[1]);
+ 			err = -EBUSY;
+ 			goto err_out_csr_region;
+ 		}
+@@ -604,9 +618,8 @@ static int dfx_register(struct device *bdev)
+ 	if (bar_start[2] != 0) {
+ 		region = request_region(bar_start[2], bar_len[2], print_name);
+ 		if (!region) {
+-			pr_err("%s: Cannot reserve I/O resource "
+-			       "0x%lx @ 0x%lx, aborting\n", print_name,
+-			       (long)bar_len[2], (long)bar_start[2]);
++			dfx_register_res_err(print_name, 0,
++					     bar_start[2], bar_len[2]);
+ 			err = -EBUSY;
+ 			goto err_out_bh_region;
+ 		}
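
The defxx hunks above fold three nearly identical error printouts into two
small helpers; notably, the old request_region() failure message passed the
mode string and print_name in the wrong order for its "%s: ..." format, so
the helper also repairs the argument order. A minimal user-space sketch of
the consolidation, with pr_err() stubbed to stderr and every name
hypothetical:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for the kernel's pr_err(): log to stderr. */
#define pr_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

/* One helper replaces three near-duplicate error paths, so the format
 * string and argument order are written (and fixed) exactly once. */
static void res_err(const char *name, bool mmio,
		    unsigned long start, unsigned long len)
{
	pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
	       name, mmio ? "MMIO" : "I/O", len, start);
}

int main(void)
{
	res_err("fddi0", true, 0xfe000000UL, 0x400UL);
	res_err("fddi0", false, 0x3000UL, 0x20UL);
	return 0;
}
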
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 42f31c6818462..61cd3dd4deab6 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -891,7 +891,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
+-	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
++	if (!pskb_inet_may_pull(skb))
+ 		return -EINVAL;
+ 
+ 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+@@ -988,7 +988,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
+-	if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
++	if (!pskb_inet_may_pull(skb))
+ 		return -EINVAL;
+ 
+ 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
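
pskb_inet_may_pull() checks for the header that matches the packet's actual
inner protocol, where the replaced calls hard-coded an IPv4 header size in
the IPv4 path and an IPv6 header size in the IPv6 path, letting a
mismatched inner packet through. A rough user-space sketch of the idea,
with the kernel types reduced to a stub and header sizes hard-coded for
illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for struct sk_buff, for illustration only. */
struct sk_buff { uint16_t protocol; size_t len; };

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

/* Sketch of the pskb_inet_may_pull() idea: derive the required header
 * length from the packet's own protocol instead of hard-coding the one
 * the caller happens to expect. */
static bool inet_may_pull(const struct sk_buff *skb)
{
	size_t need;

	switch (skb->protocol) {
	case ETH_P_IP:   need = 20; break;	/* sizeof(struct iphdr) */
	case ETH_P_IPV6: need = 40; break;	/* sizeof(struct ipv6hdr) */
	default:	 return false;
	}
	return skb->len >= need;
}

int main(void)
{
	/* 24 bytes would satisfy an IPv4-sized check but not IPv6. */
	struct sk_buff v6 = { .protocol = ETH_P_IPV6, .len = 24 };

	printf("may pull: %d\n", inet_may_pull(&v6));	/* prints 0 */
	return 0;
}
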
+diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
+index 6eac50d4b42fc..d453ec0161688 100644
+--- a/drivers/net/phy/intel-xway.c
++++ b/drivers/net/phy/intel-xway.c
+@@ -11,6 +11,18 @@
+ 
+ #define XWAY_MDIO_IMASK			0x19	/* interrupt mask */
+ #define XWAY_MDIO_ISTAT			0x1A	/* interrupt status */
++#define XWAY_MDIO_LED			0x1B	/* led control */
++
++/* bits 15:12 are reserved */
++#define XWAY_MDIO_LED_LED3_EN		BIT(11)	/* Enable the integrated function of LED3 */
++#define XWAY_MDIO_LED_LED2_EN		BIT(10)	/* Enable the integrated function of LED2 */
++#define XWAY_MDIO_LED_LED1_EN		BIT(9)	/* Enable the integrated function of LED1 */
++#define XWAY_MDIO_LED_LED0_EN		BIT(8)	/* Enable the integrated function of LED0 */
++/* bits 7:4 are reserved */
++#define XWAY_MDIO_LED_LED3_DA		BIT(3)	/* Direct Access to LED3 */
++#define XWAY_MDIO_LED_LED2_DA		BIT(2)	/* Direct Access to LED2 */
++#define XWAY_MDIO_LED_LED1_DA		BIT(1)	/* Direct Access to LED1 */
++#define XWAY_MDIO_LED_LED0_DA		BIT(0)	/* Direct Access to LED0 */
+ 
+ #define XWAY_MDIO_INIT_WOL		BIT(15)	/* Wake-On-LAN */
+ #define XWAY_MDIO_INIT_MSRE		BIT(14)
+@@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev)
+ 	/* Clear all pending interrupts */
+ 	phy_read(phydev, XWAY_MDIO_ISTAT);
+ 
++	/* Ensure that the integrated LED function is enabled for all LEDs */
++	err = phy_write(phydev, XWAY_MDIO_LED,
++			XWAY_MDIO_LED_LED0_EN |
++			XWAY_MDIO_LED_LED1_EN |
++			XWAY_MDIO_LED_LED2_EN |
++			XWAY_MDIO_LED_LED3_EN);
++	if (err)
++		return err;
++
+ 	phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
+ 		      XWAY_MMD_LEDCH_NACS_NONE |
+ 		      XWAY_MMD_LEDCH_SBF_F02HZ |
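
The new XWAY_MDIO_LED_*_EN definitions follow the usual BIT() register-map
style, and config_init now ORs all four enable bits into a single
phy_write(). A stand-alone sketch of that composition, with the register
names copied from the hunk and everything else hypothetical:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Bit layout copied from the hunk above. */
#define XWAY_MDIO_LED_LED3_EN	BIT(11)
#define XWAY_MDIO_LED_LED2_EN	BIT(10)
#define XWAY_MDIO_LED_LED1_EN	BIT(9)
#define XWAY_MDIO_LED_LED0_EN	BIT(8)

int main(void)
{
	/* OR the per-LED enable bits into one 16-bit register value,
	 * exactly as the phy_write() in the patch does. */
	uint16_t val = XWAY_MDIO_LED_LED0_EN | XWAY_MDIO_LED_LED1_EN |
		       XWAY_MDIO_LED_LED2_EN | XWAY_MDIO_LED_LED3_EN;

	printf("LED register value: 0x%04x\n", val);	/* 0x0f00 */
	return 0;
}
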
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index 8018ddf7f3162..f86c9ddc609ea 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -967,22 +967,28 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
+ 
+ static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
+ {
+-	int val;
++	int val, err;
+ 
+ 	if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
+ 		return -E2BIG;
+ 
+-	if (!cnt)
+-		return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
+-				      MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
++	if (!cnt) {
++		err = phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
++				     MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
++	} else {
++		val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
++		val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
+ 
+-	val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
+-	val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
++		err = phy_modify(phydev, MII_M1111_PHY_EXT_CR,
++				 MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
++				 MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
++				 val);
++	}
+ 
+-	return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+-			  MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
+-			  MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
+-			  val);
++	if (err < 0)
++		return err;
++
++	return genphy_soft_reset(phydev);
+ }
+ 
+ static int m88e1111_get_tunable(struct phy_device *phydev,
+@@ -1025,22 +1031,28 @@ static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
+ 
+ static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
+ {
+-	int val;
++	int val, err;
+ 
+ 	if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
+ 		return -E2BIG;
+ 
+-	if (!cnt)
+-		return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
+-				      MII_M1011_PHY_SCR_DOWNSHIFT_EN);
++	if (!cnt) {
++		err = phy_clear_bits(phydev, MII_M1011_PHY_SCR,
++				     MII_M1011_PHY_SCR_DOWNSHIFT_EN);
++	} else {
++		val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
++		val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
+ 
+-	val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
+-	val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
++		err = phy_modify(phydev, MII_M1011_PHY_SCR,
++				 MII_M1011_PHY_SCR_DOWNSHIFT_EN |
++				 MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
++				 val);
++	}
+ 
+-	return phy_modify(phydev, MII_M1011_PHY_SCR,
+-			  MII_M1011_PHY_SCR_DOWNSHIFT_EN |
+-			  MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
+-			  val);
++	if (err < 0)
++		return err;
++
++	return genphy_soft_reset(phydev);
+ }
+ 
+ static int m88e1011_get_tunable(struct phy_device *phydev,
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index ddb78fb4d6dc3..d8cac02a79b95 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -185,10 +185,13 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
+ 	return genphy_config_aneg(phydev);
+ }
+ 
+-static int lan87xx_config_aneg_ext(struct phy_device *phydev)
++static int lan95xx_config_aneg_ext(struct phy_device *phydev)
+ {
+ 	int rc;
+ 
++	if (phydev->phy_id != 0x0007c0f0) /* not (LAN9500A or LAN9505A) */
++		return lan87xx_config_aneg(phydev);
++
+ 	/* Extend Manual AutoMDIX timer */
+ 	rc = phy_read(phydev, PHY_EDPD_CONFIG);
+ 	if (rc < 0)
+@@ -441,7 +444,7 @@ static struct phy_driver smsc_phy_driver[] = {
+ 	.read_status	= lan87xx_read_status,
+ 	.config_init	= smsc_phy_config_init,
+ 	.soft_reset	= smsc_phy_reset,
+-	.config_aneg	= lan87xx_config_aneg_ext,
++	.config_aneg	= lan95xx_config_aneg_ext,
+ 
+ 	/* IRQ related */
+ 	.config_intr	= smsc_phy_config_intr,
+diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
+index 4d9dc7d159089..0720f5f92caa7 100644
+--- a/drivers/net/wan/hdlc_fr.c
++++ b/drivers/net/wan/hdlc_fr.c
+@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 		if (pad > 0) { /* Pad the frame with zeros */
+ 			if (__skb_pad(skb, pad, false))
+-				goto out;
++				goto drop;
+ 			skb_put(skb, pad);
+ 		}
+ 	}
+@@ -448,9 +448,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	return NETDEV_TX_OK;
+ 
+ drop:
+-	kfree_skb(skb);
+-out:
+ 	dev->stats.tx_dropped++;
++	kfree_skb(skb);
+ 	return NETDEV_TX_OK;
+ }
+ 
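
The hdlc_fr fix reroutes the padding-failure path through the drop: label,
so the skb is freed and tx_dropped incremented in one place; the old out:
label counted nothing and freed nothing, leaking the skb. The corrected
single-exit error flow in miniature, all names hypothetical:

#include <stdio.h>

static unsigned long tx_dropped;

/* Every failure path goes through one label that both accounts the
 * drop and would free the buffer. */
static int xmit(int pad_fails)
{
	if (pad_fails)
		goto drop;

	return 0;			/* frame sent */

drop:
	tx_dropped++;
	/* kfree_skb(skb) goes here in the driver */
	return 0;			/* NETDEV_TX_OK either way */
}

int main(void)
{
	xmit(1);
	printf("tx_dropped = %lu\n", tx_dropped);	/* 1 */
	return 0;
}
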
+diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
+index c3372498f4f15..8fda0446ff71e 100644
+--- a/drivers/net/wan/lapbether.c
++++ b/drivers/net/wan/lapbether.c
+@@ -51,6 +51,8 @@ struct lapbethdev {
+ 	struct list_head	node;
+ 	struct net_device	*ethdev;	/* link to ethernet device */
+ 	struct net_device	*axdev;		/* lapbeth device (lapb#) */
++	bool			up;
++	spinlock_t		up_lock;	/* Protects "up" */
+ };
+ 
+ static LIST_HEAD(lapbeth_devices);
+@@ -101,8 +103,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
+ 	rcu_read_lock();
+ 	lapbeth = lapbeth_get_x25_dev(dev);
+ 	if (!lapbeth)
+-		goto drop_unlock;
+-	if (!netif_running(lapbeth->axdev))
++		goto drop_unlock_rcu;
++	spin_lock_bh(&lapbeth->up_lock);
++	if (!lapbeth->up)
+ 		goto drop_unlock;
+ 
+ 	len = skb->data[0] + skb->data[1] * 256;
+@@ -117,11 +120,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
+ 		goto drop_unlock;
+ 	}
+ out:
++	spin_unlock_bh(&lapbeth->up_lock);
+ 	rcu_read_unlock();
+ 	return 0;
+ drop_unlock:
+ 	kfree_skb(skb);
+ 	goto out;
++drop_unlock_rcu:
++	rcu_read_unlock();
+ drop:
+ 	kfree_skb(skb);
+ 	return 0;
+@@ -151,13 +157,11 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
+ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
+ 				      struct net_device *dev)
+ {
++	struct lapbethdev *lapbeth = netdev_priv(dev);
+ 	int err;
+ 
+-	/*
+-	 * Just to be *really* sure not to send anything if the interface
+-	 * is down, the ethernet device may have gone.
+-	 */
+-	if (!netif_running(dev))
++	spin_lock_bh(&lapbeth->up_lock);
++	if (!lapbeth->up)
+ 		goto drop;
+ 
+ 	/* There should be a pseudo header of 1 byte added by upper layers.
+@@ -194,6 +198,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
+ 		goto drop;
+ 	}
+ out:
++	spin_unlock_bh(&lapbeth->up_lock);
+ 	return NETDEV_TX_OK;
+ drop:
+ 	kfree_skb(skb);
+@@ -285,6 +290,7 @@ static const struct lapb_register_struct lapbeth_callbacks = {
+  */
+ static int lapbeth_open(struct net_device *dev)
+ {
++	struct lapbethdev *lapbeth = netdev_priv(dev);
+ 	int err;
+ 
+ 	if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
+@@ -292,13 +298,22 @@ static int lapbeth_open(struct net_device *dev)
+ 		return -ENODEV;
+ 	}
+ 
++	spin_lock_bh(&lapbeth->up_lock);
++	lapbeth->up = true;
++	spin_unlock_bh(&lapbeth->up_lock);
++
+ 	return 0;
+ }
+ 
+ static int lapbeth_close(struct net_device *dev)
+ {
++	struct lapbethdev *lapbeth = netdev_priv(dev);
+ 	int err;
+ 
++	spin_lock_bh(&lapbeth->up_lock);
++	lapbeth->up = false;
++	spin_unlock_bh(&lapbeth->up_lock);
++
+ 	if ((err = lapb_unregister(dev)) != LAPB_OK)
+ 		pr_err("lapb_unregister error: %d\n", err);
+ 
+@@ -356,6 +371,9 @@ static int lapbeth_new_device(struct net_device *dev)
+ 	dev_hold(dev);
+ 	lapbeth->ethdev = dev;
+ 
++	lapbeth->up = false;
++	spin_lock_init(&lapbeth->up_lock);
++
+ 	rc = -EIO;
+ 	if (register_netdevice(ndev))
+ 		goto fail;
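
lapbether replaces its racy netif_running() tests with an up flag guarded
by a dedicated spinlock, so open/close serialize against the rx and xmit
paths. The same pattern in portable C, with a pthread mutex standing in
for spin_lock_bh() and all names hypothetical:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* A boolean guarded by its own lock, checked under that lock on every
 * data path, instead of an unserialized "is the interface running" test. */
struct dev_state {
	bool up;
	pthread_mutex_t up_lock;
};

static int xmit(struct dev_state *s)
{
	int ret = -1;

	pthread_mutex_lock(&s->up_lock);
	if (s->up)
		ret = 0;	/* safe to transmit: close cannot race us */
	pthread_mutex_unlock(&s->up_lock);
	return ret;
}

static void close_dev(struct dev_state *s)
{
	pthread_mutex_lock(&s->up_lock);
	s->up = false;		/* after this, no xmit can enter */
	pthread_mutex_unlock(&s->up_lock);
}

int main(void)
{
	struct dev_state s = { .up = true,
			       .up_lock = PTHREAD_MUTEX_INITIALIZER };

	printf("xmit while up:   %d\n", xmit(&s));	/* 0 */
	close_dev(&s);
	printf("xmit after down: %d\n", xmit(&s));	/* -1 */
	return 0;
}
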
+diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
+index 0a37be6a7d33d..fab398046a3f2 100644
+--- a/drivers/net/wireless/ath/ath10k/htc.c
++++ b/drivers/net/wireless/ath/ath10k/htc.c
+@@ -669,7 +669,7 @@ static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
+ 
+ 	ath10k_dbg(ar, ATH10K_DBG_HTC,
+ 		   "bundle tx status %d eid %d req count %d count %d len %d\n",
+-		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
++		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
+ 	return ret;
+ }
+ 
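
The htc change logs a length saved before the bundle skb was handed off:
the send path consumes (and may free) the skb, so dereferencing
bundle_skb->len afterwards was a use-after-free. The hazard in miniature,
user-space, all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; char *data; };

/* Consumes and frees the buffer, like the bundle-send path does. */
static int send_and_free(struct buf *b)
{
	free(b->data);
	free(b);
	return 0;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	b->len = 128;
	b->data = malloc(b->len);

	/* Save the length first; after send_and_free() the struct is
	 * gone, and reading b->len (as the old log line did with
	 * bundle_skb->len) would be a use-after-free. */
	size_t len = b->len;
	int ret = send_and_free(b);

	printf("tx status %d len %zu\n", ret, len);
	return 0;
}
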
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index d97b33f789e44..7efbe03fbca82 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -592,6 +592,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
+ 					GFP_ATOMIC
+ 					);
+ 		break;
++	default:
++		kfree(tb);
++		return;
+ 	}
+ 
+ exit:
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index db0c6fa9c9dc4..ff61ae34ecdf0 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
+ 	if (unlikely(r)) {
+ 		ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
+ 			reg_offset, r);
+-		return -EIO;
++		return -1;
+ 	}
+ 
+ 	return be32_to_cpu(val);
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 5abc2a5526ecf..2ca3b86714a9d 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -286,7 +286,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
+ 
+ 	srev = REG_READ(ah, AR_SREV);
+ 
+-	if (srev == -EIO) {
++	if (srev == -1) {
+ 		ath_err(ath9k_hw_common(ah),
+ 			"Failed to read SREV register");
+ 		return false;
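
These two ath9k hunks make the failed-register-read sentinel consistent:
the read path now returns -1 (all-ones, the natural value of a dead bus
read) rather than -EIO, whose u32 representation is not all-ones, so the
srev comparison in ath9k_hw_read_revisions() can actually match. The two
bit patterns:

#include <stdint.h>
#include <stdio.h>

#define EIO 5

int main(void)
{
	/* The register-read helper returns u32, so a negative errno is
	 * just a large value, and callers must compare against exactly
	 * the sentinel the helper uses. */
	uint32_t as_eio = (uint32_t)-EIO;	/* 0xfffffffb */
	uint32_t as_one = (uint32_t)-1;		/* 0xffffffff */

	printf("-EIO as u32: 0x%08x\n", as_eio);
	printf("-1   as u32: 0x%08x\n", as_one);
	return 0;
}
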
+diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+index a0cf78c418ac9..903de34028efb 100644
+--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
++++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+@@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
+ 	}
+ 
+ 	if (ext->alg != IW_ENCODE_ALG_NONE) {
+-		memcpy(sec.keys[idx], ext->key, ext->key_len);
+-		sec.key_sizes[idx] = ext->key_len;
++		int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN);
++
++		memcpy(sec.keys[idx], ext->key, key_len);
++		sec.key_sizes[idx] = key_len;
+ 		sec.flags |= (1 << idx);
+ 		if (ext->alg == IW_ENCODE_ALG_WEP) {
+ 			sec.encode_alg[idx] = SEC_ALG_WEP;
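
The libipw fix bounds a caller-supplied key length by the destination
buffer before the memcpy(). A stand-alone illustration with a portable
clamp_val() stand-in; the SCM_KEY_LEN value here is assumed for the demo:

#include <stdio.h>
#include <string.h>

#define SCM_KEY_LEN 32	/* destination key buffer size, assumed */

/* Portable stand-in for the kernel's clamp_val(). */
#define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
	char dst[SCM_KEY_LEN];
	char src[64] = "caller-controlled key material ...";
	int ext_key_len = 64;	/* attacker-chosen, larger than dst */

	/* Bound the copy by the destination size, as the fix does,
	 * instead of trusting ext->key_len. */
	int key_len = clamp_val(ext_key_len, 0, SCM_KEY_LEN);

	memcpy(dst, src, key_len);
	printf("copied %d of %d requested bytes\n", key_len, ext_key_len);
	return 0;
}
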
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index 579bc81cc0ae2..4cd8c39cc3e95 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ #include <linux/firmware.h>
+ #include "iwl-drv.h"
+@@ -426,7 +426,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
+ 	const struct firmware *fw;
+ 	int res;
+ 
+-	if (!iwlwifi_mod_params.enable_ini)
++	if (!iwlwifi_mod_params.enable_ini ||
++	    trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
+ 		return;
+ 
+ 	res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+index 8772b65c9dabb..2d58cb969918f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+  * Copyright (C) 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ #include "rs.h"
+ #include "fw-api.h"
+@@ -72,19 +72,15 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
+ 	bool vht_ena = vht_cap->vht_supported;
+ 	u16 flags = 0;
+ 
++	/* get STBC flags */
+ 	if (mvm->cfg->ht_params->stbc &&
+ 	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
+-		if (he_cap->has_he) {
+-			if (he_cap->he_cap_elem.phy_cap_info[2] &
+-			    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+-				flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+-
+-			if (he_cap->he_cap_elem.phy_cap_info[7] &
+-			    IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
+-				flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
+-		} else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) ||
+-			   (vht_ena &&
+-			    (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))
++		if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
++				      IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
++			flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
++		else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
++			flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
++		else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
+ 			flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ 	}
+ 
+diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
+index c9f8c056aa517..84b32a5f01ee8 100644
+--- a/drivers/net/wireless/marvell/mwl8k.c
++++ b/drivers/net/wireless/marvell/mwl8k.c
+@@ -1473,6 +1473,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
+ 	if (txq->skb == NULL) {
+ 		dma_free_coherent(&priv->pdev->dev, size, txq->txd,
+ 				  txq->txd_dma);
++		txq->txd = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
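
mwl8k now clears txq->txd right after dma_free_coherent(), so a later
teardown pass cannot hand the same address to the allocator twice. The
pattern in plain C, where free(NULL) makes the second pass a no-op:

#include <stdlib.h>

struct txq { void *txd; };

/* Freeing without clearing the pointer leaves a dangling reference that
 * a second cleanup pass would free again; clearing it makes teardown
 * idempotent. */
static void txq_teardown(struct txq *q)
{
	free(q->txd);
	q->txd = NULL;	/* the one-line fix */
}

int main(void)
{
	struct txq q = { .txd = malloc(64) };

	txq_teardown(&q);
	txq_teardown(&q);	/* safe: free(NULL) is defined as a no-op */
	return 0;
}
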
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 2f27c43ad76df..7196fa9047e6d 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -309,7 +309,7 @@ static int
+ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
+ 			  struct sk_buff *skb, u32 tx_info)
+ {
+-	struct mt76_queue_buf buf;
++	struct mt76_queue_buf buf = {};
+ 	dma_addr_t addr;
+ 
+ 	if (q->queued + 1 >= q->ndesc - 1)
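
The mt76 one-liner zero-initializes the on-stack queue buffer, so any
member the function never assigns no longer carries stack garbage into the
DMA path. A tiny illustration (the struct fields here are invented for the
demo):

#include <stdio.h>

struct queue_buf { unsigned long addr; int len; int skip_unmap; };

int main(void)
{
	/* "= {}" zero-fills every member, so a field the code below
	 * never touches (skip_unmap) is 0 rather than whatever the
	 * stack held. */
	struct queue_buf buf = {};

	buf.addr = 0x1000;
	buf.len = 256;
	printf("skip_unmap = %d\n", buf.skip_unmap);	/* always 0 */
	return 0;
}
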
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index 59fdd0fc2ad4f..d73841480544a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -690,7 +690,7 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
+ {
+ 	int i;
+ 
+-	for (i = 1; i < txp->nbuf; i++)
++	for (i = 0; i < txp->nbuf; i++)
+ 		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ 				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+ }
+@@ -966,6 +966,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
+ 	struct mt7615_dev *dev = phy->dev;
+ 	struct mt7615_rate_desc rd;
+ 	u32 w5, w27, addr;
++	u16 idx = sta->vif->mt76.omac_idx;
+ 
+ 	if (!mt76_is_mmio(&dev->mt76)) {
+ 		mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
+@@ -1017,7 +1018,10 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
+ 
+ 	mt76_wr(dev, addr + 27 * 4, w27);
+ 
+-	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
++	idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
++	addr = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
++
++	mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
+ 	sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
+ 	sta->rate_set_tsf |= rd.rateset;
+ 
+@@ -1821,10 +1825,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
+ 	int i, aggr;
+ 	u32 val, val2;
+ 
+-	memset(mib, 0, sizeof(*mib));
+-
+-	mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+-					  MT_MIB_SDR3_FCS_ERR_MASK);
++	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
++					   MT_MIB_SDR3_FCS_ERR_MASK);
+ 
+ 	val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
+ 			     MT_MIB_AMPDU_MPDU_COUNT);
+@@ -1837,24 +1839,16 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
+ 	aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ 	for (i = 0; i < 4; i++) {
+ 		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
+-
+-		val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+-		if (val2 > mib->ack_fail_cnt)
+-			mib->ack_fail_cnt = val2;
+-
+-		val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+-		if (val2 > mib->ba_miss_cnt)
+-			mib->ba_miss_cnt = val2;
++		mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
++		mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
++					       val);
+ 
+ 		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+-		val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+-		if (val2 > mib->rts_retries_cnt) {
+-			mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+-			mib->rts_retries_cnt = val2;
+-		}
++		mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
++		mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
++						  val);
+ 
+ 		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+-
+ 		dev->mt76.aggr_stats[aggr++] += val & 0xffff;
+ 		dev->mt76.aggr_stats[aggr++] += val >> 16;
+ 	}
+@@ -1976,15 +1970,17 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
+ 	mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ 		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ 		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
++
+ 	usleep_range(1000, 2000);
+ 
+-	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
+ 	for (i = 0; i < __MT_TXQ_MAX; i++)
+ 		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ 
+-	mt76_for_each_q_rx(&dev->mt76, i) {
++	for (i = 0; i < __MT_MCUQ_MAX; i++)
++		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
++
++	mt76_for_each_q_rx(&dev->mt76, i)
+ 		mt76_queue_rx_reset(dev, i);
+-	}
+ 
+ 	mt76_set(dev, MT_WPDMA_GLO_CFG,
+ 		 MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+@@ -2000,8 +1996,12 @@ void mt7615_tx_token_put(struct mt7615_dev *dev)
+ 	spin_lock_bh(&dev->token_lock);
+ 	idr_for_each_entry(&dev->token, txwi, id) {
+ 		mt7615_txp_skb_unmap(&dev->mt76, txwi);
+-		if (txwi->skb)
+-			dev_kfree_skb_any(txwi->skb);
++		if (txwi->skb) {
++			struct ieee80211_hw *hw;
++
++			hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
++			ieee80211_free_txskb(hw, txwi->skb);
++		}
+ 		mt76_put_txwi(&dev->mt76, txwi);
+ 	}
+ 	spin_unlock_bh(&dev->token_lock);
+@@ -2304,8 +2304,10 @@ void mt7615_coredump_work(struct work_struct *work)
+ 			break;
+ 
+ 		skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
+-		if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
+-			break;
++		if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
++			dev_kfree_skb(skb);
++			continue;
++		}
+ 
+ 		memcpy(data, skb->data, skb->len);
+ 		data += skb->len;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 25faf486d2795..6107e827b3836 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -217,8 +217,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
+ 	ret = mt7615_mcu_add_dev_info(phy, vif, true);
+ 	if (ret)
+ 		goto out;
+-
+-	mt7615_mac_set_beacon_filter(phy, vif, true);
+ out:
+ 	mt7615_mutex_release(dev);
+ 
+@@ -244,7 +242,6 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
+ 
+ 	mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
+ 
+-	mt7615_mac_set_beacon_filter(phy, vif, false);
+ 	mt7615_mcu_add_dev_info(phy, vif, false);
+ 
+ 	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+@@ -544,6 +541,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
+ 	if (changed & BSS_CHANGED_ARP_FILTER)
+ 		mt7615_mcu_update_arp_filter(hw, vif, info);
+ 
++	if (changed & BSS_CHANGED_ASSOC)
++		mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
++
+ 	mt7615_mutex_release(dev);
+ }
+ 
+@@ -803,26 +803,38 @@ mt7615_get_stats(struct ieee80211_hw *hw,
+ 	struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ 	struct mib_stats *mib = &phy->mib;
+ 
++	mt7615_mutex_acquire(phy->dev);
++
+ 	stats->dot11RTSSuccessCount = mib->rts_cnt;
+ 	stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ 	stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ 	stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+ 
++	memset(mib, 0, sizeof(*mib));
++
++	mt7615_mutex_release(phy->dev);
++
+ 	return 0;
+ }
+ 
+ static u64
+ mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ {
++	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ 	struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ 	union {
+ 		u64 t64;
+ 		u32 t32[2];
+ 	} tsf;
++	u16 idx = mvif->mt76.omac_idx;
++	u32 reg;
++
++	idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
++	reg = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
+ 
+ 	mt7615_mutex_acquire(dev);
+ 
+-	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
++	mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */
+ 	tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
+ 	tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
+ 
+@@ -835,18 +847,24 @@ static void
+ mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	       u64 timestamp)
+ {
++	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ 	struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ 	union {
+ 		u64 t64;
+ 		u32 t32[2];
+ 	} tsf = { .t64 = timestamp, };
++	u16 idx = mvif->mt76.omac_idx;
++	u32 reg;
++
++	idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
++	reg = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
+ 
+ 	mt7615_mutex_acquire(dev);
+ 
+ 	mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
+ 	mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
+ 	/* TSF software overwrite */
+-	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE);
++	mt76_set(dev, reg, MT_LPON_TCR_WRITE);
+ 
+ 	mt7615_mutex_release(dev);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+index 491841bc62912..4bc0c379c5793 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+@@ -133,11 +133,11 @@ struct mt7615_vif {
+ };
+ 
+ struct mib_stats {
+-	u16 ack_fail_cnt;
+-	u16 fcs_err_cnt;
+-	u16 rts_cnt;
+-	u16 rts_retries_cnt;
+-	u16 ba_miss_cnt;
++	u32 ack_fail_cnt;
++	u32 fcs_err_cnt;
++	u32 rts_cnt;
++	u32 rts_retries_cnt;
++	u32 ba_miss_cnt;
+ 	unsigned long aggr_per;
+ };
+ 
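
Across mt7615 (and mt7915/mt7921 below) the MIB handling switches from
"snapshot each poll, keep the maximum" to counters that accumulate and
that get_stats() reads and clears under the lock, with the fields widened
from u16 to u32 to match. The read-and-clear scheme in portable C, a
pthread mutex standing in for the driver lock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t fcs_err_cnt;

/* Poll path: accumulate the hardware delta instead of overwriting,
 * so counts seen between reader calls are no longer lost. */
static void poll_hw(uint32_t hw_delta)
{
	pthread_mutex_lock(&lock);
	fcs_err_cnt += hw_delta;
	pthread_mutex_unlock(&lock);
}

/* Reader: return the accumulated count and consume it, under the same
 * lock the poll path takes. */
static uint32_t get_stats(void)
{
	pthread_mutex_lock(&lock);
	uint32_t v = fcs_err_cnt;

	fcs_err_cnt = 0;
	pthread_mutex_unlock(&lock);
	return v;
}

int main(void)
{
	poll_hw(3);
	poll_hw(4);
	printf("fcs errors: %u\n", get_stats());	/* 7 */
	printf("after read: %u\n", get_stats());	/* 0 */
	return 0;
}
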
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+index 72395925ddee4..15b417d6d8895 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+@@ -163,10 +163,9 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
+ 	mt76_unregister_device(&dev->mt76);
+ 	if (mcu_running)
+ 		mt7615_mcu_exit(dev);
+-	mt7615_dma_cleanup(dev);
+ 
+ 	mt7615_tx_token_put(dev);
+-
++	mt7615_dma_cleanup(dev);
+ 	tasklet_disable(&dev->irq_tasklet);
+ 
+ 	mt76_free_device(&dev->mt76);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+index 6e5db015b32cd..6e4710d3ddd34 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+@@ -447,9 +447,10 @@ enum mt7615_reg_base {
+ 
+ #define MT_LPON(_n)			((dev)->reg_map[MT_LPON_BASE] + (_n))
+ 
+-#define MT_LPON_T0CR			MT_LPON(0x010)
+-#define MT_LPON_T0CR_MODE		GENMASK(1, 0)
+-#define MT_LPON_T0CR_WRITE		BIT(0)
++#define MT_LPON_TCR0(_n)		MT_LPON(0x010 + ((_n) * 4))
++#define MT_LPON_TCR2(_n)		MT_LPON(0x0f8 + ((_n) - 2) * 4)
++#define MT_LPON_TCR_MODE		GENMASK(1, 0)
++#define MT_LPON_TCR_WRITE		BIT(0)
+ 
+ #define MT_LPON_UTTR0			MT_LPON(0x018)
+ #define MT_LPON_UTTR1			MT_LPON(0x01c)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+index 9fb506f2ace6d..4393dd21ebbbb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+@@ -218,12 +218,15 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ 	int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
+ 	bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
+ 	struct mt76_sdio *sdio = &dev->sdio;
++	u8 pad;
+ 
+ 	qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
+ 	while (q->first != q->head) {
+ 		struct mt76_queue_entry *e = &q->entry[q->first];
+ 		struct sk_buff *iter;
+ 
++		smp_rmb();
++
+ 		if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
+ 			__skb_put_zero(e->skb, 4);
+ 			err = __mt7663s_xmit_queue(dev, e->skb->data,
+@@ -234,7 +237,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ 			goto next;
+ 		}
+ 
+-		if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
++		pad = roundup(e->skb->len, 4) - e->skb->len;
++		if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
+ 			break;
+ 
+ 		if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
+@@ -252,6 +256,11 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ 			len += iter->len;
+ 			nframes++;
+ 		}
++
++		if (unlikely(pad)) {
++			memset(sdio->xmit_buf[qid] + len, 0, pad);
++			len += pad;
++		}
+ next:
+ 		q->first = (q->first + 1) % q->ndesc;
+ 		e->done = true;
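
The SDIO xmit path now computes the 4-byte alignment pad per frame,
includes it in the buffer-space check (instead of a flat 4 bytes), and
zero-fills it after copying the fragments. The arithmetic, using the
kernel's roundup() definition:

#include <stdio.h>

/* Same shape as the kernel's roundup() for integer operands. */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned int skb_len = 1501;

	/* Pad computed up front so an almost-full xmit buffer cannot
	 * overflow by up to 3 bytes once the pad is appended. */
	unsigned int pad = roundup(skb_len, 4) - skb_len;

	printf("len %u needs %u pad byte(s), occupies %u\n",
	       skb_len, pad, skb_len + pad);	/* 1501 -> 3 -> 1504 */
	return 0;
}
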
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+index 203256862dfdd..f8d3673c2cae8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+@@ -67,6 +67,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
+ 	struct mt7615_rate_desc *rate = &wrd->rate;
+ 	struct mt7615_sta *sta = wrd->sta;
+ 	u32 w5, w27, addr, val;
++	u16 idx;
+ 
+ 	lockdep_assert_held(&dev->mt76.mutex);
+ 
+@@ -118,7 +119,11 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
+ 
+ 	sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1;
+ 
+-	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
++	idx = sta->vif->mt76.omac_idx;
++	idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
++	addr = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
++
++	mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
+ 	val = mt76_rr(dev, MT_LPON_UTTR0);
+ 	sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 6cbccfb05f8b5..76a61e8b7fb96 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -946,6 +946,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
+ 
+ 	switch (vif->type) {
+ 	case NL80211_IFTYPE_MESH_POINT:
++	case NL80211_IFTYPE_MONITOR:
+ 	case NL80211_IFTYPE_AP:
+ 		basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ 		break;
+@@ -1195,6 +1196,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
+ 			.center_chan = ieee80211_frequency_to_channel(freq1),
+ 			.center_chan2 = ieee80211_frequency_to_channel(freq2),
+ 			.tx_streams = hweight8(phy->antenna_mask),
++			.ht_op_info = 4, /* set HT 40M allowed */
+ 			.rx_streams = phy->chainmask,
+ 			.short_st = true,
+ 		},
+@@ -1287,6 +1289,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
+ 	case NL80211_CHAN_WIDTH_20:
+ 	default:
+ 		rlm_req.rlm.bw = CMD_CBW_20MHZ;
++		rlm_req.rlm.ht_op_info = 0;
+ 		break;
+ 	}
+ 
+@@ -1306,7 +1309,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ {
+ 	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ 	struct cfg80211_scan_request *sreq = &scan_req->req;
+-	int n_ssids = 0, err, i, duration = MT76_CONNAC_SCAN_CHANNEL_TIME;
++	int n_ssids = 0, err, i, duration;
+ 	int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
+ 	struct ieee80211_channel **scan_list = sreq->channels;
+ 	struct mt76_dev *mdev = phy->dev;
+@@ -1343,6 +1346,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 	req->ssid_type_ext = n_ssids ? BIT(0) : 0;
+ 	req->ssids_num = n_ssids;
+ 
++	duration = is_mt7921(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME;
+ 	/* increase channel time for passive scan */
+ 	if (!sreq->n_ssids)
+ 		duration *= 2;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+index 77dcd71e49a5e..2f706620686e8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+@@ -124,7 +124,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
+ 		range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
+ 
+ 	for (i = 0; i < ARRAY_SIZE(bound); i++)
+-		bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
++		bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
+ 
+ 	seq_printf(file, "\nPhy %d\n", ext_phy);
+ 
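
Both ampdu-stat fixes (here and in the mt7921 debugfs hunk further down)
index the packed range field with i % 4: each 32-bit MT_MIB_ARNG register
holds several fields, and shifting by the global loop index walked past
the register for i >= 4. A sketch assuming 8-bit fields; the real width is
whatever MT_MIB_ARNCR_RANGE extracts:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: four 8-bit range fields per 32-bit register. */
#define RANGE_FIELD(reg, n) (((reg) >> ((n) * 8)) & 0xff)

int main(void)
{
	uint32_t range[2] = { 0x04030201, 0x08070605 };

	for (int i = 0; i < 8; i++) {
		/* range[i / 4] picks the register, i % 4 the field
		 * inside it; the old code passed i straight through. */
		unsigned int bound = RANGE_FIELD(range[i / 4], i % 4) + 1;

		printf("bound[%d] = %u\n", i, bound);
	}
	return 0;
}
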
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index ad4e5b95158bc..894016fdcf070 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -675,9 +675,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
+ 	mt7915_unregister_ext_phy(dev);
+ 	mt76_unregister_device(&dev->mt76);
+ 	mt7915_mcu_exit(dev);
+-	mt7915_dma_cleanup(dev);
+-
+ 	mt7915_tx_token_put(dev);
++	mt7915_dma_cleanup(dev);
+ 
+ 	mt76_free_device(&dev->mt76);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index e5a258958ac91..819670767521f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -1091,7 +1091,7 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
+ 	int i;
+ 
+ 	txp = mt7915_txwi_to_txp(dev, t);
+-	for (i = 1; i < txp->nbuf; i++)
++	for (i = 0; i < txp->nbuf; i++)
+ 		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ 				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+ }
+@@ -1470,9 +1470,8 @@ mt7915_update_beacons(struct mt7915_dev *dev)
+ }
+ 
+ static void
+-mt7915_dma_reset(struct mt7915_phy *phy)
++mt7915_dma_reset(struct mt7915_dev *dev)
+ {
+-	struct mt7915_dev *dev = phy->dev;
+ 	struct mt76_phy *mphy_ext = dev->mt76.phy2;
+ 	u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
+ 	int i;
+@@ -1489,18 +1488,20 @@ mt7915_dma_reset(struct mt7915_phy *phy)
+ 			   (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ 			    MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+ 	}
++
+ 	usleep_range(1000, 2000);
+ 
+-	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
+ 	for (i = 0; i < __MT_TXQ_MAX; i++) {
+-		mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
++		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ 		if (mphy_ext)
+ 			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
+ 	}
+ 
+-	mt76_for_each_q_rx(&dev->mt76, i) {
++	for (i = 0; i < __MT_MCUQ_MAX; i++)
++		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
++
++	mt76_for_each_q_rx(&dev->mt76, i)
+ 		mt76_queue_rx_reset(dev, i);
+-	}
+ 
+ 	/* re-init prefetch settings after reset */
+ 	mt7915_dma_prefetch(dev);
+@@ -1584,7 +1585,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
+ 	idr_init(&dev->token);
+ 
+ 	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+-		mt7915_dma_reset(&dev->phy);
++		mt7915_dma_reset(dev);
+ 
+ 		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
+ 		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+@@ -1633,39 +1634,30 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
+ 	bool ext_phy = phy != &dev->phy;
+ 	int i, aggr0, aggr1;
+ 
+-	memset(mib, 0, sizeof(*mib));
+-
+-	mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+-					  MT_MIB_SDR3_FCS_ERR_MASK);
++	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
++					   MT_MIB_SDR3_FCS_ERR_MASK);
+ 
+ 	aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ 	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+-		u32 val, val2;
++		u32 val;
+ 
+ 		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
+-
+-		val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+-		if (val2 > mib->ack_fail_cnt)
+-			mib->ack_fail_cnt = val2;
+-
+-		val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+-		if (val2 > mib->ba_miss_cnt)
+-			mib->ba_miss_cnt = val2;
++		mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
++		mib->ack_fail_cnt +=
++			FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+ 
+ 		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+-		val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+-		if (val2 > mib->rts_retries_cnt) {
+-			mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+-			mib->rts_retries_cnt = val2;
+-		}
++		mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
++		mib->rts_retries_cnt +=
++			FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+ 
+ 		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+-		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+-
+ 		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
+ 		dev->mt76.aggr_stats[aggr0++] += val >> 16;
+-		dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
+-		dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
++
++		val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
++		dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
++		dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index d4969b2e1ffb0..98f4b49642a8c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -717,13 +717,19 @@ mt7915_get_stats(struct ieee80211_hw *hw,
+ 		 struct ieee80211_low_level_stats *stats)
+ {
+ 	struct mt7915_phy *phy = mt7915_hw_phy(hw);
++	struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ 	struct mib_stats *mib = &phy->mib;
+ 
++	mutex_lock(&dev->mt76.mutex);
+ 	stats->dot11RTSSuccessCount = mib->rts_cnt;
+ 	stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ 	stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ 	stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+ 
++	memset(mib, 0, sizeof(*mib));
++
++	mutex_unlock(&dev->mt76.mutex);
++
+ 	return 0;
+ }
+ 
+@@ -833,9 +839,12 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
+ 	struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ 	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ 	struct mt7915_sta_stats *stats = &msta->stats;
++	struct rate_info rxrate = {};
+ 
+-	if (mt7915_mcu_get_rx_rate(phy, vif, sta, &sinfo->rxrate) == 0)
++	if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
++		sinfo->rxrate = rxrate;
+ 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
++	}
+ 
+ 	if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
+ 		return;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index 195929242b72f..443cb09ae7cbd 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -351,54 +351,62 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
+ 	dev->hw_pattern++;
+ }
+ 
+-static void
++static int
+ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
+ 			 struct rate_info *rate, u16 r)
+ {
+ 	struct ieee80211_supported_band *sband;
+ 	u16 ru_idx = le16_to_cpu(ra->ru_idx);
+-	u16 flags = 0;
++	bool cck = false;
+ 
+ 	rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r);
+ 	rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1;
+ 
+ 	switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) {
+ 	case MT_PHY_TYPE_CCK:
++		cck = true;
++		fallthrough;
+ 	case MT_PHY_TYPE_OFDM:
+ 		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ 			sband = &mphy->sband_5g.sband;
+ 		else
+ 			sband = &mphy->sband_2g.sband;
+ 
++		rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck);
+ 		rate->legacy = sband->bitrates[rate->mcs].bitrate;
+ 		break;
+ 	case MT_PHY_TYPE_HT:
+ 	case MT_PHY_TYPE_HT_GF:
+ 		rate->mcs += (rate->nss - 1) * 8;
+-		flags |= RATE_INFO_FLAGS_MCS;
++		if (rate->mcs > 31)
++			return -EINVAL;
+ 
++		rate->flags = RATE_INFO_FLAGS_MCS;
+ 		if (ra->gi)
+-			flags |= RATE_INFO_FLAGS_SHORT_GI;
++			rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ 		break;
+ 	case MT_PHY_TYPE_VHT:
+-		flags |= RATE_INFO_FLAGS_VHT_MCS;
++		if (rate->mcs > 9)
++			return -EINVAL;
+ 
++		rate->flags = RATE_INFO_FLAGS_VHT_MCS;
+ 		if (ra->gi)
+-			flags |= RATE_INFO_FLAGS_SHORT_GI;
++			rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ 		break;
+ 	case MT_PHY_TYPE_HE_SU:
+ 	case MT_PHY_TYPE_HE_EXT_SU:
+ 	case MT_PHY_TYPE_HE_TB:
+ 	case MT_PHY_TYPE_HE_MU:
++		if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11)
++			return -EINVAL;
++
+ 		rate->he_gi = ra->gi;
+ 		rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
+-
+-		flags |= RATE_INFO_FLAGS_HE_MCS;
++		rate->flags = RATE_INFO_FLAGS_HE_MCS;
+ 		break;
+ 	default:
+-		break;
++		return -EINVAL;
+ 	}
+-	rate->flags = flags;
+ 
+ 	if (ru_idx) {
+ 		switch (ru_idx) {
+@@ -435,6 +443,8 @@ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
+ 			break;
+ 		}
+ 	}
++
++	return 0;
+ }
+ 
+ static void
+@@ -465,12 +475,12 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
+ 		mphy = dev->mt76.phy2;
+ 
+ 	/* current rate */
+-	mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr);
+-	stats->tx_rate = rate;
++	if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr))
++		stats->tx_rate = rate;
+ 
+ 	/* probing rate */
+-	mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe);
+-	stats->prob_rate = prob_rate;
++	if (!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe))
++		stats->prob_rate = prob_rate;
+ 
+ 	if (attempts) {
+ 		u16 success = le16_to_cpu(ra->success);
+@@ -3501,9 +3511,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 	struct ieee80211_supported_band *sband;
+ 	struct mt7915_mcu_phy_rx_info *res;
+ 	struct sk_buff *skb;
+-	u16 flags = 0;
+ 	int ret;
+-	int i;
++	bool cck = false;
+ 
+ 	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO),
+ 					&req, sizeof(req), true, &skb);
+@@ -3517,48 +3526,53 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 
+ 	switch (res->mode) {
+ 	case MT_PHY_TYPE_CCK:
++		cck = true;
++		fallthrough;
+ 	case MT_PHY_TYPE_OFDM:
+ 		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ 			sband = &mphy->sband_5g.sband;
+ 		else
+ 			sband = &mphy->sband_2g.sband;
+ 
+-		for (i = 0; i < sband->n_bitrates; i++) {
+-			if (rate->mcs != (sband->bitrates[i].hw_value & 0xf))
+-				continue;
+-
+-			rate->legacy = sband->bitrates[i].bitrate;
+-			break;
+-		}
++		rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck);
++		rate->legacy = sband->bitrates[rate->mcs].bitrate;
+ 		break;
+ 	case MT_PHY_TYPE_HT:
+ 	case MT_PHY_TYPE_HT_GF:
+-		if (rate->mcs > 31)
+-			return -EINVAL;
+-
+-		flags |= RATE_INFO_FLAGS_MCS;
++		if (rate->mcs > 31) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 
++		rate->flags = RATE_INFO_FLAGS_MCS;
+ 		if (res->gi)
+-			flags |= RATE_INFO_FLAGS_SHORT_GI;
++			rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ 		break;
+ 	case MT_PHY_TYPE_VHT:
+-		flags |= RATE_INFO_FLAGS_VHT_MCS;
++		if (rate->mcs > 9) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 
++		rate->flags = RATE_INFO_FLAGS_VHT_MCS;
+ 		if (res->gi)
+-			flags |= RATE_INFO_FLAGS_SHORT_GI;
++			rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ 		break;
+ 	case MT_PHY_TYPE_HE_SU:
+ 	case MT_PHY_TYPE_HE_EXT_SU:
+ 	case MT_PHY_TYPE_HE_TB:
+ 	case MT_PHY_TYPE_HE_MU:
++		if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 		rate->he_gi = res->gi;
+-
+-		flags |= RATE_INFO_FLAGS_HE_MCS;
++		rate->flags = RATE_INFO_FLAGS_HE_MCS;
+ 		break;
+ 	default:
+-		break;
++		ret = -EINVAL;
++		goto out;
+ 	}
+-	rate->flags = flags;
+ 
+ 	switch (res->bw) {
+ 	case IEEE80211_STA_RX_BW_160:
+@@ -3575,7 +3589,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 		break;
+ 	}
+ 
++out:
+ 	dev_kfree_skb(skb);
+ 
+-	return 0;
++	return ret;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index 5c7eefdf2013c..1160d1bf8a7c3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -108,11 +108,11 @@ struct mt7915_vif {
+ };
+ 
+ struct mib_stats {
+-	u16 ack_fail_cnt;
+-	u16 fcs_err_cnt;
+-	u16 rts_cnt;
+-	u16 rts_retries_cnt;
+-	u16 ba_miss_cnt;
++	u32 ack_fail_cnt;
++	u32 fcs_err_cnt;
++	u32 rts_cnt;
++	u32 rts_retries_cnt;
++	u32 ba_miss_cnt;
+ };
+ 
+ struct mt7915_hif {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
+index 0dc8e25e18e4a..87a7ea12f3b3c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
+@@ -9,10 +9,13 @@ mt7921_fw_debug_set(void *data, u64 val)
+ {
+ 	struct mt7921_dev *dev = data;
+ 
+-	dev->fw_debug = (u8)val;
++	mt7921_mutex_acquire(dev);
+ 
++	dev->fw_debug = (u8)val;
+ 	mt7921_mcu_fw_log_2_host(dev, dev->fw_debug);
+ 
++	mt7921_mutex_release(dev);
++
+ 	return 0;
+ }
+ 
+@@ -44,14 +47,13 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
+ 		range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i));
+ 
+ 	for (i = 0; i < ARRAY_SIZE(bound); i++)
+-		bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
++		bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
+ 
+ 	seq_printf(file, "\nPhy0\n");
+ 
+ 	seq_printf(file, "Length: %8d | ", bound[0]);
+ 	for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+-		seq_printf(file, "%3d -%3d | ",
+-			   bound[i] + 1, bound[i + 1]);
++		seq_printf(file, "%3d  %3d | ", bound[i] + 1, bound[i + 1]);
+ 
+ 	seq_puts(file, "\nCount:  ");
+ 	for (i = 0; i < ARRAY_SIZE(bound); i++)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index 3f9097481a5ef..a6d2a25b3495f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -400,7 +400,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+ 
+ 	/* RXD Group 3 - P-RXV */
+ 	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
+-		u32 v0, v1, v2;
++		u8 stbc, gi;
++		u32 v0, v1;
++		bool cck;
+ 
+ 		rxv = rxd;
+ 		rxd += 2;
+@@ -409,7 +411,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+ 
+ 		v0 = le32_to_cpu(rxv[0]);
+ 		v1 = le32_to_cpu(rxv[1]);
+-		v2 = le32_to_cpu(rxv[2]);
+ 
+ 		if (v0 & MT_PRXV_HT_AD_CODE)
+ 			status->enc_flags |= RX_ENC_FLAG_LDPC;
+@@ -429,87 +430,87 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+ 					     status->chain_signal[i]);
+ 		}
+ 
+-		/* RXD Group 5 - C-RXV */
+-		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
+-			u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
+-			u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
+-			bool cck = false;
++		stbc = FIELD_GET(MT_PRXV_STBC, v0);
++		gi = FIELD_GET(MT_PRXV_SGI, v0);
++		cck = false;
+ 
+-			rxd += 18;
+-			if ((u8 *)rxd - skb->data >= skb->len)
+-				return -EINVAL;
++		idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
++		mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
+ 
+-			idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
+-			mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
+-
+-			switch (mode) {
+-			case MT_PHY_TYPE_CCK:
+-				cck = true;
+-				fallthrough;
+-			case MT_PHY_TYPE_OFDM:
+-				i = mt76_get_rate(&dev->mt76, sband, i, cck);
+-				break;
+-			case MT_PHY_TYPE_HT_GF:
+-			case MT_PHY_TYPE_HT:
+-				status->encoding = RX_ENC_HT;
+-				if (i > 31)
+-					return -EINVAL;
+-				break;
+-			case MT_PHY_TYPE_VHT:
+-				status->nss =
+-					FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+-				status->encoding = RX_ENC_VHT;
+-				if (i > 9)
+-					return -EINVAL;
+-				break;
+-			case MT_PHY_TYPE_HE_MU:
+-				status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+-				fallthrough;
+-			case MT_PHY_TYPE_HE_SU:
+-			case MT_PHY_TYPE_HE_EXT_SU:
+-			case MT_PHY_TYPE_HE_TB:
+-				status->nss =
+-					FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+-				status->encoding = RX_ENC_HE;
+-				status->flag |= RX_FLAG_RADIOTAP_HE;
+-				i &= GENMASK(3, 0);
+-
+-				if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+-					status->he_gi = gi;
+-
+-				status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
+-				break;
+-			default:
++		switch (mode) {
++		case MT_PHY_TYPE_CCK:
++			cck = true;
++			fallthrough;
++		case MT_PHY_TYPE_OFDM:
++			i = mt76_get_rate(&dev->mt76, sband, i, cck);
++			break;
++		case MT_PHY_TYPE_HT_GF:
++		case MT_PHY_TYPE_HT:
++			status->encoding = RX_ENC_HT;
++			if (i > 31)
+ 				return -EINVAL;
+-			}
+-			status->rate_idx = i;
+-
+-			switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
+-			case IEEE80211_STA_RX_BW_20:
+-				break;
+-			case IEEE80211_STA_RX_BW_40:
+-				if (mode & MT_PHY_TYPE_HE_EXT_SU &&
+-				    (idx & MT_PRXV_TX_ER_SU_106T)) {
+-					status->bw = RATE_INFO_BW_HE_RU;
+-					status->he_ru =
+-						NL80211_RATE_INFO_HE_RU_ALLOC_106;
+-				} else {
+-					status->bw = RATE_INFO_BW_40;
+-				}
+-				break;
+-			case IEEE80211_STA_RX_BW_80:
+-				status->bw = RATE_INFO_BW_80;
+-				break;
+-			case IEEE80211_STA_RX_BW_160:
+-				status->bw = RATE_INFO_BW_160;
+-				break;
+-			default:
++			break;
++		case MT_PHY_TYPE_VHT:
++			status->nss =
++				FIELD_GET(MT_PRXV_NSTS, v0) + 1;
++			status->encoding = RX_ENC_VHT;
++			if (i > 9)
+ 				return -EINVAL;
++			break;
++		case MT_PHY_TYPE_HE_MU:
++			status->flag |= RX_FLAG_RADIOTAP_HE_MU;
++			fallthrough;
++		case MT_PHY_TYPE_HE_SU:
++		case MT_PHY_TYPE_HE_EXT_SU:
++		case MT_PHY_TYPE_HE_TB:
++			status->nss =
++				FIELD_GET(MT_PRXV_NSTS, v0) + 1;
++			status->encoding = RX_ENC_HE;
++			status->flag |= RX_FLAG_RADIOTAP_HE;
++			i &= GENMASK(3, 0);
++
++			if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
++				status->he_gi = gi;
++
++			status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
++			break;
++		default:
++			return -EINVAL;
++		}
++
++		status->rate_idx = i;
++
++		switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
++		case IEEE80211_STA_RX_BW_20:
++			break;
++		case IEEE80211_STA_RX_BW_40:
++			if (mode & MT_PHY_TYPE_HE_EXT_SU &&
++			    (idx & MT_PRXV_TX_ER_SU_106T)) {
++				status->bw = RATE_INFO_BW_HE_RU;
++				status->he_ru =
++					NL80211_RATE_INFO_HE_RU_ALLOC_106;
++			} else {
++				status->bw = RATE_INFO_BW_40;
+ 			}
++			break;
++		case IEEE80211_STA_RX_BW_80:
++			status->bw = RATE_INFO_BW_80;
++			break;
++		case IEEE80211_STA_RX_BW_160:
++			status->bw = RATE_INFO_BW_160;
++			break;
++		default:
++			return -EINVAL;
++		}
+ 
+-			status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+-			if (mode < MT_PHY_TYPE_HE_SU && gi)
+-				status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
++		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
++		if (mode < MT_PHY_TYPE_HE_SU && gi)
++			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
++
++		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
++			rxd += 18;
++			if ((u8 *)rxd - skb->data >= skb->len)
++				return -EINVAL;
+ 		}
+ 	}
+ 
+@@ -1317,31 +1318,20 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
+ 	struct mib_stats *mib = &phy->mib;
+ 	int i, aggr0 = 0, aggr1;
+ 
+-	memset(mib, 0, sizeof(*mib));
+-
+-	mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0),
+-					  MT_MIB_SDR3_FCS_ERR_MASK);
++	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
++					   MT_MIB_SDR3_FCS_ERR_MASK);
++	mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
++					    MT_MIB_ACK_FAIL_COUNT_MASK);
++	mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
++					   MT_MIB_BA_FAIL_COUNT_MASK);
++	mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
++				       MT_MIB_RTS_COUNT_MASK);
++	mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
++					       MT_MIB_RTS_FAIL_COUNT_MASK);
+ 
+ 	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+ 		u32 val, val2;
+ 
+-		val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i));
+-
+-		val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+-		if (val2 > mib->ack_fail_cnt)
+-			mib->ack_fail_cnt = val2;
+-
+-		val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+-		if (val2 > mib->ba_miss_cnt)
+-			mib->ba_miss_cnt = val2;
+-
+-		val = mt76_rr(dev, MT_MIB_MB_SDR0(0, i));
+-		val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+-		if (val2 > mib->rts_retries_cnt) {
+-			mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+-			mib->rts_retries_cnt = val2;
+-		}
+-
+ 		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
+ 		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
+ 
+@@ -1503,8 +1493,10 @@ void mt7921_coredump_work(struct work_struct *work)
+ 			break;
+ 
+ 		skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+-		if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
+-			break;
++		if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
++			dev_kfree_skb(skb);
++			continue;
++		}
+ 
+ 		memcpy(data, skb->data, skb->len);
+ 		data += skb->len;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
+index a0c1fa0f20e46..109c8849d106a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
+@@ -97,18 +97,24 @@ enum rx_pkt_type {
+ #define MT_RXD3_NORMAL_PF_MODE		BIT(29)
+ #define MT_RXD3_NORMAL_PF_STS		GENMASK(31, 30)
+ 
+-/* P-RXV */
++/* P-RXV DW0 */
+ #define MT_PRXV_TX_RATE			GENMASK(6, 0)
+ #define MT_PRXV_TX_DCM			BIT(4)
+ #define MT_PRXV_TX_ER_SU_106T		BIT(5)
+ #define MT_PRXV_NSTS			GENMASK(9, 7)
+ #define MT_PRXV_HT_AD_CODE		BIT(11)
++#define MT_PRXV_FRAME_MODE		GENMASK(14, 12)
++#define MT_PRXV_SGI			GENMASK(16, 15)
++#define MT_PRXV_STBC			GENMASK(23, 22)
++#define MT_PRXV_TX_MODE			GENMASK(27, 24)
+ #define MT_PRXV_HE_RU_ALLOC_L		GENMASK(31, 28)
+-#define MT_PRXV_HE_RU_ALLOC_H		GENMASK(3, 0)
++
++/* P-RXV DW1 */
+ #define MT_PRXV_RCPI3			GENMASK(31, 24)
+ #define MT_PRXV_RCPI2			GENMASK(23, 16)
+ #define MT_PRXV_RCPI1			GENMASK(15, 8)
+ #define MT_PRXV_RCPI0			GENMASK(7, 0)
++#define MT_PRXV_HE_RU_ALLOC_H		GENMASK(3, 0)
+ 
+ /* C-RXV */
+ #define MT_CRXV_HT_STBC			GENMASK(1, 0)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 729f6c42cddee..cd9fd0e24e3e6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -348,6 +348,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
+ 	if (vif == phy->monitor_vif)
+ 		phy->monitor_vif = NULL;
+ 
++	mt7921_mutex_acquire(dev);
+ 	mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
+ 
+ 	if (dev->pm.enable) {
+@@ -360,7 +361,6 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
+ 
+ 	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ 
+-	mt7921_mutex_acquire(dev);
+ 	dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
+ 	phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
+ 	mt7921_mutex_release(dev);
+@@ -587,6 +587,9 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
+ 	if (changed & BSS_CHANGED_PS)
+ 		mt7921_mcu_uni_bss_ps(dev, vif);
+ 
++	if (changed & BSS_CHANGED_ARP_FILTER)
++		mt7921_mcu_update_arp_filter(hw, vif, info);
++
+ 	mt7921_mutex_release(dev);
+ }
+ 
+@@ -814,11 +817,17 @@ mt7921_get_stats(struct ieee80211_hw *hw,
+ 	struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ 	struct mib_stats *mib = &phy->mib;
+ 
++	mt7921_mutex_acquire(phy->dev);
++
+ 	stats->dot11RTSSuccessCount = mib->rts_cnt;
+ 	stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ 	stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ 	stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+ 
++	memset(mib, 0, sizeof(*mib));
++
++	mt7921_mutex_release(phy->dev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index b5cc72e7e81c3..62afbad77596b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -1304,3 +1304,47 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ 		mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+ 	}
+ }
++
++int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
++				 struct ieee80211_vif *vif,
++				 struct ieee80211_bss_conf *info)
++{
++	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
++	struct mt7921_dev *dev = mt7921_hw_dev(hw);
++	struct sk_buff *skb;
++	int i, len = min_t(int, info->arp_addr_cnt,
++			   IEEE80211_BSS_ARP_ADDR_LIST_LEN);
++	struct {
++		struct {
++			u8 bss_idx;
++			u8 pad[3];
++		} __packed hdr;
++		struct mt76_connac_arpns_tlv arp;
++	} req_hdr = {
++		.hdr = {
++			.bss_idx = mvif->mt76.idx,
++		},
++		.arp = {
++			.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
++			.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
++			.ips_num = len,
++			.mode = 2,  /* update */
++			.option = 1,
++		},
++	};
++
++	skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
++				 sizeof(req_hdr) + len * sizeof(__be32));
++	if (!skb)
++		return -ENOMEM;
++
++	skb_put_data(skb, &req_hdr, sizeof(req_hdr));
++	for (i = 0; i < len; i++) {
++		u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
++
++		memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
++	}
++
++	return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
++				     true);
++}
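
The new mt7921_mcu_update_arp_filter() builds a variable-length MCU
message: a fixed header followed by one __be32 per configured ARP address,
appended with skb_put(). A simplified user-space sketch of that layout;
the struct here is condensed for the demo and is not the driver's exact
header plus mt76_connac_arpns_tlv pair:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Condensed message header, hypothetical field set. */
struct arp_req_hdr {
	uint8_t bss_idx;
	uint8_t pad[3];
	uint16_t tag;
	uint16_t tlv_len;
	uint8_t ips_num;
};

int main(void)
{
	/* 10.0.0.1 and 10.0.0.2 as big-endian words on a LE host. */
	uint32_t addrs[2] = { 0x0100000a, 0x0200000a };
	uint8_t msg[64];
	struct arp_req_hdr hdr = { .bss_idx = 0, .tag = 1,
				   .tlv_len = sizeof(hdr), .ips_num = 2 };
	size_t off = 0;

	/* Fixed part first (skb_put_data in the driver)... */
	memcpy(msg + off, &hdr, sizeof(hdr));
	off += sizeof(hdr);

	/* ...then one address per iteration (skb_put in the driver). */
	for (int i = 0; i < 2; i++) {
		memcpy(msg + off, &addrs[i], sizeof(addrs[i]));
		off += sizeof(addrs[i]);
	}
	printf("message length: %zu bytes\n", off);
	return 0;
}
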
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+index 46e6aeec35ae3..25a1a6acb6baf 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+@@ -102,11 +102,11 @@ struct mt7921_vif {
+ };
+ 
+ struct mib_stats {
+-	u16 ack_fail_cnt;
+-	u16 fcs_err_cnt;
+-	u16 rts_cnt;
+-	u16 rts_retries_cnt;
+-	u16 ba_miss_cnt;
++	u32 ack_fail_cnt;
++	u32 fcs_err_cnt;
++	u32 rts_cnt;
++	u32 rts_retries_cnt;
++	u32 ba_miss_cnt;
+ };
+ 
+ struct mt7921_phy {
+@@ -339,4 +339,7 @@ int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
+ 				 bool enable);
+ void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
+ void mt7921_coredump_work(struct work_struct *work);
++int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
++				 struct ieee80211_vif *vif,
++				 struct ieee80211_bss_conf *info);
+ #endif
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+index 5570b4a505314..80f6f29892a45 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -137,7 +137,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
+ 
+ 	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+ 
+-	mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
++	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ 
+ 	ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler,
+ 			       IRQF_SHARED, KBUILD_MODNAME, dev);
+@@ -146,10 +146,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
+ 
+ 	ret = mt7921_register_device(dev);
+ 	if (ret)
+-		goto err_free_dev;
++		goto err_free_irq;
+ 
+ 	return 0;
+ 
++err_free_irq:
++	devm_free_irq(&pdev->dev, pdev->irq, dev);
+ err_free_dev:
+ 	mt76_free_device(&dev->mt76);
+ err_free_pci_vec:
+@@ -193,7 +195,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+ 	mt76_for_each_q_rx(mdev, i) {
+ 		napi_disable(&mdev->napi[i]);
+ 	}
+-	tasklet_kill(&dev->irq_tasklet);
+ 
+ 	pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+ 
+@@ -208,13 +209,16 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+ 
+ 	/* disable interrupt */
+ 	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
++	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
++	synchronize_irq(pdev->irq);
++	tasklet_kill(&dev->irq_tasklet);
+ 
+-	pci_save_state(pdev);
+-	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
++	err = mt7921_mcu_fw_pmctrl(dev);
+ 	if (err)
+ 		goto restore;
+ 
+-	err = mt7921_mcu_drv_pmctrl(dev);
++	pci_save_state(pdev);
++	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ 	if (err)
+ 		goto restore;
+ 
+@@ -237,18 +241,18 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
+ 	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ 	int i, err;
+ 
+-	err = mt7921_mcu_fw_pmctrl(dev);
+-	if (err < 0)
+-		return err;
+-
+ 	err = pci_set_power_state(pdev, PCI_D0);
+ 	if (err)
+ 		return err;
+ 
+ 	pci_restore_state(pdev);
+ 
++	err = mt7921_mcu_drv_pmctrl(dev);
++	if (err < 0)
++		return err;
++
+ 	/* enable interrupt */
+-	mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
++	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ 	mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ 			  MT_INT_MCU_CMD);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+index 6dad7f6ab09df..73878d3e24951 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+@@ -96,8 +96,8 @@
+ #define MT_WF_MIB_BASE(_band)		((_band) ? 0xa4800 : 0x24800)
+ #define MT_WF_MIB(_band, ofs)		(MT_WF_MIB_BASE(_band) + (ofs))
+ 
+-#define MT_MIB_SDR3(_band)		MT_WF_MIB(_band, 0x014)
+-#define MT_MIB_SDR3_FCS_ERR_MASK	GENMASK(15, 0)
++#define MT_MIB_SDR3(_band)		MT_WF_MIB(_band, 0x698)
++#define MT_MIB_SDR3_FCS_ERR_MASK	GENMASK(31, 16)
+ 
+ #define MT_MIB_SDR9(_band)		MT_WF_MIB(_band, 0x02c)
+ #define MT_MIB_SDR9_BUSY_MASK		GENMASK(23, 0)
+@@ -121,16 +121,21 @@
+ #define MT_MIB_RTS_RETRIES_COUNT_MASK	GENMASK(31, 16)
+ #define MT_MIB_RTS_COUNT_MASK		GENMASK(15, 0)
+ 
+-#define MT_MIB_MB_SDR1(_band, n)	MT_WF_MIB(_band, 0x104 + ((n) << 4))
+-#define MT_MIB_BA_MISS_COUNT_MASK	GENMASK(15, 0)
+-#define MT_MIB_ACK_FAIL_COUNT_MASK	GENMASK(31, 16)
++#define MT_MIB_MB_BSDR0(_band)		MT_WF_MIB(_band, 0x688)
++#define MT_MIB_RTS_COUNT_MASK		GENMASK(15, 0)
++#define MT_MIB_MB_BSDR1(_band)		MT_WF_MIB(_band, 0x690)
++#define MT_MIB_RTS_FAIL_COUNT_MASK	GENMASK(15, 0)
++#define MT_MIB_MB_BSDR2(_band)		MT_WF_MIB(_band, 0x518)
++#define MT_MIB_BA_FAIL_COUNT_MASK	GENMASK(15, 0)
++#define MT_MIB_MB_BSDR3(_band)		MT_WF_MIB(_band, 0x520)
++#define MT_MIB_ACK_FAIL_COUNT_MASK	GENMASK(15, 0)
+ 
+ #define MT_MIB_MB_SDR2(_band, n)	MT_WF_MIB(_band, 0x108 + ((n) << 4))
+ #define MT_MIB_FRAME_RETRIES_COUNT_MASK	GENMASK(15, 0)
+ 
+-#define MT_TX_AGG_CNT(_band, n)		MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
+-#define MT_TX_AGG_CNT2(_band, n)	MT_WF_MIB(_band, 0x164 + ((n) << 2))
+-#define MT_MIB_ARNG(_band, n)		MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
++#define MT_TX_AGG_CNT(_band, n)		MT_WF_MIB(_band, 0x7dc + ((n) << 2))
++#define MT_TX_AGG_CNT2(_band, n)	MT_WF_MIB(_band, 0x7ec + ((n) << 2))
++#define MT_MIB_ARNG(_band, n)		MT_WF_MIB(_band, 0x0b0 + ((n) << 2))
+ #define MT_MIB_ARNCR_RANGE(val, n)	(((val) >> ((n) << 3)) & GENMASK(7, 0))
+ 
+ #define MT_WTBLON_TOP_BASE		0x34000
+@@ -357,11 +362,11 @@
+ #define MT_INFRA_CFG_BASE		0xfe000
+ #define MT_INFRA(ofs)			(MT_INFRA_CFG_BASE + (ofs))
+ 
+-#define MT_HIF_REMAP_L1			MT_INFRA(0x260)
++#define MT_HIF_REMAP_L1			MT_INFRA(0x24c)
+ #define MT_HIF_REMAP_L1_MASK		GENMASK(15, 0)
+ #define MT_HIF_REMAP_L1_OFFSET		GENMASK(15, 0)
+ #define MT_HIF_REMAP_L1_BASE		GENMASK(31, 16)
+-#define MT_HIF_REMAP_BASE_L1		0xe0000
++#define MT_HIF_REMAP_BASE_L1		0x40000
+ 
+ #define MT_SWDEF_BASE			0x41f200
+ #define MT_SWDEF(ofs)			(MT_SWDEF_BASE + (ofs))
+@@ -384,7 +389,7 @@
+ #define MT_HW_CHIPID			0x70010200
+ #define MT_HW_REV			0x70010204
+ 
+-#define MT_PCIE_MAC_BASE		0x74030000
++#define MT_PCIE_MAC_BASE		0x10000
+ #define MT_PCIE_MAC(ofs)		(MT_PCIE_MAC_BASE + (ofs))
+ #define MT_PCIE_MAC_INT_ENABLE		MT_PCIE_MAC(0x188)
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
+index 0b6facb17ff72..a18d2896ee1fb 100644
+--- a/drivers/net/wireless/mediatek/mt76/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/sdio.c
+@@ -256,6 +256,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ 
+ 	q->entry[q->head].skb = tx_info.skb;
+ 	q->entry[q->head].buf_sz = len;
++
++	smp_wmb();
++
+ 	q->head = (q->head + 1) % q->ndesc;
+ 	q->queued++;
+ 
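
The barrier added here closes a publication race: the queue entry must be fully written before the advanced head index makes it visible. A minimal sketch of the producer-side pattern, with made-up types standing in for the driver's ring:

    struct slot { void *skb; int buf_sz; };
    struct ring { struct slot e[64]; unsigned int head; };

    static void ring_publish(struct ring *q, void *skb, int len)
    {
            unsigned int h = q->head;

            q->e[h].skb = skb;              /* 1. fill the slot */
            q->e[h].buf_sz = len;
            smp_wmb();                      /* 2. order slot stores before the index store */
            q->head = (h + 1) % 64;         /* 3. publish: consumer may now read the slot */
    }

The consumer side pairs this with a read barrier (or an acquire load) after reading head.
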
+diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
+index b8fe8adc43a31..451ed60c62961 100644
+--- a/drivers/net/wireless/mediatek/mt76/tx.c
++++ b/drivers/net/wireless/mediatek/mt76/tx.c
+@@ -461,11 +461,11 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
+ 	int ret = 0;
+ 
+ 	while (1) {
++		int n_frames = 0;
++
+ 		if (test_bit(MT76_STATE_PM, &phy->state) ||
+-		    test_bit(MT76_RESET, &phy->state)) {
+-			ret = -EBUSY;
+-			break;
+-		}
++		    test_bit(MT76_RESET, &phy->state))
++			return -EBUSY;
+ 
+ 		if (dev->queue_ops->tx_cleanup &&
+ 		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
+@@ -497,11 +497,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
+ 		}
+ 
+ 		if (!mt76_txq_stopped(q))
+-			ret += mt76_txq_send_burst(phy, q, mtxq);
++			n_frames = mt76_txq_send_burst(phy, q, mtxq);
+ 
+ 		spin_unlock_bh(&q->lock);
+ 
+ 		ieee80211_return_txq(phy->hw, txq, false);
++
++		if (unlikely(n_frames < 0))
++			return n_frames;
++
++		ret += n_frames;
+ 	}
+ 
+ 	return ret;
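
The restructuring fixes an error-masking bug: the old code added the burst function's return value straight into the running total, so a negative error code could be absorbed by previously counted frames. The shape of the fix, with a hypothetical burst function:

    int total = 0;

    for (;;) {
            int n = send_burst();   /* hypothetical; frames sent or -EBUSY */

            if (n < 0)
                    return n;       /* propagate the error ... */
            total += n;             /* ... and only count real frames */
    }
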
+diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+index c868582c5d225..aa3b64902cf9b 100644
+--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+@@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
+ {
+ 	u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+ 
+-	return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
++	return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
+ }
+ 
+ static void
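
The cast matters because of integer promotion: ~nic_conf1 promotes the u16 to int first, so even an all-ones (erased EEPROM) value yields the non-zero 0xffff0000 and the check always passes. A small userspace illustration of the assumed semantics:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t nic_conf1 = 0xffff;    /* erased EEPROM word */

            printf("~x      = 0x%x\n", ~nic_conf1);                 /* 0xffff0000 */
            printf("(u16)~x = 0x%x\n", (uint16_t)~nic_conf1);       /* 0x0 */
            return 0;
    }
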
+diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
+index 351ff909ab1c7..e14b9fc2c67ac 100644
+--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
+@@ -947,7 +947,7 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
+ 			for (i = 0; (i < 3) && (nint > 0); i++, nint--)
+ 				reg |= BIT(i);
+ 
+-			ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
++			ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg);
+ 			if (ret) {
+ 				dev_err(&func->dev,
+ 					"Failed write reg (%08x)...\n",
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+index 27c8a5d965208..fcaaf664cbec5 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+@@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
+ 	0x824, 0x00030FE0,
+ 	0x828, 0x00000000,
+ 	0x82C, 0x002081DD,
+-	0x830, 0x2AAA8E24,
++	0x830, 0x2AAAEEC8,
+ 	0x834, 0x0037A706,
+ 	0x838, 0x06489B44,
+ 	0x83C, 0x0000095B,
+@@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
+ 	0x9D8, 0x00000000,
+ 	0x9DC, 0x00000000,
+ 	0x9E0, 0x00005D00,
+-	0x9E4, 0x00000002,
++	0x9E4, 0x00000003,
+ 	0x9E8, 0x00000001,
+ 	0xA00, 0x00D047C8,
+-	0xA04, 0x01FF000C,
++	0xA04, 0x01FF800C,
+ 	0xA08, 0x8C8A8300,
+ 	0xA0C, 0x2E68000F,
+ 	0xA10, 0x9500BB78,
+@@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x083, 0x00021800,
+ 		0x084, 0x00028000,
+ 		0x085, 0x00048000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
++		0x086, 0x0009483A,
++	0xA0000000,	0x00000000,
+ 		0x086, 0x00094838,
++	0xB0000000,	0x00000000,
+ 		0x087, 0x00044980,
+ 		0x088, 0x00048000,
+ 		0x089, 0x0000D480,
+@@ -1409,36 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x03C, 0x000CA000,
+ 		0x0EF, 0x00000000,
+ 		0x0EF, 0x00001100,
+-	0xFF0F0104, 0xABCD,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0004ADF3,
+ 		0x034, 0x00049DF0,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0004ADF3,
+ 		0x034, 0x00049DF0,
+-	0xFF0F0404, 0xCDEF,
+-		0x034, 0x0004ADF3,
+-		0x034, 0x00049DF0,
+-	0xFF0F0200, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0004ADF5,
+ 		0x034, 0x00049DF2,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0004A0F3,
++		0x034, 0x000490B1,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0004A0F3,
+ 		0x034, 0x000490B1,
+-	0xCDCDCDCD, 0xCDCD,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0004ADF5,
++		0x034, 0x00049DF2,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0004ADF3,
++		0x034, 0x00049DF0,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x0004ADF7,
+ 		0x034, 0x00049DF3,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
+-		0x034, 0x00048DED,
+-		0x034, 0x00047DEA,
+-		0x034, 0x00046DE7,
+-		0x034, 0x00045CE9,
+-		0x034, 0x00044CE6,
+-		0x034, 0x000438C6,
+-		0x034, 0x00042886,
+-		0x034, 0x00041486,
+-		0x034, 0x00040447,
+-	0xFF0F0204, 0xCDEF,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00048DED,
+ 		0x034, 0x00047DEA,
+ 		0x034, 0x00046DE7,
+@@ -1448,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00042886,
+ 		0x034, 0x00041486,
+ 		0x034, 0x00040447,
+-	0xFF0F0404, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00048DED,
+ 		0x034, 0x00047DEA,
+ 		0x034, 0x00046DE7,
+@@ -1458,7 +1458,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00042886,
+ 		0x034, 0x00041486,
+ 		0x034, 0x00040447,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x000480AE,
++		0x034, 0x000470AB,
++		0x034, 0x0004608B,
++		0x034, 0x00045069,
++		0x034, 0x00044048,
++		0x034, 0x00043045,
++		0x034, 0x00042026,
++		0x034, 0x00041023,
++		0x034, 0x00040002,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x000480AE,
+ 		0x034, 0x000470AB,
+ 		0x034, 0x0004608B,
+@@ -1468,7 +1478,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00042026,
+ 		0x034, 0x00041023,
+ 		0x034, 0x00040002,
+-	0xCDCDCDCD, 0xCDCD,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x00048DED,
++		0x034, 0x00047DEA,
++		0x034, 0x00046DE7,
++		0x034, 0x00045CE9,
++		0x034, 0x00044CE6,
++		0x034, 0x000438C6,
++		0x034, 0x00042886,
++		0x034, 0x00041486,
++		0x034, 0x00040447,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x00048DEF,
+ 		0x034, 0x00047DEC,
+ 		0x034, 0x00046DE9,
+@@ -1478,38 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x0004248A,
+ 		0x034, 0x0004108D,
+ 		0x034, 0x0004008A,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0200, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0002ADF4,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0002A0F3,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0002A0F3,
+-	0xCDCDCDCD, 0xCDCD,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0002ADF4,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x0002ADF7,
+-	0xFF0F0200, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
+-		0x034, 0x00029DF4,
+-	0xFF0F0204, 0xCDEF,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00029DF4,
+-	0xFF0F0404, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00029DF4,
+-	0xFF0F0200, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00029DF1,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x000290F0,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x000290F0,
+-	0xCDCDCDCD, 0xCDCD,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x00029DF1,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x00029DF4,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x00029DF2,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
+-		0x034, 0x00028DF1,
+-		0x034, 0x00027DEE,
+-		0x034, 0x00026DEB,
+-		0x034, 0x00025CEC,
+-		0x034, 0x00024CE9,
+-		0x034, 0x000238CA,
+-		0x034, 0x00022889,
+-		0x034, 0x00021489,
+-		0x034, 0x0002044A,
+-	0xFF0F0204, 0xCDEF,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00028DF1,
+ 		0x034, 0x00027DEE,
+ 		0x034, 0x00026DEB,
+@@ -1519,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00022889,
+ 		0x034, 0x00021489,
+ 		0x034, 0x0002044A,
+-	0xFF0F0404, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00028DF1,
+ 		0x034, 0x00027DEE,
+ 		0x034, 0x00026DEB,
+@@ -1529,7 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00022889,
+ 		0x034, 0x00021489,
+ 		0x034, 0x0002044A,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x000280AF,
+ 		0x034, 0x000270AC,
+ 		0x034, 0x0002608B,
+@@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00022026,
+ 		0x034, 0x00021023,
+ 		0x034, 0x00020002,
+-	0xCDCDCDCD, 0xCDCD,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x000280AF,
++		0x034, 0x000270AC,
++		0x034, 0x0002608B,
++		0x034, 0x00025069,
++		0x034, 0x00024048,
++		0x034, 0x00023045,
++		0x034, 0x00022026,
++		0x034, 0x00021023,
++		0x034, 0x00020002,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x00028DF1,
++		0x034, 0x00027DEE,
++		0x034, 0x00026DEB,
++		0x034, 0x00025CEC,
++		0x034, 0x00024CE9,
++		0x034, 0x000238CA,
++		0x034, 0x00022889,
++		0x034, 0x00021489,
++		0x034, 0x0002044A,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x00028DEE,
+ 		0x034, 0x00027DEB,
+ 		0x034, 0x00026CCD,
+@@ -1549,27 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00022849,
+ 		0x034, 0x00021449,
+ 		0x034, 0x0002004D,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F02C0, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x8000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0000A0D7,
++		0x034, 0x000090D3,
++		0x034, 0x000080B1,
++		0x034, 0x000070AE,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0000A0D7,
+ 		0x034, 0x000090D3,
+ 		0x034, 0x000080B1,
+ 		0x034, 0x000070AE,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x0000ADF7,
+ 		0x034, 0x00009DF4,
+ 		0x034, 0x00008DF1,
+ 		0x034, 0x00007DEE,
+-	0xFF0F02C0, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
+-		0x034, 0x00006DEB,
+-		0x034, 0x00005CEC,
+-		0x034, 0x00004CE9,
+-		0x034, 0x000038CA,
+-		0x034, 0x00002889,
+-		0x034, 0x00001489,
+-		0x034, 0x0000044A,
+-	0xFF0F0204, 0xCDEF,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00006DEB,
+ 		0x034, 0x00005CEC,
+ 		0x034, 0x00004CE9,
+@@ -1577,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00002889,
+ 		0x034, 0x00001489,
+ 		0x034, 0x0000044A,
+-	0xFF0F0404, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00006DEB,
+ 		0x034, 0x00005CEC,
+ 		0x034, 0x00004CE9,
+@@ -1585,7 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00002889,
+ 		0x034, 0x00001489,
+ 		0x034, 0x0000044A,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0000608D,
+ 		0x034, 0x0000506B,
+ 		0x034, 0x0000404A,
+@@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00002044,
+ 		0x034, 0x00001025,
+ 		0x034, 0x00000004,
+-	0xCDCDCDCD, 0xCDCD,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0000608D,
++		0x034, 0x0000506B,
++		0x034, 0x0000404A,
++		0x034, 0x00003047,
++		0x034, 0x00002044,
++		0x034, 0x00001025,
++		0x034, 0x00000004,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x00006DEB,
++		0x034, 0x00005CEC,
++		0x034, 0x00004CE9,
++		0x034, 0x000038CA,
++		0x034, 0x00002889,
++		0x034, 0x00001489,
++		0x034, 0x0000044A,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x00006DCD,
+ 		0x034, 0x00005CCD,
+ 		0x034, 0x00004CCA,
+@@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00002888,
+ 		0x034, 0x00001488,
+ 		0x034, 0x00000486,
+-	0xFF0F0104, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x0EF, 0x00000000,
+ 		0x018, 0x0001712A,
+ 		0x0EF, 0x00000040,
+-	0xFF0F0104, 0xABCD,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x035, 0x00000187,
+ 		0x035, 0x00008187,
+ 		0x035, 0x00010187,
+@@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x035, 0x00040188,
+ 		0x035, 0x00048188,
+ 		0x035, 0x00050188,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x035, 0x00000187,
+ 		0x035, 0x00008187,
+ 		0x035, 0x00010187,
+@@ -1625,7 +1676,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x035, 0x00040188,
+ 		0x035, 0x00048188,
+ 		0x035, 0x00050188,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x035, 0x00000128,
++		0x035, 0x00008128,
++		0x035, 0x00010128,
++		0x035, 0x000201C8,
++		0x035, 0x000281C8,
++		0x035, 0x000301C8,
++		0x035, 0x000401C8,
++		0x035, 0x000481C8,
++		0x035, 0x000501C8,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x035, 0x00000145,
++		0x035, 0x00008145,
++		0x035, 0x00010145,
++		0x035, 0x00020196,
++		0x035, 0x00028196,
++		0x035, 0x00030196,
++		0x035, 0x000401C7,
++		0x035, 0x000481C7,
++		0x035, 0x000501C7,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x035, 0x00000128,
++		0x035, 0x00008128,
++		0x035, 0x00010128,
++		0x035, 0x000201C8,
++		0x035, 0x000281C8,
++		0x035, 0x000301C8,
++		0x035, 0x000401C8,
++		0x035, 0x000481C8,
++		0x035, 0x000501C8,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x035, 0x00000187,
+ 		0x035, 0x00008187,
+ 		0x035, 0x00010187,
+@@ -1635,7 +1716,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x035, 0x00040188,
+ 		0x035, 0x00048188,
+ 		0x035, 0x00050188,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x035, 0x00000145,
+ 		0x035, 0x00008145,
+ 		0x035, 0x00010145,
+@@ -1645,11 +1726,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x035, 0x000401C7,
+ 		0x035, 0x000481C7,
+ 		0x035, 0x000501C7,
+-	0xFF0F0104, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x0EF, 0x00000000,
+ 		0x018, 0x0001712A,
+ 		0x0EF, 0x00000010,
+-	0xFF0F0104, 0xABCD,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x036, 0x00085733,
+ 		0x036, 0x0008D733,
+ 		0x036, 0x00095733,
+@@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x036, 0x000CE4B4,
+ 		0x036, 0x000D64B4,
+ 		0x036, 0x000DE4B4,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x036, 0x00085733,
+ 		0x036, 0x0008D733,
+ 		0x036, 0x00095733,
+@@ -1675,7 +1756,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x036, 0x000CE4B4,
+ 		0x036, 0x000D64B4,
+ 		0x036, 0x000DE4B4,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x036, 0x000063B5,
++		0x036, 0x0000E3B5,
++		0x036, 0x000163B5,
++		0x036, 0x0001E3B5,
++		0x036, 0x000263B5,
++		0x036, 0x0002E3B5,
++		0x036, 0x000363B5,
++		0x036, 0x0003E3B5,
++		0x036, 0x000463B5,
++		0x036, 0x0004E3B5,
++		0x036, 0x000563B5,
++		0x036, 0x0005E3B5,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x036, 0x000056B3,
++		0x036, 0x0000D6B3,
++		0x036, 0x000156B3,
++		0x036, 0x0001D6B3,
++		0x036, 0x00026634,
++		0x036, 0x0002E634,
++		0x036, 0x00036634,
++		0x036, 0x0003E634,
++		0x036, 0x000467B4,
++		0x036, 0x0004E7B4,
++		0x036, 0x000567B4,
++		0x036, 0x0005E7B4,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x036, 0x000063B5,
++		0x036, 0x0000E3B5,
++		0x036, 0x000163B5,
++		0x036, 0x0001E3B5,
++		0x036, 0x000263B5,
++		0x036, 0x0002E3B5,
++		0x036, 0x000363B5,
++		0x036, 0x0003E3B5,
++		0x036, 0x000463B5,
++		0x036, 0x0004E3B5,
++		0x036, 0x000563B5,
++		0x036, 0x0005E3B5,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x036, 0x00085733,
+ 		0x036, 0x0008D733,
+ 		0x036, 0x00095733,
+@@ -1688,7 +1808,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x036, 0x000CE4B4,
+ 		0x036, 0x000D64B4,
+ 		0x036, 0x000DE4B4,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x036, 0x000056B3,
+ 		0x036, 0x0000D6B3,
+ 		0x036, 0x000156B3,
+@@ -1701,103 +1821,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x036, 0x0004E7B4,
+ 		0x036, 0x000567B4,
+ 		0x036, 0x0005E7B4,
+-	0xFF0F0104, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x0EF, 0x00000000,
+ 		0x0EF, 0x00000008,
+-	0xFF0F0104, 0xABCD,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x000001C8,
+ 		0x03C, 0x00000492,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x000001C8,
+ 		0x03C, 0x00000492,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x000001B6,
++		0x03C, 0x00000492,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x0000022A,
++		0x03C, 0x00000594,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x000001B6,
++		0x03C, 0x00000492,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x000001C8,
+ 		0x03C, 0x00000492,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x03C, 0x0000022A,
+ 		0x03C, 0x00000594,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x00000800,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x00000800,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x00000800,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x00000820,
+-	0xCDCDCDCD, 0xCDCD,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x00000820,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x00000800,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x00000800,
++	0xA0000000,	0x00000000,
+ 		0x03C, 0x00000900,
+-	0xFF0F0104, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x0EF, 0x00000000,
+ 		0x018, 0x0001712A,
+ 		0x0EF, 0x00000002,
+-	0xFF0F0104, 0xABCD,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x008, 0x0004E400,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x008, 0x0004E400,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x008, 0x00002000,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x008, 0x00002000,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x008, 0x00002000,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x008, 0x00002000,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x008, 0x0004E400,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x008, 0x00002000,
+-	0xFF0F0104, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x0EF, 0x00000000,
+ 		0x0DF, 0x000000C0,
+-		0x01F, 0x00040064,
+-	0xFF0F0104, 0xABCD,
++		0x01F, 0x00000064,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x058, 0x000A7284,
+ 		0x059, 0x000600EC,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x058, 0x000A7284,
+ 		0x059, 0x000600EC,
+-	0xFF0F0404, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x058, 0x00081184,
++		0x059, 0x0006016C,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x058, 0x00081184,
++		0x059, 0x0006016C,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x058, 0x00081184,
++		0x059, 0x0006016C,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x058, 0x000A7284,
+ 		0x059, 0x000600EC,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x058, 0x00081184,
+ 		0x059, 0x0006016C,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x061, 0x000E8D73,
+ 		0x062, 0x00093FC5,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x061, 0x000E8D73,
+ 		0x062, 0x00093FC5,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x061, 0x000EFD83,
++		0x062, 0x00093FCC,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x061, 0x000EAD53,
++		0x062, 0x00093BC4,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x061, 0x000EFD83,
++		0x062, 0x00093FCC,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x061, 0x000E8D73,
+ 		0x062, 0x00093FC5,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x061, 0x000EAD53,
+ 		0x062, 0x00093BC4,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x063, 0x000110E9,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x063, 0x000110E9,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x063, 0x000110EB,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x063, 0x000110E9,
+-	0xFF0F0200, 0xCDEF,
+-		0x063, 0x000710E9,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x063, 0x000110E9,
+-	0xCDCDCDCD, 0xCDCD,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x063, 0x000110EB,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x063, 0x000110E9,
++	0xA0000000,	0x00000000,
+ 		0x063, 0x000714E9,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
++		0x064, 0x0001C27C,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
++		0x064, 0x0001C27C,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x064, 0x0001C27C,
+-	0xFF0F0204, 0xCDEF,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x064, 0x0001C67C,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
+ 		0x064, 0x0001C27C,
+-	0xFF0F0404, 0xCDEF,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x064, 0x0001C27C,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x064, 0x0001C67C,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0200, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
++		0x065, 0x00091016,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
++		0x065, 0x00091016,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x065, 0x00093016,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x065, 0x00093015,
+-	0xCDCDCDCD, 0xCDCD,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x065, 0x00093015,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x065, 0x00093016,
++	0xA0000000,	0x00000000,
+ 		0x065, 0x00091016,
+-	0xFF0F0200, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x018, 0x00000006,
+ 		0x0EF, 0x00002000,
+ 		0x03B, 0x0003824B,
+@@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x0B4, 0x0001214C,
+ 		0x0B7, 0x0003000C,
+ 		0x01C, 0x000539D2,
++		0x0C4, 0x000AFE00,
+ 		0x018, 0x0001F12A,
+-		0x0FE, 0x00000000,
+-		0x0FE, 0x00000000,
++		0xFFE, 0x00000000,
++		0xFFE, 0x00000000,
+ 		0x018, 0x0001712A,
+ 
+ };
+@@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
+ u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
+ 
+ u32 RTL8821AE_MAC_REG_ARRAY[] = {
++		0x421, 0x0000000F,
+ 		0x428, 0x0000000A,
+ 		0x429, 0x00000010,
+ 		0x430, 0x00000000,
+@@ -2485,7 +2666,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+ 		0x81C, 0xA6360001,
+ 		0x81C, 0xA5380001,
+ 		0x81C, 0xA43A0001,
+-		0x81C, 0xA33C0001,
++		0x81C, 0x683C0001,
+ 		0x81C, 0x673E0001,
+ 		0x81C, 0x66400001,
+ 		0x81C, 0x65420001,
+@@ -2519,7 +2700,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+ 		0x81C, 0x017A0001,
+ 		0x81C, 0x017C0001,
+ 		0x81C, 0x017E0001,
+-	0xFF0F02C0, 0xABCD,
++	0x8000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x81C, 0xFB000101,
++		0x81C, 0xFA020101,
++		0x81C, 0xF9040101,
++		0x81C, 0xF8060101,
++		0x81C, 0xF7080101,
++		0x81C, 0xF60A0101,
++		0x81C, 0xF50C0101,
++		0x81C, 0xF40E0101,
++		0x81C, 0xF3100101,
++		0x81C, 0xF2120101,
++		0x81C, 0xF1140101,
++		0x81C, 0xF0160101,
++		0x81C, 0xEF180101,
++		0x81C, 0xEE1A0101,
++		0x81C, 0xED1C0101,
++		0x81C, 0xEC1E0101,
++		0x81C, 0xEB200101,
++		0x81C, 0xEA220101,
++		0x81C, 0xE9240101,
++		0x81C, 0xE8260101,
++		0x81C, 0xE7280101,
++		0x81C, 0xE62A0101,
++		0x81C, 0xE52C0101,
++		0x81C, 0xE42E0101,
++		0x81C, 0xE3300101,
++		0x81C, 0xA5320101,
++		0x81C, 0xA4340101,
++		0x81C, 0xA3360101,
++		0x81C, 0x87380101,
++		0x81C, 0x863A0101,
++		0x81C, 0x853C0101,
++		0x81C, 0x843E0101,
++		0x81C, 0x69400101,
++		0x81C, 0x68420101,
++		0x81C, 0x67440101,
++		0x81C, 0x66460101,
++		0x81C, 0x49480101,
++		0x81C, 0x484A0101,
++		0x81C, 0x474C0101,
++		0x81C, 0x2A4E0101,
++		0x81C, 0x29500101,
++		0x81C, 0x28520101,
++		0x81C, 0x27540101,
++		0x81C, 0x26560101,
++		0x81C, 0x25580101,
++		0x81C, 0x245A0101,
++		0x81C, 0x235C0101,
++		0x81C, 0x055E0101,
++		0x81C, 0x04600101,
++		0x81C, 0x03620101,
++		0x81C, 0x02640101,
++		0x81C, 0x01660101,
++		0x81C, 0x01680101,
++		0x81C, 0x016A0101,
++		0x81C, 0x016C0101,
++		0x81C, 0x016E0101,
++		0x81C, 0x01700101,
++		0x81C, 0x01720101,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x81C, 0xFB000101,
+ 		0x81C, 0xFA020101,
+ 		0x81C, 0xF9040101,
+@@ -2578,7 +2818,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+ 		0x81C, 0x016E0101,
+ 		0x81C, 0x01700101,
+ 		0x81C, 0x01720101,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x81C, 0xFF000101,
+ 		0x81C, 0xFF020101,
+ 		0x81C, 0xFE040101,
+@@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+ 		0x81C, 0x046E0101,
+ 		0x81C, 0x03700101,
+ 		0x81C, 0x02720101,
+-	0xFF0F02C0, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x81C, 0x01740101,
+ 		0x81C, 0x01760101,
+ 		0x81C, 0x01780101,
+diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
+index 948cb79050ea9..e7d51ac9b6891 100644
+--- a/drivers/net/wireless/realtek/rtw88/debug.c
++++ b/drivers/net/wireless/realtek/rtw88/debug.c
+@@ -270,7 +270,7 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
+ 
+ 	if (num != 2) {
+ 		rtw_warn(rtwdev, "invalid arguments\n");
+-		return num;
++		return -EINVAL;
+ 	}
+ 
+ 	debugfs_priv->rsvd_page.page_offset = offset;
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
+index 786a486499463..6b5c885798a4b 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.c
++++ b/drivers/net/wireless/realtek/rtw88/pci.c
+@@ -581,23 +581,30 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
+ {
+ 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ 
++	rtw_pci_napi_start(rtwdev);
++
+ 	spin_lock_bh(&rtwpci->irq_lock);
++	rtwpci->running = true;
+ 	rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
+ 	spin_unlock_bh(&rtwpci->irq_lock);
+ 
+-	rtw_pci_napi_start(rtwdev);
+-
+ 	return 0;
+ }
+ 
+ static void rtw_pci_stop(struct rtw_dev *rtwdev)
+ {
+ 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
++	struct pci_dev *pdev = rtwpci->pdev;
+ 
++	spin_lock_bh(&rtwpci->irq_lock);
++	rtwpci->running = false;
++	rtw_pci_disable_interrupt(rtwdev, rtwpci);
++	spin_unlock_bh(&rtwpci->irq_lock);
++
++	synchronize_irq(pdev->irq);
+ 	rtw_pci_napi_stop(rtwdev);
+ 
+ 	spin_lock_bh(&rtwpci->irq_lock);
+-	rtw_pci_disable_interrupt(rtwdev, rtwpci);
+ 	rtw_pci_dma_release(rtwdev, rtwpci);
+ 	spin_unlock_bh(&rtwpci->irq_lock);
+ }
+@@ -1138,7 +1145,8 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
+ 		rtw_fw_c2h_cmd_isr(rtwdev);
+ 
+ 	/* all of the jobs for this interrupt have been done */
+-	rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
++	if (rtwpci->running)
++		rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
+ 	spin_unlock_bh(&rtwpci->irq_lock);
+ 
+ 	return IRQ_HANDLED;
+@@ -1558,7 +1566,8 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
+ 	if (work_done < budget) {
+ 		napi_complete_done(napi, work_done);
+ 		spin_lock_bh(&rtwpci->irq_lock);
+-		rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
++		if (rtwpci->running)
++			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
+ 		spin_unlock_bh(&rtwpci->irq_lock);
+ 		/* When ISR happens during polling and before napi_complete
+ 		 * while no further data is received. Data on the dma_ring will
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
+index e76fc549a7883..0ffae887527a2 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.h
++++ b/drivers/net/wireless/realtek/rtw88/pci.h
+@@ -211,6 +211,7 @@ struct rtw_pci {
+ 	spinlock_t irq_lock;
+ 	u32 irq_mask[4];
+ 	bool irq_enabled;
++	bool running;
+ 
+ 	/* napi structure */
+ 	struct net_device netdev;
+diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
+index e114ddecac099..0b3da5bef7036 100644
+--- a/drivers/net/wireless/realtek/rtw88/phy.c
++++ b/drivers/net/wireless/realtek/rtw88/phy.c
+@@ -1584,7 +1584,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
+ }
+ EXPORT_SYMBOL(rtw_phy_load_tables);
+ 
+-static u8 rtw_get_channel_group(u8 channel)
++static u8 rtw_get_channel_group(u8 channel, u8 rate)
+ {
+ 	switch (channel) {
+ 	default:
+@@ -1628,6 +1628,7 @@ static u8 rtw_get_channel_group(u8 channel)
+ 	case 106:
+ 		return 4;
+ 	case 14:
++		return rate <= DESC_RATE11M ? 5 : 4;
+ 	case 108:
+ 	case 110:
+ 	case 112:
+@@ -1879,7 +1880,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
+ 	s8 *remnant = &pwr_param->pwr_remnant;
+ 
+ 	pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
+-	group = rtw_get_channel_group(ch);
++	group = rtw_get_channel_group(ch, rate);
+ 
+ 	/* base power index for 2.4G/5G */
+ 	if (IS_CH_2G_BAND(ch)) {
+diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
+index e14d88e558f04..85abd0a2d1c90 100644
+--- a/drivers/net/wireless/ti/wlcore/boot.c
++++ b/drivers/net/wireless/ti/wlcore/boot.c
+@@ -72,6 +72,7 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
+ 	unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
+ 		wl->min_mr_fw_ver : wl->min_sr_fw_ver;
+ 	char min_fw_str[32] = "";
++	int off = 0;
+ 	int i;
+ 
+ 	/* the chip must be exactly equal */
+@@ -105,13 +106,15 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
+ 	return 0;
+ 
+ fail:
+-	for (i = 0; i < NUM_FW_VER; i++)
++	for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++)
+ 		if (min_ver[i] == WLCORE_FW_VER_IGNORE)
+-			snprintf(min_fw_str, sizeof(min_fw_str),
+-				  "%s*.", min_fw_str);
++			off += snprintf(min_fw_str + off,
++					sizeof(min_fw_str) - off,
++					"*.");
+ 		else
+-			snprintf(min_fw_str, sizeof(min_fw_str),
+-				  "%s%u.", min_fw_str, min_ver[i]);
++			off += snprintf(min_fw_str + off,
++					sizeof(min_fw_str) - off,
++					"%u.", min_ver[i]);
+ 
+ 	wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
+ 		     "Please use at least FW %s\n"
+diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
+index b143293e694f9..a9e13e6d65c50 100644
+--- a/drivers/net/wireless/ti/wlcore/debugfs.h
++++ b/drivers/net/wireless/ti/wlcore/debugfs.h
+@@ -78,13 +78,14 @@ static ssize_t sub## _ ##name## _read(struct file *file,		\
+ 	struct wl1271 *wl = file->private_data;				\
+ 	struct struct_type *stats = wl->stats.fw_stats;			\
+ 	char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = "";			\
++	int pos = 0;							\
+ 	int i;								\
+ 									\
+ 	wl1271_debugfs_update_stats(wl);				\
+ 									\
+-	for (i = 0; i < len; i++)					\
+-		snprintf(buf, sizeof(buf), "%s[%d] = %d\n",		\
+-			 buf, i, stats->sub.name[i]);			\
++	for (i = 0; i < len && pos < sizeof(buf); i++)			\
++		pos += snprintf(buf + pos, sizeof(buf) - pos,		\
++			 "[%d] = %d\n", i, stats->sub.name[i]);		\
+ 									\
+ 	return wl1271_format_buffer(userbuf, count, ppos, "%s", buf);	\
+ }									\
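
Both wlcore hunks above replace the same undefined construct: passing the destination buffer to snprintf() as one of its own source arguments. The fix appends at a tracked offset bounded by the remaining space. A minimal sketch with example version values:

    char buf[32] = "";
    size_t off = 0;
    unsigned int ver[5] = { 8, 9, 0, 0, 58 };       /* example values */
    int i;

    for (i = 0; i < 5 && off < sizeof(buf); i++)
            off += snprintf(buf + off, sizeof(buf) - off, "%u.", ver[i]);
    /* snprintf() returns the would-be length, so off can exceed
     * sizeof(buf); the loop guard stops any further writes. */
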
+diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
+index f1469ac8ff425..3fe5b81eda2d6 100644
+--- a/drivers/nfc/pn533/pn533.c
++++ b/drivers/nfc/pn533/pn533.c
+@@ -706,6 +706,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
+ 	if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
+ 		return false;
+ 
++	if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE)
++		return false;
++
+ 	return true;
+ }
+ 
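
The added length check guards a later copy: the parsed nfcid is stored into a fixed-size array in the target structure, so a wire-supplied length has to be validated first. A sketch of the failure mode being closed (structure shape assumed for illustration):

    struct example_target {
            u8 nfcid1[NFC_NFCID1_MAXSIZE];  /* fixed-size destination */
            u8 nfcid1_len;
    };

    /* Without the check, a frame claiming nfcid_len > NFC_NFCID1_MAXSIZE
     * would overflow nfcid1 in a copy like this: */
    memcpy(target->nfcid1, type_a->nfcid, type_a->nfcid_len);
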
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index a1d476e1ac020..ec1e454848e58 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -668,6 +668,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+ 		if (desc.state) {
+ 			/* found the group desc: update */
+ 			nvme_update_ns_ana_state(&desc, ns);
++		} else {
++			/* group desc not found: trigger a re-read */
++			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
++			queue_work(nvme_wq, &ns->ctrl->ana_work);
+ 		}
+ 	} else {
+ 		ns->ana_state = NVME_ANA_OPTIMIZED; 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 7249ae74f71ff..c92a15c3fbc5e 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -852,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ 				return nvme_setup_prp_simple(dev, req,
+ 							     &cmnd->rw, &bv);
+ 
+-			if (iod->nvmeq->qid &&
++			if (iod->nvmeq->qid && sgl_threshold &&
+ 			    dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
+ 				return nvme_setup_sgl_simple(dev, req,
+ 							     &cmnd->rw, &bv);
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index a0f00cb8f9f3c..d7d7c81d07014 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -874,7 +874,7 @@ static void nvme_tcp_state_change(struct sock *sk)
+ {
+ 	struct nvme_tcp_queue *queue;
+ 
+-	read_lock(&sk->sk_callback_lock);
++	read_lock_bh(&sk->sk_callback_lock);
+ 	queue = sk->sk_user_data;
+ 	if (!queue)
+ 		goto done;
+@@ -895,7 +895,7 @@ static void nvme_tcp_state_change(struct sock *sk)
+ 
+ 	queue->state_change(sk);
+ done:
+-	read_unlock(&sk->sk_callback_lock);
++	read_unlock_bh(&sk->sk_callback_lock);
+ }
+ 
+ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
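
The _bh variants appear to be needed because the socket callbacks can also run from softirq context; a process-context path that takes sk_callback_lock with bottom halves enabled risks being interrupted on the same CPU by a softirq contending for the lock. The nvmet target hunk below makes the matching change (and relaxes a needless write lock to a read lock). A minimal sketch of the rule:

    /* Process context: block local softirqs while holding a lock that
     * softirq handlers also take, otherwise the CPU can self-deadlock. */
    read_lock_bh(&sk->sk_callback_lock);
    queue = sk->sk_user_data;
    /* ... */
    read_unlock_bh(&sk->sk_callback_lock);
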
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index d658c6e8263af..d958b5da9b88a 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -525,11 +525,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
+ 	struct nvmet_tcp_cmd *cmd =
+ 		container_of(req, struct nvmet_tcp_cmd, req);
+ 	struct nvmet_tcp_queue	*queue = cmd->queue;
++	struct nvme_sgl_desc *sgl;
++	u32 len;
++
++	if (unlikely(cmd == queue->cmd)) {
++		sgl = &cmd->req.cmd->common.dptr.sgl;
++		len = le32_to_cpu(sgl->length);
++
++		/*
++		 * Wait for inline data before processing the response.
++		 * Avoid using helpers; this might happen before
++		 * nvmet_req_init is completed.
++		 */
++		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
++		    len && len < cmd->req.port->inline_data_size &&
++		    nvme_is_write(cmd->req.cmd))
++			return;
++	}
+ 
+ 	llist_add(&cmd->lentry, &queue->resp_list);
+ 	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
+ }
+ 
++static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
++{
++	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
++		nvmet_tcp_queue_response(&cmd->req);
++	else
++		cmd->req.execute(&cmd->req);
++}
++
+ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
+ {
+ 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+@@ -961,7 +986,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
+ 			le32_to_cpu(req->cmd->common.dptr.sgl.length));
+ 
+ 		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
+-		return -EAGAIN;
++		return 0;
+ 	}
+ 
+ 	ret = nvmet_tcp_map_data(queue->cmd);
+@@ -1104,10 +1129,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
+ 		return 0;
+ 	}
+ 
+-	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+-	    cmd->rbytes_done == cmd->req.transfer_len) {
+-		cmd->req.execute(&cmd->req);
+-	}
++	if (cmd->rbytes_done == cmd->req.transfer_len)
++		nvmet_tcp_execute_request(cmd);
+ 
+ 	nvmet_prepare_receive_pdu(queue);
+ 	return 0;
+@@ -1144,9 +1167,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
+ 		goto out;
+ 	}
+ 
+-	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+-	    cmd->rbytes_done == cmd->req.transfer_len)
+-		cmd->req.execute(&cmd->req);
++	if (cmd->rbytes_done == cmd->req.transfer_len)
++		nvmet_tcp_execute_request(cmd);
++
+ 	ret = 0;
+ out:
+ 	nvmet_prepare_receive_pdu(queue);
+@@ -1434,7 +1457,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
+ {
+ 	struct nvmet_tcp_queue *queue;
+ 
+-	write_lock_bh(&sk->sk_callback_lock);
++	read_lock_bh(&sk->sk_callback_lock);
+ 	queue = sk->sk_user_data;
+ 	if (!queue)
+ 		goto done;
+@@ -1452,7 +1475,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
+ 			queue->idx, sk->sk_state);
+ 	}
+ done:
+-	write_unlock_bh(&sk->sk_callback_lock);
++	read_unlock_bh(&sk->sk_callback_lock);
+ }
+ 
+ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
+diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
+index 75d2594c16e19..267a0d9e99ba0 100644
+--- a/drivers/nvmem/Kconfig
++++ b/drivers/nvmem/Kconfig
+@@ -272,6 +272,7 @@ config SPRD_EFUSE
+ 
+ config NVMEM_RMEM
+ 	tristate "Reserved Memory Based Driver Support"
++	depends on HAS_IOMEM
+ 	help
+ 	  This driver maps reserved memory into an nvmem device. It might be
+ 	  useful to expose information left by firmware in memory.
+diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
+index 6cace24dfbf73..100d69d8f2e1c 100644
+--- a/drivers/nvmem/qfprom.c
++++ b/drivers/nvmem/qfprom.c
+@@ -127,6 +127,16 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
+ {
+ 	int ret;
+ 
++	/*
++	 * This may be a shared rail and may be able to run at a lower rate
++	 * when we're not blowing fuses.  At the moment, the regulator framework
++	 * applies voltage constraints even on disabled rails, so remove our
++	 * constraints and allow the rail to be adjusted by other users.
++	 */
++	ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
++	if (ret)
++		dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
++
+ 	ret = regulator_disable(priv->vcc);
+ 	if (ret)
+ 		dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
+@@ -172,6 +182,17 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
+ 		goto err_clk_prepared;
+ 	}
+ 
++	/*
++	 * Hardware requires 1.8V min for fuse blowing; this may be
++	 * a shared rail, so don't specify a max -- regulator constraints
++	 * will handle it.
++	 */
++	ret = regulator_set_voltage(priv->vcc, 1800000, INT_MAX);
++	if (ret) {
++		dev_err(priv->dev, "Failed to set 1.8 voltage\n");
++		goto err_clk_rate_set;
++	}
++
+ 	ret = regulator_enable(priv->vcc);
+ 	if (ret) {
+ 		dev_err(priv->dev, "Failed to enable regulator\n");
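
Together the two qfprom hunks bracket the burn with a temporary voltage floor: require at least 1.8 V only while fuses are being blown, then drop the constraint so other consumers of a shared rail can scale it back down. A condensed sketch of the flow as the hunks arrange it:

    /* enable path: raise our minimum for the burn */
    regulator_set_voltage(priv->vcc, 1800000, INT_MAX);
    regulator_enable(priv->vcc);

    /* ... blow fuses ... */

    /* disable path: release the minimum, then power down */
    regulator_set_voltage(priv->vcc, 0, INT_MAX);
    regulator_disable(priv->vcc);
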
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
+index 23effe5e50ece..2d132949572d6 100644
+--- a/drivers/of/overlay.c
++++ b/drivers/of/overlay.c
+@@ -796,6 +796,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
+ 		if (!fragment->target) {
+ 			of_node_put(fragment->overlay);
+ 			ret = -EINVAL;
++			of_node_put(node);
+ 			goto err_free_fragments;
+ 		}
+ 
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 53aa35cb3a493..a59ecbec601fc 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -798,7 +798,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
+ 	int ret;
+ 
+ 	pp->bridge->ops = &ks_pcie_ops;
+-	pp->bridge->child_ops = &ks_child_pcie_ops;
++	if (!ks_pcie->is_am6)
++		pp->bridge->child_ops = &ks_child_pcie_ops;
+ 
+ 	ret = ks_pcie_config_legacy_irq(ks_pcie);
+ 	if (ret)
+diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
+index 2afdc865253e8..7f503dd4ff81d 100644
+--- a/drivers/pci/controller/pci-xgene.c
++++ b/drivers/pci/controller/pci-xgene.c
+@@ -354,7 +354,8 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
+ 	if (IS_ERR(port->csr_base))
+ 		return PTR_ERR(port->csr_base);
+ 
+-	port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
++	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
++	port->cfg_base = devm_ioremap_resource(dev, res);
+ 	if (IS_ERR(port->cfg_base))
+ 		return PTR_ERR(port->cfg_base);
+ 	port->cfg_addr = res->start;
+diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
+index 7915d10f9aa10..bd549070c0112 100644
+--- a/drivers/pci/vpd.c
++++ b/drivers/pci/vpd.c
+@@ -570,7 +570,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
+ 		quirk_blacklist_vpd);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
+ /*
+  * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
+  * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
+diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
+index 26a0badabe38b..19f32ae877b94 100644
+--- a/drivers/phy/cadence/phy-cadence-sierra.c
++++ b/drivers/phy/cadence/phy-cadence-sierra.c
+@@ -319,6 +319,12 @@ static int cdns_sierra_phy_on(struct phy *gphy)
+ 	u32 val;
+ 	int ret;
+ 
++	ret = reset_control_deassert(sp->phy_rst);
++	if (ret) {
++		dev_err(dev, "Failed to take the PHY out of reset\n");
++		return ret;
++	}
++
+ 	/* Take the PHY lane group out of reset */
+ 	ret = reset_control_deassert(ins->lnk_rst);
+ 	if (ret) {
+@@ -616,7 +622,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_enable(dev);
+ 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+-	reset_control_deassert(sp->phy_rst);
+ 	return PTR_ERR_OR_ZERO(phy_provider);
+ 
+ put_child:
+diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
+index ea127b177f46b..28c28d8164849 100644
+--- a/drivers/phy/ingenic/phy-ingenic-usb.c
++++ b/drivers/phy/ingenic/phy-ingenic-usb.c
+@@ -352,8 +352,8 @@ static int ingenic_usb_phy_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
+-	if (IS_ERR(priv))
+-		return PTR_ERR(priv);
++	if (IS_ERR(priv->phy))
++		return PTR_ERR(priv->phy);
+ 
+ 	phy_set_drvdata(priv->phy, priv);
+ 
+diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
+index 6c96f2bf52665..c8ee23fc3a83d 100644
+--- a/drivers/phy/marvell/Kconfig
++++ b/drivers/phy/marvell/Kconfig
+@@ -3,8 +3,8 @@
+ # Phy drivers for Marvell platforms
+ #
+ config ARMADA375_USBCLUSTER_PHY
+-	def_bool y
+-	depends on MACH_ARMADA_375 || COMPILE_TEST
++	bool "Armada 375 USB cluster PHY support" if COMPILE_TEST
++	default y if MACH_ARMADA_375
+ 	depends on OF && HAS_IOMEM
+ 	select GENERIC_PHY
+ 
+diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
+index 9a610b414b1fb..753cb5bab9308 100644
+--- a/drivers/phy/ralink/phy-mt7621-pci.c
++++ b/drivers/phy/ralink/phy-mt7621-pci.c
+@@ -62,7 +62,7 @@
+ 
+ #define RG_PE1_FRC_MSTCKDIV			BIT(5)
+ 
+-#define XTAL_MASK				GENMASK(7, 6)
++#define XTAL_MASK				GENMASK(8, 6)
+ 
+ #define MAX_PHYS	2
+ 
+@@ -319,9 +319,9 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
+ 		return PTR_ERR(phy->regmap);
+ 
+ 	phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
+-	if (IS_ERR(phy)) {
++	if (IS_ERR(phy->phy)) {
+ 		dev_err(dev, "failed to create phy\n");
+-		return PTR_ERR(phy);
++		return PTR_ERR(phy->phy);
+ 	}
+ 
+ 	phy_set_drvdata(phy->phy, phy);
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index c9cfafe89cbf1..e28e25f98708c 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -615,6 +615,12 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
+ 		of_clk_del_provider(clk_node);
+ 		of_node_put(clk_node);
+ 	}
++
++	for (i = 0; i < wiz->clk_div_sel_num; i++) {
++		clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
++		of_clk_del_provider(clk_node);
++		of_node_put(clk_node);
++	}
+ }
+ 
+ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
+@@ -947,27 +953,24 @@ static int wiz_probe(struct platform_device *pdev)
+ 		goto err_get_sync;
+ 	}
+ 
++	ret = wiz_init(wiz);
++	if (ret) {
++		dev_err(dev, "WIZ initialization failed\n");
++		goto err_wiz_init;
++	}
++
+ 	serdes_pdev = of_platform_device_create(child_node, NULL, dev);
+ 	if (!serdes_pdev) {
+ 		dev_WARN(dev, "Unable to create SERDES platform device\n");
+ 		ret = -ENOMEM;
+-		goto err_pdev_create;
+-	}
+-	wiz->serdes_pdev = serdes_pdev;
+-
+-	ret = wiz_init(wiz);
+-	if (ret) {
+-		dev_err(dev, "WIZ initialization failed\n");
+ 		goto err_wiz_init;
+ 	}
++	wiz->serdes_pdev = serdes_pdev;
+ 
+ 	of_node_put(child_node);
+ 	return 0;
+ 
+ err_wiz_init:
+-	of_platform_device_destroy(&serdes_pdev->dev, NULL);
+-
+-err_pdev_create:
+ 	wiz_clock_cleanup(wiz, node);
+ 
+ err_get_sync:
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index e71ebccc479cf..03c32b2c5d303 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -801,6 +801,10 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
+ 
+ 	conf = atmel_pin_config_read(pctldev, pin_id);
+ 
++	/* Keep slew rate enabled by default. */
++	if (atmel_pioctrl->slew_rate_support)
++		conf |= ATMEL_PIO_SR_MASK;
++
+ 	for (i = 0; i < num_configs; i++) {
+ 		unsigned int param = pinconf_to_config_param(configs[i]);
+ 		unsigned int arg = pinconf_to_config_argument(configs[i]);
+@@ -808,10 +812,6 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
+ 		dev_dbg(pctldev->dev, "%s: pin=%u, config=0x%lx\n",
+ 			__func__, pin_id, configs[i]);
+ 
+-		/* Keep slew rate enabled by default. */
+-		if (atmel_pioctrl->slew_rate_support)
+-			conf |= ATMEL_PIO_SR_MASK;
+-
+ 		switch (param) {
+ 		case PIN_CONFIG_BIAS_DISABLE:
+ 			conf &= (~ATMEL_PIO_PUEN_MASK);
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index 7771316dfffab..10890fde9a751 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -270,20 +270,44 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
+ 	writel(val, reg);
+ }
+ 
++static unsigned int pcs_pin_reg_offset_get(struct pcs_device *pcs,
++					   unsigned int pin)
++{
++	unsigned int mux_bytes = pcs->width / BITS_PER_BYTE;
++
++	if (pcs->bits_per_mux) {
++		unsigned int pin_offset_bytes;
++
++		pin_offset_bytes = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
++		return (pin_offset_bytes / mux_bytes) * mux_bytes;
++	}
++
++	return pin * mux_bytes;
++}
++
++static unsigned int pcs_pin_shift_reg_get(struct pcs_device *pcs,
++					  unsigned int pin)
++{
++	return (pin % (pcs->width / pcs->bits_per_pin)) * pcs->bits_per_pin;
++}
++
+ static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
+ 					struct seq_file *s,
+ 					unsigned pin)
+ {
+ 	struct pcs_device *pcs;
+-	unsigned val, mux_bytes;
++	unsigned int val;
+ 	unsigned long offset;
+ 	size_t pa;
+ 
+ 	pcs = pinctrl_dev_get_drvdata(pctldev);
+ 
+-	mux_bytes = pcs->width / BITS_PER_BYTE;
+-	offset = pin * mux_bytes;
++	offset = pcs_pin_reg_offset_get(pcs, pin);
+ 	val = pcs->read(pcs->base + offset);
++
++	if (pcs->bits_per_mux)
++		val &= pcs->fmask << pcs_pin_shift_reg_get(pcs, pin);
++
+ 	pa = pcs->res->start + offset;
+ 
+ 	seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME);
+@@ -384,7 +408,6 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
+ 	struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
+ 	struct pcs_gpiofunc_range *frange = NULL;
+ 	struct list_head *pos, *tmp;
+-	int mux_bytes = 0;
+ 	unsigned data;
+ 
+ 	/* If function mask is null, return directly. */
+@@ -392,29 +415,27 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
+ 		return -ENOTSUPP;
+ 
+ 	list_for_each_safe(pos, tmp, &pcs->gpiofuncs) {
++		u32 offset;
++
+ 		frange = list_entry(pos, struct pcs_gpiofunc_range, node);
+ 		if (pin >= frange->offset + frange->npins
+ 			|| pin < frange->offset)
+ 			continue;
+-		mux_bytes = pcs->width / BITS_PER_BYTE;
+ 
+-		if (pcs->bits_per_mux) {
+-			int byte_num, offset, pin_shift;
++		offset = pcs_pin_reg_offset_get(pcs, pin);
+ 
+-			byte_num = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
+-			offset = (byte_num / mux_bytes) * mux_bytes;
+-			pin_shift = pin % (pcs->width / pcs->bits_per_pin) *
+-				    pcs->bits_per_pin;
++		if (pcs->bits_per_mux) {
++			int pin_shift = pcs_pin_shift_reg_get(pcs, pin);
+ 
+ 			data = pcs->read(pcs->base + offset);
+ 			data &= ~(pcs->fmask << pin_shift);
+ 			data |= frange->gpiofunc << pin_shift;
+ 			pcs->write(data, pcs->base + offset);
+ 		} else {
+-			data = pcs->read(pcs->base + pin * mux_bytes);
++			data = pcs->read(pcs->base + offset);
+ 			data &= ~pcs->fmask;
+ 			data |= frange->gpiofunc;
+-			pcs->write(data, pcs->base + pin * mux_bytes);
++			pcs->write(data, pcs->base + offset);
+ 		}
+ 		break;
+ 	}
+@@ -656,10 +677,8 @@ static const struct pinconf_ops pcs_pinconf_ops = {
+  * pcs_add_pin() - add a pin to the static per controller pin array
+  * @pcs: pcs driver instance
+  * @offset: register offset from base
+- * @pin_pos: unused
+  */
+-static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
+-		unsigned pin_pos)
++static int pcs_add_pin(struct pcs_device *pcs, unsigned int offset)
+ {
+ 	struct pcs_soc_data *pcs_soc = &pcs->socdata;
+ 	struct pinctrl_pin_desc *pin;
+@@ -728,17 +747,9 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
+ 	for (i = 0; i < pcs->desc.npins; i++) {
+ 		unsigned offset;
+ 		int res;
+-		int byte_num;
+-		int pin_pos = 0;
+ 
+-		if (pcs->bits_per_mux) {
+-			byte_num = (pcs->bits_per_pin * i) / BITS_PER_BYTE;
+-			offset = (byte_num / mux_bytes) * mux_bytes;
+-			pin_pos = i % num_pins_in_register;
+-		} else {
+-			offset = i * mux_bytes;
+-		}
+-		res = pcs_add_pin(pcs, offset, pin_pos);
++		offset = pcs_pin_reg_offset_get(pcs, i);
++		res = pcs_add_pin(pcs, offset);
+ 		if (res < 0) {
+ 			dev_err(pcs->dev, "error adding pins: %i\n", res);
+ 			return res;
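
The refactor centralizes the bit-packed register math in two helpers. A worked example with assumed controller parameters (16-bit registers, 2 bits per pin, hence 8 pins per register) for pin 21:

    unsigned int width = 16, bits_per_pin = 2, pin = 21;
    unsigned int mux_bytes = width / BITS_PER_BYTE;                 /* 2 */
    unsigned int byte = (bits_per_pin * pin) / BITS_PER_BYTE;       /* 42 / 8 = 5 */
    unsigned int offset = (byte / mux_bytes) * mux_bytes;           /* 4: third 16-bit register */
    unsigned int shift = (pin % (width / bits_per_pin)) * bits_per_pin; /* (21 % 8) * 2 = 10 */
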
+diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
+index 5bcb59ed579db..89761d3e1a476 100644
+--- a/drivers/platform/surface/aggregator/controller.c
++++ b/drivers/platform/surface/aggregator/controller.c
+@@ -1040,7 +1040,7 @@ static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
+ 	union acpi_object *obj;
+ 	u64 val;
+ 
+-	if (!(funcs & BIT(func)))
++	if (!(funcs & BIT_ULL(func)))
+ 		return 0; /* Not supported, leave *ret at its default value */
+ 
+ 	obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
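
BIT(n) expands to (1UL << (n)), and unsigned long is 32 bits on 32-bit kernels, so testing a function index of 32 or above in the 64-bit funcs mask shifts out of range. BIT_ULL() uses 1ULL and covers the whole mask. A minimal sketch:

    u64 funcs = 1ULL << 35;         /* firmware advertises function 35 */

    if (funcs & BIT(35)) { }        /* 1UL << 35: shift count >= width on 32-bit */
    if (funcs & BIT_ULL(35)) { }    /* 1ULL << 35: valid for all 64 bits */
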
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+index 7410ccae650c2..a90ae6ba4a73b 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+@@ -399,6 +399,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
+ 	union acpi_object *obj = NULL;
+ 	union acpi_object *elements;
+ 	struct kset *tmp_set;
++	int min_elements;
+ 
+ 	/* instance_id needs to be reset for each type GUID
+ 	 * also, instance IDs are unique within GUID but not across
+@@ -409,14 +410,38 @@ static int init_bios_attributes(int attr_type, const char *guid)
+ 	retval = alloc_attributes_data(attr_type);
+ 	if (retval)
+ 		return retval;
++
++	switch (attr_type) {
++	case ENUM:	min_elements = 8;	break;
++	case INT:	min_elements = 9;	break;
++	case STR:	min_elements = 8;	break;
++	case PO:	min_elements = 4;	break;
++	default:
++		pr_err("Error: Unknown attr_type: %d\n", attr_type);
++		return -EINVAL;
++	}
++
+ 	/* need to use specific instance_id and guid combination to get right data */
+ 	obj = get_wmiobj_pointer(instance_id, guid);
+-	if (!obj || obj->type != ACPI_TYPE_PACKAGE)
++	if (!obj)
+ 		return -ENODEV;
+-	elements = obj->package.elements;
+ 
+ 	mutex_lock(&wmi_priv.mutex);
+-	while (elements) {
++	while (obj) {
++		if (obj->type != ACPI_TYPE_PACKAGE) {
++			pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
++			retval = -EIO;
++			goto err_attr_init;
++		}
++
++		if (obj->package.count < min_elements) {
++			pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
++			       obj->package.count, min_elements);
++			goto nextobj;
++		}
++
++		elements = obj->package.elements;
++
+ 		/* sanity checking */
+ 		if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
+ 			pr_debug("incorrect element type\n");
+@@ -481,7 +506,6 @@ nextobj:
+ 		kfree(obj);
+ 		instance_id++;
+ 		obj = get_wmiobj_pointer(instance_id, guid);
+-		elements = obj ? obj->package.elements : NULL;
+ 	}
+ 
+ 	mutex_unlock(&wmi_priv.mutex);
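
The init_bios_attributes() hunks above stop dereferencing WMI objects blindly: each instance is first checked to be an ACPI package carrying a per-attribute-type minimum element count before its elements are read. A minimal sketch of that validation step, assuming a hypothetical check_wmi_pkg() helper:

	#include <linux/acpi.h>
	#include <linux/errno.h>

	/* Sketch only: mirrors the validation the hunks above add. */
	static int check_wmi_pkg(const union acpi_object *obj, int min_elements)
	{
		if (obj->type != ACPI_TYPE_PACKAGE)
			return -EIO;	/* wrong object type: abort the walk */
		if (obj->package.count < min_elements)
			return -EAGAIN;	/* short package: skip this instance */
		return 0;
	}
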
+diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
+index ca684ed760d14..a9d2a4b98e570 100644
+--- a/drivers/platform/x86/pmc_atom.c
++++ b/drivers/platform/x86/pmc_atom.c
+@@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = {
+ 	},
+ 	{
+ 		/* pmc_plt_clk* - are used for ethernet controllers */
+-		.ident = "Beckhoff CB3163",
++		.ident = "Beckhoff Baytrail",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+-			DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
+-		},
+-	},
+-	{
+-		/* pmc_plt_clk* - are used for ethernet controllers */
+-		.ident = "Beckhoff CB4063",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+-			DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
+-		},
+-	},
+-	{
+-		/* pmc_plt_clk* - are used for ethernet controllers */
+-		.ident = "Beckhoff CB6263",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+-			DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
+-		},
+-	},
+-	{
+-		/* pmc_plt_clk* - are used for ethernet controllers */
+-		.ident = "Beckhoff CB6363",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+-			DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
++			DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
+ 		},
+ 	},
+ 	{
+diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
+index 530ff4025b31c..0008c229fd9c7 100644
+--- a/drivers/power/supply/bq25980_charger.c
++++ b/drivers/power/supply/bq25980_charger.c
+@@ -606,33 +606,6 @@ static int bq25980_get_state(struct bq25980_device *bq,
+ 	return 0;
+ }
+ 
+-static int bq25980_set_battery_property(struct power_supply *psy,
+-				enum power_supply_property psp,
+-				const union power_supply_propval *val)
+-{
+-	struct bq25980_device *bq = power_supply_get_drvdata(psy);
+-	int ret = 0;
+-
+-	switch (psp) {
+-	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+-		ret = bq25980_set_const_charge_curr(bq, val->intval);
+-		if (ret)
+-			return ret;
+-		break;
+-
+-	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+-		ret = bq25980_set_const_charge_volt(bq, val->intval);
+-		if (ret)
+-			return ret;
+-		break;
+-
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	return ret;
+-}
+-
+ static int bq25980_get_battery_property(struct power_supply *psy,
+ 				enum power_supply_property psp,
+ 				union power_supply_propval *val)
+@@ -701,6 +674,18 @@ static int bq25980_set_charger_property(struct power_supply *psy,
+ 			return ret;
+ 		break;
+ 
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
++		ret = bq25980_set_const_charge_curr(bq, val->intval);
++		if (ret)
++			return ret;
++		break;
++
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
++		ret = bq25980_set_const_charge_volt(bq, val->intval);
++		if (ret)
++			return ret;
++		break;
++
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -922,7 +907,6 @@ static struct power_supply_desc bq25980_battery_desc = {
+ 	.name			= "bq25980-battery",
+ 	.type			= POWER_SUPPLY_TYPE_BATTERY,
+ 	.get_property		= bq25980_get_battery_property,
+-	.set_property		= bq25980_set_battery_property,
+ 	.properties		= bq25980_battery_props,
+ 	.num_properties		= ARRAY_SIZE(bq25980_battery_props),
+ 	.property_is_writeable	= bq25980_property_is_writeable,
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index 0262109ac285e..20e1dc8a87cf2 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1804,7 +1804,7 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
+ 		val->intval = curr * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS;
+ 	} else {
+ 		/* Other gauges return signed value */
+-		val->intval = -(int)((s16)curr) * 1000;
++		val->intval = (int)((s16)curr) * 1000;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
+index a8b5832a5a1bb..204a2da054f53 100644
+--- a/drivers/regulator/bd9576-regulator.c
++++ b/drivers/regulator/bd9576-regulator.c
+@@ -206,7 +206,7 @@ static int bd957x_probe(struct platform_device *pdev)
+ {
+ 	struct regmap *regmap;
+ 	struct regulator_config config = { 0 };
+-	int i, err;
++	int i;
+ 	bool vout_mode, ddr_sel;
+ 	const struct bd957x_regulator_data *reg_data = &bd9576_regulators[0];
+ 	unsigned int num_reg_data = ARRAY_SIZE(bd9576_regulators);
+@@ -279,8 +279,7 @@ static int bd957x_probe(struct platform_device *pdev)
+ 		break;
+ 	default:
+ 		dev_err(&pdev->dev, "Unsupported chip type\n");
+-		err = -EINVAL;
+-		goto err;
++		return -EINVAL;
+ 	}
+ 
+ 	config.dev = pdev->dev.parent;
+@@ -300,8 +299,7 @@ static int bd957x_probe(struct platform_device *pdev)
+ 			dev_err(&pdev->dev,
+ 				"failed to register %s regulator\n",
+ 				desc->name);
+-			err = PTR_ERR(rdev);
+-			goto err;
++			return PTR_ERR(rdev);
+ 		}
+ 		/*
+ 		 * Clear the VOUT1 GPIO setting - rest of the regulators do not
+@@ -310,8 +308,7 @@ static int bd957x_probe(struct platform_device *pdev)
+ 		config.ena_gpiod = NULL;
+ 	}
+ 
+-err:
+-	return err;
++	return 0;
+ }
+ 
+ static const struct platform_device_id bd957x_pmic_id[] = {
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+index 7451377c4cb68..3e359ac752fd2 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+@@ -1646,7 +1646,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 		idx = i * HISI_SAS_PHY_INT_NR;
+ 		for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
+ 			irq = platform_get_irq(pdev, idx);
+-			if (!irq) {
++			if (irq < 0) {
+ 				dev_err(dev, "irq init: fail map phy interrupt %d\n",
+ 					idx);
+ 				return -ENOENT;
+@@ -1665,7 +1665,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 	idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
+ 	for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
+ 		irq = platform_get_irq(pdev, idx);
+-		if (!irq) {
++		if (irq < 0) {
+ 			dev_err(dev, "irq init: could not map cq interrupt %d\n",
+ 				idx);
+ 			return -ENOENT;
+@@ -1683,7 +1683,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 	idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
+ 	for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
+ 		irq = platform_get_irq(pdev, idx);
+-		if (!irq) {
++		if (irq < 0) {
+ 			dev_err(dev, "irq init: could not map fatal interrupt %d\n",
+ 				idx);
+ 			return -ENOENT;
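
All three interrupt_init_v1_hw() hunks replace "if (!irq)" with "if (irq < 0)": platform_get_irq() reports failure through a negative errno, so testing for zero silently ignores errors such as -EPROBE_DEFER. The same pattern recurs in the jazz_esp, sni_53c710, sun3x_esp and ufshcd-pltfrm hunks below. A minimal sketch of the corrected probe-time pattern, with my_probe() and my_handler() as hypothetical names:

	#include <linux/platform_device.h>
	#include <linux/interrupt.h>

	static irqreturn_t my_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int my_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)		/* negative errno signals failure */
			return irq;	/* propagate it, incl. -EPROBE_DEFER */

		return devm_request_irq(&pdev->dev, irq, my_handler, 0,
					dev_name(&pdev->dev), NULL);
	}
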
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 61831f2fdb309..d6675a25719d5 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -603,8 +603,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
+ 		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
+ 			vhost->action = action;
+ 		break;
++	case IBMVFC_HOST_ACTION_REENABLE:
++	case IBMVFC_HOST_ACTION_RESET:
++		vhost->action = action;
++		break;
+ 	case IBMVFC_HOST_ACTION_INIT:
+ 	case IBMVFC_HOST_ACTION_TGT_DEL:
++	case IBMVFC_HOST_ACTION_LOGO:
++	case IBMVFC_HOST_ACTION_QUERY_TGTS:
++	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
++	case IBMVFC_HOST_ACTION_NONE:
++	default:
+ 		switch (vhost->action) {
+ 		case IBMVFC_HOST_ACTION_RESET:
+ 		case IBMVFC_HOST_ACTION_REENABLE:
+@@ -614,15 +623,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
+ 			break;
+ 		}
+ 		break;
+-	case IBMVFC_HOST_ACTION_LOGO:
+-	case IBMVFC_HOST_ACTION_QUERY_TGTS:
+-	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+-	case IBMVFC_HOST_ACTION_NONE:
+-	case IBMVFC_HOST_ACTION_RESET:
+-	case IBMVFC_HOST_ACTION_REENABLE:
+-	default:
+-		vhost->action = action;
+-		break;
+ 	}
+ }
+ 
+@@ -5373,30 +5373,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
+ 	case IBMVFC_HOST_ACTION_INIT_WAIT:
+ 		break;
+ 	case IBMVFC_HOST_ACTION_RESET:
+-		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ 		list_splice_init(&vhost->purge, &purge);
+ 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ 		ibmvfc_complete_purge(&purge);
+ 		rc = ibmvfc_reset_crq(vhost);
++
+ 		spin_lock_irqsave(vhost->host->host_lock, flags);
+-		if (rc == H_CLOSED)
++		if (!rc || rc == H_CLOSED)
+ 			vio_enable_interrupts(to_vio_dev(vhost->dev));
+-		if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+-		    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+-			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+-			dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
++		if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
++			/*
++			 * The only action we could have changed to would have
++			 * been reenable, in which case, we skip the rest of
++			 * this path and wait until we've done the re-enable
++			 * before sending the crq init.
++			 */
++			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
++
++			if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
++			    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
++				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
++				dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
++			}
+ 		}
+ 		break;
+ 	case IBMVFC_HOST_ACTION_REENABLE:
+-		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ 		list_splice_init(&vhost->purge, &purge);
+ 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ 		ibmvfc_complete_purge(&purge);
+ 		rc = ibmvfc_reenable_crq_queue(vhost);
++
+ 		spin_lock_irqsave(vhost->host->host_lock, flags);
+-		if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
+-			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+-			dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
++		if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
++			/*
++			 * The only action we could have changed to would have
++			 * been reset, in which case, we skip the rest of this
++			 * path and wait until we've done the reset before
++			 * sending the crq init.
++			 */
++			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
++			if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
++				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
++				dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
++			}
+ 		}
+ 		break;
+ 	case IBMVFC_HOST_ACTION_LOGO:
+diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
+index f0ed6863cc700..60a88a95a8e23 100644
+--- a/drivers/scsi/jazz_esp.c
++++ b/drivers/scsi/jazz_esp.c
+@@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev)
+ 	if (!esp->command_block)
+ 		goto fail_unmap_regs;
+ 
+-	host->irq = platform_get_irq(dev, 0);
++	host->irq = err = platform_get_irq(dev, 0);
++	if (err < 0)
++		goto fail_unmap_command_block;
+ 	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
+ 	if (err < 0)
+ 		goto fail_unmap_command_block;
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index fd18ac2acc135..3dd22da3153ff 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -1,7 +1,7 @@
+ /*******************************************************************
+  * This file is part of the Emulex Linux Device Driver for         *
+  * Fibre Channel Host Bus Adapters.                                *
+- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
++ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
+  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+  * EMULEX and SLI are trademarks of Emulex.                        *
+@@ -2044,13 +2044,12 @@ out_freeiocb:
+  * This routine issues a Port Login (PLOGI) command to a remote N_Port
+  * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
+  * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
+- * This routine constructs the proper feilds of the PLOGI IOCB and invokes
++ * This routine constructs the proper fields of the PLOGI IOCB and invokes
+  * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
+  *
+- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+- * will be incremented by 1 for holding the ndlp and the reference to ndlp
+- * will be stored into the context1 field of the IOCB for the completion
+- * callback function to the PLOGI ELS command.
++ * Note that the ndlp reference count will be incremented by 1 for holding
++ * the ndlp and the reference to ndlp will be stored into the context1 field
++ * of the IOCB for the completion callback function to the PLOGI ELS command.
+  *
+  * Return code
+  *   0 - Successfully issued a plogi for @vport
+@@ -2068,29 +2067,28 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
+ 	int ret;
+ 
+ 	ndlp = lpfc_findnode_did(vport, did);
++	if (!ndlp)
++		return 1;
+ 
+-	if (ndlp) {
+-		/* Defer the processing of the issue PLOGI until after the
+-		 * outstanding UNREG_RPI mbox command completes, unless we
+-		 * are going offline. This logic does not apply for Fabric DIDs
+-		 */
+-		if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+-		    ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+-		    !(vport->fc_flag & FC_OFFLINE_MODE)) {
+-			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+-					 "4110 Issue PLOGI x%x deferred "
+-					 "on NPort x%x rpi x%x Data: x%px\n",
+-					 ndlp->nlp_defer_did, ndlp->nlp_DID,
+-					 ndlp->nlp_rpi, ndlp);
+-
+-			/* We can only defer 1st PLOGI */
+-			if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
+-				ndlp->nlp_defer_did = did;
+-			return 0;
+-		}
++	/* Defer the processing of the issue PLOGI until after the
++	 * outstanding UNREG_RPI mbox command completes, unless we
++	 * are going offline. This logic does not apply for Fabric DIDs
++	 */
++	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
++	    ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
++	    !(vport->fc_flag & FC_OFFLINE_MODE)) {
++		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
++				 "4110 Issue PLOGI x%x deferred "
++				 "on NPort x%x rpi x%x Data: x%px\n",
++				 ndlp->nlp_defer_did, ndlp->nlp_DID,
++				 ndlp->nlp_rpi, ndlp);
++
++		/* We can only defer 1st PLOGI */
++		if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
++			ndlp->nlp_defer_did = did;
++		return 0;
+ 	}
+ 
+-	/* If ndlp is not NULL, we will bump the reference count on it */
+ 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
+ 				     ELS_CMD_PLOGI);
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 31e5455d280cb..1b1a57f46989a 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -643,7 +643,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+  */
+ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
+ {
+-	u8 i = 0;
++	u32 i = 0;
+ 	u16 deviceid;
+ 	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+ 	/* 8081 controllers need BAR shift to access MPI space
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
+index 84315560e8e1a..c6b0834e38061 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.c
++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
+@@ -1502,9 +1502,9 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
+ 
+ 	/* wait until Inbound DoorBell Clear Register toggled */
+ 	if (IS_SPCV_12G(pm8001_ha->pdev)) {
+-		max_wait_count = 4 * 1000 * 1000;/* 4 sec */
++		max_wait_count = 30 * 1000 * 1000; /* 30 sec */
+ 	} else {
+-		max_wait_count = 2 * 1000 * 1000;/* 2 sec */
++		max_wait_count = 15 * 1000 * 1000; /* 15 sec */
+ 	}
+ 	do {
+ 		udelay(1);
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index d021e51344f57..aef2f7cc89d36 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -2584,6 +2584,10 @@ qla2x00_get_host_stats(struct bsg_job *bsg_job)
+ 	}
+ 
+ 	data = kzalloc(response_len, GFP_KERNEL);
++	if (!data) {
++		kfree(req_data);
++		return -ENOMEM;
++	}
+ 
+ 	ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
+ 				    data, response_len);
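
The qla2x00_get_host_stats() hunk plugs a missing allocation check, and the unwind order matters: when the second buffer cannot be allocated, the earlier req_data buffer must be freed before returning. A compact sketch of that unwind, under hypothetical names:

	#include <linux/slab.h>

	static int alloc_pair(size_t a_len, size_t b_len)
	{
		void *a, *b;

		a = kzalloc(a_len, GFP_KERNEL);
		if (!a)
			return -ENOMEM;

		b = kzalloc(b_len, GFP_KERNEL);
		if (!b) {
			kfree(a);	/* unwind the earlier allocation */
			return -ENOMEM;
		}

		/* ... use a and b, then free both ... */
		kfree(b);
		kfree(a);
		return 0;
	}
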
+diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
+index 9e2e196bc2026..97c6f81b1d2a6 100644
+--- a/drivers/scsi/sni_53c710.c
++++ b/drivers/scsi/sni_53c710.c
+@@ -58,6 +58,7 @@ static int snirm710_probe(struct platform_device *dev)
+ 	struct NCR_700_Host_Parameters *hostdata;
+ 	struct Scsi_Host *host;
+ 	struct  resource *res;
++	int rc;
+ 
+ 	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ 	if (!res)
+@@ -83,7 +84,9 @@ static int snirm710_probe(struct platform_device *dev)
+ 		goto out_kfree;
+ 	host->this_id = 7;
+ 	host->base = base;
+-	host->irq = platform_get_irq(dev, 0);
++	host->irq = rc = platform_get_irq(dev, 0);
++	if (rc < 0)
++		goto out_put_host;
+ 	if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
+ 		printk(KERN_ERR "snirm710: request_irq failed!\n");
+ 		goto out_put_host;
+diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
+index 7de82f2c97579..d3489ac7ab28b 100644
+--- a/drivers/scsi/sun3x_esp.c
++++ b/drivers/scsi/sun3x_esp.c
+@@ -206,7 +206,9 @@ static int esp_sun3x_probe(struct platform_device *dev)
+ 	if (!esp->command_block)
+ 		goto fail_unmap_regs_dma;
+ 
+-	host->irq = platform_get_irq(dev, 0);
++	host->irq = err = platform_get_irq(dev, 0);
++	if (err < 0)
++		goto fail_unmap_command_block;
+ 	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
+ 			  "SUN3X ESP", esp);
+ 	if (err < 0)
+diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
+index 1a69949a4ea1c..b56d9b4e5f033 100644
+--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
++++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
+@@ -377,7 +377,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ 
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0) {
+-		err = -ENODEV;
++		err = irq;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+index 20acac6342eff..5828f94b8a7df 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+@@ -95,8 +95,10 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
+ 			return -EINTR;
+ 	}
+ 	ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static __poll_t snoop_file_poll(struct file *file,
+diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
+index b7f697666bdd7..06aaf03b194c0 100644
+--- a/drivers/soc/mediatek/mtk-pm-domains.c
++++ b/drivers/soc/mediatek/mtk-pm-domains.c
+@@ -487,8 +487,9 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
+ 
+ 		child_pd = scpsys_add_one_domain(scpsys, child);
+ 		if (IS_ERR(child_pd)) {
+-			dev_err_probe(scpsys->dev, PTR_ERR(child_pd),
+-				      "%pOF: failed to get child domain id\n", child);
++			ret = PTR_ERR(child_pd);
++			dev_err_probe(scpsys->dev, ret, "%pOF: failed to get child domain id\n",
++				      child);
+ 			goto err_put_node;
+ 		}
+ 
+diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
+index 24cd193dec550..eba7f76f9d61a 100644
+--- a/drivers/soc/qcom/mdt_loader.c
++++ b/drivers/soc/qcom/mdt_loader.c
+@@ -230,6 +230,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ 			break;
+ 		}
+ 
++		if (phdr->p_filesz > phdr->p_memsz) {
++			dev_err(dev,
++				"refusing to load segment %d with p_filesz > p_memsz\n",
++				i);
++			ret = -EINVAL;
++			break;
++		}
++
+ 		ptr = mem_region + offset;
+ 
+ 		if (phdr->p_filesz && phdr->p_offset < fw->size) {
+@@ -253,6 +261,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ 				break;
+ 			}
+ 
++			if (seg_fw->size != phdr->p_filesz) {
++				dev_err(dev,
++					"failed to load segment %d from truncated file %s\n",
++					i, fw_name);
++				release_firmware(seg_fw);
++				ret = -EINVAL;
++				break;
++			}
++
+ 			release_firmware(seg_fw);
+ 		}
+ 
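
Both mdt_loader hunks harden firmware parsing: a segment whose file data exceeds its in-memory size is rejected, and a segment file shorter than its program header claims is treated as truncated. A minimal sketch of the first check, with validate_segment() as a hypothetical helper:

	#include <linux/elf.h>
	#include <linux/errno.h>

	static int validate_segment(const struct elf32_phdr *phdr)
	{
		/*
		 * Zero-fill padding (p_memsz > p_filesz) is legal, but file
		 * data larger than the mapped region would overflow it.
		 */
		if (phdr->p_filesz > phdr->p_memsz)
			return -EINVAL;
		return 0;
	}
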
+diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
+index 209dcdca923f9..915d5bc3d46e6 100644
+--- a/drivers/soc/qcom/pdr_interface.c
++++ b/drivers/soc/qcom/pdr_interface.c
+@@ -153,7 +153,7 @@ static int pdr_register_listener(struct pdr_handle *pdr,
+ 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ 		pr_err("PDR: %s register listener failed: 0x%x\n",
+ 		       pds->service_path, resp.resp.error);
+-		return ret;
++		return -EREMOTEIO;
+ 	}
+ 
+ 	pds->state = resp.curr_state;
+diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
+index 7f21f31de09d6..0e776b20f6252 100644
+--- a/drivers/soc/tegra/regulators-tegra30.c
++++ b/drivers/soc/tegra/regulators-tegra30.c
+@@ -178,7 +178,7 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
+ 	 * survive the voltage drop if it's running on a higher frequency.
+ 	 */
+ 	if (!cpu_min_uV_consumers)
+-		cpu_min_uV = cpu_uV;
++		cpu_min_uV = max(cpu_uV, cpu_min_uV);
+ 
+ 	/*
+ 	 * Bootloader shall set up voltages correctly, but if it
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index 46885429928ab..4ec29338ce9a1 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -705,7 +705,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
+ 	struct sdw_slave *slave, *_s;
+ 	struct sdw_slave_id id;
+ 	struct sdw_msg msg;
+-	bool found = false;
++	bool found;
+ 	int count = 0, ret;
+ 	u64 addr;
+ 
+@@ -737,6 +737,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
+ 
+ 		sdw_extract_slave_id(bus, addr, &id);
+ 
++		found = false;
+ 		/* Now compare with entries */
+ 		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
+ 			if (sdw_compare_devid(slave, id) == 0) {
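
The sdw_program_device_num() change moves the "found = false" reset inside the enumeration loop: initialized only once, the flag stays true after the first match and every later unknown device is misclassified. The shape of the fix, with all helpers hypothetical:

	bool found;
	int i;

	while (another_device_attached()) {		/* hypothetical */
		found = false;				/* reset once per device */
		for (i = 0; i < num_slaves; i++) {	/* hypothetical table */
			if (matches(slaves[i], id)) {
				found = true;
				break;
			}
		}
		if (!found)
			handle_unknown_device();	/* hypothetical */
	}
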
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index 1099b5d1262be..a418c3c7001c0 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -1375,8 +1375,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
+ 	}
+ 
+ 	ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
+-	if (ret)
++	if (ret) {
++		/*
++		 * sdw_release_master_stream will release s_rt in slave_rt_list in
++		 * stream_error case, but s_rt is only added to slave_rt_list
++		 * when sdw_config_stream is successful, so free s_rt explicitly
++		 * when sdw_config_stream is failed.
++		 */
++		kfree(s_rt);
+ 		goto stream_error;
++	}
+ 
+ 	list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list);
+ 
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index a2886ee44e4cb..5d98611dd999d 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -200,7 +200,7 @@ static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
+ 				spi_controller_get_devdata(controller);
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(fsl_lpspi->dev);
++	ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
+ 	if (ret < 0) {
+ 		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
+ 		return ret;
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index e4a8d203f9408..d0e5aa18b7bad 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -707,6 +707,11 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
+ 	struct resource mem;
+ 	int irq, type;
+ 	int ret;
++	bool spisel_boot = false;
++#if IS_ENABLED(CONFIG_FSL_SOC)
++	struct mpc8xxx_spi_probe_info *pinfo = NULL;
++#endif
++
+ 
+ 	ret = of_mpc8xxx_spi_probe(ofdev);
+ 	if (ret)
+@@ -715,9 +720,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
+ 	type = fsl_spi_get_type(&ofdev->dev);
+ 	if (type == TYPE_FSL) {
+ 		struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+-		bool spisel_boot = false;
+ #if IS_ENABLED(CONFIG_FSL_SOC)
+-		struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
++		pinfo = to_of_pinfo(pdata);
+ 
+ 		spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
+ 		if (spisel_boot) {
+@@ -746,15 +750,24 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
+ 
+ 	ret = of_address_to_resource(np, 0, &mem);
+ 	if (ret)
+-		return ret;
++		goto unmap_out;
+ 
+ 	irq = platform_get_irq(ofdev, 0);
+-	if (irq < 0)
+-		return irq;
++	if (irq < 0) {
++		ret = irq;
++		goto unmap_out;
++	}
+ 
+ 	master = fsl_spi_probe(dev, &mem, irq);
+ 
+ 	return PTR_ERR_OR_ZERO(master);
++
++unmap_out:
++#if IS_ENABLED(CONFIG_FSL_SOC)
++	if (spisel_boot)
++		iounmap(pinfo->immr_spi_cs);
++#endif
++	return ret;
+ }
+ 
+ static int of_fsl_spi_remove(struct platform_device *ofdev)
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 936ef54e09037..0d75080da6480 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -476,7 +476,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
+ 	return 1;
+ }
+ 
+-static void rockchip_spi_config(struct rockchip_spi *rs,
++static int rockchip_spi_config(struct rockchip_spi *rs,
+ 		struct spi_device *spi, struct spi_transfer *xfer,
+ 		bool use_dma, bool slave_mode)
+ {
+@@ -521,7 +521,9 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
+ 		 * ctlr->bits_per_word_mask, so this shouldn't
+ 		 * happen
+ 		 */
+-		unreachable();
++		dev_err(rs->dev, "unknown bits per word: %d\n",
++			xfer->bits_per_word);
++		return -EINVAL;
+ 	}
+ 
+ 	if (use_dma) {
+@@ -554,6 +556,8 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
+ 	 */
+ 	writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
+ 			rs->regs + ROCKCHIP_SPI_BAUDR);
++
++	return 0;
+ }
+ 
+ static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
+@@ -577,6 +581,7 @@ static int rockchip_spi_transfer_one(
+ 		struct spi_transfer *xfer)
+ {
+ 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
++	int ret;
+ 	bool use_dma;
+ 
+ 	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
+@@ -596,7 +601,9 @@ static int rockchip_spi_transfer_one(
+ 
+ 	use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
+ 
+-	rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
++	ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
++	if (ret)
++		return ret;
+ 
+ 	if (use_dma)
+ 		return rockchip_spi_prepare_dma(rs, ctlr, xfer);
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 25c0764610119..7f0244a246e92 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -1803,7 +1803,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
+ 	struct reset_control *rst;
+ 	int ret;
+ 
+-	master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
++	master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
+ 	if (!master) {
+ 		dev_err(&pdev->dev, "spi master allocation failed\n");
+ 		return -ENOMEM;
+@@ -1821,18 +1821,16 @@ static int stm32_spi_probe(struct platform_device *pdev)
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	spi->base = devm_ioremap_resource(&pdev->dev, res);
+-	if (IS_ERR(spi->base)) {
+-		ret = PTR_ERR(spi->base);
+-		goto err_master_put;
+-	}
++	if (IS_ERR(spi->base))
++		return PTR_ERR(spi->base);
+ 
+ 	spi->phys_addr = (dma_addr_t)res->start;
+ 
+ 	spi->irq = platform_get_irq(pdev, 0);
+-	if (spi->irq <= 0) {
+-		ret = dev_err_probe(&pdev->dev, spi->irq, "failed to get irq\n");
+-		goto err_master_put;
+-	}
++	if (spi->irq <= 0)
++		return dev_err_probe(&pdev->dev, spi->irq,
++				     "failed to get irq\n");
++
+ 	ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
+ 					spi->cfg->irq_handler_event,
+ 					spi->cfg->irq_handler_thread,
+@@ -1840,20 +1838,20 @@ static int stm32_spi_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
+ 			ret);
+-		goto err_master_put;
++		return ret;
+ 	}
+ 
+ 	spi->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(spi->clk)) {
+ 		ret = PTR_ERR(spi->clk);
+ 		dev_err(&pdev->dev, "clk get failed: %d\n", ret);
+-		goto err_master_put;
++		return ret;
+ 	}
+ 
+ 	ret = clk_prepare_enable(spi->clk);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
+-		goto err_master_put;
++		return ret;
+ 	}
+ 	spi->clk_rate = clk_get_rate(spi->clk);
+ 	if (!spi->clk_rate) {
+@@ -1929,7 +1927,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(&pdev->dev);
+ 	pm_runtime_enable(&pdev->dev);
+ 
+-	ret = devm_spi_register_master(&pdev->dev, master);
++	ret = spi_register_master(master);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "spi master registration failed: %d\n",
+ 			ret);
+@@ -1949,8 +1947,6 @@ err_dma_release:
+ 		dma_release_channel(spi->dma_rx);
+ err_clk_disable:
+ 	clk_disable_unprepare(spi->clk);
+-err_master_put:
+-	spi_master_put(master);
+ 
+ 	return ret;
+ }
+@@ -1960,6 +1956,7 @@ static int stm32_spi_remove(struct platform_device *pdev)
+ 	struct spi_master *master = platform_get_drvdata(pdev);
+ 	struct stm32_spi *spi = spi_master_get_devdata(master);
+ 
++	spi_unregister_master(master);
+ 	spi->cfg->disable(spi);
+ 
+ 	if (master->dma_tx)
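
The stm32 driver moves from spi_alloc_master() plus devm registration to devm_spi_alloc_master() plus explicit spi_register_master()/spi_unregister_master(). Early error paths can then simply return (devres frees the controller), and remove() can unregister before the hardware is quiesced, avoiding a use-after-free of the controller. A minimal sketch of the pairing, with my_spi, my_probe and my_remove hypothetical:

	#include <linux/platform_device.h>
	#include <linux/spi/spi.h>

	struct my_spi { void __iomem *regs; };

	static int my_probe(struct platform_device *pdev)
	{
		struct spi_master *master;

		master = devm_spi_alloc_master(&pdev->dev,
					       sizeof(struct my_spi));
		if (!master)
			return -ENOMEM;
		platform_set_drvdata(pdev, master);

		/* ... claim clocks/irq/dma; on failure just return, the
		 * devm allocation is released automatically ... */

		return spi_register_master(master);
	}

	static int my_remove(struct platform_device *pdev)
	{
		struct spi_master *master = platform_get_drvdata(pdev);

		spi_unregister_master(master);	/* stop new transfers first */
		/* ... then disable the IP, clocks and DMA channels ... */
		return 0;
	}
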
+diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
+index c8fa6ee18ae77..7162387b9f96a 100644
+--- a/drivers/spi/spi-zynqmp-gqspi.c
++++ b/drivers/spi/spi-zynqmp-gqspi.c
+@@ -157,6 +157,7 @@ enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
+  * @data_completion:	completion structure
+  */
+ struct zynqmp_qspi {
++	struct spi_controller *ctlr;
+ 	void __iomem *regs;
+ 	struct clk *refclk;
+ 	struct clk *pclk;
+@@ -173,6 +174,7 @@ struct zynqmp_qspi {
+ 	u32 genfifoentry;
+ 	enum mode_type mode;
+ 	struct completion data_completion;
++	struct mutex op_lock;
+ };
+ 
+ /**
+@@ -486,24 +488,10 @@ static int zynqmp_qspi_setup_op(struct spi_device *qspi)
+ {
+ 	struct spi_controller *ctlr = qspi->master;
+ 	struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
+-	struct device *dev = &ctlr->dev;
+-	int ret;
+ 
+ 	if (ctlr->busy)
+ 		return -EBUSY;
+ 
+-	ret = clk_enable(xqspi->refclk);
+-	if (ret) {
+-		dev_err(dev, "Cannot enable device clock.\n");
+-		return ret;
+-	}
+-
+-	ret = clk_enable(xqspi->pclk);
+-	if (ret) {
+-		dev_err(dev, "Cannot enable APB clock.\n");
+-		clk_disable(xqspi->refclk);
+-		return ret;
+-	}
+ 	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
+ 
+ 	return 0;
+@@ -520,7 +508,7 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
+ {
+ 	u32 count = 0, intermediate;
+ 
+-	while ((xqspi->bytes_to_transfer > 0) && (count < size)) {
++	while ((xqspi->bytes_to_transfer > 0) && (count < size) && (xqspi->txbuf)) {
+ 		memcpy(&intermediate, xqspi->txbuf, 4);
+ 		zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
+ 
+@@ -579,7 +567,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
+ 		genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ 		genfifoentry |= GQSPI_GENFIFO_TX;
+ 		transfer_len = xqspi->bytes_to_transfer;
+-	} else {
++	} else if (xqspi->rxbuf) {
+ 		genfifoentry &= ~GQSPI_GENFIFO_TX;
+ 		genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ 		genfifoentry |= GQSPI_GENFIFO_RX;
+@@ -587,6 +575,11 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
+ 			transfer_len = xqspi->dma_rx_bytes;
+ 		else
+ 			transfer_len = xqspi->bytes_to_receive;
++	} else {
++		/* Sending dummy circles here */
++		genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
++		genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
++		transfer_len = xqspi->bytes_to_transfer;
+ 	}
+ 	genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
+ 	xqspi->genfifoentry = genfifoentry;
+@@ -738,7 +731,7 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
+  * zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
+  * @xqspi:	xqspi is a pointer to the GQSPI instance.
+  */
+-static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
++static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+ {
+ 	u32 rx_bytes, rx_rem, config_reg;
+ 	dma_addr_t addr;
+@@ -752,7 +745,7 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+ 		zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ 		xqspi->mode = GQSPI_MODE_IO;
+ 		xqspi->dma_rx_bytes = 0;
+-		return;
++		return 0;
+ 	}
+ 
+ 	rx_rem = xqspi->bytes_to_receive % 4;
+@@ -760,8 +753,10 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+ 
+ 	addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
+ 			      rx_bytes, DMA_FROM_DEVICE);
+-	if (dma_mapping_error(xqspi->dev, addr))
++	if (dma_mapping_error(xqspi->dev, addr)) {
+ 		dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
++		return -ENOMEM;
++	}
+ 
+ 	xqspi->dma_rx_bytes = rx_bytes;
+ 	xqspi->dma_addr = addr;
+@@ -782,6 +777,8 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+ 
+ 	/* Write the number of bytes to transfer */
+ 	zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
++
++	return 0;
+ }
+ 
+ /**
+@@ -818,11 +815,17 @@ static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
+  * @genfifoentry:	genfifoentry is pointer to the variable in which
+  *			GENFIFO	mask is returned to calling function
+  */
+-static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
++static int zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
+ 				u32 genfifoentry)
+ {
++	int ret;
++
++	ret = zynqmp_qspi_setuprxdma(xqspi);
++	if (ret)
++		return ret;
+ 	zynqmp_qspi_fillgenfifo(xqspi, rx_nbits, genfifoentry);
+-	zynqmp_qspi_setuprxdma(xqspi);
++
++	return 0;
+ }
+ 
+ /**
+@@ -835,10 +838,13 @@ static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
+  */
+ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
+ {
+-	struct spi_controller *ctlr = dev_get_drvdata(dev);
+-	struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
++	struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
++	struct spi_controller *ctlr = xqspi->ctlr;
++	int ret;
+ 
+-	spi_controller_suspend(ctlr);
++	ret = spi_controller_suspend(ctlr);
++	if (ret)
++		return ret;
+ 
+ 	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+ 
+@@ -856,27 +862,13 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
+  */
+ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
+ {
+-	struct spi_controller *ctlr = dev_get_drvdata(dev);
+-	struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
+-	int ret = 0;
+-
+-	ret = clk_enable(xqspi->pclk);
+-	if (ret) {
+-		dev_err(dev, "Cannot enable APB clock.\n");
+-		return ret;
+-	}
++	struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
++	struct spi_controller *ctlr = xqspi->ctlr;
+ 
+-	ret = clk_enable(xqspi->refclk);
+-	if (ret) {
+-		dev_err(dev, "Cannot enable device clock.\n");
+-		clk_disable(xqspi->pclk);
+-		return ret;
+-	}
++	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
+ 
+ 	spi_controller_resume(ctlr);
+ 
+-	clk_disable(xqspi->refclk);
+-	clk_disable(xqspi->pclk);
+ 	return 0;
+ }
+ 
+@@ -890,10 +882,10 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
+  */
+ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
+ {
+-	struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
++	struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
+ 
+-	clk_disable(xqspi->refclk);
+-	clk_disable(xqspi->pclk);
++	clk_disable_unprepare(xqspi->refclk);
++	clk_disable_unprepare(xqspi->pclk);
+ 
+ 	return 0;
+ }
+@@ -908,19 +900,19 @@ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
+  */
+ static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
+ {
+-	struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
++	struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
+ 	int ret;
+ 
+-	ret = clk_enable(xqspi->pclk);
++	ret = clk_prepare_enable(xqspi->pclk);
+ 	if (ret) {
+ 		dev_err(dev, "Cannot enable APB clock.\n");
+ 		return ret;
+ 	}
+ 
+-	ret = clk_enable(xqspi->refclk);
++	ret = clk_prepare_enable(xqspi->refclk);
+ 	if (ret) {
+ 		dev_err(dev, "Cannot enable device clock.\n");
+-		clk_disable(xqspi->pclk);
++		clk_disable_unprepare(xqspi->pclk);
+ 		return ret;
+ 	}
+ 
+@@ -944,25 +936,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 	struct zynqmp_qspi *xqspi = spi_controller_get_devdata
+ 				    (mem->spi->master);
+ 	int err = 0, i;
+-	u8 *tmpbuf;
+ 	u32 genfifoentry = 0;
++	u16 opcode = op->cmd.opcode;
++	u64 opaddr;
+ 
+ 	dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
+ 		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ 		op->dummy.buswidth, op->data.buswidth);
+ 
++	mutex_lock(&xqspi->op_lock);
+ 	zynqmp_qspi_config_op(xqspi, mem->spi);
+ 	zynqmp_qspi_chipselect(mem->spi, false);
+ 	genfifoentry |= xqspi->genfifocs;
+ 	genfifoentry |= xqspi->genfifobus;
+ 
+ 	if (op->cmd.opcode) {
+-		tmpbuf = kzalloc(op->cmd.nbytes, GFP_KERNEL | GFP_DMA);
+-		if (!tmpbuf)
+-			return -ENOMEM;
+-		tmpbuf[0] = op->cmd.opcode;
+ 		reinit_completion(&xqspi->data_completion);
+-		xqspi->txbuf = tmpbuf;
++		xqspi->txbuf = &opcode;
+ 		xqspi->rxbuf = NULL;
+ 		xqspi->bytes_to_transfer = op->cmd.nbytes;
+ 		xqspi->bytes_to_receive = 0;
+@@ -973,16 +963,15 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 		zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ 				   GQSPI_IER_GENFIFOEMPTY_MASK |
+ 				   GQSPI_IER_TXNOT_FULL_MASK);
+-		if (!wait_for_completion_interruptible_timeout
++		if (!wait_for_completion_timeout
+ 		    (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+ 			err = -ETIMEDOUT;
+-			kfree(tmpbuf);
+ 			goto return_err;
+ 		}
+-		kfree(tmpbuf);
+ 	}
+ 
+ 	if (op->addr.nbytes) {
++		xqspi->txbuf = &opaddr;
+ 		for (i = 0; i < op->addr.nbytes; i++) {
+ 			*(((u8 *)xqspi->txbuf) + i) = op->addr.val >>
+ 					(8 * (op->addr.nbytes - i - 1));
+@@ -1001,7 +990,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 				   GQSPI_IER_TXEMPTY_MASK |
+ 				   GQSPI_IER_GENFIFOEMPTY_MASK |
+ 				   GQSPI_IER_TXNOT_FULL_MASK);
+-		if (!wait_for_completion_interruptible_timeout
++		if (!wait_for_completion_timeout
+ 		    (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+ 			err = -ETIMEDOUT;
+ 			goto return_err;
+@@ -1009,32 +998,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 	}
+ 
+ 	if (op->dummy.nbytes) {
+-		tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL | GFP_DMA);
+-		if (!tmpbuf)
+-			return -ENOMEM;
+-		memset(tmpbuf, 0xff, op->dummy.nbytes);
+-		reinit_completion(&xqspi->data_completion);
+-		xqspi->txbuf = tmpbuf;
++		xqspi->txbuf = NULL;
+ 		xqspi->rxbuf = NULL;
+-		xqspi->bytes_to_transfer = op->dummy.nbytes;
++		/*
++		 * xqspi->bytes_to_transfer here represents the dummy circles
++		 * which need to be sent.
++		 */
++		xqspi->bytes_to_transfer = op->dummy.nbytes * 8 / op->dummy.buswidth;
+ 		xqspi->bytes_to_receive = 0;
+-		zynqmp_qspi_write_op(xqspi, op->dummy.buswidth,
++		/*
++		 * Using op->data.buswidth instead of op->dummy.buswidth here because
++		 * we need to use it to configure the correct SPI mode.
++		 */
++		zynqmp_qspi_write_op(xqspi, op->data.buswidth,
+ 				     genfifoentry);
+ 		zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ 				   zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ 				   GQSPI_CFG_START_GEN_FIFO_MASK);
+-		zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+-				   GQSPI_IER_TXEMPTY_MASK |
+-				   GQSPI_IER_GENFIFOEMPTY_MASK |
+-				   GQSPI_IER_TXNOT_FULL_MASK);
+-		if (!wait_for_completion_interruptible_timeout
+-		    (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+-			err = -ETIMEDOUT;
+-			kfree(tmpbuf);
+-			goto return_err;
+-		}
+-
+-		kfree(tmpbuf);
+ 	}
+ 
+ 	if (op->data.nbytes) {
+@@ -1059,8 +1039,11 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 			xqspi->rxbuf = (u8 *)op->data.buf.in;
+ 			xqspi->bytes_to_receive = op->data.nbytes;
+ 			xqspi->bytes_to_transfer = 0;
+-			zynqmp_qspi_read_op(xqspi, op->data.buswidth,
++			err = zynqmp_qspi_read_op(xqspi, op->data.buswidth,
+ 					    genfifoentry);
++			if (err)
++				goto return_err;
++
+ 			zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ 					   zynqmp_gqspi_read
+ 					   (xqspi, GQSPI_CONFIG_OFST) |
+@@ -1076,7 +1059,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 						   GQSPI_IER_RXEMPTY_MASK);
+ 			}
+ 		}
+-		if (!wait_for_completion_interruptible_timeout
++		if (!wait_for_completion_timeout
+ 		    (&xqspi->data_completion, msecs_to_jiffies(1000)))
+ 			err = -ETIMEDOUT;
+ 	}
+@@ -1084,6 +1067,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ return_err:
+ 
+ 	zynqmp_qspi_chipselect(mem->spi, true);
++	mutex_unlock(&xqspi->op_lock);
+ 
+ 	return err;
+ }
+@@ -1120,6 +1104,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 
+ 	xqspi = spi_controller_get_devdata(ctlr);
+ 	xqspi->dev = dev;
++	xqspi->ctlr = ctlr;
+ 	platform_set_drvdata(pdev, xqspi);
+ 
+ 	xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
+@@ -1135,13 +1120,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 		goto remove_master;
+ 	}
+ 
+-	init_completion(&xqspi->data_completion);
+-
+ 	xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
+ 	if (IS_ERR(xqspi->refclk)) {
+ 		dev_err(dev, "ref_clk clock not found.\n");
+ 		ret = PTR_ERR(xqspi->refclk);
+-		goto clk_dis_pclk;
++		goto remove_master;
+ 	}
+ 
+ 	ret = clk_prepare_enable(xqspi->pclk);
+@@ -1156,15 +1139,24 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 		goto clk_dis_pclk;
+ 	}
+ 
++	init_completion(&xqspi->data_completion);
++
++	mutex_init(&xqspi->op_lock);
++
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ 	pm_runtime_set_active(&pdev->dev);
+ 	pm_runtime_enable(&pdev->dev);
++
++	ret = pm_runtime_get_sync(&pdev->dev);
++	if (ret < 0) {
++		dev_err(&pdev->dev, "Failed to pm_runtime_get_sync: %d\n", ret);
++		goto clk_dis_all;
++	}
++
+ 	/* QSPI controller initializations */
+ 	zynqmp_qspi_init_hw(xqspi);
+ 
+-	pm_runtime_mark_last_busy(&pdev->dev);
+-	pm_runtime_put_autosuspend(&pdev->dev);
+ 	xqspi->irq = platform_get_irq(pdev, 0);
+ 	if (xqspi->irq <= 0) {
+ 		ret = -ENXIO;
+@@ -1178,6 +1170,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 		goto clk_dis_all;
+ 	}
+ 
++	dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ 	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ 	ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ 	ctlr->mem_ops = &zynqmp_qspi_mem_ops;
+@@ -1187,6 +1180,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
+ 			    SPI_TX_DUAL | SPI_TX_QUAD;
+ 	ctlr->dev.of_node = np;
++	ctlr->auto_runtime_pm = true;
+ 
+ 	ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ 	if (ret) {
+@@ -1194,9 +1188,13 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 		goto clk_dis_all;
+ 	}
+ 
++	pm_runtime_mark_last_busy(&pdev->dev);
++	pm_runtime_put_autosuspend(&pdev->dev);
++
+ 	return 0;
+ 
+ clk_dis_all:
++	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_set_suspended(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 	clk_disable_unprepare(xqspi->refclk);
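
Two themes run through the zynqmp-gqspi changes: a new op_lock mutex serializes each complete spi-mem operation (command, address, dummy and data phases), and the interruptible completion waits become plain timeouts so a signal to the caller cannot abort a flash operation halfway. A minimal sketch of the serialized shape, under hypothetical names:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/mutex.h>

	struct my_qspi {
		struct mutex op_lock;		/* one spi-mem op at a time */
		struct completion done;
	};

	static int my_exec_op(struct my_qspi *xq)
	{
		int err = 0;

		mutex_lock(&xq->op_lock);
		reinit_completion(&xq->done);
		/* ... kick off cmd/addr/dummy/data phases ... */
		if (!wait_for_completion_timeout(&xq->done,
						 msecs_to_jiffies(1000)))
			err = -ETIMEDOUT;	/* not signal-interruptible */
		mutex_unlock(&xq->op_lock);
		return err;
	}
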
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 927c2a28011f6..8da4fe475b84b 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -2496,6 +2496,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ 
+ 	ctlr = __spi_alloc_controller(dev, size, slave);
+ 	if (ctlr) {
++		ctlr->devm_allocated = true;
+ 		*ptr = ctlr;
+ 		devres_add(dev, ptr);
+ 	} else {
+@@ -2842,11 +2843,6 @@ int devm_spi_register_controller(struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(devm_spi_register_controller);
+ 
+-static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
+-{
+-	return *(struct spi_controller **)res == ctlr;
+-}
+-
+ static int __unregister(struct device *dev, void *null)
+ {
+ 	spi_unregister_device(to_spi_device(dev));
+@@ -2893,8 +2889,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ 	/* Release the last reference on the controller if its driver
+ 	 * has not yet been converted to devm_spi_alloc_master/slave().
+ 	 */
+-	if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
+-			 devm_spi_match_controller, ctlr))
++	if (!ctlr->devm_allocated)
+ 		put_device(&ctlr->dev);
+ 
+ 	/* free bus id */
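
The spi core change above replaces a devres_find() lookup with a devm_allocated flag set at allocation time: spi_unregister_controller() must drop the final device reference only for controllers it does not manage through devres, and a boolean recorded by the allocator answers that unambiguously. A compact sketch of the idea, with struct foo hypothetical:

	#include <linux/device.h>

	struct foo {
		struct device dev;
		bool devm_allocated;	/* set only by the devm_ allocator */
	};

	static void foo_unregister(struct foo *f)
	{
		/*
		 * devm-allocated objects are released by devres; drop the
		 * reference ourselves only when we own it.
		 */
		if (!f->devm_allocated)
			put_device(&f->dev);
	}
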
+diff --git a/drivers/staging/comedi/drivers/tests/ni_routes_test.c b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
+index 4061b3b5f8e9b..68defeb53de4a 100644
+--- a/drivers/staging/comedi/drivers/tests/ni_routes_test.c
++++ b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
+@@ -217,7 +217,8 @@ void test_ni_assign_device_routes(void)
+ 	const u8 *table, *oldtable;
+ 
+ 	init_pci_6070e();
+-	ni_assign_device_routes(ni_eseries, pci_6070e, &private.routing_tables);
++	ni_assign_device_routes(ni_eseries, pci_6070e, NULL,
++				&private.routing_tables);
+ 	devroutes = private.routing_tables.valid_routes;
+ 	table = private.routing_tables.route_values;
+ 
+@@ -253,7 +254,8 @@ void test_ni_assign_device_routes(void)
+ 	olddevroutes = devroutes;
+ 	oldtable = table;
+ 	init_pci_6220();
+-	ni_assign_device_routes(ni_mseries, pci_6220, &private.routing_tables);
++	ni_assign_device_routes(ni_mseries, pci_6220, NULL,
++				&private.routing_tables);
+ 	devroutes = private.routing_tables.valid_routes;
+ 	table = private.routing_tables.route_values;
+ 
+diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
+index c368082aae1aa..0f4655d7d520a 100644
+--- a/drivers/staging/fwserial/fwserial.c
++++ b/drivers/staging/fwserial/fwserial.c
+@@ -1218,13 +1218,12 @@ static int get_serial_info(struct tty_struct *tty,
+ 	struct fwtty_port *port = tty->driver_data;
+ 
+ 	mutex_lock(&port->port.mutex);
+-	ss->type =  PORT_UNKNOWN;
+-	ss->line =  port->port.tty->index;
+-	ss->flags = port->port.flags;
+-	ss->xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
++	ss->line = port->index;
+ 	ss->baud_base = 400000000;
+-	ss->close_delay = port->port.close_delay;
++	ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10;
++	ss->closing_wait = 3000;
+ 	mutex_unlock(&port->port.mutex);
++
+ 	return 0;
+ }
+ 
+@@ -1232,20 +1231,20 @@ static int set_serial_info(struct tty_struct *tty,
+ 			   struct serial_struct *ss)
+ {
+ 	struct fwtty_port *port = tty->driver_data;
++	unsigned int cdelay;
+ 
+-	if (ss->irq != 0 || ss->port != 0 || ss->custom_divisor != 0 ||
+-	    ss->baud_base != 400000000)
+-		return -EPERM;
++	cdelay = msecs_to_jiffies(ss->close_delay * 10);
+ 
+ 	mutex_lock(&port->port.mutex);
+ 	if (!capable(CAP_SYS_ADMIN)) {
+-		if (((ss->flags & ~ASYNC_USR_MASK) !=
++		if (cdelay != port->port.close_delay ||
++		    ((ss->flags & ~ASYNC_USR_MASK) !=
+ 		     (port->port.flags & ~ASYNC_USR_MASK))) {
+ 			mutex_unlock(&port->port.mutex);
+ 			return -EPERM;
+ 		}
+ 	}
+-	port->port.close_delay = ss->close_delay * HZ / 100;
++	port->port.close_delay = cdelay;
+ 	mutex_unlock(&port->port.mutex);
+ 
+ 	return 0;
+diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
+index 607378bfebb7e..a520f7f213db0 100644
+--- a/drivers/staging/greybus/uart.c
++++ b/drivers/staging/greybus/uart.c
+@@ -614,10 +614,12 @@ static int get_serial_info(struct tty_struct *tty,
+ 	ss->line = gb_tty->minor;
+ 	ss->xmit_fifo_size = 16;
+ 	ss->baud_base = 9600;
+-	ss->close_delay = gb_tty->port.close_delay / 10;
++	ss->close_delay = jiffies_to_msecs(gb_tty->port.close_delay) / 10;
+ 	ss->closing_wait =
+ 		gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+-		ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;
++		ASYNC_CLOSING_WAIT_NONE :
++		jiffies_to_msecs(gb_tty->port.closing_wait) / 10;
++
+ 	return 0;
+ }
+ 
+@@ -629,17 +631,16 @@ static int set_serial_info(struct tty_struct *tty,
+ 	unsigned int close_delay;
+ 	int retval = 0;
+ 
+-	close_delay = ss->close_delay * 10;
++	close_delay = msecs_to_jiffies(ss->close_delay * 10);
+ 	closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+-			ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
++			ASYNC_CLOSING_WAIT_NONE :
++			msecs_to_jiffies(ss->closing_wait * 10);
+ 
+ 	mutex_lock(&gb_tty->port.mutex);
+ 	if (!capable(CAP_SYS_ADMIN)) {
+ 		if ((close_delay != gb_tty->port.close_delay) ||
+ 		    (closing_wait != gb_tty->port.closing_wait))
+ 			retval = -EPERM;
+-		else
+-			retval = -EOPNOTSUPP;
+ 	} else {
+ 		gb_tty->port.close_delay = close_delay;
+ 		gb_tty->port.closing_wait = closing_wait;
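
The fwserial and greybus hunks, like the moxa one below, converge on the same unit fix: tty_port keeps close_delay and closing_wait in jiffies, while the serial_struct ABI expresses them in hundredths of a second. A minimal sketch of the two conversions, with hypothetical helper names:

	#include <linux/jiffies.h>

	/* jiffies -> centiseconds, for get_serial_info() */
	static unsigned int delay_to_cs(unsigned long delay_jiffies)
	{
		return jiffies_to_msecs(delay_jiffies) / 10;
	}

	/* centiseconds -> jiffies, for set_serial_info() */
	static unsigned long cs_to_delay(unsigned int cs)
	{
		return msecs_to_jiffies(cs * 10);
	}
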
+diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+index 7ca7378b18592..0ab67b2aec671 100644
+--- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
++++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+@@ -843,8 +843,10 @@ static int lm3554_probe(struct i2c_client *client)
+ 		return -ENOMEM;
+ 
+ 	flash->pdata = lm3554_platform_data_func(client);
+-	if (IS_ERR(flash->pdata))
+-		return PTR_ERR(flash->pdata);
++	if (IS_ERR(flash->pdata)) {
++		err = PTR_ERR(flash->pdata);
++		goto fail1;
++	}
+ 
+ 	v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
+ 	flash->sd.internal_ops = &lm3554_internal_ops;
+@@ -856,7 +858,7 @@ static int lm3554_probe(struct i2c_client *client)
+ 				   ARRAY_SIZE(lm3554_controls));
+ 	if (ret) {
+ 		dev_err(&client->dev, "error initialize a ctrl_handler.\n");
+-		goto fail2;
++		goto fail3;
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
+@@ -865,14 +867,14 @@ static int lm3554_probe(struct i2c_client *client)
+ 
+ 	if (flash->ctrl_handler.error) {
+ 		dev_err(&client->dev, "ctrl_handler error.\n");
+-		goto fail2;
++		goto fail3;
+ 	}
+ 
+ 	flash->sd.ctrl_handler = &flash->ctrl_handler;
+ 	err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
+ 	if (err) {
+ 		dev_err(&client->dev, "error initialize a media entity.\n");
+-		goto fail1;
++		goto fail2;
+ 	}
+ 
+ 	flash->sd.entity.function = MEDIA_ENT_F_FLASH;
+@@ -884,14 +886,15 @@ static int lm3554_probe(struct i2c_client *client)
+ 	err = lm3554_gpio_init(client);
+ 	if (err) {
+ 		dev_err(&client->dev, "gpio request/direction_output fail");
+-		goto fail2;
++		goto fail3;
+ 	}
+ 	return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
+-fail2:
++fail3:
+ 	media_entity_cleanup(&flash->sd.entity);
+ 	v4l2_ctrl_handler_free(&flash->ctrl_handler);
+-fail1:
++fail2:
+ 	v4l2_device_unregister_subdev(&flash->sd);
++fail1:
+ 	kfree(flash);
+ 
+ 	return err;
+diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+index 2ae50decfc8bd..9da82855552de 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
++++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+@@ -948,10 +948,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
+ 		dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
+ 		while (count--) {
+ 			dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
+-			if (!dis_buf) {
+-				kfree(s3a_buf);
++			if (!dis_buf)
+ 				goto error;
+-			}
+ 			if (atomisp_css_allocate_stat_buffers(
+ 				asd, stream_id, NULL, dis_buf, NULL)) {
+ 				kfree(dis_buf);
+diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+index f13af2329f486..0168f9839c905 100644
+--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
++++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+@@ -857,16 +857,17 @@ static void free_private_pages(struct hmm_buffer_object *bo,
+ 	kfree(bo->page_obj);
+ }
+ 
+-static void free_user_pages(struct hmm_buffer_object *bo)
++static void free_user_pages(struct hmm_buffer_object *bo,
++			    unsigned int page_nr)
+ {
+ 	int i;
+ 
+ 	hmm_mem_stat.usr_size -= bo->pgnr;
+ 
+ 	if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
+-		unpin_user_pages(bo->pages, bo->pgnr);
++		unpin_user_pages(bo->pages, page_nr);
+ 	} else {
+-		for (i = 0; i < bo->pgnr; i++)
++		for (i = 0; i < page_nr; i++)
+ 			put_page(bo->pages[i]);
+ 	}
+ 	kfree(bo->pages);
+@@ -942,6 +943,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
+ 		dev_err(atomisp_dev,
+ 			"get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
+ 			bo->pgnr, page_nr);
++		if (page_nr < 0)
++			page_nr = 0;
+ 		goto out_of_mem;
+ 	}
+ 
+@@ -954,7 +957,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
+ 
+ out_of_mem:
+ 
+-	free_user_pages(bo);
++	free_user_pages(bo, page_nr);
+ 
+ 	return -ENOMEM;
+ }
+@@ -1037,7 +1040,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
+ 	if (bo->type == HMM_BO_PRIVATE)
+ 		free_private_pages(bo, &dynamic_pool, &reserved_pool);
+ 	else if (bo->type == HMM_BO_USER)
+-		free_user_pages(bo);
++		free_user_pages(bo, bo->pgnr);
+ 	else
+ 		dev_err(atomisp_dev, "invalid buffer type.\n");
+ 	mutex_unlock(&bo->mutex);
+diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
+index dae9073e7d3cc..085397045b36d 100644
+--- a/drivers/staging/media/omap4iss/iss.c
++++ b/drivers/staging/media/omap4iss/iss.c
+@@ -1236,8 +1236,10 @@ static int iss_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto error;
+ 
+-	if (!omap4iss_get(iss))
++	if (!omap4iss_get(iss)) {
++		ret = -EINVAL;
+ 		goto error;
++	}
+ 
+ 	ret = iss_reset(iss);
+ 	if (ret < 0)
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index d3eb81ee8dc27..5f0219d117fb8 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -55,16 +55,13 @@ static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
+ 
+ static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
+ 	{
+-		.mandatory = true,
+ 		.cfg.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
+ 	},
+ 	{
+-		.mandatory = true,
+ 		.cfg.id = V4L2_CID_STATELESS_H264_SPS,
+ 		.cfg.ops = &rkvdec_ctrl_ops,
+ 	},
+ 	{
+-		.mandatory = true,
+ 		.cfg.id = V4L2_CID_STATELESS_H264_PPS,
+ 	},
+ 	{
+@@ -585,25 +582,7 @@ static const struct vb2_ops rkvdec_queue_ops = {
+ 
+ static int rkvdec_request_validate(struct media_request *req)
+ {
+-	struct media_request_object *obj;
+-	const struct rkvdec_ctrls *ctrls;
+-	struct v4l2_ctrl_handler *hdl;
+-	struct rkvdec_ctx *ctx = NULL;
+-	unsigned int count, i;
+-	int ret;
+-
+-	list_for_each_entry(obj, &req->objects, list) {
+-		if (vb2_request_object_is_buffer(obj)) {
+-			struct vb2_buffer *vb;
+-
+-			vb = container_of(obj, struct vb2_buffer, req_obj);
+-			ctx = vb2_get_drv_priv(vb->vb2_queue);
+-			break;
+-		}
+-	}
+-
+-	if (!ctx)
+-		return -EINVAL;
++	unsigned int count;
+ 
+ 	count = vb2_request_buffer_cnt(req);
+ 	if (!count)
+@@ -611,31 +590,6 @@ static int rkvdec_request_validate(struct media_request *req)
+ 	else if (count > 1)
+ 		return -EINVAL;
+ 
+-	hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_hdl);
+-	if (!hdl)
+-		return -ENOENT;
+-
+-	ret = 0;
+-	ctrls = ctx->coded_fmt_desc->ctrls;
+-	for (i = 0; ctrls && i < ctrls->num_ctrls; i++) {
+-		u32 id = ctrls->ctrls[i].cfg.id;
+-		struct v4l2_ctrl *ctrl;
+-
+-		if (!ctrls->ctrls[i].mandatory)
+-			continue;
+-
+-		ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, id);
+-		if (!ctrl) {
+-			ret = -ENOENT;
+-			break;
+-		}
+-	}
+-
+-	v4l2_ctrl_request_hdl_put(hdl);
+-
+-	if (ret)
+-		return ret;
+-
+ 	return vb2_request_validate(req);
+ }
+ 
+diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/staging/media/rkvdec/rkvdec.h
+index 77a137cca88ea..52ac3874c5e54 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.h
++++ b/drivers/staging/media/rkvdec/rkvdec.h
+@@ -25,7 +25,6 @@
+ struct rkvdec_ctx;
+ 
+ struct rkvdec_ctrl_desc {
+-	u32 mandatory : 1;
+ 	struct v4l2_ctrl_config cfg;
+ };
+ 
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+index 7718c561823f6..92ace87c1c7d1 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+@@ -443,16 +443,17 @@
+ #define VE_DEC_H265_STATUS_STCD_BUSY		BIT(21)
+ #define VE_DEC_H265_STATUS_WB_BUSY		BIT(20)
+ #define VE_DEC_H265_STATUS_BS_DMA_BUSY		BIT(19)
+-#define VE_DEC_H265_STATUS_IQIT_BUSY		BIT(18)
++#define VE_DEC_H265_STATUS_IT_BUSY		BIT(18)
+ #define VE_DEC_H265_STATUS_INTER_BUSY		BIT(17)
+ #define VE_DEC_H265_STATUS_MORE_DATA		BIT(16)
+-#define VE_DEC_H265_STATUS_VLD_BUSY		BIT(14)
+-#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY	BIT(13)
+-#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY	BIT(12)
+-#define VE_DEC_H265_STATUS_INTRA_BUSY		BIT(11)
+-#define VE_DEC_H265_STATUS_SAO_BUSY		BIT(10)
+-#define VE_DEC_H265_STATUS_MVP_BUSY		BIT(9)
+-#define VE_DEC_H265_STATUS_SWDEC_BUSY		BIT(8)
++#define VE_DEC_H265_STATUS_DBLK_BUSY		BIT(15)
++#define VE_DEC_H265_STATUS_IREC_BUSY		BIT(14)
++#define VE_DEC_H265_STATUS_INTRA_BUSY		BIT(13)
++#define VE_DEC_H265_STATUS_MCRI_BUSY		BIT(12)
++#define VE_DEC_H265_STATUS_IQIT_BUSY		BIT(11)
++#define VE_DEC_H265_STATUS_MVP_BUSY		BIT(10)
++#define VE_DEC_H265_STATUS_IS_BUSY		BIT(9)
++#define VE_DEC_H265_STATUS_VLD_BUSY		BIT(8)
+ #define VE_DEC_H265_STATUS_OVER_TIME		BIT(3)
+ #define VE_DEC_H265_STATUS_VLD_DATA_REQ		BIT(2)
+ #define VE_DEC_H265_STATUS_ERROR		BIT(1)
+diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
+index 5516be3af8983..c1d52190e1bdd 100644
+--- a/drivers/staging/qlge/qlge_main.c
++++ b/drivers/staging/qlge/qlge_main.c
+@@ -4550,7 +4550,7 @@ static int qlge_probe(struct pci_dev *pdev,
+ 	struct net_device *ndev = NULL;
+ 	struct devlink *devlink;
+ 	static int cards_found;
+-	int err = 0;
++	int err;
+ 
+ 	devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter));
+ 	if (!devlink)
+@@ -4561,8 +4561,10 @@ static int qlge_probe(struct pci_dev *pdev,
+ 	ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
+ 				 min(MAX_CPUS,
+ 				     netif_get_num_default_rss_queues()));
+-	if (!ndev)
++	if (!ndev) {
++		err = -ENOMEM;
+ 		goto devlink_free;
++	}
+ 
+ 	ndev_priv = netdev_priv(ndev);
+ 	ndev_priv->qdev = qdev;
+diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
+index 9fc4adc83d77d..b5a313649f445 100644
+--- a/drivers/staging/rtl8192u/r8192U_core.c
++++ b/drivers/staging/rtl8192u/r8192U_core.c
+@@ -3210,7 +3210,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
+ 			     u32 *TotalRxDataNum)
+ {
+ 	u16			SlotIndex;
+-	u8			i;
++	u16			i;
+ 
+ 	*TotalRxBcnNum = 0;
+ 	*TotalRxDataNum = 0;
+diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
+index 18b78ea110ef4..ecda5e18d23f6 100644
+--- a/drivers/tty/amiserial.c
++++ b/drivers/tty/amiserial.c
+@@ -970,6 +970,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
+ 	if (!serial_isroot()) {
+ 		if ((ss->baud_base != state->baud_base) ||
+ 		    (ss->close_delay != port->close_delay) ||
++		    (ss->closing_wait != port->closing_wait) ||
+ 		    (ss->xmit_fifo_size != state->xmit_fifo_size) ||
+ 		    ((ss->flags & ~ASYNC_USR_MASK) !=
+ 		     (port->flags & ~ASYNC_USR_MASK))) {
+diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
+index 9f13f7d49dd78..f9f14104bd2c0 100644
+--- a/drivers/tty/moxa.c
++++ b/drivers/tty/moxa.c
+@@ -2040,7 +2040,7 @@ static int moxa_get_serial_info(struct tty_struct *tty,
+ 	ss->line = info->port.tty->index,
+ 	ss->flags = info->port.flags,
+ 	ss->baud_base = 921600,
+-	ss->close_delay = info->port.close_delay;
++	ss->close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
+ 	mutex_unlock(&info->port.mutex);
+ 	return 0;
+ }
+@@ -2050,6 +2050,7 @@ static int moxa_set_serial_info(struct tty_struct *tty,
+ 		struct serial_struct *ss)
+ {
+ 	struct moxa_port *info = tty->driver_data;
++	unsigned int close_delay;
+ 
+ 	if (tty->index == MAX_PORTS)
+ 		return -EINVAL;
+@@ -2061,19 +2062,24 @@ static int moxa_set_serial_info(struct tty_struct *tty,
+ 			ss->baud_base != 921600)
+ 		return -EPERM;
+ 
++	close_delay = msecs_to_jiffies(ss->close_delay * 10);
++
+ 	mutex_lock(&info->port.mutex);
+ 	if (!capable(CAP_SYS_ADMIN)) {
+-		if (((ss->flags & ~ASYNC_USR_MASK) !=
++		if (close_delay != info->port.close_delay ||
++		    ss->type != info->type ||
++		    ((ss->flags & ~ASYNC_USR_MASK) !=
+ 		     (info->port.flags & ~ASYNC_USR_MASK))) {
+ 			mutex_unlock(&info->port.mutex);
+ 			return -EPERM;
+ 		}
+-	}
+-	info->port.close_delay = ss->close_delay * HZ / 100;
++	} else {
++		info->port.close_delay = close_delay;
+ 
+-	MoxaSetFifo(info, ss->type == PORT_16550A);
++		MoxaSetFifo(info, ss->type == PORT_16550A);
+ 
+-	info->type = ss->type;
++		info->type = ss->type;
++	}
+ 	mutex_unlock(&info->port.mutex);
+ 	return 0;
+ }
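
The moxa hunk above and the mxser hunk below make the same unit conversion
explicit: struct serial_struct carries close_delay/closing_wait in hundredths
of a second, while struct tty_port stores jiffies. A hedged sketch of the
round-trip using the <linux/jiffies.h> helpers (function names illustrative):

	#include <linux/jiffies.h>

	/* userspace centiseconds -> kernel jiffies */
	static unsigned long example_cs_to_jiffies(unsigned int cs)
	{
		return msecs_to_jiffies(cs * 10);
	}

	/* kernel jiffies -> userspace centiseconds */
	static unsigned int example_jiffies_to_cs(unsigned long j)
	{
		return jiffies_to_msecs(j) / 10;
	}
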
+diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
+index 4203b64bccdb1..2d8e76263a25b 100644
+--- a/drivers/tty/mxser.c
++++ b/drivers/tty/mxser.c
+@@ -1208,19 +1208,26 @@ static int mxser_get_serial_info(struct tty_struct *tty,
+ {
+ 	struct mxser_port *info = tty->driver_data;
+ 	struct tty_port *port = &info->port;
++	unsigned int closing_wait, close_delay;
+ 
+ 	if (tty->index == MXSER_PORTS)
+ 		return -ENOTTY;
+ 
+ 	mutex_lock(&port->mutex);
++
++	close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
++	closing_wait = info->port.closing_wait;
++	if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
++		closing_wait = jiffies_to_msecs(closing_wait) / 10;
++
+ 	ss->type = info->type,
+ 	ss->line = tty->index,
+ 	ss->port = info->ioaddr,
+ 	ss->irq = info->board->irq,
+ 	ss->flags = info->port.flags,
+ 	ss->baud_base = info->baud_base,
+-	ss->close_delay = info->port.close_delay,
+-	ss->closing_wait = info->port.closing_wait,
++	ss->close_delay = close_delay;
++	ss->closing_wait = closing_wait;
+ 	ss->custom_divisor = info->custom_divisor,
+ 	mutex_unlock(&port->mutex);
+ 	return 0;
+@@ -1233,7 +1240,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
+ 	struct tty_port *port = &info->port;
+ 	speed_t baud;
+ 	unsigned long sl_flags;
+-	unsigned int flags;
++	unsigned int flags, close_delay, closing_wait;
+ 	int retval = 0;
+ 
+ 	if (tty->index == MXSER_PORTS)
+@@ -1255,9 +1262,15 @@ static int mxser_set_serial_info(struct tty_struct *tty,
+ 
+ 	flags = port->flags & ASYNC_SPD_MASK;
+ 
++	close_delay = msecs_to_jiffies(ss->close_delay * 10);
++	closing_wait = ss->closing_wait;
++	if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
++		closing_wait = msecs_to_jiffies(closing_wait * 10);
++
+ 	if (!capable(CAP_SYS_ADMIN)) {
+ 		if ((ss->baud_base != info->baud_base) ||
+-				(ss->close_delay != info->port.close_delay) ||
++				(close_delay != info->port.close_delay) ||
++				(closing_wait != info->port.closing_wait) ||
+ 				((ss->flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK))) {
+ 			mutex_unlock(&port->mutex);
+ 			return -EPERM;
+@@ -1271,8 +1284,8 @@ static int mxser_set_serial_info(struct tty_struct *tty,
+ 		 */
+ 		port->flags = ((port->flags & ~ASYNC_FLAGS) |
+ 				(ss->flags & ASYNC_FLAGS));
+-		port->close_delay = ss->close_delay * HZ / 100;
+-		port->closing_wait = ss->closing_wait * HZ / 100;
++		port->close_delay = close_delay;
++		port->closing_wait = closing_wait;
+ 		if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
+ 				(ss->baud_base != info->baud_base ||
+ 				ss->custom_divisor !=
+@@ -1284,11 +1297,11 @@ static int mxser_set_serial_info(struct tty_struct *tty,
+ 			baud = ss->baud_base / ss->custom_divisor;
+ 			tty_encode_baud_rate(tty, baud, baud);
+ 		}
+-	}
+ 
+-	info->type = ss->type;
++		info->type = ss->type;
+ 
+-	process_txrx_fifo(info);
++		process_txrx_fifo(info);
++	}
+ 
+ 	if (tty_port_initialized(port)) {
+ 		if (flags != (port->flags & ASYNC_SPD_MASK)) {
+diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
+index 64842f3539e19..0b06770642cb3 100644
+--- a/drivers/tty/serial/liteuart.c
++++ b/drivers/tty/serial/liteuart.c
+@@ -270,8 +270,8 @@ static int liteuart_probe(struct platform_device *pdev)
+ 
+ 	/* get membase */
+ 	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+-	if (!port->membase)
+-		return -ENXIO;
++	if (IS_ERR(port->membase))
++		return PTR_ERR(port->membase);
+ 
+ 	/* values not from device tree */
+ 	port->dev = &pdev->dev;
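
The liteuart fix above is needed because devm_platform_get_and_ioremap_resource()
reports failure through an ERR_PTR()-encoded pointer, never NULL, so the old
!port->membase test let error pointers slip through. The idiomatic check,
sketched as it would appear inside a probe function:

	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* failure is an ERR_PTR, never NULL */
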
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 76b94d0ff5865..84e8158088cd2 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -159,6 +159,8 @@ struct uart_omap_port {
+ 	u32			calc_latency;
+ 	struct work_struct	qos_work;
+ 	bool			is_suspending;
++
++	unsigned int		rs485_tx_filter_count;
+ };
+ 
+ #define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
+@@ -302,7 +304,8 @@ static void serial_omap_stop_tx(struct uart_port *port)
+ 			serial_out(up, UART_OMAP_SCR, up->scr);
+ 			res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
+ 				1 : 0;
+-			if (gpiod_get_value(up->rts_gpiod) != res) {
++			if (up->rts_gpiod &&
++			    gpiod_get_value(up->rts_gpiod) != res) {
+ 				if (port->rs485.delay_rts_after_send > 0)
+ 					mdelay(
+ 					port->rs485.delay_rts_after_send);
+@@ -328,19 +331,6 @@ static void serial_omap_stop_tx(struct uart_port *port)
+ 		serial_out(up, UART_IER, up->ier);
+ 	}
+ 
+-	if ((port->rs485.flags & SER_RS485_ENABLED) &&
+-	    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+-		/*
+-		 * Empty the RX FIFO, we are not interested in anything
+-		 * received during the half-duplex transmission.
+-		 */
+-		serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_RCVR);
+-		/* Re-enable RX interrupts */
+-		up->ier |= UART_IER_RLSI | UART_IER_RDI;
+-		up->port.read_status_mask |= UART_LSR_DR;
+-		serial_out(up, UART_IER, up->ier);
+-	}
+-
+ 	pm_runtime_mark_last_busy(up->dev);
+ 	pm_runtime_put_autosuspend(up->dev);
+ }
+@@ -366,6 +356,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
+ 		serial_out(up, UART_TX, up->port.x_char);
+ 		up->port.icount.tx++;
+ 		up->port.x_char = 0;
++		if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
++		    !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
++			up->rs485_tx_filter_count++;
++
+ 		return;
+ 	}
+ 	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+@@ -377,6 +371,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
+ 		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ 		up->port.icount.tx++;
++		if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
++		    !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
++			up->rs485_tx_filter_count++;
++
+ 		if (uart_circ_empty(xmit))
+ 			break;
+ 	} while (--count > 0);
+@@ -411,7 +409,7 @@ static void serial_omap_start_tx(struct uart_port *port)
+ 
+ 		/* if rts not already enabled */
+ 		res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
+-		if (gpiod_get_value(up->rts_gpiod) != res) {
++		if (up->rts_gpiod && gpiod_get_value(up->rts_gpiod) != res) {
+ 			gpiod_set_value(up->rts_gpiod, res);
+ 			if (port->rs485.delay_rts_before_send > 0)
+ 				mdelay(port->rs485.delay_rts_before_send);
+@@ -420,7 +418,7 @@ static void serial_omap_start_tx(struct uart_port *port)
+ 
+ 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ 	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+-		serial_omap_stop_rx(port);
++		up->rs485_tx_filter_count = 0;
+ 
+ 	serial_omap_enable_ier_thri(up);
+ 	pm_runtime_mark_last_busy(up->dev);
+@@ -491,8 +489,13 @@ static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
+ 	 * Read one data character out to avoid stalling the receiver according
+ 	 * to the table 23-246 of the omap4 TRM.
+ 	 */
+-	if (likely(lsr & UART_LSR_DR))
++	if (likely(lsr & UART_LSR_DR)) {
+ 		serial_in(up, UART_RX);
++		if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
++		    !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
++		    up->rs485_tx_filter_count)
++			up->rs485_tx_filter_count--;
++	}
+ 
+ 	up->port.icount.rx++;
+ 	flag = TTY_NORMAL;
+@@ -543,6 +546,13 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
+ 		return;
+ 
+ 	ch = serial_in(up, UART_RX);
++	if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
++	    !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
++	    up->rs485_tx_filter_count) {
++		up->rs485_tx_filter_count--;
++		return;
++	}
++
+ 	flag = TTY_NORMAL;
+ 	up->port.icount.rx++;
+ 
+@@ -1407,18 +1417,13 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+ 	/* store new config */
+ 	port->rs485 = *rs485;
+ 
+-	/*
+-	 * Just as a precaution, only allow rs485
+-	 * to be enabled if the gpio pin is valid
+-	 */
+ 	if (up->rts_gpiod) {
+ 		/* enable / disable rts */
+ 		val = (port->rs485.flags & SER_RS485_ENABLED) ?
+ 			SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
+ 		val = (port->rs485.flags & val) ? 1 : 0;
+ 		gpiod_set_value(up->rts_gpiod, val);
+-	} else
+-		port->rs485.flags &= ~SER_RS485_ENABLED;
++	}
+ 
+ 	/* Enable interrupts */
+ 	up->ier = mode;
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index f86ec2d2635b7..9adb8362578c5 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1196,7 +1196,7 @@ static int sc16is7xx_probe(struct device *dev,
+ 	ret = regmap_read(regmap,
+ 			  SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
+ 	if (ret < 0)
+-		return ret;
++		return -EPROBE_DEFER;
+ 
+ 	/* Alloc port structure */
+ 	s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index ba31e97d3d96b..43f02ed055d5e 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1305,7 +1305,7 @@ static int uart_set_rs485_config(struct uart_port *port,
+ 	unsigned long flags;
+ 
+ 	if (!port->rs485_config)
+-		return -ENOIOCTLCMD;
++		return -ENOTTY;
+ 
+ 	if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
+ 		return -EFAULT;
+@@ -1329,7 +1329,7 @@ static int uart_get_iso7816_config(struct uart_port *port,
+ 	struct serial_iso7816 aux;
+ 
+ 	if (!port->iso7816_config)
+-		return -ENOIOCTLCMD;
++		return -ENOTTY;
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+ 	aux = port->iso7816;
+@@ -1349,7 +1349,7 @@ static int uart_set_iso7816_config(struct uart_port *port,
+ 	unsigned long flags;
+ 
+ 	if (!port->iso7816_config)
+-		return -ENOIOCTLCMD;
++		return -ENOTTY;
+ 
+ 	if (copy_from_user(&iso7816, iso7816_user, sizeof(*iso7816_user)))
+ 		return -EFAULT;
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index b3675cf25a692..99dfa884cbefb 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -214,12 +214,14 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
+ 	struct tty_port *tport = &port->state->port;
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-	unsigned long c;
++	unsigned long c, flags;
+ 	u32 sr;
+ 	char flag;
+ 
+-	if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
+-		pm_wakeup_event(tport->tty->dev, 0);
++	if (threaded)
++		spin_lock_irqsave(&port->lock, flags);
++	else
++		spin_lock(&port->lock);
+ 
+ 	while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
+ 				      threaded)) {
+@@ -276,9 +278,12 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
+ 		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
+ 	}
+ 
+-	spin_unlock(&port->lock);
++	if (threaded)
++		spin_unlock_irqrestore(&port->lock, flags);
++	else
++		spin_unlock(&port->lock);
++
+ 	tty_flip_buffer_push(tport);
+-	spin_lock(&port->lock);
+ }
+ 
+ static void stm32_usart_tx_dma_complete(void *arg)
+@@ -286,12 +291,16 @@ static void stm32_usart_tx_dma_complete(void *arg)
+ 	struct uart_port *port = arg;
+ 	struct stm32_port *stm32port = to_stm32_port(port);
+ 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
++	unsigned long flags;
+ 
++	dmaengine_terminate_async(stm32port->tx_ch);
+ 	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ 	stm32port->tx_dma_busy = false;
+ 
+ 	/* Let's see if we have pending data to send */
++	spin_lock_irqsave(&port->lock, flags);
+ 	stm32_usart_transmit_chars(port);
++	spin_unlock_irqrestore(&port->lock, flags);
+ }
+ 
+ static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
+@@ -455,29 +464,34 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
+ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ {
+ 	struct uart_port *port = ptr;
++	struct tty_port *tport = &port->state->port;
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	u32 sr;
+ 
+-	spin_lock(&port->lock);
+-
+ 	sr = readl_relaxed(port->membase + ofs->isr);
+ 
+ 	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+ 		writel_relaxed(USART_ICR_RTOCF,
+ 			       port->membase + ofs->icr);
+ 
+-	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG)
++	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
++		/* Clear wake up flag and disable wake up interrupt */
+ 		writel_relaxed(USART_ICR_WUCF,
+ 			       port->membase + ofs->icr);
++		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
++		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
++			pm_wakeup_event(tport->tty->dev, 0);
++	}
+ 
+ 	if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
+ 		stm32_usart_receive_chars(port, false);
+ 
+-	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch))
++	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
++		spin_lock(&port->lock);
+ 		stm32_usart_transmit_chars(port);
+-
+-	spin_unlock(&port->lock);
++		spin_unlock(&port->lock);
++	}
+ 
+ 	if (stm32_port->rx_ch)
+ 		return IRQ_WAKE_THREAD;
+@@ -490,13 +504,9 @@ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
+ 	struct uart_port *port = ptr;
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 
+-	spin_lock(&port->lock);
+-
+ 	if (stm32_port->rx_ch)
+ 		stm32_usart_receive_chars(port, true);
+ 
+-	spin_unlock(&port->lock);
+-
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -505,7 +515,10 @@ static unsigned int stm32_usart_tx_empty(struct uart_port *port)
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 
+-	return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE;
++	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
++		return TIOCSER_TEMT;
++
++	return 0;
+ }
+ 
+ static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+@@ -634,6 +647,7 @@ static int stm32_usart_startup(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ 	const char *name = to_platform_device(port->dev)->name;
+ 	u32 val;
+ 	int ret;
+@@ -646,21 +660,10 @@ static int stm32_usart_startup(struct uart_port *port)
+ 
+ 	/* RX FIFO Flush */
+ 	if (ofs->rqr != UNDEF_REG)
+-		stm32_usart_set_bits(port, ofs->rqr, USART_RQR_RXFRQ);
+-
+-	/* Tx and RX FIFO configuration */
+-	if (stm32_port->fifoen) {
+-		val = readl_relaxed(port->membase + ofs->cr3);
+-		val &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
+-		val |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
+-		val |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
+-		writel_relaxed(val, port->membase + ofs->cr3);
+-	}
++		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
+ 
+-	/* RX FIFO enabling */
+-	val = stm32_port->cr1_irq | USART_CR1_RE;
+-	if (stm32_port->fifoen)
+-		val |= USART_CR1_FIFOEN;
++	/* RX enabling */
++	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
+ 	stm32_usart_set_bits(port, ofs->cr1, val);
+ 
+ 	return 0;
+@@ -691,6 +694,11 @@ static void stm32_usart_shutdown(struct uart_port *port)
+ 	if (ret)
+ 		dev_err(port->dev, "Transmission is not complete\n");
+ 
++	/* flush RX & TX FIFO */
++	if (ofs->rqr != UNDEF_REG)
++		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
++			       port->membase + ofs->rqr);
++
+ 	stm32_usart_clr_bits(port, ofs->cr1, val);
+ 
+ 	free_irq(port->irq, port);
+@@ -737,8 +745,9 @@ static void stm32_usart_set_termios(struct uart_port *port,
+ 	unsigned int baud, bits;
+ 	u32 usartdiv, mantissa, fraction, oversampling;
+ 	tcflag_t cflag = termios->c_cflag;
+-	u32 cr1, cr2, cr3;
++	u32 cr1, cr2, cr3, isr;
+ 	unsigned long flags;
++	int ret;
+ 
+ 	if (!stm32_port->hw_flow_control)
+ 		cflag &= ~CRTSCTS;
+@@ -747,21 +756,36 @@ static void stm32_usart_set_termios(struct uart_port *port,
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+ 
++	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
++						isr,
++						(isr & USART_SR_TC),
++						10, 100000);
++
++	/* Send the TC error message only when ISR_TC is not set. */
++	if (ret)
++		dev_err(port->dev, "Transmission is not complete\n");
++
+ 	/* Stop serial port and reset value */
+ 	writel_relaxed(0, port->membase + ofs->cr1);
+ 
+ 	/* flush RX & TX FIFO */
+ 	if (ofs->rqr != UNDEF_REG)
+-		stm32_usart_set_bits(port, ofs->rqr,
+-				     USART_RQR_TXFRQ | USART_RQR_RXFRQ);
++		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
++			       port->membase + ofs->rqr);
+ 
+ 	cr1 = USART_CR1_TE | USART_CR1_RE;
+ 	if (stm32_port->fifoen)
+ 		cr1 |= USART_CR1_FIFOEN;
+ 	cr2 = 0;
++
++	/* Tx and RX FIFO configuration */
+ 	cr3 = readl_relaxed(port->membase + ofs->cr3);
+-	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTCFG_MASK | USART_CR3_RXFTIE
+-		| USART_CR3_TXFTCFG_MASK;
++	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
++	if (stm32_port->fifoen) {
++		cr3 &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
++		cr3 |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
++		cr3 |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
++	}
+ 
+ 	if (cflag & CSTOPB)
+ 		cr2 |= USART_CR2_STOP_2B;
+@@ -817,12 +841,6 @@ static void stm32_usart_set_termios(struct uart_port *port,
+ 		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
+ 	}
+ 
+-	/* Handle modem control interrupts */
+-	if (UART_ENABLE_MS(port, termios->c_cflag))
+-		stm32_usart_enable_ms(port);
+-	else
+-		stm32_usart_disable_ms(port);
+-
+ 	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
+ 
+ 	/*
+@@ -892,12 +910,24 @@ static void stm32_usart_set_termios(struct uart_port *port,
+ 		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
+ 	}
+ 
++	/* Configure wake up from low power on start bit detection */
++	if (stm32_port->wakeirq > 0) {
++		cr3 &= ~USART_CR3_WUS_MASK;
++		cr3 |= USART_CR3_WUS_START_BIT;
++	}
++
+ 	writel_relaxed(cr3, port->membase + ofs->cr3);
+ 	writel_relaxed(cr2, port->membase + ofs->cr2);
+ 	writel_relaxed(cr1, port->membase + ofs->cr1);
+ 
+ 	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ 	spin_unlock_irqrestore(&port->lock, flags);
++
++	/* Handle modem control interrupts */
++	if (UART_ENABLE_MS(port, termios->c_cflag))
++		stm32_usart_enable_ms(port);
++	else
++		stm32_usart_disable_ms(port);
+ }
+ 
+ static const char *stm32_usart_type(struct uart_port *port)
+@@ -1252,10 +1282,6 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
+ 		device_set_wakeup_enable(&pdev->dev, false);
+ 	}
+ 
+-	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
+-	if (ret)
+-		goto err_wirq;
+-
+ 	ret = stm32_usart_of_dma_rx_probe(stm32port, pdev);
+ 	if (ret)
+ 		dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
+@@ -1269,11 +1295,40 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
+ 	pm_runtime_get_noresume(&pdev->dev);
+ 	pm_runtime_set_active(&pdev->dev);
+ 	pm_runtime_enable(&pdev->dev);
++
++	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
++	if (ret)
++		goto err_port;
++
+ 	pm_runtime_put_sync(&pdev->dev);
+ 
+ 	return 0;
+ 
+-err_wirq:
++err_port:
++	pm_runtime_disable(&pdev->dev);
++	pm_runtime_set_suspended(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
++
++	if (stm32port->rx_ch) {
++		dmaengine_terminate_async(stm32port->rx_ch);
++		dma_release_channel(stm32port->rx_ch);
++	}
++
++	if (stm32port->rx_dma_buf)
++		dma_free_coherent(&pdev->dev,
++				  RX_BUF_L, stm32port->rx_buf,
++				  stm32port->rx_dma_buf);
++
++	if (stm32port->tx_ch) {
++		dmaengine_terminate_async(stm32port->tx_ch);
++		dma_release_channel(stm32port->tx_ch);
++	}
++
++	if (stm32port->tx_dma_buf)
++		dma_free_coherent(&pdev->dev,
++				  TX_BUF_L, stm32port->tx_buf,
++				  stm32port->tx_dma_buf);
++
+ 	if (stm32port->wakeirq > 0)
+ 		dev_pm_clear_wake_irq(&pdev->dev);
+ 
+@@ -1295,11 +1350,20 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
+ 	int err;
+ 
+ 	pm_runtime_get_sync(&pdev->dev);
++	err = uart_remove_one_port(&stm32_usart_driver, port);
++	if (err)
++		return err;
++
++	pm_runtime_disable(&pdev->dev);
++	pm_runtime_set_suspended(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
+ 
+ 	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+ 
+-	if (stm32_port->rx_ch)
++	if (stm32_port->rx_ch) {
++		dmaengine_terminate_async(stm32_port->rx_ch);
+ 		dma_release_channel(stm32_port->rx_ch);
++	}
+ 
+ 	if (stm32_port->rx_dma_buf)
+ 		dma_free_coherent(&pdev->dev,
+@@ -1308,8 +1372,10 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
+ 
+ 	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ 
+-	if (stm32_port->tx_ch)
++	if (stm32_port->tx_ch) {
++		dmaengine_terminate_async(stm32_port->tx_ch);
+ 		dma_release_channel(stm32_port->tx_ch);
++	}
+ 
+ 	if (stm32_port->tx_dma_buf)
+ 		dma_free_coherent(&pdev->dev,
+@@ -1323,12 +1389,7 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
+ 
+ 	stm32_usart_deinit_port(stm32_port);
+ 
+-	err = uart_remove_one_port(&stm32_usart_driver, port);
+-
+-	pm_runtime_disable(&pdev->dev);
+-	pm_runtime_put_noidle(&pdev->dev);
+-
+-	return err;
++	return 0;
+ }
+ 
+ #ifdef CONFIG_SERIAL_STM32_CONSOLE
+@@ -1436,23 +1497,20 @@ static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+-	u32 val;
+ 
+ 	if (stm32_port->wakeirq <= 0)
+ 		return;
+ 
++	/*
++	 * Enable low-power wake-up and the wake-up irq when @enable is
++	 * set; disable both otherwise.
++	 */
+ 	if (enable) {
+-		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ 		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
+-		val = readl_relaxed(port->membase + ofs->cr3);
+-		val &= ~USART_CR3_WUS_MASK;
+-		/* Enable Wake up interrupt from low power on start bit */
+-		val |= USART_CR3_WUS_START_BIT | USART_CR3_WUFIE;
+-		writel_relaxed(val, port->membase + ofs->cr3);
+-		stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
++		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
+ 	} else {
+ 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
++		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
+ 	}
+ }
+ 
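
Several stm32-usart hunks above turn on one locking rule: a path that can run
both from the hard interrupt handler (interrupts already off) and from a
threaded handler must pick the matching spin_lock variant. A minimal sketch of
the pattern used by stm32_usart_receive_chars(), assuming a "threaded" flag as
in the driver:

	unsigned long flags = 0;

	if (threaded)			/* process context: mask local IRQs */
		spin_lock_irqsave(&port->lock, flags);
	else				/* hard IRQ: IRQs already disabled */
		spin_lock(&port->lock);

	/* ... drain the RX FIFO into the tty buffer ... */

	if (threaded)
		spin_unlock_irqrestore(&port->lock, flags);
	else
		spin_unlock(&port->lock);

	tty_flip_buffer_push(tport);	/* push with the lock dropped */
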
+diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
+index cb4f327c46db9..94b568aa46bbd 100644
+--- a/drivers/tty/serial/stm32-usart.h
++++ b/drivers/tty/serial/stm32-usart.h
+@@ -127,9 +127,6 @@ struct stm32_usart_info stm32h7_info = {
+ /* Dummy bits */
+ #define USART_SR_DUMMY_RX	BIT(16)
+ 
+-/* USART_ICR (F7) */
+-#define USART_CR_TC		BIT(6)
+-
+ /* USART_DR */
+ #define USART_DR_MASK		GENMASK(8, 0)
+ 
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 391bada4cedb6..adbcbfa11b290 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2530,14 +2530,14 @@ out:
+  *	@p: pointer to result
+  *
+  *	Obtain the modem status bits from the tty driver if the feature
+- *	is supported. Return -EINVAL if it is not available.
++ *	is supported. Return -ENOTTY if it is not available.
+  *
+  *	Locking: none (up to the driver)
+  */
+ 
+ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
+ {
+-	int retval = -EINVAL;
++	int retval = -ENOTTY;
+ 
+ 	if (tty->ops->tiocmget) {
+ 		retval = tty->ops->tiocmget(tty);
+@@ -2555,7 +2555,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
+  *	@p: pointer to desired bits
+  *
+  *	Set the modem status bits from the tty driver if the feature
+- *	is supported. Return -EINVAL if it is not available.
++ *	is supported. Return -ENOTTY if it is not available.
+  *
+  *	Locking: none (up to the driver)
+  */
+@@ -2567,7 +2567,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
+ 	unsigned int set, clear, val;
+ 
+ 	if (tty->ops->tiocmset == NULL)
+-		return -EINVAL;
++		return -ENOTTY;
+ 
+ 	retval = get_user(val, p);
+ 	if (retval)
+@@ -2607,7 +2607,7 @@ int tty_get_icount(struct tty_struct *tty,
+ 	if (tty->ops->get_icount)
+ 		return tty->ops->get_icount(tty, icount);
+ 	else
+-		return -EINVAL;
++		return -ENOTTY;
+ }
+ EXPORT_SYMBOL_GPL(tty_get_icount);
+ 
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 4de1c6ddb8ffb..803da2d111c8c 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -774,8 +774,8 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
+ 	case TCSETX:
+ 	case TCSETXW:
+ 	case TCSETXF:
+-		return -EINVAL;
+-#endif		
++		return -ENOTTY;
++#endif
+ 	case TIOCGSOFTCAR:
+ 		copy_termios(real_tty, &kterm);
+ 		ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
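
The tty_io.c, tty_ioctl.c and serial_core.c hunks above all make the same
errno substitution: -ENOTTY ("inappropriate ioctl for device") is the
conventional answer when a driver simply does not implement an operation,
while -EINVAL stays reserved for bad arguments to a supported one. A sketch:

	static int example_tiocmget(struct tty_struct *tty)
	{
		/* the driver does not implement the op at all: -ENOTTY */
		if (!tty->ops->tiocmget)
			return -ENOTTY;

		/* the op exists; malformed input would be -EINVAL instead */
		return tty->ops->tiocmget(tty);
	}
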
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
+index d7d4bdd57f46f..56707b6b0f57c 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.c
++++ b/drivers/usb/cdns3/cdnsp-gadget.c
+@@ -727,7 +727,7 @@ int cdnsp_reset_device(struct cdnsp_device *pdev)
+ 	 * are in Disabled state.
+ 	 */
+ 	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
+-		pdev->eps[i].ep_state |= EP_STOPPED;
++		pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED;
+ 
+ 	trace_cdnsp_handle_cmd_reset_dev(slot_ctx);
+ 
+@@ -942,6 +942,7 @@ static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
+ 
+ 	pep = to_cdnsp_ep(ep);
+ 	pdev = pep->pdev;
++	pep->ep_state &= ~EP_UNCONFIGURED;
+ 
+ 	if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
+ 			  "%s is already enabled\n", pep->name))
+@@ -1023,9 +1024,13 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
+ 		goto finish;
+ 	}
+ 
+-	cdnsp_cmd_stop_ep(pdev, pep);
+ 	pep->ep_state |= EP_DIS_IN_RROGRESS;
+-	cdnsp_cmd_flush_ep(pdev, pep);
++
++	/* Endpoint was unconfigured by Reset Device command. */
++	if (!(pep->ep_state & EP_UNCONFIGURED)) {
++		cdnsp_cmd_stop_ep(pdev, pep);
++		cdnsp_cmd_flush_ep(pdev, pep);
++	}
+ 
+ 	/* Remove all queued USB requests. */
+ 	while (!list_empty(&pep->pending_list)) {
+@@ -1043,10 +1048,12 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
+ 
+ 	cdnsp_endpoint_zero(pdev, pep);
+ 
+-	ret = cdnsp_update_eps_configuration(pdev, pep);
++	if (!(pep->ep_state & EP_UNCONFIGURED))
++		ret = cdnsp_update_eps_configuration(pdev, pep);
++
+ 	cdnsp_free_endpoint_rings(pdev, pep);
+ 
+-	pep->ep_state &= ~EP_ENABLED;
++	pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
+ 	pep->ep_state |= EP_STOPPED;
+ 
+ finish:
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
+index 6bbb26548c049..783ca8ffde007 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.h
++++ b/drivers/usb/cdns3/cdnsp-gadget.h
+@@ -835,6 +835,7 @@ struct cdnsp_ep {
+ #define EP_WEDGE		BIT(4)
+ #define EP0_HALTED_STATUS	BIT(5)
+ #define EP_HAS_STREAMS		BIT(6)
++#define EP_UNCONFIGURED		BIT(7)
+ 
+ 	bool skip;
+ };
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index f5886c512fec1..c103961c3fae9 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -929,8 +929,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
+ {
+ 	struct acm *acm = tty->driver_data;
+ 
+-	ss->xmit_fifo_size = acm->writesize;
+-	ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
++	ss->line = acm->minor;
+ 	ss->close_delay	= jiffies_to_msecs(acm->port.close_delay) / 10;
+ 	ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ 				ASYNC_CLOSING_WAIT_NONE :
+@@ -942,7 +941,6 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
+ {
+ 	struct acm *acm = tty->driver_data;
+ 	unsigned int closing_wait, close_delay;
+-	unsigned int old_closing_wait, old_close_delay;
+ 	int retval = 0;
+ 
+ 	close_delay = msecs_to_jiffies(ss->close_delay * 10);
+@@ -950,20 +948,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
+ 			ASYNC_CLOSING_WAIT_NONE :
+ 			msecs_to_jiffies(ss->closing_wait * 10);
+ 
+-	/* we must redo the rounding here, so that the values match */
+-	old_close_delay	= jiffies_to_msecs(acm->port.close_delay) / 10;
+-	old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+-				ASYNC_CLOSING_WAIT_NONE :
+-				jiffies_to_msecs(acm->port.closing_wait) / 10;
+-
+ 	mutex_lock(&acm->port.mutex);
+ 
+ 	if (!capable(CAP_SYS_ADMIN)) {
+-		if ((ss->close_delay != old_close_delay) ||
+-		    (ss->closing_wait != old_closing_wait))
++		if ((close_delay != acm->port.close_delay) ||
++		    (closing_wait != acm->port.closing_wait))
+ 			retval = -EPERM;
+-		else
+-			retval = -EOPNOTSUPP;
+ 	} else {
+ 		acm->port.close_delay  = close_delay;
+ 		acm->port.closing_wait = closing_wait;
+diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
+index 800c8b6c55ff1..510fd0572feb1 100644
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -660,6 +660,71 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
+ 		return 0;
+ }
+ 
++/**
++ * dwc_handle_gpwrdn_disc_det() - Handles the gpwrdn disconnect detect.
++ * Exits hibernation without restoring registers.
++ *
++ * @hsotg: Programming view of DWC_otg controller
++ * @gpwrdn: GPWRDN register
++ */
++static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
++					      u32 gpwrdn)
++{
++	u32 gpwrdn_tmp;
++
++	/* Switch-on voltage to the core */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++	udelay(5);
++
++	/* Reset core */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++	udelay(5);
++
++	/* Disable Power Down Clamp */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++	udelay(5);
++
++	/* Deassert reset core */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++	udelay(5);
++
++	/* Disable PMU interrupt */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++
++	/* De-assert Wakeup Logic */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp &= ~GPWRDN_PMUACTV;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++
++	hsotg->hibernated = 0;
++	hsotg->bus_suspended = 0;
++
++	if (gpwrdn & GPWRDN_IDSTS) {
++		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
++		dwc2_core_init(hsotg, false);
++		dwc2_enable_global_interrupts(hsotg);
++		dwc2_hsotg_core_init_disconnected(hsotg, false);
++		dwc2_hsotg_core_connect(hsotg);
++	} else {
++		hsotg->op_state = OTG_STATE_A_HOST;
++
++		/* Initialize the Core for Host mode */
++		dwc2_core_init(hsotg, false);
++		dwc2_enable_global_interrupts(hsotg);
++		dwc2_hcd_start(hsotg);
++	}
++}
++
+ /*
+  * GPWRDN interrupt handler.
+  *
+@@ -681,64 +746,14 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
+ 
+ 	if ((gpwrdn & GPWRDN_DISCONN_DET) &&
+ 	    (gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) {
+-		u32 gpwrdn_tmp;
+-
+ 		dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
+-
+-		/* Switch-on voltage to the core */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-		udelay(10);
+-
+-		/* Reset core */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-		udelay(10);
+-
+-		/* Disable Power Down Clamp */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-		udelay(10);
+-
+-		/* Deassert reset core */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-		udelay(10);
+-
+-		/* Disable PMU interrupt */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-
+-		/* De-assert Wakeup Logic */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp &= ~GPWRDN_PMUACTV;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-
+-		hsotg->hibernated = 0;
+-
+-		if (gpwrdn & GPWRDN_IDSTS) {
+-			hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+-			dwc2_core_init(hsotg, false);
+-			dwc2_enable_global_interrupts(hsotg);
+-			dwc2_hsotg_core_init_disconnected(hsotg, false);
+-			dwc2_hsotg_core_connect(hsotg);
+-		} else {
+-			hsotg->op_state = OTG_STATE_A_HOST;
+-
+-			/* Initialize the Core for Host mode */
+-			dwc2_core_init(hsotg, false);
+-			dwc2_enable_global_interrupts(hsotg);
+-			dwc2_hcd_start(hsotg);
+-		}
+-	}
+-
+-	if ((gpwrdn & GPWRDN_LNSTSCHG) &&
+-	    (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
++		/*
++		 * Call the disconnect detect helper to exit from
++		 * hibernation.
++		 */
++		dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
++	} else if ((gpwrdn & GPWRDN_LNSTSCHG) &&
++		   (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
+ 		dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__);
+ 		if (hsotg->hw_params.hibernation &&
+ 		    hsotg->hibernated) {
+@@ -749,24 +764,21 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
+ 				dwc2_exit_hibernation(hsotg, 1, 0, 1);
+ 			}
+ 		}
+-	}
+-	if ((gpwrdn & GPWRDN_RST_DET) && (gpwrdn & GPWRDN_RST_DET_MSK)) {
++	} else if ((gpwrdn & GPWRDN_RST_DET) &&
++		   (gpwrdn & GPWRDN_RST_DET_MSK)) {
+ 		dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__);
+ 		if (!linestate && (gpwrdn & GPWRDN_BSESSVLD))
+ 			dwc2_exit_hibernation(hsotg, 0, 1, 0);
+-	}
+-	if ((gpwrdn & GPWRDN_STS_CHGINT) &&
+-	    (gpwrdn & GPWRDN_STS_CHGINT_MSK) && linestate) {
++	} else if ((gpwrdn & GPWRDN_STS_CHGINT) &&
++		   (gpwrdn & GPWRDN_STS_CHGINT_MSK)) {
+ 		dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__);
+-		if (hsotg->hw_params.hibernation &&
+-		    hsotg->hibernated) {
+-			if (gpwrdn & GPWRDN_IDSTS) {
+-				dwc2_exit_hibernation(hsotg, 0, 0, 0);
+-				call_gadget(hsotg, resume);
+-			} else {
+-				dwc2_exit_hibernation(hsotg, 1, 0, 1);
+-			}
+-		}
++		/*
++		 * The exit-from-hibernation flow for GPWRDN_STS_CHGINT
++		 * is the same as for GPWRDN_DISCONN_DET, so call the
++		 * disconnect detect helper to exit from hibernation.
++		 */
++		dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 1a9789ec5847f..6af1dcbc36564 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -5580,7 +5580,15 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ 		return ret;
+ 	}
+ 
+-	dwc2_hcd_rem_wakeup(hsotg);
++	if (rem_wakeup) {
++		dwc2_hcd_rem_wakeup(hsotg);
++		/*
++		 * Set the "port_connect_status_change" flag to force
++		 * re-enumeration, since the port connection status is
++		 * not detected after exiting hibernation.
++		 */
++		hsotg->flags.b.port_connect_status_change = 1;
++	}
+ 
+ 	hsotg->hibernated = 0;
+ 	hsotg->bus_suspended = 0;
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
+index be7bb64e3594d..d11d3d14313f9 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
++++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
+@@ -36,6 +36,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
+ 		   int status)
+ {
+ 	bool internal = req->internal;
++	struct ast_vhub *vhub = ep->vhub;
+ 
+ 	EPVDBG(ep, "completing request @%p, status %d\n", req, status);
+ 
+@@ -46,7 +47,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
+ 
+ 	if (req->req.dma) {
+ 		if (!WARN_ON(!ep->dev))
+-			usb_gadget_unmap_request(&ep->dev->gadget,
++			usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
+ 						 &req->req, ep->epn.is_in);
+ 		req->req.dma = 0;
+ 	}
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+index 02d8bfae58fb1..cb164c615e6fc 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
++++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+@@ -376,7 +376,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
+ 	if (ep->epn.desc_mode ||
+ 	    ((((unsigned long)u_req->buf & 7) == 0) &&
+ 	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
+-		rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
++		rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
+ 					    ep->epn.is_in);
+ 		if (rc) {
+ 			dev_warn(&vhub->pdev->dev,
+diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
+index d6ca50f019853..75bf446f4a666 100644
+--- a/drivers/usb/gadget/udc/fotg210-udc.c
++++ b/drivers/usb/gadget/udc/fotg210-udc.c
+@@ -338,15 +338,16 @@ static void fotg210_start_dma(struct fotg210_ep *ep,
+ 		} else {
+ 			buffer = req->req.buf + req->req.actual;
+ 			length = ioread32(ep->fotg210->reg +
+-					FOTG210_FIBCR(ep->epnum - 1));
+-			length &= FIBCR_BCFX;
++					FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
++			if (length > req->req.length - req->req.actual)
++				length = req->req.length - req->req.actual;
+ 		}
+ 	} else {
+ 		buffer = req->req.buf + req->req.actual;
+ 		if (req->req.length - req->req.actual > ep->ep.maxpacket)
+ 			length = ep->ep.maxpacket;
+ 		else
+-			length = req->req.length;
++			length = req->req.length - req->req.actual;
+ 	}
+ 
+ 	d = dma_map_single(dev, buffer, length,
+@@ -379,8 +380,7 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep,
+ 	}
+ 	if (ep->dir_in) { /* if IN */
+ 		fotg210_start_dma(ep, req);
+-		if ((req->req.length == req->req.actual) ||
+-		    (req->req.actual < ep->ep.maxpacket))
++		if (req->req.length == req->req.actual)
+ 			fotg210_done(ep, req, 0);
+ 	} else { /* OUT */
+ 		u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
+@@ -820,7 +820,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210)
+ 		if (req->req.length)
+ 			fotg210_start_dma(ep, req);
+ 
+-		if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
++		if (req->req.actual == req->req.length)
+ 			fotg210_done(ep, req, 0);
+ 	} else {
+ 		fotg210_set_cxdone(fotg210);
+@@ -849,12 +849,16 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
+ {
+ 	struct fotg210_request *req = list_entry(ep->queue.next,
+ 						 struct fotg210_request, queue);
++	int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
+ 
+ 	fotg210_start_dma(ep, req);
+ 
+-	/* finish out transfer */
++	/* Complete the request when it is full or a short packet has
++	 * arrived. Like other drivers, short_not_ok isn't handled.
++	 */
++
+ 	if (req->req.length == req->req.actual ||
+-	    req->req.actual < ep->ep.maxpacket)
++	    (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
+ 		fotg210_done(ep, req, 0);
+ }
+ 
+@@ -1027,6 +1031,12 @@ static void fotg210_init(struct fotg210_udc *fotg210)
+ 	value &= ~DMCR_GLINT_EN;
+ 	iowrite32(value, fotg210->reg + FOTG210_DMCR);
+ 
++	/* enable only grp2 irqs we handle */
++	iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
++		    | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
++		    | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
++		  fotg210->reg + FOTG210_DMISGR2);
++
+ 	/* disable all fifo interrupt */
+ 	iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
+ 
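
The fotg210 hunks above stop inferring a short packet from actual < maxpacket,
which misfires when a transfer ends exactly on a packet boundary, and instead
consult the controller's short-packet interrupt bit. The completion condition,
sketched with the register symbols from the diff:

	int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);

	/* done when the request is full, or the controller saw a short
	 * packet on this endpoint */
	if (req->req.actual == req->req.length ||
	    (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
		fotg210_done(ep, req, 0);
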
+diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
+index a3c1fc9242686..fd3656d0f760c 100644
+--- a/drivers/usb/gadget/udc/pch_udc.c
++++ b/drivers/usb/gadget/udc/pch_udc.c
+@@ -7,12 +7,14 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/errno.h>
++#include <linux/gpio/consumer.h>
++#include <linux/gpio/machine.h>
+ #include <linux/list.h>
+ #include <linux/interrupt.h>
+ #include <linux/usb/ch9.h>
+ #include <linux/usb/gadget.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/irq.h>
+ 
+ #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
+@@ -596,18 +598,22 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev)
+ static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
+ 					  int is_active)
+ {
++	unsigned long		iflags;
++
++	spin_lock_irqsave(&dev->lock, iflags);
+ 	if (is_active) {
+ 		pch_udc_reconnect(dev);
+ 		dev->vbus_session = 1;
+ 	} else {
+ 		if (dev->driver && dev->driver->disconnect) {
+-			spin_lock(&dev->lock);
++			spin_unlock_irqrestore(&dev->lock, iflags);
+ 			dev->driver->disconnect(&dev->gadget);
+-			spin_unlock(&dev->lock);
++			spin_lock_irqsave(&dev->lock, iflags);
+ 		}
+ 		pch_udc_set_disconnect(dev);
+ 		dev->vbus_session = 0;
+ 	}
++	spin_unlock_irqrestore(&dev->lock, iflags);
+ }
+ 
+ /**
+@@ -1166,20 +1172,25 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
+ static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
+ {
+ 	struct pch_udc_dev	*dev;
++	unsigned long		iflags;
+ 
+ 	if (!gadget)
+ 		return -EINVAL;
++
+ 	dev = container_of(gadget, struct pch_udc_dev, gadget);
++
++	spin_lock_irqsave(&dev->lock, iflags);
+ 	if (is_on) {
+ 		pch_udc_reconnect(dev);
+ 	} else {
+ 		if (dev->driver && dev->driver->disconnect) {
+-			spin_lock(&dev->lock);
++			spin_unlock_irqrestore(&dev->lock, iflags);
+ 			dev->driver->disconnect(&dev->gadget);
+-			spin_unlock(&dev->lock);
++			spin_lock_irqsave(&dev->lock, iflags);
+ 		}
+ 		pch_udc_set_disconnect(dev);
+ 	}
++	spin_unlock_irqrestore(&dev->lock, iflags);
+ 
+ 	return 0;
+ }
+@@ -1350,6 +1361,43 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
++static struct gpiod_lookup_table minnowboard_udc_gpios = {
++	.dev_id		= "0000:02:02.4",
++	.table		= {
++		GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
++		{}
++	},
++};
++
++static const struct dmi_system_id pch_udc_gpio_dmi_table[] = {
++	{
++		.ident = "MinnowBoard",
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"),
++		},
++		.driver_data = &minnowboard_udc_gpios,
++	},
++	{ }
++};
++
++static void pch_vbus_gpio_remove_table(void *table)
++{
++	gpiod_remove_lookup_table(table);
++}
++
++static int pch_vbus_gpio_add_table(struct pch_udc_dev *dev)
++{
++	struct device *d = &dev->pdev->dev;
++	const struct dmi_system_id *dmi;
++
++	dmi = dmi_first_match(pch_udc_gpio_dmi_table);
++	if (!dmi)
++		return 0;
++
++	gpiod_add_lookup_table(dmi->driver_data);
++	return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, dmi->driver_data);
++}
++
+ /**
+  * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
+  * @dev:		Reference to the driver structure
+@@ -1360,6 +1408,7 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
+  */
+ static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
+ {
++	struct device *d = &dev->pdev->dev;
+ 	int err;
+ 	int irq_num = 0;
+ 	struct gpio_desc *gpiod;
+@@ -1367,8 +1416,12 @@ static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
+ 	dev->vbus_gpio.port = NULL;
+ 	dev->vbus_gpio.intr = 0;
+ 
++	err = pch_vbus_gpio_add_table(dev);
++	if (err)
++		return err;
++
+ 	/* Retrieve the GPIO line from the USB gadget device */
+-	gpiod = devm_gpiod_get(dev->gadget.dev.parent, NULL, GPIOD_IN);
++	gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
+ 	if (IS_ERR(gpiod))
+ 		return PTR_ERR(gpiod);
+ 	gpiod_set_consumer_name(gpiod, "pch_vbus");
+@@ -1756,7 +1809,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
+ 	}
+ 	/* prevent from using desc. - set HOST BUSY */
+ 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
+-	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
++	dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
+ 	req->td_data = dma_desc;
+ 	req->td_data_last = dma_desc;
+ 	req->chain_len = 1;
+@@ -2298,6 +2351,21 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
+ 		pch_udc_set_dma(dev, DMA_DIR_RX);
+ }
+ 
++static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
++	__must_hold(&dev->lock)
++{
++	int rc;
++
++	/* In some cases we can get an interrupt before the driver is set up */
++	if (!dev->driver)
++		return -ESHUTDOWN;
++
++	spin_unlock(&dev->lock);
++	rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
++	spin_lock(&dev->lock);
++	return rc;
++}
++
+ /**
+  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
+  * @dev:	Reference to the device structure
+@@ -2369,15 +2437,12 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
+ 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
+ 		else /* OUT */
+ 			dev->gadget.ep0 = &ep->ep;
+-		spin_lock(&dev->lock);
+ 		/* If Mass storage Reset */
+ 		if ((dev->setup_data.bRequestType == 0x21) &&
+ 		    (dev->setup_data.bRequest == 0xFF))
+ 			dev->prot_stall = 0;
+ 		/* call gadget with setup data received */
+-		setup_supported = dev->driver->setup(&dev->gadget,
+-						     &dev->setup_data);
+-		spin_unlock(&dev->lock);
++		setup_supported = pch_udc_gadget_setup(dev);
+ 
+ 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
+ 			ep->td_data->status = (ep->td_data->status &
+@@ -2625,9 +2690,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
+ 		dev->ep[i].halted = 0;
+ 	}
+ 	dev->stall = 0;
+-	spin_unlock(&dev->lock);
+-	dev->driver->setup(&dev->gadget, &dev->setup_data);
+-	spin_lock(&dev->lock);
++	pch_udc_gadget_setup(dev);
+ }
+ 
+ /**
+@@ -2662,9 +2725,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
+ 	dev->stall = 0;
+ 
+ 	/* call gadget zero with setup data received */
+-	spin_unlock(&dev->lock);
+-	dev->driver->setup(&dev->gadget, &dev->setup_data);
+-	spin_lock(&dev->lock);
++	pch_udc_gadget_setup(dev);
+ }
+ 
+ /**
+@@ -2870,14 +2931,20 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
+  * @dev:	Reference to the driver structure
+  *
+  * Return codes:
+- *	0: Success
++ *	0:		Success
++ *	-%ERRNO:	All kinds of errors while retrieving the VBUS GPIO
+  */
+ static int pch_udc_pcd_init(struct pch_udc_dev *dev)
+ {
++	int ret;
++
+ 	pch_udc_init(dev);
+ 	pch_udc_pcd_reinit(dev);
+-	pch_vbus_gpio_init(dev);
+-	return 0;
++
++	ret = pch_vbus_gpio_init(dev);
++	if (ret)
++		pch_udc_exit(dev);
++	return ret;
+ }
+ 
+ /**
+@@ -2938,7 +3005,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
+ 	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
+ 				       UDC_EP0OUT_BUFF_SIZE * 4,
+ 				       DMA_FROM_DEVICE);
+-	return 0;
++	return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
+ }
+ 
+ static int pch_udc_start(struct usb_gadget *g,
+@@ -3063,6 +3130,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ 	if (retval)
+ 		return retval;
+ 
++	dev->pdev = pdev;
+ 	pci_set_drvdata(pdev, dev);
+ 
+ 	/* Determine BAR based on PCI ID */
+@@ -3078,16 +3146,10 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ 
+ 	dev->base_addr = pcim_iomap_table(pdev)[bar];
+ 
+-	/*
+-	 * FIXME: add a GPIO descriptor table to pdev.dev using
+-	 * gpiod_add_descriptor_table() from <linux/gpio/machine.h> based on
+-	 * the PCI subsystem ID. The system-dependent GPIO is necessary for
+-	 * VBUS operation.
+-	 */
+-
+ 	/* initialize the hardware */
+-	if (pch_udc_pcd_init(dev))
+-		return -ENODEV;
++	retval = pch_udc_pcd_init(dev);
++	if (retval)
++		return retval;
+ 
+ 	pci_enable_msi(pdev);
+ 
+@@ -3104,7 +3166,6 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ 
+ 	/* device struct setup */
+ 	spin_lock_init(&dev->lock);
+-	dev->pdev = pdev;
+ 	dev->gadget.ops = &pch_udc_ops;
+ 
+ 	retval = init_dma_pools(dev);
+diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
+index 896c1a016d550..65cae48834545 100644
+--- a/drivers/usb/gadget/udc/r8a66597-udc.c
++++ b/drivers/usb/gadget/udc/r8a66597-udc.c
+@@ -1849,6 +1849,8 @@ static int r8a66597_probe(struct platform_device *pdev)
+ 		return PTR_ERR(reg);
+ 
+ 	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++	if (!ires)
++		return -EINVAL;
+ 	irq = ires->start;
+ 	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
+ 
+diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
+index 1d3ebb07ccd4d..b154b62abefa1 100644
+--- a/drivers/usb/gadget/udc/s3c2410_udc.c
++++ b/drivers/usb/gadget/udc/s3c2410_udc.c
+@@ -54,8 +54,6 @@ static struct clk		*udc_clock;
+ static struct clk		*usb_bus_clock;
+ static void __iomem		*base_addr;
+ static int			irq_usbd;
+-static u64			rsrc_start;
+-static u64			rsrc_len;
+ static struct dentry		*s3c2410_udc_debugfs_root;
+ 
+ static inline u32 udc_read(u32 reg)
+@@ -1752,7 +1750,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 	udc_clock = clk_get(NULL, "usb-device");
+ 	if (IS_ERR(udc_clock)) {
+ 		dev_err(dev, "failed to get udc clock source\n");
+-		return PTR_ERR(udc_clock);
++		retval = PTR_ERR(udc_clock);
++		goto err_usb_bus_clk;
+ 	}
+ 
+ 	clk_prepare_enable(udc_clock);
+@@ -1775,7 +1774,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 	base_addr = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(base_addr)) {
+ 		retval = PTR_ERR(base_addr);
+-		goto err_mem;
++		goto err_udc_clk;
+ 	}
+ 
+ 	the_controller = udc;
+@@ -1793,7 +1792,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 	if (retval != 0) {
+ 		dev_err(dev, "cannot get irq %i, err %d\n", irq_usbd, retval);
+ 		retval = -EBUSY;
+-		goto err_map;
++		goto err_udc_clk;
+ 	}
+ 
+ 	dev_dbg(dev, "got irq %i\n", irq_usbd);
+@@ -1864,10 +1863,14 @@ err_gpio_claim:
+ 		gpio_free(udc_info->vbus_pin);
+ err_int:
+ 	free_irq(irq_usbd, udc);
+-err_map:
+-	iounmap(base_addr);
+-err_mem:
+-	release_mem_region(rsrc_start, rsrc_len);
++err_udc_clk:
++	clk_disable_unprepare(udc_clock);
++	clk_put(udc_clock);
++	udc_clock = NULL;
++err_usb_bus_clk:
++	clk_disable_unprepare(usb_bus_clock);
++	clk_put(usb_bus_clock);
++	usb_bus_clock = NULL;
+ 
+ 	return retval;
+ }
+@@ -1899,9 +1902,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
+ 
+ 	free_irq(irq_usbd, udc);
+ 
+-	iounmap(base_addr);
+-	release_mem_region(rsrc_start, rsrc_len);
+-
+ 	if (!IS_ERR(udc_clock) && udc_clock != NULL) {
+ 		clk_disable_unprepare(udc_clock);
+ 		clk_put(udc_clock);
+diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
+index 32f1d3e90c264..99805d60a7ab3 100644
+--- a/drivers/usb/gadget/udc/snps_udc_plat.c
++++ b/drivers/usb/gadget/udc/snps_udc_plat.c
+@@ -114,8 +114,8 @@ static int udc_plat_probe(struct platform_device *pdev)
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	udc->virt_addr = devm_ioremap_resource(dev, res);
+-	if (IS_ERR(udc->regs))
+-		return PTR_ERR(udc->regs);
++	if (IS_ERR(udc->virt_addr))
++		return PTR_ERR(udc->virt_addr);
+ 
+ 	/* udc csr registers base */
+ 	udc->csr = udc->virt_addr + UDC_CSR_ADDR;
+diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
+index b94f2a070c056..df9428f1dc5ed 100644
+--- a/drivers/usb/host/Kconfig
++++ b/drivers/usb/host/Kconfig
+@@ -272,6 +272,7 @@ config USB_EHCI_TEGRA
+ 	select USB_CHIPIDEA
+ 	select USB_CHIPIDEA_HOST
+ 	select USB_CHIPIDEA_TEGRA
++	select USB_GADGET
+ 	help
+ 	  This option is deprecated now and the driver was removed, use
+ 	  USB_CHIPIDEA_TEGRA instead.
+diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
+index b45e5bf089979..8950d1f10a7fb 100644
+--- a/drivers/usb/host/xhci-mtk-sch.c
++++ b/drivers/usb/host/xhci-mtk-sch.c
+@@ -378,6 +378,31 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
+ 	sch_ep->allocated = used;
+ }
+ 
++static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
++{
++	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
++	u32 num_esit, tmp;
++	int base;
++	int i, j;
++
++	num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
++	for (i = 0; i < num_esit; i++) {
++		base = offset + i * sch_ep->esit;
++
++		/*
++		 * Compared with the HS bus, the hub always delays one
++		 * uframe before sending data, whatever the ep type.
++		 */
++		for (j = 0; j < sch_ep->cs_count; j++) {
++			tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe;
++			if (tmp > FS_PAYLOAD_MAX)
++				return -ERANGE;
++		}
++	}
++
++	return 0;
++}
++
+ static int check_sch_tt(struct usb_device *udev,
+ 	struct mu3h_sch_ep_info *sch_ep, u32 offset)
+ {
+@@ -402,7 +427,7 @@ static int check_sch_tt(struct usb_device *udev,
+ 			return -ERANGE;
+ 
+ 		for (i = 0; i < sch_ep->cs_count; i++)
+-			if (test_bit(offset + i, tt->split_bit_map))
++			if (test_bit(offset + i, tt->ss_bit_map))
+ 				return -ERANGE;
+ 
+ 	} else {
+@@ -432,7 +457,7 @@ static int check_sch_tt(struct usb_device *udev,
+ 			cs_count = 7; /* HW limit */
+ 
+ 		for (i = 0; i < cs_count + 2; i++) {
+-			if (test_bit(offset + i, tt->split_bit_map))
++			if (test_bit(offset + i, tt->ss_bit_map))
+ 				return -ERANGE;
+ 		}
+ 
+@@ -448,24 +473,44 @@ static int check_sch_tt(struct usb_device *udev,
+ 			sch_ep->num_budget_microframes = sch_ep->esit;
+ 	}
+ 
+-	return 0;
++	return check_fs_bus_bw(sch_ep, offset);
+ }
+ 
+ static void update_sch_tt(struct usb_device *udev,
+-	struct mu3h_sch_ep_info *sch_ep)
++	struct mu3h_sch_ep_info *sch_ep, bool used)
+ {
+ 	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ 	u32 base, num_esit;
++	int bw_updated;
++	int bits;
+ 	int i, j;
+ 
+ 	num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
++	bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
++
++	if (used)
++		bw_updated = sch_ep->bw_cost_per_microframe;
++	else
++		bw_updated = -sch_ep->bw_cost_per_microframe;
++
+ 	for (i = 0; i < num_esit; i++) {
+ 		base = sch_ep->offset + i * sch_ep->esit;
+-		for (j = 0; j < sch_ep->num_budget_microframes; j++)
+-			set_bit(base + j, tt->split_bit_map);
++
++		for (j = 0; j < bits; j++) {
++			if (used)
++				set_bit(base + j, tt->ss_bit_map);
++			else
++				clear_bit(base + j, tt->ss_bit_map);
++		}
++
++		for (j = 0; j < sch_ep->cs_count; j++)
++			tt->fs_bus_bw[base + j] += bw_updated;
+ 	}
+ 
+-	list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
++	if (used)
++		list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
++	else
++		list_del(&sch_ep->tt_endpoint);
+ }
+ 
+ static int check_sch_bw(struct usb_device *udev,
+@@ -535,7 +580,7 @@ static int check_sch_bw(struct usb_device *udev,
+ 		if (!tt_offset_ok)
+ 			return -ERANGE;
+ 
+-		update_sch_tt(udev, sch_ep);
++		update_sch_tt(udev, sch_ep, 1);
+ 	}
+ 
+ 	/* update bus bandwidth info */
+@@ -548,15 +593,16 @@ static void destroy_sch_ep(struct usb_device *udev,
+ 	struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
+ {
+ 	/* only release ep bw check passed by check_sch_bw() */
+-	if (sch_ep->allocated)
++	if (sch_ep->allocated) {
+ 		update_bus_bw(sch_bw, sch_ep, 0);
++		if (sch_ep->sch_tt)
++			update_sch_tt(udev, sch_ep, 0);
++	}
+ 
+-	list_del(&sch_ep->endpoint);
+-
+-	if (sch_ep->sch_tt) {
+-		list_del(&sch_ep->tt_endpoint);
++	if (sch_ep->sch_tt)
+ 		drop_tt(udev);
+-	}
++
++	list_del(&sch_ep->endpoint);
+ 	kfree(sch_ep);
+ }
+ 
+@@ -643,7 +689,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ 		 */
+ 		if (usb_endpoint_xfer_int(&ep->desc)
+ 			|| usb_endpoint_xfer_isoc(&ep->desc))
+-			ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
++			ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1));
+ 
+ 		return 0;
+ 	}
+@@ -730,10 +776,10 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ 		list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+ 
+ 		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+-		ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
++		ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+ 			| EP_BCSCOUNT(sch_ep->cs_count)
+ 			| EP_BBM(sch_ep->burst_mode));
+-		ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
++		ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+ 			| EP_BREPEAT(sch_ep->repeat));
+ 
+ 		xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
+index 080109012b9ac..2fc0568ba054e 100644
+--- a/drivers/usb/host/xhci-mtk.h
++++ b/drivers/usb/host/xhci-mtk.h
+@@ -20,13 +20,15 @@
+ #define XHCI_MTK_MAX_ESIT	64
+ 
+ /**
+- * @split_bit_map: used to avoid split microframes overlay
++ * @ss_bit_map: used to avoid start split microframes overlap
++ * @fs_bus_bw: array to keep track of bandwidth already used for FS
+  * @ep_list: Endpoints using this TT
+  * @usb_tt: usb TT related
+  * @tt_port: TT port number
+  */
+ struct mu3h_sch_tt {
+-	DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT);
++	DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
++	u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
+ 	struct list_head ep_list;
+ 	struct usb_tt *usb_tt;
+ 	int tt_port;
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
+index 97f37077b7f97..33b637d0d8d99 100644
+--- a/drivers/usb/roles/class.c
++++ b/drivers/usb/roles/class.c
+@@ -189,6 +189,8 @@ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+ 		return NULL;
+ 
+ 	dev = class_find_device_by_fwnode(role_class, fwnode);
++	if (dev)
++		WARN_ON(!try_module_get(dev->parent->driver->owner));
+ 
+ 	return dev ? to_role_switch(dev) : NULL;
+ }
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 7252b0ce75a6c..fe1c13a8849cc 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -1418,14 +1418,19 @@ static int ti_set_serial_info(struct tty_struct *tty,
+ 	struct serial_struct *ss)
+ {
+ 	struct usb_serial_port *port = tty->driver_data;
+-	struct ti_port *tport = usb_get_serial_port_data(port);
++	struct tty_port *tport = &port->port;
+ 	unsigned cwait;
+ 
+ 	cwait = ss->closing_wait;
+ 	if (cwait != ASYNC_CLOSING_WAIT_NONE)
+ 		cwait = msecs_to_jiffies(10 * ss->closing_wait);
+ 
+-	tport->tp_port->port.closing_wait = cwait;
++	if (!capable(CAP_SYS_ADMIN)) {
++		if (cwait != tport->closing_wait)
++			return -EPERM;
++	}
++
++	tport->closing_wait = cwait;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index 46d46a4f99c9f..4e9c994a972a2 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -140,10 +140,10 @@ int usb_wwan_get_serial_info(struct tty_struct *tty,
+ 	ss->line            = port->minor;
+ 	ss->port            = port->port_number;
+ 	ss->baud_base       = tty_get_baud_rate(port->port.tty);
+-	ss->close_delay	    = port->port.close_delay / 10;
++	ss->close_delay	    = jiffies_to_msecs(port->port.close_delay) / 10;
+ 	ss->closing_wait    = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ 				 ASYNC_CLOSING_WAIT_NONE :
+-				 port->port.closing_wait / 10;
++				 jiffies_to_msecs(port->port.closing_wait) / 10;
+ 	return 0;
+ }
+ EXPORT_SYMBOL(usb_wwan_get_serial_info);
+@@ -155,9 +155,10 @@ int usb_wwan_set_serial_info(struct tty_struct *tty,
+ 	unsigned int closing_wait, close_delay;
+ 	int retval = 0;
+ 
+-	close_delay = ss->close_delay * 10;
++	close_delay = msecs_to_jiffies(ss->close_delay * 10);
+ 	closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+-			ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
++			ASYNC_CLOSING_WAIT_NONE :
++			msecs_to_jiffies(ss->closing_wait * 10);
+ 
+ 	mutex_lock(&port->port.mutex);
+ 
+diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
+index 0ca04906da4bf..c59c8b47a120d 100644
+--- a/drivers/usb/serial/xr_serial.c
++++ b/drivers/usb/serial/xr_serial.c
+@@ -467,6 +467,11 @@ static void xr_set_termios(struct tty_struct *tty,
+ 		termios->c_cflag &= ~CSIZE;
+ 		if (old_termios)
+ 			termios->c_cflag |= old_termios->c_cflag & CSIZE;
++		else
++			termios->c_cflag |= CS8;
++
++		if (C_CSIZE(tty) == CS7)
++			bits |= XR21V141X_UART_DATA_7;
+ 		else
+ 			bits |= XR21V141X_UART_DATA_8;
+ 		break;
+diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
+index d21750bbbb44d..6eaeba9b096e1 100644
+--- a/drivers/usb/typec/stusb160x.c
++++ b/drivers/usb/typec/stusb160x.c
+@@ -682,8 +682,8 @@ static int stusb160x_probe(struct i2c_client *client)
+ 	}
+ 
+ 	fwnode = device_get_named_child_node(chip->dev, "connector");
+-	if (IS_ERR(fwnode))
+-		return PTR_ERR(fwnode);
++	if (!fwnode)
++		return -ENODEV;
+ 
+ 	/*
+ 	 * When both VDD and VSYS power supplies are present, the low power
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index a27deb0b5f03c..027afd7dfdce2 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -24,6 +24,15 @@
+ #define	AUTO_DISCHARGE_PD_HEADROOM_MV		850
+ #define	AUTO_DISCHARGE_PPS_HEADROOM_MV		1250
+ 
++#define tcpc_presenting_cc1_rd(reg) \
++	(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
++	 (((reg) & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) == \
++	  (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT)))
++#define tcpc_presenting_cc2_rd(reg) \
++	(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
++	 (((reg) & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) == \
++	  (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT)))
++
+ struct tcpci {
+ 	struct device *dev;
+ 
+@@ -178,19 +187,25 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc,
+ 			enum typec_cc_status *cc1, enum typec_cc_status *cc2)
+ {
+ 	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+-	unsigned int reg;
++	unsigned int reg, role_control;
+ 	int ret;
+ 
++	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
++	if (ret < 0)
++		return ret;
++
+ 	ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
+ 	if (ret < 0)
+ 		return ret;
+ 
+ 	*cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
+ 				 TCPC_CC_STATUS_CC1_MASK,
+-				 reg & TCPC_CC_STATUS_TERM);
++				 reg & TCPC_CC_STATUS_TERM ||
++				 tcpc_presenting_cc1_rd(role_control));
+ 	*cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
+ 				 TCPC_CC_STATUS_CC2_MASK,
+-				 reg & TCPC_CC_STATUS_TERM);
++				 reg & TCPC_CC_STATUS_TERM ||
++				 tcpc_presenting_cc2_rd(role_control));
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index ce7af398c7c1c..1a086ba254d23 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -268,12 +268,27 @@ struct pd_mode_data {
+ 	struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
+ };
+ 
++/*
++ * @min_volt: Actual min voltage at the local port
++ * @req_min_volt: Requested min voltage to the port partner
++ * @max_volt: Actual max voltage at the local port
++ * @req_max_volt: Requested max voltage to the port partner
++ * @max_curr: Actual max current at the local port
++ * @req_max_curr: Requested max current of the port partner
++ * @req_out_volt: Requested output voltage to the port partner
++ * @req_op_curr: Requested operating current to the port partner
++ * @supported: Partner has at least one APDO and hence supports PPS
++ * @active: PPS mode is active
++ */
+ struct pd_pps_data {
+ 	u32 min_volt;
++	u32 req_min_volt;
+ 	u32 max_volt;
++	u32 req_max_volt;
+ 	u32 max_curr;
+-	u32 out_volt;
+-	u32 op_curr;
++	u32 req_max_curr;
++	u32 req_out_volt;
++	u32 req_op_curr;
+ 	bool supported;
+ 	bool active;
+ };
+@@ -389,7 +404,10 @@ struct tcpm_port {
+ 	unsigned int operating_snk_mw;
+ 	bool update_sink_caps;
+ 
+-	/* Requested current / voltage */
++	/* Requested current / voltage to the port partner */
++	u32 req_current_limit;
++	u32 req_supply_voltage;
++	/* Actual current / voltage limit of the local port */
+ 	u32 current_limit;
+ 	u32 supply_voltage;
+ 
+@@ -438,6 +456,9 @@ struct tcpm_port {
+ 	enum tcpm_ams next_ams;
+ 	bool in_ams;
+ 
++	/* Auto vbus discharge status */
++	bool auto_vbus_discharge_enabled;
++
+ #ifdef CONFIG_DEBUG_FS
+ 	struct dentry *dentry;
+ 	struct mutex logbuffer_lock;	/* log buffer access lock */
+@@ -507,6 +528,9 @@ static const char * const pd_rev[] = {
+ 	(tcpm_port_is_sink(port) && \
+ 	((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
+ 
++#define tcpm_wait_for_discharge(port) \
++	(((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
++
+ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
+ {
+ 	if (port->port_type == TYPEC_PORT_DRP) {
+@@ -2432,8 +2456,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 		case SNK_TRANSITION_SINK:
+ 			if (port->vbus_present) {
+ 				tcpm_set_current_limit(port,
+-						       port->current_limit,
+-						       port->supply_voltage);
++						       port->req_current_limit,
++						       port->req_supply_voltage);
+ 				port->explicit_contract = true;
+ 				tcpm_set_auto_vbus_discharge_threshold(port,
+ 								       TYPEC_PWR_MODE_PD,
+@@ -2492,8 +2516,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 			break;
+ 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
+ 			/* Revert data back from any requested PPS updates */
+-			port->pps_data.out_volt = port->supply_voltage;
+-			port->pps_data.op_curr = port->current_limit;
++			port->pps_data.req_out_volt = port->supply_voltage;
++			port->pps_data.req_op_curr = port->current_limit;
+ 			port->pps_status = (type == PD_CTRL_WAIT ?
+ 					    -EAGAIN : -EOPNOTSUPP);
+ 
+@@ -2542,8 +2566,12 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 			break;
+ 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
+ 			port->pps_data.active = true;
+-			port->supply_voltage = port->pps_data.out_volt;
+-			port->current_limit = port->pps_data.op_curr;
++			port->pps_data.min_volt = port->pps_data.req_min_volt;
++			port->pps_data.max_volt = port->pps_data.req_max_volt;
++			port->pps_data.max_curr = port->pps_data.req_max_curr;
++			port->req_supply_voltage = port->pps_data.req_out_volt;
++			port->req_current_limit = port->pps_data.req_op_curr;
++			power_supply_changed(port->psy);
+ 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
+ 			break;
+ 		case SOFT_RESET_SEND:
+@@ -3102,17 +3130,16 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
+ 		src = port->source_caps[src_pdo];
+ 		snk = port->snk_pdo[snk_pdo];
+ 
+-		port->pps_data.min_volt = max(pdo_pps_apdo_min_voltage(src),
+-					      pdo_pps_apdo_min_voltage(snk));
+-		port->pps_data.max_volt = min(pdo_pps_apdo_max_voltage(src),
+-					      pdo_pps_apdo_max_voltage(snk));
+-		port->pps_data.max_curr = min_pps_apdo_current(src, snk);
+-		port->pps_data.out_volt = min(port->pps_data.max_volt,
+-					      max(port->pps_data.min_volt,
+-						  port->pps_data.out_volt));
+-		port->pps_data.op_curr = min(port->pps_data.max_curr,
+-					     port->pps_data.op_curr);
+-		power_supply_changed(port->psy);
++		port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
++						  pdo_pps_apdo_min_voltage(snk));
++		port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
++						  pdo_pps_apdo_max_voltage(snk));
++		port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
++		port->pps_data.req_out_volt = min(port->pps_data.max_volt,
++						  max(port->pps_data.min_volt,
++						      port->pps_data.req_out_volt));
++		port->pps_data.req_op_curr = min(port->pps_data.max_curr,
++						 port->pps_data.req_op_curr);
+ 	}
+ 
+ 	return src_pdo;
+@@ -3192,8 +3219,8 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
+ 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
+ 	}
+ 
+-	port->current_limit = ma;
+-	port->supply_voltage = mv;
++	port->req_current_limit = ma;
++	port->req_supply_voltage = mv;
+ 
+ 	return 0;
+ }
+@@ -3239,10 +3266,10 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
+ 			tcpm_log(port, "Invalid APDO selected!");
+ 			return -EINVAL;
+ 		}
+-		max_mv = port->pps_data.max_volt;
+-		max_ma = port->pps_data.max_curr;
+-		out_mv = port->pps_data.out_volt;
+-		op_ma = port->pps_data.op_curr;
++		max_mv = port->pps_data.req_max_volt;
++		max_ma = port->pps_data.req_max_curr;
++		out_mv = port->pps_data.req_out_volt;
++		op_ma = port->pps_data.req_op_curr;
+ 		break;
+ 	default:
+ 		tcpm_log(port, "Invalid PDO selected!");
+@@ -3289,8 +3316,8 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
+ 	tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
+ 		 src_pdo_index, out_mv, op_ma);
+ 
+-	port->pps_data.op_curr = op_ma;
+-	port->pps_data.out_volt = out_mv;
++	port->pps_data.req_op_curr = op_ma;
++	port->pps_data.req_out_volt = out_mv;
+ 
+ 	return 0;
+ }
+@@ -3418,6 +3445,8 @@ static int tcpm_src_attach(struct tcpm_port *port)
+ 	if (port->tcpc->enable_auto_vbus_discharge) {
+ 		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
+ 		tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
++		if (!ret)
++			port->auto_vbus_discharge_enabled = true;
+ 	}
+ 
+ 	ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
+@@ -3500,6 +3529,8 @@ static void tcpm_reset_port(struct tcpm_port *port)
+ 	if (port->tcpc->enable_auto_vbus_discharge) {
+ 		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false);
+ 		tcpm_log_force(port, "Disable vbus discharge ret:%d", ret);
++		if (!ret)
++			port->auto_vbus_discharge_enabled = false;
+ 	}
+ 	port->in_ams = false;
+ 	port->ams = NONE_AMS;
+@@ -3533,8 +3564,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
+ 	port->sink_cap_done = false;
+ 	if (port->tcpc->enable_frs)
+ 		port->tcpc->enable_frs(port->tcpc, false);
+-
+-	power_supply_changed(port->psy);
+ }
+ 
+ static void tcpm_detach(struct tcpm_port *port)
+@@ -3574,6 +3603,8 @@ static int tcpm_snk_attach(struct tcpm_port *port)
+ 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
+ 		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
+ 		tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
++		if (!ret)
++			port->auto_vbus_discharge_enabled = true;
+ 	}
+ 
+ 	ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
+@@ -4103,6 +4134,23 @@ static void run_state_machine(struct tcpm_port *port)
+ 		}
+ 		break;
+ 	case SNK_TRANSITION_SINK:
++		/* From the USB PD spec:
++		 * "The Sink Shall transition to Sink Standby before a positive or
++		 * negative voltage transition of VBUS. During Sink Standby
++		 * the Sink Shall reduce its power draw to pSnkStdby."
++		 *
++		 * This is not applicable to PPS though as the port can continue
++		 * to draw negotiated power without switching to standby.
++		 */
++		if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
++		    port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
++			u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
++
++			tcpm_log(port, "Setting standby current %u mV @ %u mA",
++				 port->supply_voltage, stdby_ma);
++			tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
++		}
++		fallthrough;
+ 	case SNK_TRANSITION_SINK_VBUS:
+ 		tcpm_set_state(port, hard_reset_state(port),
+ 			       PD_T_PS_TRANSITION);
+@@ -4676,9 +4724,9 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
+ 		if (tcpm_port_is_disconnected(port) ||
+ 		    !tcpm_port_is_source(port)) {
+ 			if (port->port_type == TYPEC_PORT_SRC)
+-				tcpm_set_state(port, SRC_UNATTACHED, 0);
++				tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
+ 			else
+-				tcpm_set_state(port, SNK_UNATTACHED, 0);
++				tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
+ 		}
+ 		break;
+ 	case SNK_UNATTACHED:
+@@ -4709,7 +4757,23 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
+ 			tcpm_set_state(port, SNK_DEBOUNCED, 0);
+ 		break;
+ 	case SNK_READY:
+-		if (tcpm_port_is_disconnected(port))
++		 * The exit condition is based primarily on vbus disconnect; CC is secondary.
++		 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
++		 * "A port that has entered into USB PD communications with the Source and
++		 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
++		 * cable disconnect in addition to monitoring VBUS.
++		 *
++		 * A port that is monitoring the CC voltage for disconnect (but is not in
++		 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
++		 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
++		 * vRd-USB for tPDDebounce."
++		 *
++		 * When set_auto_vbus_discharge_threshold is enabled, the CC pins can
++		 * go away before vbus decays to the disconnect threshold. Allow
++		 * disconnect to be driven by vbus disconnect when auto vbus
++		 * discharge is enabled.
++		 */
++		if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
+ 			tcpm_set_state(port, unattached_state(port), 0);
+ 		else if (!port->pd_capable &&
+ 			 (cc1 != old_cc1 || cc2 != old_cc2))
+@@ -4808,9 +4872,13 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
+ 		 * Ignore CC changes here.
+ 		 */
+ 		break;
+-
+ 	default:
+-		if (tcpm_port_is_disconnected(port))
++		/*
++		 * While acting as sink and auto vbus discharge is enabled, allow disconnect
++		 * to be driven by vbus disconnect.
++		 */
++		if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
++							 port->auto_vbus_discharge_enabled))
+ 			tcpm_set_state(port, unattached_state(port), 0);
+ 		break;
+ 	}
+@@ -4974,8 +5042,16 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
+ 	case SRC_TRANSITION_SUPPLY:
+ 	case SRC_READY:
+ 	case SRC_WAIT_NEW_CAPABILITIES:
+-		/* Force to unattached state to re-initiate connection */
+-		tcpm_set_state(port, SRC_UNATTACHED, 0);
++		/*
++		 * Force to unattached state to re-initiate connection.
++		 * A DRP port should move to Unattached.SNK instead of Unattached.SRC
++		 * if the sink is removed. Although the sink removal here is due to the
++		 * source's vbus collapse, treat it the same way for consistency.
++		 */
++		if (port->port_type == TYPEC_PORT_SRC)
++			tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
++		else
++			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
+ 		break;
+ 
+ 	case PORT_RESET:
+@@ -4994,9 +5070,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
+ 		break;
+ 
+ 	default:
+-		if (port->pwr_role == TYPEC_SINK &&
+-		    port->attached)
+-			tcpm_set_state(port, SNK_UNATTACHED, 0);
++		if (port->pwr_role == TYPEC_SINK && port->attached)
++			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
+ 		break;
+ 	}
+ }
+@@ -5018,7 +5093,23 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
+ 			tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
+ 				       PD_T_CC_DEBOUNCE);
+ 		break;
++	case SRC_STARTUP:
++	case SRC_SEND_CAPABILITIES:
++	case SRC_SEND_CAPABILITIES_TIMEOUT:
++	case SRC_NEGOTIATE_CAPABILITIES:
++	case SRC_TRANSITION_SUPPLY:
++	case SRC_READY:
++	case SRC_WAIT_NEW_CAPABILITIES:
++		if (port->auto_vbus_discharge_enabled) {
++			if (port->port_type == TYPEC_PORT_SRC)
++				tcpm_set_state(port, SRC_UNATTACHED, 0);
++			else
++				tcpm_set_state(port, SNK_UNATTACHED, 0);
++		}
++		break;
+ 	default:
++		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
++			tcpm_set_state(port, SNK_UNATTACHED, 0);
+ 		break;
+ 	}
+ }
+@@ -5374,7 +5465,7 @@ static int tcpm_try_role(struct typec_port *p, int role)
+ 	return ret;
+ }
+ 
+-static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
++static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
+ {
+ 	unsigned int target_mw;
+ 	int ret;
+@@ -5392,12 +5483,12 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
+ 		goto port_unlock;
+ 	}
+ 
+-	if (op_curr > port->pps_data.max_curr) {
++	if (req_op_curr > port->pps_data.max_curr) {
+ 		ret = -EINVAL;
+ 		goto port_unlock;
+ 	}
+ 
+-	target_mw = (op_curr * port->pps_data.out_volt) / 1000;
++	target_mw = (req_op_curr * port->supply_voltage) / 1000;
+ 	if (target_mw < port->operating_snk_mw) {
+ 		ret = -EINVAL;
+ 		goto port_unlock;
+@@ -5411,10 +5502,10 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
+ 	}
+ 
+ 	/* Round down operating current to align with PPS valid steps */
+-	op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
++	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
+ 
+ 	reinit_completion(&port->pps_complete);
+-	port->pps_data.op_curr = op_curr;
++	port->pps_data.req_op_curr = req_op_curr;
+ 	port->pps_status = 0;
+ 	port->pps_pending = true;
+ 	mutex_unlock(&port->lock);
+@@ -5435,7 +5526,7 @@ swap_unlock:
+ 	return ret;
+ }
+ 
+-static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
++static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
+ {
+ 	unsigned int target_mw;
+ 	int ret;
+@@ -5453,13 +5544,13 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
+ 		goto port_unlock;
+ 	}
+ 
+-	if (out_volt < port->pps_data.min_volt ||
+-	    out_volt > port->pps_data.max_volt) {
++	if (req_out_volt < port->pps_data.min_volt ||
++	    req_out_volt > port->pps_data.max_volt) {
+ 		ret = -EINVAL;
+ 		goto port_unlock;
+ 	}
+ 
+-	target_mw = (port->pps_data.op_curr * out_volt) / 1000;
++	target_mw = (port->current_limit * req_out_volt) / 1000;
+ 	if (target_mw < port->operating_snk_mw) {
+ 		ret = -EINVAL;
+ 		goto port_unlock;
+@@ -5473,10 +5564,10 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
+ 	}
+ 
+ 	/* Round down output voltage to align with PPS valid steps */
+-	out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
++	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
+ 
+ 	reinit_completion(&port->pps_complete);
+-	port->pps_data.out_volt = out_volt;
++	port->pps_data.req_out_volt = req_out_volt;
+ 	port->pps_status = 0;
+ 	port->pps_pending = true;
+ 	mutex_unlock(&port->lock);
+@@ -5534,8 +5625,8 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
+ 
+ 	/* Trigger PPS request or move back to standard PDO contract */
+ 	if (activate) {
+-		port->pps_data.out_volt = port->supply_voltage;
+-		port->pps_data.op_curr = port->current_limit;
++		port->pps_data.req_out_volt = port->supply_voltage;
++		port->pps_data.req_op_curr = port->current_limit;
+ 	}
+ 	mutex_unlock(&port->lock);
+ 
+diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
+index 29bd1c5a283cd..4038104568f5a 100644
+--- a/drivers/usb/typec/tps6598x.c
++++ b/drivers/usb/typec/tps6598x.c
+@@ -614,8 +614,8 @@ static int tps6598x_probe(struct i2c_client *client)
+ 		return ret;
+ 
+ 	fwnode = device_get_named_child_node(&client->dev, "connector");
+-	if (IS_ERR(fwnode))
+-		return PTR_ERR(fwnode);
++	if (!fwnode)
++		return -ENODEV;
+ 
+ 	tps->role_sw = fwnode_usb_role_switch_get(fwnode);
+ 	if (IS_ERR(tps->role_sw)) {
+diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
+index f7633ee655a17..d1cf6b51bf85d 100644
+--- a/drivers/usb/usbip/vudc_sysfs.c
++++ b/drivers/usb/usbip/vudc_sysfs.c
+@@ -156,12 +156,14 @@ static ssize_t usbip_sockfd_store(struct device *dev,
+ 		tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
+ 		if (IS_ERR(tcp_rx)) {
+ 			sockfd_put(socket);
++			mutex_unlock(&udc->ud.sysfs_lock);
+ 			return -EINVAL;
+ 		}
+ 		tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
+ 		if (IS_ERR(tcp_tx)) {
+ 			kthread_stop(tcp_rx);
+ 			sockfd_put(socket);
++			mutex_unlock(&udc->ud.sysfs_lock);
+ 			return -EINVAL;
+ 		}
+ 
+diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+index f27e25112c403..8722f5effacd4 100644
+--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+@@ -568,23 +568,39 @@ static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
+ 		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
+ 		goto out_nc_unreg;
+ 	}
++	return 0;
++
++out_nc_unreg:
++	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
++	return ret;
++}
+ 
++static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
++{
++	int ret;
++
++	/* non-dprc devices do not scan for other devices */
++	if (!is_fsl_mc_bus_dprc(mc_dev))
++		return 0;
+ 	ret = dprc_scan_container(mc_dev, false);
+ 	if (ret) {
+-		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
+-		goto out_dprc_cleanup;
++		dev_err(&mc_dev->dev,
++			"VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
++		dprc_remove_devices(mc_dev, NULL, 0);
++		return ret;
+ 	}
+-
+ 	return 0;
++}
++
++static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
++{
++	struct fsl_mc_device *mc_dev = vdev->mc_dev;
++
++	if (!is_fsl_mc_bus_dprc(mc_dev))
++		return;
+ 
+-out_dprc_cleanup:
+-	dprc_remove_devices(mc_dev, NULL, 0);
+ 	dprc_cleanup(mc_dev);
+-out_nc_unreg:
+ 	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
+-	vdev->nb.notifier_call = NULL;
+-
+-	return ret;
+ }
+ 
+ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
+@@ -607,29 +623,39 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
+ 	}
+ 
+ 	vdev->mc_dev = mc_dev;
+-
+-	ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
+-	if (ret) {
+-		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
+-		goto out_group_put;
+-	}
++	mutex_init(&vdev->igate);
+ 
+ 	ret = vfio_fsl_mc_reflck_attach(vdev);
+ 	if (ret)
+-		goto out_group_dev;
++		goto out_group_put;
+ 
+ 	ret = vfio_fsl_mc_init_device(vdev);
+ 	if (ret)
+ 		goto out_reflck;
+ 
+-	mutex_init(&vdev->igate);
++	ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
++	if (ret) {
++		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
++		goto out_device;
++	}
+ 
++	/*
++	 * This triggers recursion into vfio_fsl_mc_probe() on another device
++	 * and vfio_fsl_mc_reflck_attach() there must succeed, which relies on
++	 * the vfio_add_group_dev() above. It has no impact on this vdev, so it
++	 * is safe to run after the vfio device is made live.
++	 */
++	ret = vfio_fsl_mc_scan_container(mc_dev);
++	if (ret)
++		goto out_group_dev;
+ 	return 0;
+ 
+-out_reflck:
+-	vfio_fsl_mc_reflck_put(vdev->reflck);
+ out_group_dev:
+ 	vfio_del_group_dev(dev);
++out_device:
++	vfio_fsl_uninit_device(vdev);
++out_reflck:
++	vfio_fsl_mc_reflck_put(vdev->reflck);
+ out_group_put:
+ 	vfio_iommu_group_put(group, dev);
+ 	return ret;
+@@ -646,16 +672,10 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
+ 
+ 	mutex_destroy(&vdev->igate);
+ 
++	dprc_remove_devices(mc_dev, NULL, 0);
++	vfio_fsl_uninit_device(vdev);
+ 	vfio_fsl_mc_reflck_put(vdev->reflck);
+ 
+-	if (is_fsl_mc_bus_dprc(mc_dev)) {
+-		dprc_remove_devices(mc_dev, NULL, 0);
+-		dprc_cleanup(mc_dev);
+-	}
+-
+-	if (vdev->nb.notifier_call)
+-		bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
+-
+ 	vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
+ 
+ 	return 0;
+diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
+index 917fd84c1c6f2..367ff5412a387 100644
+--- a/drivers/vfio/mdev/mdev_sysfs.c
++++ b/drivers/vfio/mdev/mdev_sysfs.c
+@@ -105,6 +105,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	type->kobj.kset = parent->mdev_types_kset;
++	type->parent = parent;
+ 
+ 	ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
+ 				   "%s-%s", dev_driver_string(parent->dev),
+@@ -132,7 +133,6 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
+ 	}
+ 
+ 	type->group = group;
+-	type->parent = parent;
+ 	return type;
+ 
+ attrs_failed:
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 5023e23db3bcb..cb7f2dc09e9d4 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -1924,6 +1924,68 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
+ 	return 0;
+ }
+ 
++static int vfio_pci_vf_init(struct vfio_pci_device *vdev)
++{
++	struct pci_dev *pdev = vdev->pdev;
++	int ret;
++
++	if (!pdev->is_physfn)
++		return 0;
++
++	vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
++	if (!vdev->vf_token)
++		return -ENOMEM;
++
++	mutex_init(&vdev->vf_token->lock);
++	uuid_gen(&vdev->vf_token->uuid);
++
++	vdev->nb.notifier_call = vfio_pci_bus_notifier;
++	ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
++	if (ret) {
++		kfree(vdev->vf_token);
++		return ret;
++	}
++	return 0;
++}
++
++static void vfio_pci_vf_uninit(struct vfio_pci_device *vdev)
++{
++	if (!vdev->vf_token)
++		return;
++
++	bus_unregister_notifier(&pci_bus_type, &vdev->nb);
++	WARN_ON(vdev->vf_token->users);
++	mutex_destroy(&vdev->vf_token->lock);
++	kfree(vdev->vf_token);
++}
++
++static int vfio_pci_vga_init(struct vfio_pci_device *vdev)
++{
++	struct pci_dev *pdev = vdev->pdev;
++	int ret;
++
++	if (!vfio_pci_is_vga(pdev))
++		return 0;
++
++	ret = vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
++	if (ret)
++		return ret;
++	vga_set_legacy_decoding(pdev, vfio_pci_set_vga_decode(vdev, false));
++	return 0;
++}
++
++static void vfio_pci_vga_uninit(struct vfio_pci_device *vdev)
++{
++	struct pci_dev *pdev = vdev->pdev;
++
++	if (!vfio_pci_is_vga(pdev))
++		return;
++	vga_client_register(pdev, NULL, NULL, NULL);
++	vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
++					      VGA_RSRC_LEGACY_IO |
++					      VGA_RSRC_LEGACY_MEM);
++}
++
+ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct vfio_pci_device *vdev;
+@@ -1970,35 +2032,15 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	INIT_LIST_HEAD(&vdev->vma_list);
+ 	init_rwsem(&vdev->memory_lock);
+ 
+-	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
++	ret = vfio_pci_reflck_attach(vdev);
+ 	if (ret)
+ 		goto out_free;
+-
+-	ret = vfio_pci_reflck_attach(vdev);
++	ret = vfio_pci_vf_init(vdev);
+ 	if (ret)
+-		goto out_del_group_dev;
+-
+-	if (pdev->is_physfn) {
+-		vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
+-		if (!vdev->vf_token) {
+-			ret = -ENOMEM;
+-			goto out_reflck;
+-		}
+-
+-		mutex_init(&vdev->vf_token->lock);
+-		uuid_gen(&vdev->vf_token->uuid);
+-
+-		vdev->nb.notifier_call = vfio_pci_bus_notifier;
+-		ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
+-		if (ret)
+-			goto out_vf_token;
+-	}
+-
+-	if (vfio_pci_is_vga(pdev)) {
+-		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
+-		vga_set_legacy_decoding(pdev,
+-					vfio_pci_set_vga_decode(vdev, false));
+-	}
++		goto out_reflck;
++	ret = vfio_pci_vga_init(vdev);
++	if (ret)
++		goto out_vf;
+ 
+ 	vfio_pci_probe_power_state(vdev);
+ 
+@@ -2016,15 +2058,20 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		vfio_pci_set_power_state(vdev, PCI_D3hot);
+ 	}
+ 
+-	return ret;
++	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
++	if (ret)
++		goto out_power;
++	return 0;
+ 
+-out_vf_token:
+-	kfree(vdev->vf_token);
++out_power:
++	if (!disable_idle_d3)
++		vfio_pci_set_power_state(vdev, PCI_D0);
++out_vf:
++	vfio_pci_vf_uninit(vdev);
+ out_reflck:
+ 	vfio_pci_reflck_put(vdev->reflck);
+-out_del_group_dev:
+-	vfio_del_group_dev(&pdev->dev);
+ out_free:
++	kfree(vdev->pm_save);
+ 	kfree(vdev);
+ out_group_put:
+ 	vfio_iommu_group_put(group, &pdev->dev);
+@@ -2041,33 +2088,19 @@ static void vfio_pci_remove(struct pci_dev *pdev)
+ 	if (!vdev)
+ 		return;
+ 
+-	if (vdev->vf_token) {
+-		WARN_ON(vdev->vf_token->users);
+-		mutex_destroy(&vdev->vf_token->lock);
+-		kfree(vdev->vf_token);
+-	}
+-
+-	if (vdev->nb.notifier_call)
+-		bus_unregister_notifier(&pci_bus_type, &vdev->nb);
+-
++	vfio_pci_vf_uninit(vdev);
+ 	vfio_pci_reflck_put(vdev->reflck);
++	vfio_pci_vga_uninit(vdev);
+ 
+ 	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
+-	kfree(vdev->region);
+-	mutex_destroy(&vdev->ioeventfds_lock);
+ 
+ 	if (!disable_idle_d3)
+ 		vfio_pci_set_power_state(vdev, PCI_D0);
+ 
++	mutex_destroy(&vdev->ioeventfds_lock);
++	kfree(vdev->region);
+ 	kfree(vdev->pm_save);
+ 	kfree(vdev);
+-
+-	if (vfio_pci_is_vga(pdev)) {
+-		vga_client_register(pdev, NULL, NULL, NULL);
+-		vga_set_legacy_decoding(pdev,
+-				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
+-				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+-	}
+ }
+ 
+ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 17548c1faf029..31251d11d576c 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -1342,6 +1342,7 @@ static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ 
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 	op->dentry	= dentry;
+ 	op->create.mode	= S_IFDIR | mode;
+@@ -1423,6 +1424,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
+ 
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 
+ 	op->dentry	= dentry;
+@@ -1559,6 +1561,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
+ 
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 
+ 	/* Try to make sure we have a callback promise on the victim. */
+@@ -1641,6 +1644,7 @@ static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ 
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 
+ 	op->dentry	= dentry;
+@@ -1715,6 +1719,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	afs_op_set_vnode(op, 1, vnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 	op->file[1].update_ctime = true;
+ 
+@@ -1910,6 +1915,8 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 	afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
+ 	op->file[0].dv_delta = 1;
+ 	op->file[1].dv_delta = 1;
++	op->file[0].modification = true;
++	op->file[1].modification = true;
+ 	op->file[0].update_ctime = true;
+ 	op->file[1].update_ctime = true;
+ 
+diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
+index 04f75a44f2432..dae9a57d7ec0c 100644
+--- a/fs/afs/dir_silly.c
++++ b/fs/afs/dir_silly.c
+@@ -73,6 +73,8 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
+ 	afs_op_set_vnode(op, 1, dvnode);
+ 	op->file[0].dv_delta = 1;
+ 	op->file[1].dv_delta = 1;
++	op->file[0].modification = true;
++	op->file[1].modification = true;
+ 	op->file[0].update_ctime = true;
+ 	op->file[1].update_ctime = true;
+ 
+@@ -201,6 +203,7 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	afs_op_set_vnode(op, 1, vnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 	op->file[1].op_unlinked = true;
+ 	op->file[1].update_ctime = true;
+diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
+index 71c58723763d2..a82515b47350e 100644
+--- a/fs/afs/fs_operation.c
++++ b/fs/afs/fs_operation.c
+@@ -118,6 +118,8 @@ static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *
+ 		vp->cb_break_before	= afs_calc_vnode_cb_break(vnode);
+ 		if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
+ 			op->flags	|= AFS_OPERATION_CUR_ONLY;
++		if (vp->modification)
++			set_bit(AFS_VNODE_MODIFYING, &vnode->flags);
+ 	}
+ 
+ 	if (vp->fid.vnode)
+@@ -223,6 +225,10 @@ int afs_put_operation(struct afs_operation *op)
+ 
+ 	if (op->ops && op->ops->put)
+ 		op->ops->put(op);
++	if (op->file[0].modification)
++		clear_bit(AFS_VNODE_MODIFYING, &op->file[0].vnode->flags);
++	if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
++		clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
+ 	if (op->file[0].put_vnode)
+ 		iput(&op->file[0].vnode->vfs_inode);
+ 	if (op->file[1].put_vnode)
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 12be88716e4c9..fddf7d54e0b76 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -102,13 +102,13 @@ static int afs_inode_init_from_status(struct afs_operation *op,
+ 
+ 	switch (status->type) {
+ 	case AFS_FTYPE_FILE:
+-		inode->i_mode	= S_IFREG | status->mode;
++		inode->i_mode	= S_IFREG | (status->mode & S_IALLUGO);
+ 		inode->i_op	= &afs_file_inode_operations;
+ 		inode->i_fop	= &afs_file_operations;
+ 		inode->i_mapping->a_ops	= &afs_fs_aops;
+ 		break;
+ 	case AFS_FTYPE_DIR:
+-		inode->i_mode	= S_IFDIR | status->mode;
++		inode->i_mode	= S_IFDIR | (status->mode & S_IALLUGO);
+ 		inode->i_op	= &afs_dir_inode_operations;
+ 		inode->i_fop	= &afs_dir_file_operations;
+ 		inode->i_mapping->a_ops	= &afs_dir_aops;
+@@ -198,7 +198,7 @@ static void afs_apply_status(struct afs_operation *op,
+ 	if (status->mode != vnode->status.mode) {
+ 		mode = inode->i_mode;
+ 		mode &= ~S_IALLUGO;
+-		mode |= status->mode;
++		mode |= status->mode & S_IALLUGO;
+ 		WRITE_ONCE(inode->i_mode, mode);
+ 	}
+ 
+@@ -293,8 +293,9 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
+ 			op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
+ 		}
+ 	} else if (vp->scb.have_status) {
+-		if (vp->dv_before + vp->dv_delta != vp->scb.status.data_version &&
+-		    vp->speculative)
++		if (vp->speculative &&
++		    (test_bit(AFS_VNODE_MODIFYING, &vnode->flags) ||
++		     vp->dv_before != vnode->status.data_version))
+ 			/* Ignore the result of a speculative bulk status fetch
+ 			 * if it splits around a modification op, thereby
+ 			 * appearing to regress the data version.
+@@ -910,6 +911,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ 	}
+ 	op->ctime = attr->ia_ctime;
+ 	op->file[0].update_ctime = 1;
++	op->file[0].modification = true;
+ 
+ 	op->ops = &afs_setattr_operation;
+ 	ret = afs_do_sync_operation(op);
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 1627b18728125..be981a9a1add2 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -640,6 +640,7 @@ struct afs_vnode {
+ #define AFS_VNODE_PSEUDODIR	7 		/* set if Vnode is a pseudo directory */
+ #define AFS_VNODE_NEW_CONTENT	8		/* Set if file has new content (create/trunc-0) */
+ #define AFS_VNODE_SILLY_DELETED	9		/* Set if file has been silly-deleted */
++#define AFS_VNODE_MODIFYING	10		/* Set if we're performing a modification op */
+ 
+ 	struct list_head	wb_keys;	/* List of keys available for writeback */
+ 	struct list_head	pending_locks;	/* locks waiting to be granted */
+@@ -756,6 +757,7 @@ struct afs_vnode_param {
+ 	bool			set_size:1;	/* Must update i_size */
+ 	bool			op_unlinked:1;	/* True if file was unlinked by op */
+ 	bool			speculative:1;	/* T if speculative status fetch (no vnode lock) */
++	bool			modification:1;	/* Set if the content gets modified */
+ };
+ 
+ /*
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index eb737ed63afb6..ebe3b6493fcef 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -450,6 +450,7 @@ static int afs_store_data(struct address_space *mapping,
+ 	afs_op_set_vnode(op, 0, vnode);
+ 	op->file[0].dv_delta = 1;
+ 	op->store.mapping = mapping;
++	op->file[0].modification = true;
+ 	op->store.first = first;
+ 	op->store.last = last;
+ 	op->store.first_offset = offset;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 92a3686277918..72c4b66ed5163 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3165,20 +3165,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 	 */
+ 	mutex_unlock(&root->log_mutex);
+ 
+-	btrfs_init_log_ctx(&root_log_ctx, NULL);
+-
+-	mutex_lock(&log_root_tree->log_mutex);
+-
+ 	if (btrfs_is_zoned(fs_info)) {
++		mutex_lock(&fs_info->tree_root->log_mutex);
+ 		if (!log_root_tree->node) {
+ 			ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
+ 			if (ret) {
+-				mutex_unlock(&log_root_tree->log_mutex);
++				mutex_unlock(&fs_info->tree_log_mutex);
+ 				goto out;
+ 			}
+ 		}
++		mutex_unlock(&fs_info->tree_root->log_mutex);
+ 	}
+ 
++	btrfs_init_log_ctx(&root_log_ctx, NULL);
++
++	mutex_lock(&log_root_tree->log_mutex);
++
+ 	index2 = log_root_tree->log_transid % 2;
+ 	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
+ 	root_log_ctx.log_transid = log_root_tree->log_transid;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 1c6810bbaf8b5..3912eda7905f6 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4989,6 +4989,8 @@ static void init_alloc_chunk_ctl_policy_zoned(
+ 		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
+ 		ctl->devs_max = min_t(int, ctl->devs_max,
+ 				      BTRFS_MAX_DEVS_SYS_CHUNK);
++	} else {
++		BUG();
+ 	}
+ 
+ 	/* We don't want a chunk larger than 10% of writable space */
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 372c34ff8594f..f7d2c52791f8f 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -908,6 +908,7 @@ static int accept_from_sock(struct listen_connection *con)
+ 			result = dlm_con_init(othercon, nodeid);
+ 			if (result < 0) {
+ 				kfree(othercon);
++				mutex_unlock(&newcon->sock_mutex);
+ 				goto accept_err;
+ 			}
+ 
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index c0fee830a34ed..a5ceccc5ef00f 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -2233,11 +2233,8 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
+ 	int oldfd;
+ 	struct fuse_dev *fud = NULL;
+ 
+-	if (_IOC_TYPE(cmd) != FUSE_DEV_IOC_MAGIC)
+-		return -ENOTTY;
+-
+-	switch (_IOC_NR(cmd)) {
+-	case _IOC_NR(FUSE_DEV_IOC_CLONE):
++	switch (cmd) {
++	case FUSE_DEV_IOC_CLONE:
+ 		res = -EFAULT;
+ 		if (!get_user(oldfd, (__u32 __user *)arg)) {
+ 			struct file *old = fget(oldfd);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 0b5fbbd969cbd..144056b0cac92 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -238,7 +238,7 @@ struct fixed_rsrc_data {
+ struct io_buffer {
+ 	struct list_head list;
+ 	__u64 addr;
+-	__s32 len;
++	__u32 len;
+ 	__u16 bid;
+ };
+ 
+@@ -614,7 +614,7 @@ struct io_splice {
+ struct io_provide_buf {
+ 	struct file			*file;
+ 	__u64				addr;
+-	__s32				len;
++	__u32				len;
+ 	__u32				bgid;
+ 	__u16				nbufs;
+ 	__u16				bid;
+@@ -3979,7 +3979,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
+ static int io_provide_buffers_prep(struct io_kiocb *req,
+ 				   const struct io_uring_sqe *sqe)
+ {
+-	unsigned long size;
++	unsigned long size, tmp_check;
+ 	struct io_provide_buf *p = &req->pbuf;
+ 	u64 tmp;
+ 
+@@ -3993,6 +3993,12 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
+ 	p->addr = READ_ONCE(sqe->addr);
+ 	p->len = READ_ONCE(sqe->len);
+ 
++	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
++				&size))
++		return -EOVERFLOW;
++	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
++		return -EOVERFLOW;
++
+ 	size = (unsigned long)p->len * p->nbufs;
+ 	if (!access_ok(u64_to_user_ptr(p->addr), size))
+ 		return -EFAULT;
+@@ -4017,7 +4023,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
+ 			break;
+ 
+ 		buf->addr = addr;
+-		buf->len = pbuf->len;
++		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
+ 		buf->bid = bid;
+ 		addr += pbuf->len;
+ 		bid++;
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index dd9f38d072dd6..e13c4c81fb89d 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1538,8 +1538,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		if (!nfs4_init_copy_state(nn, copy))
+ 			goto out_err;
+ 		refcount_set(&async_copy->refcount, 1);
+-		memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
+-			sizeof(copy->cp_stateid));
++		memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.stid,
++			sizeof(copy->cp_res.cb_stateid));
+ 		dup_copy_fields(copy, async_copy);
+ 		async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
+ 				async_copy, "%s", "copy thread");
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 0b2891c6c71e0..2846b943e80c1 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -932,7 +932,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
+ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
+ {
+ 	int err = 0;
+-	const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
++	const struct cred *old_cred;
+ 	bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);
+ 
+ 	/*
+@@ -943,6 +943,7 @@ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
+ 	if (WARN_ON(disconnected && d_is_dir(dentry)))
+ 		return -EIO;
+ 
++	old_cred = ovl_override_creds(dentry->d_sb);
+ 	while (!err) {
+ 		struct dentry *next;
+ 		struct dentry *parent = NULL;
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 95cff83786a55..2322f854533cf 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -319,9 +319,6 @@ int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+ 		       enum ovl_xattr ox, const void *value, size_t size,
+ 		       int xerr);
+ int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
+-void ovl_set_flag(unsigned long flag, struct inode *inode);
+-void ovl_clear_flag(unsigned long flag, struct inode *inode);
+-bool ovl_test_flag(unsigned long flag, struct inode *inode);
+ bool ovl_inuse_trylock(struct dentry *dentry);
+ void ovl_inuse_unlock(struct dentry *dentry);
+ bool ovl_is_inuse(struct dentry *dentry);
+@@ -335,6 +332,21 @@ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
+ 			     int padding);
+ int ovl_sync_status(struct ovl_fs *ofs);
+ 
++static inline void ovl_set_flag(unsigned long flag, struct inode *inode)
++{
++	set_bit(flag, &OVL_I(inode)->flags);
++}
++
++static inline void ovl_clear_flag(unsigned long flag, struct inode *inode)
++{
++	clear_bit(flag, &OVL_I(inode)->flags);
++}
++
++static inline bool ovl_test_flag(unsigned long flag, struct inode *inode)
++{
++	return test_bit(flag, &OVL_I(inode)->flags);
++}
++
+ static inline bool ovl_is_impuredir(struct super_block *sb,
+ 				    struct dentry *dentry)
+ {
+@@ -439,6 +451,18 @@ int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+ 			struct dentry *dentry, int level);
+ int ovl_indexdir_cleanup(struct ovl_fs *ofs);
+ 
++/*
++ * Can we iterate real dir directly?
++ *
++ * Non-merge dir may contain whiteouts from a time it was a merge upper, before
++ * lower dir was removed under it and possibly before it was rotated from upper
++ * to lower layer.
++ */
++static inline bool ovl_dir_is_real(struct dentry *dir)
++{
++	return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
++}
++
+ /* inode.c */
+ int ovl_set_nlink_upper(struct dentry *dentry);
+ int ovl_set_nlink_lower(struct dentry *dentry);
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index f404a78e6b607..cc1e802570644 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -319,18 +319,6 @@ static inline int ovl_dir_read(struct path *realpath,
+ 	return err;
+ }
+ 
+-/*
+- * Can we iterate real dir directly?
+- *
+- * Non-merge dir may contain whiteouts from a time it was a merge upper, before
+- * lower dir was removed under it and possibly before it was rotated from upper
+- * to lower layer.
+- */
+-static bool ovl_dir_is_real(struct dentry *dir)
+-{
+-	return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
+-}
+-
+ static void ovl_dir_reset(struct file *file)
+ {
+ 	struct ovl_dir_file *od = file->private_data;
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 8cf3433350296..787ce7c38fba5 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -380,6 +380,8 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
+ 			   ofs->config.metacopy ? "on" : "off");
+ 	if (ofs->config.ovl_volatile)
+ 		seq_puts(m, ",volatile");
++	if (ofs->config.userxattr)
++		seq_puts(m, ",userxattr");
+ 	return 0;
+ }
+ 
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index 7f5a01a11f97d..404a0a32ddf6b 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -422,18 +422,20 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
+ 	}
+ }
+ 
+-static void ovl_dentry_version_inc(struct dentry *dentry, bool impurity)
++static void ovl_dir_version_inc(struct dentry *dentry, bool impurity)
+ {
+ 	struct inode *inode = d_inode(dentry);
+ 
+ 	WARN_ON(!inode_is_locked(inode));
++	WARN_ON(!d_is_dir(dentry));
+ 	/*
+-	 * Version is used by readdir code to keep cache consistent.  For merge
+-	 * dirs all changes need to be noted.  For non-merge dirs, cache only
+-	 * contains impure (ones which have been copied up and have origins)
+-	 * entries, so only need to note changes to impure entries.
++	 * Version is used by readdir code to keep cache consistent.
++	 * For merge dirs (or dirs with origin) all changes need to be noted.
++	 * For non-merge dirs, the cache contains only impure entries (i.e. ones
++	 * which have been copied up and have origins), so we only need to note
++	 * changes to impure entries.
+ 	 */
+-	if (OVL_TYPE_MERGE(ovl_path_type(dentry)) || impurity)
++	if (!ovl_dir_is_real(dentry) || impurity)
+ 		OVL_I(inode)->version++;
+ }
+ 
+@@ -442,7 +444,7 @@ void ovl_dir_modified(struct dentry *dentry, bool impurity)
+ 	/* Copy mtime/ctime */
+ 	ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry));
+ 
+-	ovl_dentry_version_inc(dentry, impurity);
++	ovl_dir_version_inc(dentry, impurity);
+ }
+ 
+ u64 ovl_dentry_version_get(struct dentry *dentry)
+@@ -638,21 +640,6 @@ int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
+ 	return err;
+ }
+ 
+-void ovl_set_flag(unsigned long flag, struct inode *inode)
+-{
+-	set_bit(flag, &OVL_I(inode)->flags);
+-}
+-
+-void ovl_clear_flag(unsigned long flag, struct inode *inode)
+-{
+-	clear_bit(flag, &OVL_I(inode)->flags);
+-}
+-
+-bool ovl_test_flag(unsigned long flag, struct inode *inode)
+-{
+-	return test_bit(flag, &OVL_I(inode)->flags);
+-}
+-
+ /**
+  * Caller must hold a reference to inode to prevent it from being freed while
+  * it is marked inuse.
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index bb87e4d89cd8f..7ec59171f197f 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -342,8 +342,10 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
+ 	seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p));
+ #ifdef CONFIG_SECCOMP
+ 	seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
++#ifdef CONFIG_SECCOMP_FILTER
+ 	seq_put_decimal_ull(m, "\nSeccomp_filters:\t",
+ 			    atomic_read(&p->seccomp.filter_count));
++#endif
+ #endif
+ 	seq_puts(m, "\nSpeculation_Store_Bypass:\t");
+ 	switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
+diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
+index 472b3039eabbf..902e5f7e66423 100644
+--- a/fs/xfs/libxfs/xfs_attr.c
++++ b/fs/xfs/libxfs/xfs_attr.c
+@@ -928,6 +928,7 @@ restart:
+ 	 * Search to see if name already exists, and get back a pointer
+ 	 * to where it should go.
+ 	 */
++	error = 0;
+ 	retval = xfs_attr_node_hasname(args, &state);
+ 	if (retval != -ENOATTR && retval != -EEXIST)
+ 		goto out;
+diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
+index 064e52ca52480..196aa769f2968 100644
+--- a/include/crypto/internal/poly1305.h
++++ b/include/crypto/internal/poly1305.h
+@@ -18,7 +18,8 @@
+  * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
+  */
+ 
+-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
++void poly1305_core_setkey(struct poly1305_core_key *key,
++			  const u8 raw_key[POLY1305_BLOCK_SIZE]);
+ static inline void poly1305_core_init(struct poly1305_state *state)
+ {
+ 	*state = (struct poly1305_state){};
+diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
+index f1f67fc749cf4..090692ec3bc73 100644
+--- a/include/crypto/poly1305.h
++++ b/include/crypto/poly1305.h
+@@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
+ 	};
+ };
+ 
+-void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
+-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
++void poly1305_init_arch(struct poly1305_desc_ctx *desc,
++			const u8 key[POLY1305_KEY_SIZE]);
++void poly1305_init_generic(struct poly1305_desc_ctx *desc,
++			   const u8 key[POLY1305_KEY_SIZE]);
+ 
+ static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
+ {
+diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
+index a94c03a61d8f9..b2ed3481c6a02 100644
+--- a/include/keys/trusted-type.h
++++ b/include/keys/trusted-type.h
+@@ -30,6 +30,7 @@ struct trusted_key_options {
+ 	uint16_t keytype;
+ 	uint32_t keyhandle;
+ 	unsigned char keyauth[TPM_DIGEST_SIZE];
++	uint32_t blobauth_len;
+ 	unsigned char blobauth[TPM_DIGEST_SIZE];
+ 	uint32_t pcrinfo_len;
+ 	unsigned char pcrinfo[MAX_PCRINFO_SIZE];
+diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
+index 706b68d1359be..13d1f4c14d7ba 100644
+--- a/include/linux/dma-iommu.h
++++ b/include/linux/dma-iommu.h
+@@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
+ void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+ 		struct iommu_domain *domain);
+ 
++extern bool iommu_dma_forcedac;
++
+ #else /* CONFIG_IOMMU_DMA */
+ 
+ struct iommu_domain;
+diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
+index 71177b17eee5e..66e2423d9feb7 100644
+--- a/include/linux/firmware/xlnx-zynqmp.h
++++ b/include/linux/firmware/xlnx-zynqmp.h
+@@ -354,11 +354,6 @@ int zynqmp_pm_read_pggs(u32 index, u32 *value);
+ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
+ int zynqmp_pm_set_boot_health_status(u32 value);
+ #else
+-static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
+-{
+-	return ERR_PTR(-ENODEV);
+-}
+-
+ static inline int zynqmp_pm_get_api_version(u32 *version)
+ {
+ 	return -ENODEV;
+diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
+index 286de0520574e..ecf0032a09954 100644
+--- a/include/linux/gpio/driver.h
++++ b/include/linux/gpio/driver.h
+@@ -624,8 +624,17 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
+ bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
+ 				unsigned int offset);
+ 
++#ifdef CONFIG_GPIOLIB_IRQCHIP
+ int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ 				struct irq_domain *domain);
++#else
++static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
++					      struct irq_domain *domain)
++{
++	WARN_ON(1);
++	return -EINVAL;
++}
++#endif
+ 
+ int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
+ void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index ef702b3f56e38..3e33eb14118c8 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -262,6 +262,8 @@ struct hid_item {
+ #define HID_CP_SELECTION	0x000c0080
+ #define HID_CP_MEDIASELECTION	0x000c0087
+ #define HID_CP_SELECTDISC	0x000c00ba
++#define HID_CP_VOLUMEUP		0x000c00e9
++#define HID_CP_VOLUMEDOWN	0x000c00ea
+ #define HID_CP_PLAYBACKSPEED	0x000c00f1
+ #define HID_CP_PROXIMITY	0x000c0109
+ #define HID_CP_SPEAKERSYSTEM	0x000c0160
+diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
+index 1bc46b88711a9..d1f32b33415a5 100644
+--- a/include/linux/intel-iommu.h
++++ b/include/linux/intel-iommu.h
+@@ -372,6 +372,7 @@ enum {
+ /* PASID cache invalidation granu */
+ #define QI_PC_ALL_PASIDS	0
+ #define QI_PC_PASID_SEL		1
++#define QI_PC_GLOBAL		3
+ 
+ #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
+ #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 5e7fe519430af..9ca6e6b8084dc 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -547,7 +547,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ 	 * structure can be rewritten.
+ 	 */
+ 	if (gather->pgsize != size ||
+-	    end < gather->start || start > gather->end) {
++	    end + 1 < gather->start || start > gather->end + 1) {
+ 		if (gather->pgsize)
+ 			iommu_iotlb_sync(domain, gather);
+ 		gather->pgsize = size;
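
The old test only merged overlapping invalidation ranges; two back-to-back ranges (one ending immediately before the next begins) forced an intermediate flush even though a single combined invalidation covers both. Widening each comparison by one admits the adjacent case. A tiny standalone check of the predicate:

	#include <stdbool.h>
	#include <stdio.h>

	/* Two inclusive ranges can be batched if they overlap OR touch
	 * end-to-end. The +1 on each side is what admits the adjacent case. */
	static bool can_merge(unsigned long s1, unsigned long e1,
			      unsigned long s2, unsigned long e2)
	{
		return !(e2 + 1 < s1 || s2 > e1 + 1);
	}

	int main(void)
	{
		printf("%d\n", can_merge(0x1000, 0x1fff, 0x2000, 0x2fff)); /* 1: adjacent */
		printf("%d\n", can_merge(0x1000, 0x1fff, 0x3000, 0x3fff)); /* 0: disjoint */
		return 0;
	}
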
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 1b65e7204344a..99dccea4293c6 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -192,8 +192,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
+ 		    int len, void *val);
+ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ 			    int len, struct kvm_io_device *dev);
+-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+-			       struct kvm_io_device *dev);
++int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++			      struct kvm_io_device *dev);
+ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ 					 gpa_t addr);
+ 
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 53b89631a1d9d..ab07f09f2bad9 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1226,7 +1226,7 @@ enum {
+ 	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
+ };
+ 
+-static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
++static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
+ {
+ 	struct devlink *devlink = priv_to_devlink(dev);
+ 	union devlink_param_value val;
+diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
+index 3f23f6e430bfa..cd81e060863c9 100644
+--- a/include/linux/platform_device.h
++++ b/include/linux/platform_device.h
+@@ -359,4 +359,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev)
+ }
+ #endif /* CONFIG_SUPERH */
+ 
++/* For now only SuperH uses it */
++void early_platform_cleanup(void);
++
+ #endif /* _PLATFORM_DEVICE_H_ */
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index b492ae00cc908..6c08a085367bf 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -265,7 +265,7 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {}
+ static inline void pm_runtime_irq_safe(struct device *dev) {}
+ static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
+ 
+-static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
++static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
+ static inline void pm_runtime_mark_last_busy(struct device *dev) {}
+ static inline void __pm_runtime_use_autosuspend(struct device *dev,
+ 						bool use) {}
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index 70c6f6284dcf6..238a3f97a415b 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -73,7 +73,7 @@ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+ 			   void *info, bool wait, const struct cpumask *mask);
+ 
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+ 
+ #ifdef CONFIG_SMP
+ 
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index 592897fa4f030..643139b1eafea 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -510,6 +510,9 @@ struct spi_controller {
+ 
+ #define SPI_MASTER_GPIO_SS		BIT(5)	/* GPIO CS must select slave */
+ 
++	/* flag indicating this is a non-devres managed controller */
++	bool			devm_allocated;
++
+ 	/* flag indicating this is an SPI slave controller */
+ 	bool			slave;
+ 
+diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
+index 61c3372d3f328..2f719b471d524 100644
+--- a/include/linux/tty_driver.h
++++ b/include/linux/tty_driver.h
+@@ -228,7 +228,7 @@
+  *
+  *	Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
+  *	structure to complete. This method is optional and will only be called
+- *	if provided (otherwise EINVAL will be returned).
++ *	if provided (otherwise ENOTTY will be returned).
+  */
+ 
+ #include <linux/export.h>
+diff --git a/include/linux/udp.h b/include/linux/udp.h
+index aa84597bdc33c..ae58ff3b6b5b8 100644
+--- a/include/linux/udp.h
++++ b/include/linux/udp.h
+@@ -51,7 +51,9 @@ struct udp_sock {
+ 					   * different encapsulation layer set
+ 					   * this
+ 					   */
+-			 gro_enabled:1;	/* Can accept GRO packets */
++			 gro_enabled:1,	/* Request GRO aggregation */
++			 accept_udp_l4:1,
++			 accept_udp_fraglist:1;
+ 	/*
+ 	 * Following member retains the information to create a UDP header
+ 	 * when the socket is uncorked.
+@@ -131,8 +133,16 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+ 
+ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+ {
+-	return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
+-	       skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
++	if (!skb_is_gso(skb))
++		return false;
++
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
++		return true;
++
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
++		return true;
++
++	return false;
+ }
+ 
+ #define udp_portaddr_for_each_entry(__sk, list) \
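
After this change, enabling GRO no longer implies the socket can consume every GSO format: L4-aggregated and fraglist packets each need their own accept bit, otherwise they are segmented before delivery. A hedged restatement of the decision logic with plain flags, outside the kernel:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified flags standing in for skb_shinfo(skb)->gso_type bits. */
	#define GSO_UDP_L4	0x1
	#define GSO_FRAGLIST	0x2

	struct sock_caps {
		bool accept_udp_l4;
		bool accept_udp_fraglist;
	};

	/* Mirrors the rewritten udp_unexpected_gso(): a GSO packet is
	 * "unexpected" unless the socket accepts that specific format. */
	static bool udp_unexpected_gso(const struct sock_caps *sk,
				       unsigned int gso_type)
	{
		if (!gso_type)	/* not a GSO packet at all */
			return false;
		if ((gso_type & GSO_UDP_L4) && !sk->accept_udp_l4)
			return true;
		if ((gso_type & GSO_FRAGLIST) && !sk->accept_udp_fraglist)
			return true;
		return false;
	}

	int main(void)
	{
		struct sock_caps plain = { false, false };

		printf("%d\n", udp_unexpected_gso(&plain, GSO_UDP_L4)); /* 1: segment it */
		return 0;
	}
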
+diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
+index 70d681918d013..bf00259493e07 100644
+--- a/include/linux/usb/pd.h
++++ b/include/linux/usb/pd.h
+@@ -493,4 +493,6 @@ static inline unsigned int rdo_max_power(u32 rdo)
+ #define PD_N_CAPS_COUNT		(PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
+ #define PD_N_HARD_RESET_COUNT	2
+ 
++#define PD_P_SNK_STDBY_MW	2500	/* 2500 mW */
++
+ #endif /* __LINUX_USB_PD_H */
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 18f783dcd55fa..78ea3e332688f 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -233,7 +233,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
+ void ipv6_mc_remap(struct inet6_dev *idev);
+ void ipv6_mc_init_dev(struct inet6_dev *idev);
+ void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+-int ipv6_mc_check_icmpv6(struct sk_buff *skb);
+ int ipv6_mc_check_mld(struct sk_buff *skb);
+ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
+ 
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index ebdd4afe30d27..ca4ac6603b9a0 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -704,6 +704,7 @@ struct hci_chan {
+ 	struct sk_buff_head data_q;
+ 	unsigned int	sent;
+ 	__u8		state;
++	bool		amp;
+ };
+ 
+ struct hci_conn_params {
+diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
+index 1d34fe154fe0b..434a6158852f3 100644
+--- a/include/net/netfilter/nf_tables_offload.h
++++ b/include/net/netfilter/nf_tables_offload.h
+@@ -4,11 +4,16 @@
+ #include <net/flow_offload.h>
+ #include <net/netfilter/nf_tables.h>
+ 
++enum nft_offload_reg_flags {
++	NFT_OFFLOAD_F_NETWORK2HOST	= (1 << 0),
++};
++
+ struct nft_offload_reg {
+ 	u32		key;
+ 	u32		len;
+ 	u32		base_offset;
+ 	u32		offset;
++	u32		flags;
+ 	struct nft_data data;
+ 	struct nft_data	mask;
+ };
+@@ -45,6 +50,7 @@ struct nft_flow_key {
+ 	struct flow_dissector_key_ports			tp;
+ 	struct flow_dissector_key_ip			ip;
+ 	struct flow_dissector_key_vlan			vlan;
++	struct flow_dissector_key_vlan			cvlan;
+ 	struct flow_dissector_key_eth_addrs		eth_addrs;
+ 	struct flow_dissector_key_meta			meta;
+ } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+@@ -71,13 +77,17 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rul
+ void nft_flow_rule_destroy(struct nft_flow_rule *flow);
+ int nft_flow_rule_offload_commit(struct net *net);
+ 
+-#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)		\
++#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags)	\
+ 	(__reg)->base_offset	=					\
+ 		offsetof(struct nft_flow_key, __base);			\
+ 	(__reg)->offset		=					\
+ 		offsetof(struct nft_flow_key, __base.__field);		\
+ 	(__reg)->len		= __len;				\
+ 	(__reg)->key		= __key;				\
++	(__reg)->flags		= __flags;
++
++#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)		\
++	NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
+ 
+ #define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg)	\
+ 	NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)		\
+diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
+index 900a32e634247..6a3ac496a56c1 100644
+--- a/include/uapi/linux/tty_flags.h
++++ b/include/uapi/linux/tty_flags.h
+@@ -39,7 +39,7 @@
+  * WARNING: These flags are no longer used and have been superceded by the
+  *	    TTY_PORT_ flags in the iflags field (and not userspace-visible)
+  */
+-#ifndef _KERNEL_
++#ifndef __KERNEL__
+ #define ASYNCB_INITIALIZED	31 /* Serial port was initialized */
+ #define ASYNCB_SUSPENDED	30 /* Serial port is suspended */
+ #define ASYNCB_NORMAL_ACTIVE	29 /* Normal device is active */
+@@ -81,7 +81,7 @@
+ #define ASYNC_SPD_WARP		(ASYNC_SPD_HI|ASYNC_SPD_SHI)
+ #define ASYNC_SPD_MASK		(ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
+ 
+-#ifndef _KERNEL_
++#ifndef __KERNEL__
+ /* These flags are no longer used (and were always masked from userspace) */
+ #define ASYNC_INITIALIZED	(1U << ASYNCB_INITIALIZED)
+ #define ASYNC_NORMAL_ACTIVE	(1U << ASYNCB_NORMAL_ACTIVE)
+diff --git a/init/init_task.c b/init/init_task.c
+index 3711cdaafed2f..8b08c2e19cbb5 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -210,7 +210,7 @@ struct task_struct init_task
+ #ifdef CONFIG_SECURITY
+ 	.security	= NULL,
+ #endif
+-#ifdef CONFIG_SECCOMP
++#ifdef CONFIG_SECCOMP_FILTER
+ 	.seccomp	= { .filter_count = ATOMIC_INIT(0) },
+ #endif
+ };
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index f25b719ac7868..84b3b35fc0d05 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -221,25 +221,20 @@ static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
+ 	return -ENOTSUPP;
+ }
+ 
+-static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
+-{
+-	size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;
+-
+-	/* consumer page + producer page + 2 x data pages */
+-	return RINGBUF_POS_PAGES + 2 * data_pages;
+-}
+-
+ static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
+ {
+ 	struct bpf_ringbuf_map *rb_map;
+-	size_t mmap_sz;
+ 
+ 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
+-	mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;
+-
+-	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
+-		return -EINVAL;
+ 
++	if (vma->vm_flags & VM_WRITE) {
++		/* allow writable mapping for the consumer_pos only */
++		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
++			return -EPERM;
++	} else {
++		vma->vm_flags &= ~VM_MAYWRITE;
++	}
++	/* remap_vmalloc_range() checks size and offset constraints */
+ 	return remap_vmalloc_range(vma, rb_map->rb,
+ 				   vma->vm_pgoff + RINGBUF_PGOFF);
+ }
+@@ -315,6 +310,9 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ 		return NULL;
+ 
+ 	len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
++	if (len > rb->mask + 1)
++		return NULL;
++
+ 	cons_pos = smp_load_acquire(&rb->consumer_pos);
+ 
+ 	if (in_nmi()) {
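
The mmap handler now rejects any writable mapping that covers more than the first page, so user space can update consumer_pos but can never scribble on producer_pos or the data area; the reserve path additionally refuses records larger than the ring itself. A sketch of the user-space mapping this policy permits -- the page layout follows what libbpf's ring buffer setup uses, but treat the exact offsets as an assumption:

	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* map_fd: fd of a BPF_MAP_TYPE_RINGBUF map; max_entries: data size. */
	static int map_ringbuf(int map_fd, size_t max_entries)
	{
		long page = sysconf(_SC_PAGESIZE);
		void *consumer, *producer;

		/* Page 0 (consumer_pos) is the only region a writable mapping
		 * may cover after this change. */
		consumer = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED,
				map_fd, 0);
		if (consumer == MAP_FAILED)
			return -1;

		/* producer_pos plus the double-mapped data area must be
		 * mapped read-only. */
		producer = mmap(NULL, page + 2 * max_entries, PROT_READ,
				MAP_SHARED, map_fd, page);
		if (producer == MAP_FAILED) {
			munmap(consumer, page);
			return -1;
		}

		printf("consumer=%p producer=%p\n", consumer, producer);
		return 0;
	}
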
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a2ed7a7e27e2b..7fa6fc6bedf1f 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1362,9 +1362,7 @@ static bool __reg64_bound_s32(s64 a)
+ 
+ static bool __reg64_bound_u32(u64 a)
+ {
+-	if (a > U32_MIN && a < U32_MAX)
+-		return true;
+-	return false;
++	return a > U32_MIN && a < U32_MAX;
+ }
+ 
+ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
+@@ -1375,10 +1373,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
+ 		reg->s32_min_value = (s32)reg->smin_value;
+ 		reg->s32_max_value = (s32)reg->smax_value;
+ 	}
+-	if (__reg64_bound_u32(reg->umin_value))
++	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
+ 		reg->u32_min_value = (u32)reg->umin_value;
+-	if (__reg64_bound_u32(reg->umax_value))
+ 		reg->u32_max_value = (u32)reg->umax_value;
++	}
+ 
+ 	/* Intersecting with the old var_off might have improved our bounds
+ 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
+@@ -6540,11 +6538,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
+ 	s32 smin_val = src_reg->s32_min_value;
+ 	u32 umax_val = src_reg->u32_max_value;
+ 
+-	/* Assuming scalar64_min_max_and will be called so its safe
+-	 * to skip updating register for known 32-bit case.
+-	 */
+-	if (src_known && dst_known)
++	if (src_known && dst_known) {
++		__mark_reg32_known(dst_reg, var32_off.value);
+ 		return;
++	}
+ 
+ 	/* We get our minimum from the var_off, since that's inherently
+ 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
+@@ -6564,7 +6561,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
+ 		dst_reg->s32_min_value = dst_reg->u32_min_value;
+ 		dst_reg->s32_max_value = dst_reg->u32_max_value;
+ 	}
+-
+ }
+ 
+ static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
+@@ -6611,11 +6607,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
+ 	s32 smin_val = src_reg->s32_min_value;
+ 	u32 umin_val = src_reg->u32_min_value;
+ 
+-	/* Assuming scalar64_min_max_or will be called so it is safe
+-	 * to skip updating register for known case.
+-	 */
+-	if (src_known && dst_known)
++	if (src_known && dst_known) {
++		__mark_reg32_known(dst_reg, var32_off.value);
+ 		return;
++	}
+ 
+ 	/* We get our maximum from the var_off, and our minimum is the
+ 	 * maximum of the operands' minima
+@@ -6680,11 +6675,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
+ 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
+ 	s32 smin_val = src_reg->s32_min_value;
+ 
+-	/* Assuming scalar64_min_max_xor will be called so it is safe
+-	 * to skip updating register for known case.
+-	 */
+-	if (src_known && dst_known)
++	if (src_known && dst_known) {
++		__mark_reg32_known(dst_reg, var32_off.value);
+ 		return;
++	}
+ 
+ 	/* We get both minimum and maximum from the var32_off. */
+ 	dst_reg->u32_min_value = var32_off.value;

+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 1578973c57409..6d3c488a0f824 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -84,6 +84,25 @@ static inline struct kthread *to_kthread(struct task_struct *k)
+ 	return (__force void *)k->set_child_tid;
+ }
+ 
++/*
++ * Variant of to_kthread() that doesn't assume @p is a kthread.
++ *
++ * Per construction, when:
++ *
++ *   (p->flags & PF_KTHREAD) && p->set_child_tid
++ *
++ * the task is both a kthread and struct kthread is persistent. However,
++ * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
++ * begin_new_exec()).
++ */
++static inline struct kthread *__to_kthread(struct task_struct *p)
++{
++	void *kthread = (__force void *)p->set_child_tid;
++	if (kthread && !(p->flags & PF_KTHREAD))
++		kthread = NULL;
++	return kthread;
++}
++
+ void free_kthread_struct(struct task_struct *k)
+ {
+ 	struct kthread *kthread;
+@@ -168,8 +187,9 @@ EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+  */
+ void *kthread_func(struct task_struct *task)
+ {
+-	if (task->flags & PF_KTHREAD)
+-		return to_kthread(task)->threadfn;
++	struct kthread *kthread = __to_kthread(task);
++	if (kthread)
++		return kthread->threadfn;
+ 	return NULL;
+ }
+ EXPORT_SYMBOL_GPL(kthread_func);
+@@ -199,10 +219,11 @@ EXPORT_SYMBOL_GPL(kthread_data);
+  */
+ void *kthread_probe_data(struct task_struct *task)
+ {
+-	struct kthread *kthread = to_kthread(task);
++	struct kthread *kthread = __to_kthread(task);
+ 	void *data = NULL;
+ 
+-	copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
++	if (kthread)
++		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
+ 	return data;
+ }
+ 
+@@ -514,9 +535,9 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
+ 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+ }
+ 
+-bool kthread_is_per_cpu(struct task_struct *k)
++bool kthread_is_per_cpu(struct task_struct *p)
+ {
+-	struct kthread *kthread = to_kthread(k);
++	struct kthread *kthread = __to_kthread(p);
+ 	if (!kthread)
+ 		return false;
+ 
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 575a34b88936f..77ae2704e979c 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1494,6 +1494,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ 	struct printk_info info;
+ 	unsigned int line_count;
+ 	struct printk_record r;
++	u64 max_seq;
+ 	char *text;
+ 	int len = 0;
+ 	u64 seq;
+@@ -1512,9 +1513,15 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ 	prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
+ 		len += get_record_print_text_size(&info, line_count, true, time);
+ 
++	/*
++	 * Set an upper bound for the next loop to avoid subtracting lengths
++	 * that were never added.
++	 */
++	max_seq = seq;
++
+ 	/* move first record forward until length fits into the buffer */
+ 	prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
+-		if (len <= size)
++		if (len <= size || info.seq >= max_seq)
+ 			break;
+ 		len -= get_record_print_text_size(&info, line_count, true, time);
+ 	}
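
The bug being fixed: if new records arrive between the two passes, the second loop could walk past the point the first pass measured and subtract lengths that were never added, corrupting len. Bounding the second pass at the snapshotted sequence number keeps both passes over the same window. A tiny userspace model of the two-pass fit, with record lengths standing in for get_record_print_text_size():

	#include <stdio.h>

	int main(void)
	{
		int lens[] = { 40, 25, 60, 10 };   /* record sizes, oldest first */
		int n = 4, size = 80;              /* user buffer size */
		int len = 0, first = 0, max_seq;

		/* pass 1: total length of every record we saw */
		for (int i = 0; i < n; i++)
			len += lens[i];
		max_seq = n;	/* snapshot: never look past this later */

		/* pass 2: drop oldest records until the rest fits, bounded */
		while (len > size && first < max_seq)
			len -= lens[first++];

		printf("printing records %d..%d, %d bytes\n", first, n - 1, len);
		return 0;
	}
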
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 2a739c5fcca5a..7356764e49a0b 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -1077,7 +1077,6 @@ noinstr void rcu_nmi_enter(void)
+ 	} else if (!in_nmi()) {
+ 		instrumentation_begin();
+ 		rcu_irq_enter_check_tick();
+-		instrumentation_end();
+ 	} else  {
+ 		instrumentation_begin();
+ 	}
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 98191218d891d..17ad829a114c8 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7652,7 +7652,7 @@ static void balance_push(struct rq *rq)
+ 	 * histerical raisins.
+ 	 */
+ 	if (rq->idle == push_task ||
+-	    ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
++	    kthread_is_per_cpu(push_task) ||
+ 	    is_migration_disabled(push_task)) {
+ 
+ 		/*
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 486f403a778b2..9c8b3ed2199a8 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -8,8 +8,6 @@
+  */
+ #include "sched.h"
+ 
+-static DEFINE_SPINLOCK(sched_debug_lock);
+-
+ /*
+  * This allows printing both to /proc/sched_debug and
+  * to the console
+@@ -470,16 +468,37 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
+ #endif
+ 
+ #ifdef CONFIG_CGROUP_SCHED
++static DEFINE_SPINLOCK(sched_debug_lock);
+ static char group_path[PATH_MAX];
+ 
+-static char *task_group_path(struct task_group *tg)
++static void task_group_path(struct task_group *tg, char *path, int plen)
+ {
+-	if (autogroup_path(tg, group_path, PATH_MAX))
+-		return group_path;
++	if (autogroup_path(tg, path, plen))
++		return;
+ 
+-	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
++	cgroup_path(tg->css.cgroup, path, plen);
++}
+ 
+-	return group_path;
++/*
++ * Only one SEQ_printf_task_group_path() caller at a time can use the
++ * full-length group_path[] for the cgroup path; other simultaneous
++ * callers must fall back to a shorter stack buffer. A "..." suffix is
++ * appended to the stack buffer so that possible path name truncation
++ * shows up when the output fills the given buffer.
++ */
++#define SEQ_printf_task_group_path(m, tg, fmt...)			\
++{									\
++	if (spin_trylock(&sched_debug_lock)) {				\
++		task_group_path(tg, group_path, sizeof(group_path));	\
++		SEQ_printf(m, fmt, group_path);				\
++		spin_unlock(&sched_debug_lock);				\
++	} else {							\
++		char buf[128];						\
++		char *bufend = buf + sizeof(buf) - 3;			\
++		task_group_path(tg, buf, bufend - buf);			\
++		strcpy(bufend - 1, "...");				\
++		SEQ_printf(m, fmt, buf);				\
++	}								\
+ }
+ #endif
+ 
+@@ -506,7 +525,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
+ 	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
+ #endif
+ #ifdef CONFIG_CGROUP_SCHED
+-	SEQ_printf(m, " %s", task_group_path(task_group(p)));
++	SEQ_printf_task_group_path(m, task_group(p), " %s")
+ #endif
+ 
+ 	SEQ_printf(m, "\n");
+@@ -543,7 +562,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ 
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ 	SEQ_printf(m, "\n");
+-	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
++	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
+ #else
+ 	SEQ_printf(m, "\n");
+ 	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
+@@ -614,7 +633,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
+ {
+ #ifdef CONFIG_RT_GROUP_SCHED
+ 	SEQ_printf(m, "\n");
+-	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
++	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
+ #else
+ 	SEQ_printf(m, "\n");
+ 	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
+@@ -666,7 +685,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
+ static void print_cpu(struct seq_file *m, int cpu)
+ {
+ 	struct rq *rq = cpu_rq(cpu);
+-	unsigned long flags;
+ 
+ #ifdef CONFIG_X86
+ 	{
+@@ -717,13 +735,11 @@ do {									\
+ 	}
+ #undef P
+ 
+-	spin_lock_irqsave(&sched_debug_lock, flags);
+ 	print_cfs_stats(m, cpu);
+ 	print_rt_stats(m, cpu);
+ 	print_dl_stats(m, cpu);
+ 
+ 	print_rq(m, rq, cpu);
+-	spin_unlock_irqrestore(&sched_debug_lock, flags);
+ 	SEQ_printf(m, "\n");
+ }
+ 
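
The rework trades the unconditional global lock for spin_trylock(): the one caller that wins the lock gets the full PATH_MAX buffer, every other concurrent caller formats into a small stack buffer with a "..." truncation marker, so the print paths no longer have to hold sched_debug_lock with interrupts disabled. A userspace sketch of the same fallback pattern, with a pthread mutex standing in for the spinlock and a hypothetical get_path() standing in for task_group_path():

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	static pthread_mutex_t path_lock = PTHREAD_MUTEX_INITIALIZER;
	static char big_path[4096];

	static void get_path(char *dst, size_t dstlen)
	{
		/* stand-in: produce some path, truncated to fit dstlen */
		snprintf(dst, dstlen, "/sys/fs/cgroup/some/deeply/nested/group");
	}

	static void print_path(void)
	{
		if (pthread_mutex_trylock(&path_lock) == 0) {
			get_path(big_path, sizeof(big_path)); /* full buffer */
			printf("%s\n", big_path);
			pthread_mutex_unlock(&path_lock);
		} else {
			char buf[128];

			get_path(buf, sizeof(buf) - 3); /* leave room for "..." */
			strcpy(buf + strlen(buf), "..."); /* mark truncation */
			printf("%s\n", buf);
		}
	}

	int main(void)
	{
		print_path();
		return 0;
	}
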
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0eeeeeb66f33a..d078767f677f4 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7608,7 +7608,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+ 		return 0;
+ 
+ 	/* Disregard pcpu kthreads; they are where they need to be. */
+-	if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
++	if (kthread_is_per_cpu(p))
+ 		return 0;
+ 
+ 	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
+@@ -7780,8 +7780,7 @@ static int detach_tasks(struct lb_env *env)
+ 			 * scheduler fails to find a good waiting task to
+ 			 * migrate.
+ 			 */
+-
+-			if ((load >> env->sd->nr_balance_failed) > env->imbalance)
++			if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
+ 				goto next;
+ 
+ 			env->imbalance -= load;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 10a1522b1e303..e4e4f47cee6a4 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -204,6 +204,13 @@ static inline void update_avg(u64 *avg, u64 sample)
+ 	*avg += diff / 8;
+ }
+ 
++/*
++ * Shifting a value by an exponent greater *or equal* to the size of said value
++ * is UB; cap at size-1.
++ */
++#define shr_bound(val, shift)							\
++	(val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
++
+ /*
+  * !! For sched_setattr_nocheck() (kernel) only !!
+  *
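
shr_bound() guards the load-decay shift above: C leaves `x >> s` undefined once s reaches the bit width of x, and nr_balance_failed can grow without bound. Clamping the shift at width-1 keeps the result well defined (and effectively zero for huge failure counts). A standalone illustration, using plain C in place of min_t() and BITS_PER_TYPE():

	#include <limits.h>
	#include <stdio.h>

	/* Clamp the shift to width-1; shifting by >= the width is UB. */
	#define shr_bound(val, shift) \
		((val) >> ((shift) < (int)(sizeof(val) * CHAR_BIT) ? (shift) \
			   : (int)(sizeof(val) * CHAR_BIT) - 1))

	int main(void)
	{
		unsigned long load = 123456;

		printf("%lu\n", shr_bound(load, 3));   /* 15432 */
		printf("%lu\n", shr_bound(load, 200)); /* 0, instead of UB */
		return 0;
	}
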
+diff --git a/kernel/smp.c b/kernel/smp.c
+index aeb0adfa06063..c678589fbb767 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -110,7 +110,7 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
+ static atomic_t csd_bug_count = ATOMIC_INIT(0);
+ 
+ /* Record current CSD work for current CPU, NULL to erase. */
+-static void csd_lock_record(call_single_data_t *csd)
++static void csd_lock_record(struct __call_single_data *csd)
+ {
+ 	if (!csd) {
+ 		smp_mb(); /* NULL cur_csd after unlock. */
+@@ -125,7 +125,7 @@ static void csd_lock_record(call_single_data_t *csd)
+ 		  /* Or before unlock, as the case may be. */
+ }
+ 
+-static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
++static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
+ {
+ 	unsigned int csd_type;
+ 
+@@ -140,7 +140,7 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
+  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
+  * so waiting on other types gets much less information.
+  */
+-static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
++static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
+ {
+ 	int cpu = -1;
+ 	int cpux;
+@@ -204,7 +204,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
+  * previous function call. For multi-cpu calls its even more interesting
+  * as we'll have to ensure no other cpu is observing our csd.
+  */
+-static __always_inline void csd_lock_wait(call_single_data_t *csd)
++static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+ {
+ 	int bug_id = 0;
+ 	u64 ts0, ts1;
+@@ -219,17 +219,17 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
+ }
+ 
+ #else
+-static void csd_lock_record(call_single_data_t *csd)
++static void csd_lock_record(struct __call_single_data *csd)
+ {
+ }
+ 
+-static __always_inline void csd_lock_wait(call_single_data_t *csd)
++static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+ {
+ 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
+ }
+ #endif
+ 
+-static __always_inline void csd_lock(call_single_data_t *csd)
++static __always_inline void csd_lock(struct __call_single_data *csd)
+ {
+ 	csd_lock_wait(csd);
+ 	csd->node.u_flags |= CSD_FLAG_LOCK;
+@@ -242,7 +242,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
+ 	smp_wmb();
+ }
+ 
+-static __always_inline void csd_unlock(call_single_data_t *csd)
++static __always_inline void csd_unlock(struct __call_single_data *csd)
+ {
+ 	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
+ 
+@@ -276,7 +276,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
+  * for execution on the given CPU. data must already have
+  * ->func, ->info, and ->flags set.
+  */
+-static int generic_exec_single(int cpu, call_single_data_t *csd)
++static int generic_exec_single(int cpu, struct __call_single_data *csd)
+ {
+ 	if (cpu == smp_processor_id()) {
+ 		smp_call_func_t func = csd->func;
+@@ -542,7 +542,7 @@ EXPORT_SYMBOL(smp_call_function_single);
+  * NOTE: Be careful, there is unfortunately no current debugging facility to
+  * validate the correctness of this serialization.
+  */
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+ {
+ 	int err = 0;
+ 
+diff --git a/kernel/up.c b/kernel/up.c
+index c6f323dcd45bb..4edd5493eba24 100644
+--- a/kernel/up.c
++++ b/kernel/up.c
+@@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ }
+ EXPORT_SYMBOL(smp_call_function_single);
+ 
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+ {
+ 	unsigned long flags;
+ 
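
call_single_data_t is the same structure under a typedef that adds an alignment attribute. Declaring these helpers on the underlying struct __call_single_data keeps every declaration and definition on one identical type, which is presumably what indirect-call type checking (e.g. clang CFI) compares; callers can still pass pointers to less-aligned embedded instances without a type mismatch. A self-contained look at how the typedef and the bare struct differ only in alignment -- the 32-byte figure mirrors the kernel's choice but is an assumption here:

	#include <stdalign.h>
	#include <stdio.h>

	struct __call_single_data {
		void (*func)(void *info);
		void *info;
		unsigned int flags;
	};

	/* The typedef adds alignment; the struct type itself is unchanged. */
	typedef struct __call_single_data call_single_data_t
		__attribute__((aligned(32)));

	int main(void)
	{
		printf("struct align: %zu, typedef align: %zu\n",
		       alignof(struct __call_single_data),
		       alignof(call_single_data_t));
		return 0;
	}
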
+diff --git a/lib/bug.c b/lib/bug.c
+index 8f9d537bfb2a5..b92da1f6e21b2 100644
+--- a/lib/bug.c
++++ b/lib/bug.c
+@@ -155,30 +155,27 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+ 
+ 	file = NULL;
+ 	line = 0;
+-	warning = 0;
+ 
+-	if (bug) {
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+-		file = bug->file;
++	file = bug->file;
+ #else
+-		file = (const char *)bug + bug->file_disp;
++	file = (const char *)bug + bug->file_disp;
+ #endif
+-		line = bug->line;
++	line = bug->line;
+ #endif
+-		warning = (bug->flags & BUGFLAG_WARNING) != 0;
+-		once = (bug->flags & BUGFLAG_ONCE) != 0;
+-		done = (bug->flags & BUGFLAG_DONE) != 0;
+-
+-		if (warning && once) {
+-			if (done)
+-				return BUG_TRAP_TYPE_WARN;
+-
+-			/*
+-			 * Since this is the only store, concurrency is not an issue.
+-			 */
+-			bug->flags |= BUGFLAG_DONE;
+-		}
++	warning = (bug->flags & BUGFLAG_WARNING) != 0;
++	once = (bug->flags & BUGFLAG_ONCE) != 0;
++	done = (bug->flags & BUGFLAG_DONE) != 0;
++
++	if (warning && once) {
++		if (done)
++			return BUG_TRAP_TYPE_WARN;
++
++		/*
++		 * Since this is the only store, concurrency is not an issue.
++		 */
++		bug->flags |= BUGFLAG_DONE;
+ 	}
+ 
+ 	/*
+diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
+index 3cc77d94390b2..7fb71845cc846 100644
+--- a/lib/crypto/poly1305-donna32.c
++++ b/lib/crypto/poly1305-donna32.c
+@@ -10,7 +10,8 @@
+ #include <asm/unaligned.h>
+ #include <crypto/internal/poly1305.h>
+ 
+-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
++void poly1305_core_setkey(struct poly1305_core_key *key,
++			  const u8 raw_key[POLY1305_BLOCK_SIZE])
+ {
+ 	/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ 	key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
+diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
+index 6ae181bb43450..d34cf40536689 100644
+--- a/lib/crypto/poly1305-donna64.c
++++ b/lib/crypto/poly1305-donna64.c
+@@ -12,7 +12,8 @@
+ 
+ typedef __uint128_t u128;
+ 
+-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
++void poly1305_core_setkey(struct poly1305_core_key *key,
++			  const u8 raw_key[POLY1305_BLOCK_SIZE])
+ {
+ 	u64 t0, t1;
+ 
+diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
+index 9d2d14df0fee5..26d87fc3823e8 100644
+--- a/lib/crypto/poly1305.c
++++ b/lib/crypto/poly1305.c
+@@ -12,7 +12,8 @@
+ #include <linux/module.h>
+ #include <asm/unaligned.h>
+ 
+-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
++void poly1305_init_generic(struct poly1305_desc_ctx *desc,
++			   const u8 key[POLY1305_KEY_SIZE])
+ {
+ 	poly1305_core_setkey(&desc->core_r, key);
+ 	desc->s[0] = get_unaligned_le32(key + 16);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index e064ac0d850ac..e876ba693998d 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3181,9 +3181,17 @@ static void drain_obj_stock(struct memcg_stock_pcp *stock)
+ 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
+ 
+ 		if (nr_pages) {
++			struct mem_cgroup *memcg;
++
+ 			rcu_read_lock();
+-			__memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
++retry:
++			memcg = obj_cgroup_memcg(old);
++			if (unlikely(!css_tryget(&memcg->css)))
++				goto retry;
+ 			rcu_read_unlock();
++
++			__memcg_kmem_uncharge(memcg, nr_pages);
++			css_put(&memcg->css);
+ 		}
+ 
+ 		/*
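
The old code uncharged a memcg it held no reference to: obj_cgroup_memcg() is only stable under the RCU read lock, and the objcg's memcg can change underneath (for instance when a cgroup is removed and its charges are reparented). The retry loop pins the memcg with css_tryget() inside the RCU section -- if the tryget loses a race with destruction, the pointer is reloaded -- and only then drops RCU and uncharges. A compact userspace model of the tryget-and-retry pattern using C11 atomics; names are stand-ins, not kernel API:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct css { atomic_int refcnt; };

	static struct css global = { 1 };

	static struct css *load_current(void)
	{
		return &global;	/* rcu_dereference() stand-in */
	}

	/* Fails once the last reference is gone, like css_tryget(). */
	static bool css_tryget(struct css *c)
	{
		int v = atomic_load(&c->refcnt);

		while (v > 0)
			if (atomic_compare_exchange_weak(&c->refcnt, &v, v + 1))
				return true;
		return false;
	}

	static struct css *get_live_css(void)
	{
		struct css *c;

		/* rcu_read_lock() would bracket this loop in the kernel */
		do {
			c = load_current();	/* may change between tries */
		} while (!css_tryget(c));	/* lost a race: reload, retry */

		return c;	/* safe to use after rcu_read_unlock() */
	}

	int main(void)
	{
		struct css *c = get_live_css();

		atomic_fetch_sub(&c->refcnt, 1);	/* css_put() */
		return 0;
	}
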
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 24210c9bd8434..bd3945446d47e 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1368,7 +1368,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
+ 		 * communicated in siginfo, see kill_proc()
+ 		 */
+ 		start = (page->index << PAGE_SHIFT) & ~(size - 1);
+-		unmap_mapping_range(page->mapping, start, start + size, 0);
++		unmap_mapping_range(page->mapping, start, size, 0);
+ 	}
+ 	kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
+ 	rc = 0;
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 7bd23f9d6cef6..33406ea2ecc44 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -547,6 +547,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
+ 			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
+ 			       __func__, nid);
+ 			pnum_begin = pnum;
++			sparse_buffer_fini();
+ 			goto failed;
+ 		}
+ 		check_usemap_section_nr(nid, usage);
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 6ffa89e3ba0a8..f72646690539f 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1830,8 +1830,6 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
+ {
+ 	u32 phys = 0;
+ 
+-	hci_dev_lock(conn->hdev);
+-
+ 	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
+ 	 * Table 6.2: Packets defined for synchronous, asynchronous, and
+ 	 * CSB logical transport types.
+@@ -1928,7 +1926,5 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
+ 		break;
+ 	}
+ 
+-	hci_dev_unlock(conn->hdev);
+-
+ 	return phys;
+ }
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 67668be3461e9..7a3e42e752350 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5005,6 +5005,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ 		return;
+ 
+ 	hchan->handle = le16_to_cpu(ev->handle);
++	hchan->amp = true;
+ 
+ 	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
+ 
+@@ -5037,7 +5038,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
+ 	hci_dev_lock(hdev);
+ 
+ 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
+-	if (!hchan)
++	if (!hchan || !hchan->amp)
+ 		goto unlock;
+ 
+ 	amp_destroy_logical_link(hchan, ev->reason);
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index e55976db4403e..805ce546b8133 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -272,12 +272,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
+ {
+ 	int ret;
+ 
+-	if (!test_bit(HCI_UP, &hdev->flags))
+-		return -ENETDOWN;
+-
+ 	/* Serialize all requests */
+ 	hci_req_sync_lock(hdev);
+-	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
++	/* check the state after obtaining the lock to protect the HCI_UP
++	 * against any races from hci_dev_do_close when the controller
++	 * gets removed.
++	 */
++	if (test_bit(HCI_UP, &hdev->flags))
++		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
++	else
++		ret = -ENETDOWN;
+ 	hci_req_sync_unlock(hdev);
+ 
+ 	return ret;
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 9d265447d6540..229309d7b4ff9 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -3152,25 +3152,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
+ }
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+-static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
+-				    struct net_bridge_port *port,
+-				    struct sk_buff *skb)
++static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
++				     struct net_bridge_port *port,
++				     struct sk_buff *skb)
+ {
+-	int ret;
+-
+-	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
+-		return -ENOMSG;
+-
+-	ret = ipv6_mc_check_icmpv6(skb);
+-	if (ret < 0)
+-		return ret;
+-
+ 	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
+-		return -ENOMSG;
++		return;
+ 
+ 	br_multicast_mark_router(br, port);
+-
+-	return 0;
+ }
+ 
+ static int br_multicast_ipv6_rcv(struct net_bridge *br,
+@@ -3184,18 +3173,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
+ 
+ 	err = ipv6_mc_check_mld(skb);
+ 
+-	if (err == -ENOMSG) {
++	if (err == -ENOMSG || err == -ENODATA) {
+ 		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
+ 			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
+-
+-		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
+-			err = br_ip6_multicast_mrd_rcv(br, port, skb);
+-
+-			if (err < 0 && err != -ENOMSG) {
+-				br_multicast_err_count(br, port, skb->protocol);
+-				return err;
+-			}
+-		}
++		if (err == -ENODATA &&
++		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
++			br_ip6_multicast_mrd_rcv(br, port, skb);
+ 
+ 		return 0;
+ 	} else if (err < 0) {
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1f79b9aa9a3f2..70829c568645c 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4672,10 +4672,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+ 	void *orig_data, *orig_data_end, *hard_start;
+ 	struct netdev_rx_queue *rxqueue;
+ 	u32 metalen, act = XDP_DROP;
++	bool orig_bcast, orig_host;
+ 	u32 mac_len, frame_sz;
+ 	__be16 orig_eth_type;
+ 	struct ethhdr *eth;
+-	bool orig_bcast;
+ 	int off;
+ 
+ 	/* Reinjected packets coming from act_mirred or similar should
+@@ -4722,6 +4722,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+ 	orig_data_end = xdp->data_end;
+ 	orig_data = xdp->data;
+ 	eth = (struct ethhdr *)xdp->data;
++	orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
+ 	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
+ 	orig_eth_type = eth->h_proto;
+ 
+@@ -4749,8 +4750,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+ 	/* check if XDP changed eth hdr such SKB needs update */
+ 	eth = (struct ethhdr *)xdp->data;
+ 	if ((orig_eth_type != eth->h_proto) ||
++	    (orig_host != ether_addr_equal_64bits(eth->h_dest,
++						  skb->dev->dev_addr)) ||
+ 	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
+ 		__skb_push(skb, ETH_HLEN);
++		skb->pkt_type = PACKET_HOST;
+ 		skb->protocol = eth_type_trans(skb, skb->dev);
+ 	}
+ 
+@@ -5914,7 +5918,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
+ 	return head;
+ }
+ 
+-static void skb_gro_reset_offset(struct sk_buff *skb)
++static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
+ {
+ 	const struct skb_shared_info *pinfo = skb_shinfo(skb);
+ 	const skb_frag_t *frag0 = &pinfo->frags[0];
+@@ -5925,7 +5929,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
+ 
+ 	if (!skb_headlen(skb) && pinfo->nr_frags &&
+ 	    !PageHighMem(skb_frag_page(frag0)) &&
+-	    (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
++	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
+ 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+ 		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+ 						    skb_frag_size(frag0),
+@@ -6143,7 +6147,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+ 	skb_mark_napi_id(skb, napi);
+ 	trace_napi_gro_receive_entry(skb);
+ 
+-	skb_gro_reset_offset(skb);
++	skb_gro_reset_offset(skb, 0);
+ 
+ 	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
+ 	trace_napi_gro_receive_exit(ret);
+@@ -6232,7 +6236,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+ 	napi->skb = NULL;
+ 
+ 	skb_reset_mac_header(skb);
+-	skb_gro_reset_offset(skb);
++	skb_gro_reset_offset(skb, hlen);
+ 
+ 	if (unlikely(skb_gro_header_hard(skb, hlen))) {
+ 		eth = skb_gro_header_slow(skb, hlen, 0);
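
skb_gro_reset_offset() may point frag0 at the first page fragment so later header parsing avoids expensive pulls, but that fast path is only safe when the bytes GRO will actually read start 4-byte aligned. In the napi_gro_frags() path the MAC header has already been accounted for, so the alignment test must include that offset -- hence the new nhoff argument (0 from napi_gro_receive(), hlen from napi_frags_skb()). A small demonstration of the adjusted check:

	#include <stdbool.h>
	#include <stdio.h>

	/* NET_IP_ALIGN is 2 on most architectures, 0 on x86. */
	#define NET_IP_ALIGN 2

	/* The frag0 fast path is allowed only if the data GRO reads,
	 * frag start plus header offset, is 4-byte aligned. */
	static bool frag0_ok(unsigned int frag_off, unsigned int nhoff)
	{
		return !NET_IP_ALIGN || !((frag_off + nhoff) & 3);
	}

	int main(void)
	{
		printf("%d\n", frag0_ok(0, 0));  /* 1: fragment starts aligned */
		printf("%d\n", frag0_ok(2, 14)); /* 1: 2 + 14 = 16, aligned */
		printf("%d\n", frag0_ok(0, 14)); /* 0: reads would be misaligned */
		return 0;
	}
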
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index bba150fdd2658..d635b4f32d348 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -66,6 +66,7 @@
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
++#include <linux/memblock.h>
+ #include <linux/string.h>
+ #include <linux/socket.h>
+ #include <linux/sockios.h>
+@@ -478,8 +479,10 @@ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+ 	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
+ }
+ 
+-#define IP_IDENTS_SZ 2048u
+-
++/* Hash tables of size 2048..262144 depending on RAM size.
++ * Each bucket uses 8 bytes.
++ */
++static u32 ip_idents_mask __read_mostly;
+ static atomic_t *ip_idents __read_mostly;
+ static u32 *ip_tstamps __read_mostly;
+ 
+@@ -489,12 +492,16 @@ static u32 *ip_tstamps __read_mostly;
+  */
+ u32 ip_idents_reserve(u32 hash, int segs)
+ {
+-	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
+-	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
+-	u32 old = READ_ONCE(*p_tstamp);
+-	u32 now = (u32)jiffies;
++	u32 bucket, old, now = (u32)jiffies;
++	atomic_t *p_id;
++	u32 *p_tstamp;
+ 	u32 delta = 0;
+ 
++	bucket = hash & ip_idents_mask;
++	p_tstamp = ip_tstamps + bucket;
++	p_id = ip_idents + bucket;
++	old = READ_ONCE(*p_tstamp);
++
+ 	if (old != now && cmpxchg(p_tstamp, old, now) == old)
+ 		delta = prandom_u32_max(now - old);
+ 
+@@ -3553,18 +3560,25 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
+ 
+ int __init ip_rt_init(void)
+ {
++	void *idents_hash;
+ 	int cpu;
+ 
+-	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
+-				  GFP_KERNEL);
+-	if (!ip_idents)
+-		panic("IP: failed to allocate ip_idents\n");
++	/* For modern hosts, this will use 2 MB of memory */
++	idents_hash = alloc_large_system_hash("IP idents",
++					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
++					      0,
++					      16, /* one bucket per 64 KB */
++					      HASH_ZERO,
++					      NULL,
++					      &ip_idents_mask,
++					      2048,
++					      256*1024);
++
++	ip_idents = idents_hash;
+ 
+-	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
++	prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
+ 
+-	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
+-	if (!ip_tstamps)
+-		panic("IP: failed to allocate ip_tstamps\n");
++	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
+ 
+ 	for_each_possible_cpu(cpu) {
+ 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
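
alloc_large_system_hash() returns a power-of-two table and reports its size through a mask, so bucket selection becomes a single AND instead of a modulo; the single allocation also lets ip_tstamps sit directly behind ip_idents. A sketch of the same layout and indexing in plain C, sizing the table by hand rather than by RAM:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static uint32_t ip_idents_mask;
	static uint32_t *ip_idents;	/* 4-byte counters */
	static uint32_t *ip_tstamps;	/* 4-byte timestamps, same length */

	static int idents_init(uint32_t buckets /* power of two */)
	{
		/* one allocation: idents first, tstamps immediately after */
		void *hash = calloc(buckets,
				    sizeof(*ip_idents) + sizeof(*ip_tstamps));

		if (!hash)
			return -1;
		ip_idents = hash;
		ip_tstamps = (uint32_t *)hash + buckets;
		ip_idents_mask = buckets - 1;
		return 0;
	}

	int main(void)
	{
		if (idents_init(2048))
			return 1;

		uint32_t hash = 0xdeadbeef;
		uint32_t bucket = hash & ip_idents_mask; /* replaces hash % SIZE */

		printf("bucket %u of %u\n", bucket, ip_idents_mask + 1);
		return 0;
	}
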
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 563d016e74783..db5831e6c136a 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -230,6 +230,10 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
+ 		ret = -ENOENT;
+ 	} else if (!bpf_try_module_get(ca, ca->owner)) {
+ 		ret = -EBUSY;
++	} else if (!net_eq(net, &init_net) &&
++			!(ca->flags & TCP_CONG_NON_RESTRICTED)) {
++		/* Only init netns can set default to a restricted algorithm */
++		ret = -EPERM;
+ 	} else {
+ 		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
+ 		if (prev)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 99d743eb9dc46..c586a6bb8c6d0 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2664,9 +2664,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 
+ 	case UDP_GRO:
+ 		lock_sock(sk);
++
++		/* when enabling GRO, accept the related GSO packet type */
+ 		if (valbool)
+ 			udp_tunnel_encap_enable(sk->sk_socket);
+ 		up->gro_enabled = valbool;
++		up->accept_udp_l4 = valbool;
+ 		release_sock(sk);
+ 		break;
+ 
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index c5b4b586570fe..25134a3548e99 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -515,21 +515,24 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ 	unsigned int off = skb_gro_offset(skb);
+ 	int flush = 1;
+ 
++	/* we can do L4 aggregation only if the packet can't land in a tunnel,
++	 * otherwise we could corrupt the inner stream
++	 */
+ 	NAPI_GRO_CB(skb)->is_flist = 0;
+-	if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
+-		NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled: 1;
++	if (!sk || !udp_sk(sk)->gro_receive) {
++		if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
++			NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
+ 
+-	if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
+-	    (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) {
+-		pp = call_gro_receive(udp_gro_receive_segment, head, skb);
++		if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
++		    (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
++			pp = call_gro_receive(udp_gro_receive_segment, head, skb);
+ 		return pp;
+ 	}
+ 
+-	if (!sk || NAPI_GRO_CB(skb)->encap_mark ||
++	if (NAPI_GRO_CB(skb)->encap_mark ||
+ 	    (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
+ 	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+-	     !NAPI_GRO_CB(skb)->csum_valid) ||
+-	    !udp_sk(sk)->gro_receive)
++	     !NAPI_GRO_CB(skb)->csum_valid))
+ 		goto out;
+ 
+ 	/* mark that this skb passed once through the tunnel gro layer */
+diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
+index d3d6b6a66e5fa..04d5fcdfa6e00 100644
+--- a/net/ipv6/mcast_snoop.c
++++ b/net/ipv6/mcast_snoop.c
+@@ -109,7 +109,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
+ 	struct mld_msg *mld;
+ 
+ 	if (!ipv6_mc_may_pull(skb, len))
+-		return -EINVAL;
++		return -ENODATA;
+ 
+ 	mld = (struct mld_msg *)skb_transport_header(skb);
+ 
+@@ -122,7 +122,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
+ 	case ICMPV6_MGM_QUERY:
+ 		return ipv6_mc_check_mld_query(skb);
+ 	default:
+-		return -ENOMSG;
++		return -ENODATA;
+ 	}
+ }
+ 
+@@ -131,7 +131,7 @@ static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
+ 	return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
+ }
+ 
+-int ipv6_mc_check_icmpv6(struct sk_buff *skb)
++static int ipv6_mc_check_icmpv6(struct sk_buff *skb)
+ {
+ 	unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
+ 	unsigned int transport_len = ipv6_transport_len(skb);
+@@ -150,7 +150,6 @@ int ipv6_mc_check_icmpv6(struct sk_buff *skb)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
+ 
+ /**
+  * ipv6_mc_check_mld - checks whether this is a sane MLD packet
+@@ -161,7 +160,10 @@ EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
+  *
+  * -EINVAL: A broken packet was detected, i.e. it violates some internet
+  *  standard
+- * -ENOMSG: IP header validation succeeded but it is not an MLD packet.
++ * -ENOMSG: IP header validation succeeded but it is not an ICMPv6 packet
++ *  with a hop-by-hop option.
++ * -ENODATA: IP+ICMPv6 header with hop-by-hop option validation succeeded
++ *  but it is not an MLD packet.
+  * -ENOMEM: A memory allocation failure happened.
+  *
+  * Caller needs to set the skb network header and free any returned skb if it
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 1b9c82616606b..0331f3a3c40e0 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -1141,8 +1141,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 	if (local->hw.wiphy->max_scan_ie_len)
+ 		local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
+ 
+-	WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
+-					 local->hw.n_cipher_schemes));
++	if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
++					     local->hw.n_cipher_schemes))) {
++		result = -EINVAL;
++		goto fail_workqueue;
++	}
+ 
+ 	result = ieee80211_init_cipher_suites(local);
+ 	if (result < 0)
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 4bde960e19dc0..65e5d3eb10789 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -399,6 +399,14 @@ static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
+ 	return false;
+ }
+ 
++static void mptcp_set_datafin_timeout(const struct sock *sk)
++{
++	struct inet_connection_sock *icsk = inet_csk(sk);
++
++	mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX,
++				       TCP_RTO_MIN << icsk->icsk_retransmits);
++}
++
+ static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
+ {
+ 	long tout = ssk && inet_csk(ssk)->icsk_pending ?
+@@ -1052,7 +1060,7 @@ out:
+ 	}
+ 
+ 	if (snd_una == READ_ONCE(msk->snd_nxt)) {
+-		if (msk->timer_ival)
++		if (msk->timer_ival && !mptcp_data_fin_enabled(msk))
+ 			mptcp_stop_timer(sk);
+ 	} else {
+ 		mptcp_reset_timer(sk);
+@@ -1275,7 +1283,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 	int avail_size;
+ 	size_t ret = 0;
+ 
+-	pr_debug("msk=%p ssk=%p sending dfrag at seq=%lld len=%d already sent=%d",
++	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
+ 		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
+ 
+ 	/* compute send limit */
+@@ -1693,7 +1701,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 			if (!msk->first_pending)
+ 				WRITE_ONCE(msk->first_pending, dfrag);
+ 		}
+-		pr_debug("msk=%p dfrag at seq=%lld len=%d sent=%d new=%d", msk,
++		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
+ 			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
+ 			 !dfrag_collapsed);
+ 
+@@ -2276,8 +2284,19 @@ static void __mptcp_retrans(struct sock *sk)
+ 
+ 	__mptcp_clean_una_wakeup(sk);
+ 	dfrag = mptcp_rtx_head(sk);
+-	if (!dfrag)
++	if (!dfrag) {
++		if (mptcp_data_fin_enabled(msk)) {
++			struct inet_connection_sock *icsk = inet_csk(sk);
++
++			icsk->icsk_retransmits++;
++			mptcp_set_datafin_timeout(sk);
++			mptcp_send_ack(msk);
++
++			goto reset_timer;
++		}
++
+ 		return;
++	}
+ 
+ 	ssk = mptcp_subflow_get_retrans(msk);
+ 	if (!ssk)
+@@ -2460,6 +2479,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ 			pr_debug("Sending DATA_FIN on subflow %p", ssk);
+ 			mptcp_set_timeout(sk, ssk);
+ 			tcp_send_ack(ssk);
++			if (!mptcp_timer_pending(sk))
++				mptcp_reset_timer(sk);
+ 		}
+ 		break;
+ 	}
+diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
+index 9ae14270c543e..2b00f7f47693b 100644
+--- a/net/netfilter/nf_tables_offload.c
++++ b/net/netfilter/nf_tables_offload.c
+@@ -45,6 +45,48 @@ void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
+ 		offsetof(struct nft_flow_key, control);
+ }
+ 
++struct nft_offload_ethertype {
++	__be16 value;
++	__be16 mask;
++};
++
++static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
++					struct nft_flow_rule *flow)
++{
++	struct nft_flow_match *match = &flow->match;
++	struct nft_offload_ethertype ethertype;
++
++	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
++	    match->key.basic.n_proto != htons(ETH_P_8021Q) &&
++	    match->key.basic.n_proto != htons(ETH_P_8021AD))
++		return;
++
++	ethertype.value = match->key.basic.n_proto;
++	ethertype.mask = match->mask.basic.n_proto;
++
++	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
++	    (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
++	     match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
++		match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
++		match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
++		match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
++		match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
++		match->key.vlan.vlan_tpid = ethertype.value;
++		match->mask.vlan.vlan_tpid = ethertype.mask;
++		match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
++			offsetof(struct nft_flow_key, cvlan);
++		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
++	} else {
++		match->key.basic.n_proto = match->key.vlan.vlan_tpid;
++		match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
++		match->key.vlan.vlan_tpid = ethertype.value;
++		match->mask.vlan.vlan_tpid = ethertype.mask;
++		match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
++			offsetof(struct nft_flow_key, vlan);
++		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
++	}
++}
++
+ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
+ 					   const struct nft_rule *rule)
+ {
+@@ -89,6 +131,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
+ 
+ 		expr = nft_expr_next(expr);
+ 	}
++	nft_flow_rule_transfer_vlan(ctx, flow);
++
+ 	flow->proto = ctx->dep.l3num;
+ 	kfree(ctx);
+ 
+diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
+index eb6a43a180bba..47b6d05f1ae69 100644
+--- a/net/netfilter/nft_cmp.c
++++ b/net/netfilter/nft_cmp.c
+@@ -114,19 +114,56 @@ nla_put_failure:
+ 	return -1;
+ }
+ 
++union nft_cmp_offload_data {
++	u16	val16;
++	u32	val32;
++	u64	val64;
++};
++
++static void nft_payload_n2h(union nft_cmp_offload_data *data,
++			    const u8 *val, u32 len)
++{
++	switch (len) {
++	case 2:
++		data->val16 = ntohs(*((u16 *)val));
++		break;
++	case 4:
++		data->val32 = ntohl(*((u32 *)val));
++		break;
++	case 8:
++		data->val64 = be64_to_cpu(*((u64 *)val));
++		break;
++	default:
++		WARN_ON_ONCE(1);
++		break;
++	}
++}
++
+ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
+ 			     struct nft_flow_rule *flow,
+ 			     const struct nft_cmp_expr *priv)
+ {
+ 	struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
++	union nft_cmp_offload_data _data, _datamask;
+ 	u8 *mask = (u8 *)&flow->match.mask;
+ 	u8 *key = (u8 *)&flow->match.key;
++	u8 *data, *datamask;
+ 
+ 	if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
+ 		return -EOPNOTSUPP;
+ 
+-	memcpy(key + reg->offset, &priv->data, reg->len);
+-	memcpy(mask + reg->offset, &reg->mask, reg->len);
++	if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
++		nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
++		nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
++		data = (u8 *)&_data;
++		datamask = (u8 *)&_datamask;
++	} else {
++		data = (u8 *)&priv->data;
++		datamask = (u8 *)&reg->mask;
++	}
++
++	memcpy(key + reg->offset, data, reg->len);
++	memcpy(mask + reg->offset, datamask, reg->len);
+ 
+ 	flow->match.dissector.used_keys |= BIT(reg->key);
+ 	flow->match.dissector.offset[reg->key] = reg->base_offset;
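
Flow-dissector keys such as vlan_tci are stored in host byte order, while nft payload expressions compare raw network-order packet bytes, so values flagged NFT_OFFLOAD_F_NETWORK2HOST have to be byte-swapped before they are copied into the offload key and mask. A standalone version of the length-dispatched conversion; the memcpy avoids the unaligned casts the kernel version can get away with:

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	union n2h_data {
		uint16_t val16;
		uint32_t val32;
		uint64_t val64;
	};

	/* Convert a 2/4/8-byte big-endian field to host order. */
	static int payload_n2h(union n2h_data *out, const uint8_t *val,
			       uint32_t len)
	{
		switch (len) {
		case 2: {
			uint16_t v; memcpy(&v, val, 2);
			out->val16 = be16toh(v); return 0;
		}
		case 4: {
			uint32_t v; memcpy(&v, val, 4);
			out->val32 = be32toh(v); return 0;
		}
		case 8: {
			uint64_t v; memcpy(&v, val, 8);
			out->val64 = be64toh(v); return 0;
		}
		default:
			return -1;	/* unsupported field width */
		}
	}

	int main(void)
	{
		uint8_t wire[2] = { 0x81, 0x00 }; /* ETH_P_8021Q on the wire */
		union n2h_data d;

		payload_n2h(&d, wire, 2);
		printf("0x%04x\n", d.val16);	/* 0x8100 */
		return 0;
	}
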
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index cb1c8c2318803..501c5b24cc396 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -226,8 +226,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
+ 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
+-				  vlan_tci, sizeof(__be16), reg);
++		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
++					vlan_tci, sizeof(__be16), reg,
++					NFT_OFFLOAD_F_NETWORK2HOST);
+ 		break;
+ 	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
+ 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+@@ -241,16 +242,18 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
+ 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
+-				  vlan_tci, sizeof(__be16), reg);
++		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
++					vlan_tci, sizeof(__be16), reg,
++					NFT_OFFLOAD_F_NETWORK2HOST);
+ 		break;
+ 	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
+ 							sizeof(struct vlan_hdr):
+ 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
++		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
+ 				  vlan_tpid, sizeof(__be16), reg);
++		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
+ 		break;
+ 	default:
+ 		return -EOPNOTSUPP;
+diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
+index 5971fb6f51cc7..dc21b4141b0af 100644
+--- a/net/nfc/digital_dep.c
++++ b/net/nfc/digital_dep.c
+@@ -1273,6 +1273,8 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
+ 	}
+ 
+ 	rc = nfc_tm_data_received(ddev->nfc_dev, resp);
++	if (rc)
++		resp = NULL;
+ 
+ exit:
+ 	kfree_skb(ddev->chaining_skb);
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index a3b46f8888033..53dbe733f9981 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -109,12 +109,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ 					  GFP_KERNEL);
+ 	if (!llcp_sock->service_name) {
+ 		nfc_llcp_local_put(llcp_sock->local);
++		llcp_sock->local = NULL;
+ 		ret = -ENOMEM;
+ 		goto put_dev;
+ 	}
+ 	llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
+ 	if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ 		nfc_llcp_local_put(llcp_sock->local);
++		llcp_sock->local = NULL;
+ 		kfree(llcp_sock->service_name);
+ 		llcp_sock->service_name = NULL;
+ 		ret = -EADDRINUSE;
+@@ -709,6 +711,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+ 	llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
+ 	if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ 		nfc_llcp_local_put(llcp_sock->local);
++		llcp_sock->local = NULL;
+ 		ret = -ENOMEM;
+ 		goto put_dev;
+ 	}
+@@ -756,6 +759,7 @@ sock_unlink:
+ sock_llcp_release:
+ 	nfc_llcp_put_ssap(local, llcp_sock->ssap);
+ 	nfc_llcp_local_put(llcp_sock->local);
++	llcp_sock->local = NULL;
+ 
+ put_dev:
+ 	nfc_put_device(dev);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index e24b2841c643a..9611e41c7b8be 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1359,7 +1359,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
+ 	struct packet_sock *po, *po_next, *po_skip = NULL;
+ 	unsigned int i, j, room = ROOM_NONE;
+ 
+-	po = pkt_sk(f->arr[idx]);
++	po = pkt_sk(rcu_dereference(f->arr[idx]));
+ 
+ 	if (try_self) {
+ 		room = packet_rcv_has_room(po, skb);
+@@ -1371,7 +1371,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
+ 
+ 	i = j = min_t(int, po->rollover->sock, num - 1);
+ 	do {
+-		po_next = pkt_sk(f->arr[i]);
++		po_next = pkt_sk(rcu_dereference(f->arr[i]));
+ 		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
+ 		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
+ 			if (i != j)
+@@ -1466,7 +1466,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
+ 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
+ 		idx = fanout_demux_rollover(f, skb, idx, true, num);
+ 
+-	po = pkt_sk(f->arr[idx]);
++	po = pkt_sk(rcu_dereference(f->arr[idx]));
+ 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
+ }
+ 
+@@ -1480,7 +1480,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
+ 	struct packet_fanout *f = po->fanout;
+ 
+ 	spin_lock(&f->lock);
+-	f->arr[f->num_members] = sk;
++	rcu_assign_pointer(f->arr[f->num_members], sk);
+ 	smp_wmb();
+ 	f->num_members++;
+ 	if (f->num_members == 1)
+@@ -1495,11 +1495,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
+ 
+ 	spin_lock(&f->lock);
+ 	for (i = 0; i < f->num_members; i++) {
+-		if (f->arr[i] == sk)
++		if (rcu_dereference_protected(f->arr[i],
++					      lockdep_is_held(&f->lock)) == sk)
+ 			break;
+ 	}
+ 	BUG_ON(i >= f->num_members);
+-	f->arr[i] = f->arr[f->num_members - 1];
++	rcu_assign_pointer(f->arr[i],
++			   rcu_dereference_protected(f->arr[f->num_members - 1],
++						     lockdep_is_held(&f->lock)));
+ 	f->num_members--;
+ 	if (f->num_members == 0)
+ 		__dev_remove_pack(&f->prot_hook);
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index 5f61e59ebbffa..48af35b1aed25 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -94,7 +94,7 @@ struct packet_fanout {
+ 	spinlock_t		lock;
+ 	refcount_t		sk_ref;
+ 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
+-	struct sock		*arr[];
++	struct sock	__rcu	*arr[];
+ };
+ 
+ struct packet_rollover {
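
The af_packet hunks are the standard conversion of a bare pointer array into
an RCU-managed one: readers use rcu_dereference(), the lock-holding writer
publishes entries with rcu_assign_pointer(), and writer-side reads document
their locking via rcu_dereference_protected(). A condensed kernel-style
sketch of the pattern -- the demux struct and function names are
illustrative, not from the patch:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct sock;

    struct demux {
            spinlock_t lock;                /* serializes all writers */
            struct sock __rcu *arr[8];      /* read under RCU */
    };

    /* reader path: caller holds rcu_read_lock() */
    static struct sock *demux_get(struct demux *d, unsigned int i)
    {
            return rcu_dereference(d->arr[i]);
    }

    /* writer path: publish under the lock so readers see a full pointer */
    static void demux_set(struct demux *d, unsigned int i, struct sock *sk)
    {
            spin_lock(&d->lock);
            rcu_assign_pointer(d->arr[i], sk);
            spin_unlock(&d->lock);
    }

    /* writer-side read: no RCU read lock needed, the spinlock pins it */
    static struct sock *demux_get_locked(struct demux *d, unsigned int i)
    {
            return rcu_dereference_protected(d->arr[i],
                                             lockdep_is_held(&d->lock));
    }
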
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 16e888a9601dd..48fdf7293deaa 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -732,7 +732,8 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ #endif
+ 	}
+ 
+-	*qdisc_skb_cb(skb) = cb;
++	if (err != -EINPROGRESS)
++		*qdisc_skb_cb(skb) = cb;
+ 	skb_clear_hash(skb);
+ 	skb->ignore_df = 1;
+ 	return err;
+@@ -967,7 +968,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ 	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
+ 	if (err == -EINPROGRESS) {
+ 		retval = TC_ACT_STOLEN;
+-		goto out;
++		goto out_clear;
+ 	}
+ 	if (err)
+ 		goto drop;
+@@ -1030,7 +1031,6 @@ do_nat:
+ out_push:
+ 	skb_push_rcsum(skb, nh_ofs);
+ 
+-out:
+ 	qdisc_skb_cb(skb)->post_ct = true;
+ out_clear:
+ 	tcf_action_update_bstats(&c->common, skb);
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index b9b3d899a611c..4ae428f2f2c57 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -357,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
+ 	return af;
+ }
+ 
++static void sctp_auto_asconf_init(struct sctp_sock *sp)
++{
++	struct net *net = sock_net(&sp->inet.sk);
++
++	if (net->sctp.default_auto_asconf) {
++		spin_lock(&net->sctp.addr_wq_lock);
++		list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
++		spin_unlock(&net->sctp.addr_wq_lock);
++		sp->do_auto_asconf = 1;
++	}
++}
++
+ /* Bind a local address either to an endpoint or to an association.  */
+ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
+ {
+@@ -418,8 +430,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
+ 		return -EADDRINUSE;
+ 
+ 	/* Refresh ephemeral port.  */
+-	if (!bp->port)
++	if (!bp->port) {
+ 		bp->port = inet_sk(sk)->inet_num;
++		sctp_auto_asconf_init(sp);
++	}
+ 
+ 	/* Add the address to the bind address list.
+ 	 * Use GFP_ATOMIC since BHs will be disabled.
+@@ -1520,9 +1534,11 @@ static void sctp_close(struct sock *sk, long timeout)
+ 
+ 	/* Supposedly, no process has access to the socket, but
+ 	 * the net layers still may.
++	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
++	 * held, and that lock must be taken before the socket lock.
+ 	 */
+-	local_bh_disable();
+-	bh_lock_sock(sk);
++	spin_lock_bh(&net->sctp.addr_wq_lock);
++	bh_lock_sock_nested(sk);
+ 
+ 	/* Hold the sock, since sk_common_release() will put sock_put()
+ 	 * and we have just a little more cleanup.
+@@ -1531,7 +1547,7 @@ static void sctp_close(struct sock *sk, long timeout)
+ 	sk_common_release(sk);
+ 
+ 	bh_unlock_sock(sk);
+-	local_bh_enable();
++	spin_unlock_bh(&net->sctp.addr_wq_lock);
+ 
+ 	sock_put(sk);
+ 
+@@ -4991,16 +5007,6 @@ static int sctp_init_sock(struct sock *sk)
+ 	sk_sockets_allocated_inc(sk);
+ 	sock_prot_inuse_add(net, sk->sk_prot, 1);
+ 
+-	if (net->sctp.default_auto_asconf) {
+-		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+-		list_add_tail(&sp->auto_asconf_list,
+-		    &net->sctp.auto_asconf_splist);
+-		sp->do_auto_asconf = 1;
+-		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
+-	} else {
+-		sp->do_auto_asconf = 0;
+-	}
+-
+ 	local_bh_enable();
+ 
+ 	return 0;
+@@ -5025,9 +5031,7 @@ static void sctp_destroy_sock(struct sock *sk)
+ 
+ 	if (sp->do_auto_asconf) {
+ 		sp->do_auto_asconf = 0;
+-		spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 		list_del(&sp->auto_asconf_list);
+-		spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 	}
+ 	sctp_endpoint_free(sp->ep);
+ 	local_bh_disable();
+@@ -9398,6 +9402,8 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ 			return err;
+ 	}
+ 
++	sctp_auto_asconf_init(newsp);
++
+ 	/* Move any messages in the old socket's receive queue that are for the
+ 	 * peeled off association to the new socket's receive queue.
+ 	 */
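
The sctp_close() rework encodes a lock-ordering rule: addr_wq_lock is now
taken before the socket lock on the close path, so sctp_destroy_sock() can
unlink the auto_asconf entry with addr_wq_lock already held by its caller.
Taking both locks in the same order on every path is what rules out an ABBA
deadlock; a stripped-down sketch with illustrative names:

    #include <linux/spinlock.h>
    #include <net/sock.h>

    static void teardown(struct sock *sk, spinlock_t *addr_wq_lock)
    {
            spin_lock_bh(addr_wq_lock);     /* outer lock first, always */
            bh_lock_sock_nested(sk);        /* then the socket spinlock;
                                             * _nested tells lockdep the
                                             * nesting is intentional */

            /* ... release work that may touch the addr_wq list ... */

            bh_unlock_sock(sk);
            spin_unlock_bh(addr_wq_lock);
    }
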
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index 97710ce36047c..c89ce47c56cf2 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -1492,6 +1492,8 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
+ 	/* Allocate statistic structure */
+ 	c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
+ 	if (!c->stats) {
++		if (c->wq)
++			destroy_workqueue(c->wq);
+ 		kfree_sensitive(c);
+ 		return -ENOMEM;
+ 	}
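
The tipc_crypto_start() fix is the usual error-unwinding rule: each failure
point must release everything acquired before it, in reverse order. A
goto-ladder sketch of the idiom (names illustrative):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    static int start_thing(struct workqueue_struct **wq_out, void **stats_out)
    {
            struct workqueue_struct *wq;
            void *stats;

            wq = alloc_workqueue("demo", 0, 0);
            if (!wq)
                    return -ENOMEM;

            stats = kzalloc(64, GFP_KERNEL);
            if (!stats)
                    goto err_wq;            /* undo the earlier step */

            *wq_out = wq;
            *stats_out = stats;
            return 0;

    err_wq:
            destroy_workqueue(wq);
            return -ENOMEM;
    }
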
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index e4370b1b74947..902cb6dd710bd 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -733,6 +733,23 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
+ 	return t->send_pkt(reply);
+ }
+ 
++/* This function should be called with sk_lock held and SOCK_DONE set */
++static void virtio_transport_remove_sock(struct vsock_sock *vsk)
++{
++	struct virtio_vsock_sock *vvs = vsk->trans;
++	struct virtio_vsock_pkt *pkt, *tmp;
++
++	/* We don't need to take rx_lock, as the socket is closing and we are
++	 * removing it.
++	 */
++	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
++		list_del(&pkt->list);
++		virtio_transport_free_pkt(pkt);
++	}
++
++	vsock_remove_sock(vsk);
++}
++
+ static void virtio_transport_wait_close(struct sock *sk, long timeout)
+ {
+ 	if (timeout) {
+@@ -765,7 +782,7 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
+ 	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
+ 		vsk->close_work_scheduled = false;
+ 
+-		vsock_remove_sock(vsk);
++		virtio_transport_remove_sock(vsk);
+ 
+ 		/* Release refcnt obtained when we scheduled the timeout */
+ 		sock_put(sk);
+@@ -828,22 +845,15 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
+ 
+ void virtio_transport_release(struct vsock_sock *vsk)
+ {
+-	struct virtio_vsock_sock *vvs = vsk->trans;
+-	struct virtio_vsock_pkt *pkt, *tmp;
+ 	struct sock *sk = &vsk->sk;
+ 	bool remove_sock = true;
+ 
+ 	if (sk->sk_type == SOCK_STREAM)
+ 		remove_sock = virtio_transport_close(vsk);
+ 
+-	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+-		list_del(&pkt->list);
+-		virtio_transport_free_pkt(pkt);
+-	}
+-
+ 	if (remove_sock) {
+ 		sock_set_flag(sk, SOCK_DONE);
+-		vsock_remove_sock(vsk);
++		virtio_transport_remove_sock(vsk);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(virtio_transport_release);
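
The virtio transport change funnels every removal path through one helper so
the rx queue is always drained before the socket goes away; previously only
virtio_transport_release() drained it, leaking packets queued on the
delayed-close path. The generic drain-before-remove shape, with illustrative
types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct pkt {
            struct list_head list;
            /* payload ... */
    };

    /* Free everything still queued before the owning object goes away.
     * The _safe iterator allows list_del() of the current entry. */
    static void drain_queue(struct list_head *queue)
    {
            struct pkt *p, *tmp;

            list_for_each_entry_safe(p, tmp, queue, list) {
                    list_del(&p->list);
                    kfree(p);
            }
    }
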
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index 8b65323207db5..1c9ecb18b8e64 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -568,8 +568,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
+ 			       peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
+ out:
+ 	if (err < 0) {
+-		pr_err("Could not attach to queue pair with %d\n",
+-		       err);
++		pr_err_once("Could not attach to queue pair with %d\n", err);
+ 		err = vmci_transport_error_to_vsock_error(err);
+ 	}
+ 
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 758eb7d2a7068..caa8eafbd5834 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1751,6 +1751,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ 
+ 		if (rdev->bss_entries >= bss_entries_limit &&
+ 		    !cfg80211_bss_expire_oldest(rdev)) {
++			if (!list_empty(&new->hidden_list))
++				list_del(&new->hidden_list);
+ 			kfree(new);
+ 			goto drop;
+ 		}
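
The scan.c two-liner prevents a use-after-free: the candidate BSS may already
be linked on another entry's hidden_list, so it has to be unlinked before
being freed on the drop path. The rule in miniature (the list_empty() check
in the patch implies the node starts out initialized):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct entry {
            struct list_head node;  /* INIT_LIST_HEAD()ed at allocation */
    };

    static void drop_entry(struct entry *e)
    {
            /* unlink first -- freeing while still on a list leaves the
             * neighbours pointing into freed memory */
            if (!list_empty(&e->node))
                    list_del(&e->node);
            kfree(e);
    }
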
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 4faabd1ecfd1b..143979ea41657 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -454,12 +454,16 @@ static int xsk_generic_xmit(struct sock *sk)
+ 	struct sk_buff *skb;
+ 	unsigned long flags;
+ 	int err = 0;
++	u32 hr, tr;
+ 
+ 	mutex_lock(&xs->mutex);
+ 
+ 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
+ 		goto out;
+ 
++	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
++	tr = xs->dev->needed_tailroom;
++
+ 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
+ 		char *buffer;
+ 		u64 addr;
+@@ -471,11 +475,13 @@ static int xsk_generic_xmit(struct sock *sk)
+ 		}
+ 
+ 		len = desc.len;
+-		skb = sock_alloc_send_skb(sk, len, 1, &err);
++		skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
+ 		if (unlikely(!skb))
+ 			goto out;
+ 
++		skb_reserve(skb, hr);
+ 		skb_put(skb, len);
++
+ 		addr = desc.addr;
+ 		buffer = xsk_buff_raw_get_data(xs->pool, addr);
+ 		err = skb_store_bits(skb, 0, buffer, len);
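
The xsk change sizes each TX skb for the device's headroom and tailroom up
front and then positions the payload with skb_reserve()/skb_put(); without
this, a driver with nonzero needed_headroom has to reallocate every frame.
The allocation sequence in isolation, as a sketch:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <net/sock.h>

    static struct sk_buff *alloc_tx_skb(struct sock *sk,
                                        struct net_device *dev,
                                        u32 len, int *err)
    {
            u32 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
            u32 tr = dev->needed_tailroom;
            struct sk_buff *skb;

            skb = sock_alloc_send_skb(sk, hr + len + tr, 1, err);
            if (!skb)
                    return NULL;

            skb_reserve(skb, hr);   /* headroom the driver may push into */
            skb_put(skb, len);      /* expose len bytes of payload */
            return skb;
    }
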
+diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
+index c406f03ee5519..5a90aa5278775 100644
+--- a/samples/kfifo/bytestream-example.c
++++ b/samples/kfifo/bytestream-example.c
+@@ -122,8 +122,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
+ 	ret = kfifo_from_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&write_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static ssize_t fifo_read(struct file *file, char __user *buf,
+@@ -138,8 +140,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
+ 	ret = kfifo_to_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&read_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static const struct proc_ops fifo_proc_ops = {
+diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
+index 78977fc4a23f7..e5403d8c971a5 100644
+--- a/samples/kfifo/inttype-example.c
++++ b/samples/kfifo/inttype-example.c
+@@ -115,8 +115,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
+ 	ret = kfifo_from_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&write_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static ssize_t fifo_read(struct file *file, char __user *buf,
+@@ -131,8 +133,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
+ 	ret = kfifo_to_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&read_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static const struct proc_ops fifo_proc_ops = {
+diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
+index c507998a2617c..f64f3d62d6c2a 100644
+--- a/samples/kfifo/record-example.c
++++ b/samples/kfifo/record-example.c
+@@ -129,8 +129,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
+ 	ret = kfifo_from_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&write_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static ssize_t fifo_read(struct file *file, char __user *buf,
+@@ -145,8 +147,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
+ 	ret = kfifo_to_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&read_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static const struct proc_ops fifo_proc_ops = {
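
All three kfifo samples receive the same fix, and the reason is type
promotion rather than logic: kfifo_from_user()/kfifo_to_user() return an int
error code while copied is an unsigned int, so in "return ret ? ret : copied;"
the ternary's common type is unsigned and a negative errno comes back as a
huge positive count once widened to a 64-bit ssize_t. A compilable
demonstration:

    #include <stdio.h>

    int main(void)
    {
            int ret = -14;                  /* -EFAULT from kfifo_*_user() */
            unsigned int copied = 0;

            /* int vs unsigned int: the ternary's common type is unsigned,
             * so ret is converted before the result widens to 64 bits. */
            long long broken = ret ? ret : copied;

            long long fixed;
            if (ret)
                    fixed = ret;            /* stays -14 */
            else
                    fixed = copied;

            printf("broken=%lld fixed=%lld\n", broken, fixed);
            /* prints: broken=4294967282 fixed=-14 */
            return 0;
    }
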
+diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
+index e22e510ae92d4..4e081e6500476 100644
+--- a/security/integrity/ima/ima_template.c
++++ b/security/integrity/ima/ima_template.c
+@@ -494,8 +494,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
+ 			}
+ 		}
+ 
+-		entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
+-			     le32_to_cpu(*(hdr[HDR_PCR].data));
++		entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
++			     le32_to_cpu(*(u32 *)(hdr[HDR_PCR].data));
+ 		ret = ima_restore_measurement_entry(entry);
+ 		if (ret < 0)
+ 			break;
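
The ima_template change fixes a pointer-width bug: hdr[HDR_PCR].data points
at bytes, so dereferencing it directly yields one byte of the 32-bit PCR
field; the added u32 cast reads the whole field before the optional
endianness conversion. The difference in plain C:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            /* 32-bit PCR index 298 stored little-endian in a byte buffer */
            unsigned char data[4] = { 0x2a, 0x01, 0x00, 0x00 };
            uint32_t pcr;

            printf("one byte : %u\n", *data);   /* 42 -- truncated value */

            memcpy(&pcr, data, sizeof(pcr));    /* read all four bytes */
            printf("full u32 : %u\n", pcr);     /* 298 on little-endian */
            return 0;
    }
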
+diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
+index 493eb91ed017f..1e13c9f7ea8c1 100644
+--- a/security/keys/trusted-keys/trusted_tpm1.c
++++ b/security/keys/trusted-keys/trusted_tpm1.c
+@@ -791,13 +791,33 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 				return -EINVAL;
+ 			break;
+ 		case Opt_blobauth:
+-			if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
+-				return -EINVAL;
+-			res = hex2bin(opt->blobauth, args[0].from,
+-				      SHA1_DIGEST_SIZE);
+-			if (res < 0)
+-				return -EINVAL;
++			/*
++			 * TPM 1.2 authorizations are sha1 hashes passed in as
++			 * passwords (although they can take a hash as well)
++			 * passwords (although it can take a hash as well)
++			 */
++			opt->blobauth_len = strlen(args[0].from);
++
++			if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
++				res = hex2bin(opt->blobauth, args[0].from,
++					      TPM_DIGEST_SIZE);
++				if (res < 0)
++					return -EINVAL;
++
++				opt->blobauth_len = TPM_DIGEST_SIZE;
++				break;
++			}
++
++			if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
++				memcpy(opt->blobauth, args[0].from,
++				       opt->blobauth_len);
++				break;
++			}
++
++			return -EINVAL;
++
+ 			break;
++
+ 		case Opt_migratable:
+ 			if (*args[0].from == '0')
+ 				pay->migratable = 0;
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index c87c4df8703d4..4c19d3abddbee 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -97,10 +97,12 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
+ 			     TPM_DIGEST_SIZE);
+ 
+ 	/* sensitive */
+-	tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
++	tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len + 1);
++
++	tpm_buf_append_u16(&buf, options->blobauth_len);
++	if (options->blobauth_len)
++		tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
+ 
+-	tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
+-	tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
+ 	tpm_buf_append_u16(&buf, payload->key_len + 1);
+ 	tpm_buf_append(&buf, payload->key, payload->key_len);
+ 	tpm_buf_append_u8(&buf, payload->migratable);
+@@ -265,7 +267,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ 			     NULL /* nonce */, 0,
+ 			     TPM2_SA_CONTINUE_SESSION,
+ 			     options->blobauth /* hmac */,
+-			     TPM_DIGEST_SIZE);
++			     options->blobauth_len);
+ 
+ 	rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
+ 	if (rc > 0)
+diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
+index ba2e01a6955cb..62d19bccf3de1 100644
+--- a/security/selinux/include/classmap.h
++++ b/security/selinux/include/classmap.h
+@@ -242,11 +242,12 @@ struct security_class_mapping secclass_map[] = {
+ 	{ "infiniband_endport",
+ 	  { "manage_subnet", NULL } },
+ 	{ "bpf",
+-	  {"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
++	  { "map_create", "map_read", "map_write", "prog_load", "prog_run",
++	    NULL } },
+ 	{ "xdp_socket",
+ 	  { COMMON_SOCK_PERMS, NULL } },
+ 	{ "perf_event",
+-	  {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
++	  { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
+ 	{ "lockdown",
+ 	  { "integrity", "confidentiality", NULL } },
+ 	{ "anon_inode",
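
The classmap fix restores the NULL sentinel that the permission-table walkers
rely on to find the end of each list; without it, iteration runs past the
initializer into whatever follows in memory. The idiom it protects:

    #include <stdio.h>

    /* Sentinel-terminated string table: consumers loop until NULL,
     * so every initializer must end with the terminator. */
    static const char *bpf_perms[] = {
            "map_create", "map_read", "map_write",
            "prog_load", "prog_run",
            NULL,                   /* the entry the patch adds back */
    };

    int main(void)
    {
            for (const char **p = bpf_perms; *p; p++)
                    printf("%s\n", *p);
            return 0;
    }
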
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 45f4b01de23fb..ef41f5b3a240d 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -398,10 +398,8 @@ int snd_card_disconnect(struct snd_card *card)
+ 		return 0;
+ 	}
+ 	card->shutdown = 1;
+-	spin_unlock(&card->files_lock);
+ 
+ 	/* replace file->f_op with special dummy operations */
+-	spin_lock(&card->files_lock);
+ 	list_for_each_entry(mfile, &card->files_list, list) {
+ 		/* it's critical part, use endless loop */
+ 		/* we have no room to fail */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d05d16ddbdf2c..8ec57bd351dfe 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2470,13 +2470,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 		      ALC882_FIXUP_ACER_ASPIRE_8930G),
+ 	SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
+ 		      ALC882_FIXUP_ACER_ASPIRE_8930G),
++	SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
++		      ALC882_FIXUP_ACER_ASPIRE_4930G),
++	SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
+ 	SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
+ 		      ALC882_FIXUP_ACER_ASPIRE_4930G),
+ 	SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
+ 		      ALC882_FIXUP_ACER_ASPIRE_4930G),
+-	SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
+-		      ALC882_FIXUP_ACER_ASPIRE_4930G),
+-	SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
+ 	SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G",
+ 		      ALC882_FIXUP_ACER_ASPIRE_4930G),
+ 	SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
+@@ -2489,11 +2489,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
+ 	SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
+ 	SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
++	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
++	SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+ 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+ 	SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
+-	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+-	SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+ 
+ 	/* All Apple entries are in codec SSIDs */
+ 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
+@@ -2536,9 +2536,19 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
++	SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
+-	SND_PCI_QUIRK(0x1558, 0x950A, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950),
+@@ -2548,16 +2558,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
+-	SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
+@@ -4331,6 +4331,35 @@ static void alc245_fixup_hp_x360_amp(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* toggle GPIO2 each time a stream is started; we use the PREPARE state instead */
++static void alc274_hp_envy_pcm_hook(struct hda_pcm_stream *hinfo,
++				    struct hda_codec *codec,
++				    struct snd_pcm_substream *substream,
++				    int action)
++{
++	switch (action) {
++	case HDA_GEN_PCM_ACT_PREPARE:
++		alc_update_gpio_data(codec, 0x04, true);
++		break;
++	case HDA_GEN_PCM_ACT_CLEANUP:
++		alc_update_gpio_data(codec, 0x04, false);
++		break;
++	}
++}
++
++static void alc274_fixup_hp_envy_gpio(struct hda_codec *codec,
++				      const struct hda_fixup *fix,
++				      int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PROBE) {
++		spec->gpio_mask |= 0x04;
++		spec->gpio_dir |= 0x04;
++		spec->gen.pcm_playback_hook = alc274_hp_envy_pcm_hook;
++	}
++}
++
+ static void alc_update_coef_led(struct hda_codec *codec,
+ 				struct alc_coef_led *led,
+ 				bool polarity, bool on)
+@@ -6443,6 +6472,7 @@ enum {
+ 	ALC255_FIXUP_XIAOMI_HEADSET_MIC,
+ 	ALC274_FIXUP_HP_MIC,
+ 	ALC274_FIXUP_HP_HEADSET_MIC,
++	ALC274_FIXUP_HP_ENVY_GPIO,
+ 	ALC256_FIXUP_ASUS_HPE,
+ 	ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ 	ALC287_FIXUP_HP_GPIO_LED,
+@@ -7882,6 +7912,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC274_FIXUP_HP_MIC
+ 	},
++	[ALC274_FIXUP_HP_ENVY_GPIO] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc274_fixup_hp_envy_gpio,
++	},
+ 	[ALC256_FIXUP_ASUS_HPE] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -7947,12 +7981,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
+ 	SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+-	SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ 	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ 	SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ 	SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ 	SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
++	SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
+ 	SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
+ 	SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+@@ -8008,8 +8042,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
+ 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
+-	SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
+ 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
++	SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
+ 	SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+ 	SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+@@ -8019,8 +8053,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+-	SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
++	SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
+@@ -8031,35 +8065,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+ 	SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
+-	SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
+-	/* ALC282 */
+ 	SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+-	SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
+-	SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
+-	SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
+-	SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
+-	SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+-	SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+-	/* ALC290 */
+-	SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+-	SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+-	SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+@@ -8067,26 +8084,41 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
+ 	SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
+ 	SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
++	SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
+@@ -8101,6 +8133,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+@@ -8128,16 +8161,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
++	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
++	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
+@@ -8150,32 +8185,31 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+-	SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ 	SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+-	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+ 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
+-	SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
+ 	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ 	SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
+ 	SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
+ 	SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+-	SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
++	SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
++	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+@@ -8185,9 +8219,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+-	SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
+ 	SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
++	SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+@@ -8243,9 +8277,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
++	SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
+-	SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
+@@ -8289,6 +8323,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
+@@ -8307,20 +8342,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+-	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ 	SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+ 	SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
+ 	SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
+ 	SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
+ 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
++	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
++	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ 	SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+-	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+-	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ 	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+@@ -8777,6 +8810,16 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x19, 0x03a11020},
+ 		{0x21, 0x0321101f}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
++		{0x12, 0x90a60130},
++		{0x14, 0x90170110},
++		{0x19, 0x04a11040},
++		{0x21, 0x04211020}),
++	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
++		{0x14, 0x90170110},
++		{0x19, 0x04a11040},
++		{0x1d, 0x40600001},
++		{0x21, 0x04211020}),
++	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ 		{0x14, 0x90170110},
+ 		{0x19, 0x04a11040},
+ 		{0x21, 0x04211020}),
+@@ -8947,10 +8990,6 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ 		{0x19, 0x40000000},
+ 		{0x1a, 0x40000000}),
+-	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+-		{0x14, 0x90170110},
+-		{0x19, 0x04a11040},
+-		{0x21, 0x04211020}),
+ 	{}
+ };
+ 
+@@ -9266,8 +9305,7 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
+ 	SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
+ 	SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
+-	SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F),
+-	SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F),
++	SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F),
+ 	SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
+ 	{}
+ };
+@@ -10062,6 +10100,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
++	SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
+ 	SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+@@ -10078,9 +10117,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ 	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
+-	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+ 	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
++	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+ 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
+ 	SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
+@@ -10100,7 +10139,6 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
+ 	SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
+ 	SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
+-	SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
+ 
+ #if 0
+ 	/* Below is a quirk table taken from the old code.
+diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
+index 85bdd05341803..80b3b162ca5ba 100644
+--- a/sound/soc/codecs/ak5558.c
++++ b/sound/soc/codecs/ak5558.c
+@@ -272,7 +272,7 @@ static void ak5558_power_off(struct ak5558_priv *ak5558)
+ 	if (!ak5558->reset_gpiod)
+ 		return;
+ 
+-	gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
++	gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
+ 	usleep_range(1000, 2000);
+ }
+ 
+@@ -281,7 +281,7 @@ static void ak5558_power_on(struct ak5558_priv *ak5558)
+ 	if (!ak5558->reset_gpiod)
+ 		return;
+ 
+-	gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
++	gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
+ 	usleep_range(1000, 2000);
+ }
+ 
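
The ak5558 swap looks inverted until you recall that gpiod values are logical
rather than physical: 1 means "asserted", and any active-low polarity is
described by the firmware node, not the driver. After the fix, power-off
asserts reset and power-on releases it. A sketch of the convention:

    #include <linux/gpio/consumer.h>

    /* With a reset line flagged GPIO_ACTIVE_LOW in DT, value 1 drives
     * the pin low; the driver only states intent ("assert reset"). */
    static void codec_enter_reset(struct gpio_desc *reset)
    {
            gpiod_set_value_cansleep(reset, 1);     /* assert reset */
    }

    static void codec_leave_reset(struct gpio_desc *reset)
    {
            gpiod_set_value_cansleep(reset, 0);     /* release reset */
    }
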
+diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
+index f04f88c8d4251..b689f26fc4be6 100644
+--- a/sound/soc/codecs/tlv320aic32x4.c
++++ b/sound/soc/codecs/tlv320aic32x4.c
+@@ -577,12 +577,12 @@ static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
+ 		.window_start = 0,
+ 		.window_len = 128,
+ 		.range_min = 0,
+-		.range_max = AIC32X4_RMICPGAVOL,
++		.range_max = AIC32X4_REFPOWERUP,
+ 	},
+ };
+ 
+ const struct regmap_config aic32x4_regmap_config = {
+-	.max_register = AIC32X4_RMICPGAVOL,
++	.max_register = AIC32X4_REFPOWERUP,
+ 	.ranges = aic32x4_regmap_pages,
+ 	.num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
+ };
+@@ -1243,6 +1243,10 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
+ 	if (ret)
+ 		goto err_disable_regulators;
+ 
++	ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
++	if (ret)
++		goto err_disable_regulators;
++
+ 	ret = devm_snd_soc_register_component(dev,
+ 			&soc_component_dev_aic32x4, &aic32x4_dai, 1);
+ 	if (ret) {
+@@ -1250,10 +1254,6 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
+ 		goto err_disable_regulators;
+ 	}
+ 
+-	ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
+-	if (ret)
+-		goto err_disable_regulators;
+-
+ 	return 0;
+ 
+ err_disable_regulators:
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index cda9cd935d4f3..9e621a254392c 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -608,10 +608,6 @@ static const int bclk_divs[] = {
+  *		- lrclk      = sysclk / dac_divs
+  *		- 10 * bclk  = sysclk / bclk_divs
+  *
+- *	If we cannot find an exact match for (sysclk, lrclk, bclk)
+- *	triplet, we relax the bclk such that bclk is chosen as the
+- *	closest available frequency greater than expected bclk.
+- *
+  * @wm8960: codec private data
+  * @mclk: MCLK used to derive sysclk
+  * @sysclk_idx: sysclk_divs index for found sysclk
+@@ -629,7 +625,7 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
+ {
+ 	int sysclk, bclk, lrclk;
+ 	int i, j, k;
+-	int diff, closest = mclk;
++	int diff;
+ 
+ 	/* marker for no match */
+ 	*bclk_idx = -1;
+@@ -653,12 +649,6 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
+ 					*bclk_idx = k;
+ 					break;
+ 				}
+-				if (diff > 0 && closest > diff) {
+-					*sysclk_idx = i;
+-					*dac_idx = j;
+-					*bclk_idx = k;
+-					closest = diff;
+-				}
+ 			}
+ 			if (k != ARRAY_SIZE(bclk_divs))
+ 				break;
+diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
+index 8c5cdcdc8713c..e81b5cf0d37a9 100644
+--- a/sound/soc/generic/audio-graph-card.c
++++ b/sound/soc/generic/audio-graph-card.c
+@@ -380,7 +380,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
+ 	struct device_node *top = dev->of_node;
+ 	struct asoc_simple_dai *cpu_dai;
+ 	struct asoc_simple_dai *codec_dai;
+-	int ret, single_cpu;
++	int ret, single_cpu = 0;
+ 
+ 	/* Do it only CPU turn */
+ 	if (!li->cpu)
+diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
+index 75365c7bb3930..d916ec69c24ff 100644
+--- a/sound/soc/generic/simple-card.c
++++ b/sound/soc/generic/simple-card.c
+@@ -258,7 +258,7 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
+ 	struct device_node *plat = NULL;
+ 	char prop[128];
+ 	char *prefix = "";
+-	int ret, single_cpu;
++	int ret, single_cpu = 0;
+ 
+ 	/*
+ 	 *	 |CPU   |Codec   : turn
+diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
+index 4e0248d2accc7..7c5038803be73 100644
+--- a/sound/soc/intel/Makefile
++++ b/sound/soc/intel/Makefile
+@@ -5,7 +5,7 @@ obj-$(CONFIG_SND_SOC) += common/
+ # Platform Support
+ obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += atom/
+ obj-$(CONFIG_SND_SOC_INTEL_CATPT) += catpt/
+-obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
++obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += skylake/
+ obj-$(CONFIG_SND_SOC_INTEL_KEEMBAY) += keembay/
+ 
+ # Machine support
+diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
+index cc9a2509ace29..e0149cf6127d0 100644
+--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
++++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
+@@ -282,11 +282,33 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ 	struct snd_interval *chan = hw_param_interval(params,
+ 			SNDRV_PCM_HW_PARAM_CHANNELS);
+ 	struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+-	struct snd_soc_dpcm *dpcm = container_of(
+-			params, struct snd_soc_dpcm, hw_params);
+-	struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
+-	struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
++	struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL;
+ 
++	/*
++	 * The following loop will be called only for the playback stream.
++	 * On this platform, there is only one playback device on every SSP.
++	 */
++	for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
++		rtd_dpcm = dpcm;
++		break;
++	}
++
++	/*
++	 * The following loop will be called only for the capture stream.
++	 * On this platform, there is only one capture device on every SSP.
++	 */
++	for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) {
++		rtd_dpcm = dpcm;
++		break;
++	}
++
++	if (!rtd_dpcm)
++		return -EINVAL;
++
++	/*
++	 * The above two loops are mutually exclusive based on the stream
++	 * direction, so the rtd_dpcm variable is never overwritten.
++	 */
+ 	/*
+ 	 * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
+ 	 * where as kblda7219m98927 & kblmax98927 supports S16_LE by default.
+@@ -309,9 +331,9 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ 	/*
+ 	 * The ADSP will convert the FE rate to 48k, stereo, 24 bit
+ 	 */
+-	if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
+-	    !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
+-	    !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
++	if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") ||
++	    !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") ||
++	    !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) {
+ 		rate->min = rate->max = 48000;
+ 		chan->min = chan->max = 2;
+ 		snd_mask_none(fmt);
+@@ -322,7 +344,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ 	 * The speaker on the SSP0 supports S16_LE and not S24_LE.
+ 	 * thus changing the mask here
+ 	 */
+-	if (!strcmp(be_dai_link->name, "SSP0-Codec"))
++	if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec"))
+ 		snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ 
+ 	return 0;
+diff --git a/sound/soc/intel/boards/sof_wm8804.c b/sound/soc/intel/boards/sof_wm8804.c
+index a46ba13e8eb0c..6a181e45143d7 100644
+--- a/sound/soc/intel/boards/sof_wm8804.c
++++ b/sound/soc/intel/boards/sof_wm8804.c
+@@ -124,7 +124,11 @@ static int sof_wm8804_hw_params(struct snd_pcm_substream *substream,
+ 	}
+ 
+ 	snd_soc_dai_set_clkdiv(codec_dai, WM8804_MCLK_DIV, mclk_div);
+-	snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
++	ret = snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
++	if (ret < 0) {
++		dev_err(rtd->card->dev, "Failed to set WM8804 PLL\n");
++		return ret;
++	}
+ 
+ 	ret = snd_soc_dai_set_sysclk(codec_dai, WM8804_TX_CLKSRC_PLL,
+ 				     sysclk, SND_SOC_CLOCK_OUT);
+diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
+index dd39149b89b1d..1c4649bccec5a 100644
+--- a/sound/soc/intel/skylake/Makefile
++++ b/sound/soc/intel/skylake/Makefile
+@@ -7,7 +7,7 @@ ifdef CONFIG_DEBUG_FS
+   snd-soc-skl-objs += skl-debug.o
+ endif
+ 
+-obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
++obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += snd-soc-skl.o
+ 
+ #Skylake Clock device support
+ snd-soc-skl-ssp-clk-objs := skl-ssp-clk.o
+diff --git a/sound/soc/qcom/qdsp6/q6afe-clocks.c b/sound/soc/qcom/qdsp6/q6afe-clocks.c
+index f0362f0616521..9431656283cd1 100644
+--- a/sound/soc/qcom/qdsp6/q6afe-clocks.c
++++ b/sound/soc/qcom/qdsp6/q6afe-clocks.c
+@@ -11,33 +11,29 @@
+ #include <linux/slab.h>
+ #include "q6afe.h"
+ 
+-#define Q6AFE_CLK(id) &(struct q6afe_clk) {		\
++#define Q6AFE_CLK(id) {					\
+ 		.clk_id	= id,				\
+ 		.afe_clk_id	= Q6AFE_##id,		\
+ 		.name = #id,				\
+-		.attributes = LPASS_CLK_ATTRIBUTE_COUPLE_NO, \
+ 		.rate = 19200000,			\
+-		.hw.init = &(struct clk_init_data) {	\
+-			.ops = &clk_q6afe_ops,		\
+-			.name = #id,			\
+-		},					\
+ 	}
+ 
+-#define Q6AFE_VOTE_CLK(id, blkid, n) &(struct q6afe_clk) { \
++#define Q6AFE_VOTE_CLK(id, blkid, n) {			\
+ 		.clk_id	= id,				\
+ 		.afe_clk_id = blkid,			\
+-		.name = #n,				\
+-		.hw.init = &(struct clk_init_data) {	\
+-			.ops = &clk_vote_q6afe_ops,	\
+-			.name = #id,			\
+-		},					\
++		.name = n,				\
+ 	}
+ 
+-struct q6afe_clk {
+-	struct device *dev;
++struct q6afe_clk_init {
+ 	int clk_id;
+ 	int afe_clk_id;
+ 	char *name;
++	int rate;
++};
++
++struct q6afe_clk {
++	struct device *dev;
++	int afe_clk_id;
+ 	int attributes;
+ 	int rate;
+ 	uint32_t handle;
+@@ -48,8 +44,7 @@ struct q6afe_clk {
+ 
+ struct q6afe_cc {
+ 	struct device *dev;
+-	struct q6afe_clk **clks;
+-	int num_clks;
++	struct q6afe_clk *clks[Q6AFE_MAX_CLK_ID];
+ };
+ 
+ static int clk_q6afe_prepare(struct clk_hw *hw)
+@@ -105,7 +100,7 @@ static int clk_vote_q6afe_block(struct clk_hw *hw)
+ 	struct q6afe_clk *clk = to_q6afe_clk(hw);
+ 
+ 	return q6afe_vote_lpass_core_hw(clk->dev, clk->afe_clk_id,
+-					clk->name, &clk->handle);
++					clk_hw_get_name(&clk->hw), &clk->handle);
+ }
+ 
+ static void clk_unvote_q6afe_block(struct clk_hw *hw)
+@@ -120,84 +115,76 @@ static const struct clk_ops clk_vote_q6afe_ops = {
+ 	.unprepare	= clk_unvote_q6afe_block,
+ };
+ 
+-static struct q6afe_clk *q6afe_clks[Q6AFE_MAX_CLK_ID] = {
+-	[LPASS_CLK_ID_PRI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
+-	[LPASS_CLK_ID_PRI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
+-	[LPASS_CLK_ID_SEC_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
+-	[LPASS_CLK_ID_SEC_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
+-	[LPASS_CLK_ID_TER_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
+-	[LPASS_CLK_ID_TER_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
+-	[LPASS_CLK_ID_QUAD_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
+-	[LPASS_CLK_ID_QUAD_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
+-	[LPASS_CLK_ID_SPEAKER_I2S_IBIT] =
+-				Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
+-	[LPASS_CLK_ID_SPEAKER_I2S_EBIT] =
+-				Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
+-	[LPASS_CLK_ID_SPEAKER_I2S_OSR] =
+-				Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
+-	[LPASS_CLK_ID_QUI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
+-	[LPASS_CLK_ID_QUI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
+-	[LPASS_CLK_ID_SEN_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
+-	[LPASS_CLK_ID_SEN_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
+-	[LPASS_CLK_ID_INT0_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT1_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT2_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT3_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT4_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT5_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT6_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
+-	[LPASS_CLK_ID_QUI_MI2S_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
+-	[LPASS_CLK_ID_PRI_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
+-	[LPASS_CLK_ID_PRI_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
+-	[LPASS_CLK_ID_SEC_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
+-	[LPASS_CLK_ID_SEC_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
+-	[LPASS_CLK_ID_TER_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
+-	[LPASS_CLK_ID_TER_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
+-	[LPASS_CLK_ID_QUAD_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
+-	[LPASS_CLK_ID_QUAD_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
+-	[LPASS_CLK_ID_QUIN_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
+-	[LPASS_CLK_ID_QUIN_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
+-	[LPASS_CLK_ID_QUI_PCM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
+-	[LPASS_CLK_ID_PRI_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
+-	[LPASS_CLK_ID_PRI_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
+-	[LPASS_CLK_ID_SEC_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
+-	[LPASS_CLK_ID_SEC_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
+-	[LPASS_CLK_ID_TER_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
+-	[LPASS_CLK_ID_TER_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
+-	[LPASS_CLK_ID_QUAD_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
+-	[LPASS_CLK_ID_QUAD_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
+-	[LPASS_CLK_ID_QUIN_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
+-	[LPASS_CLK_ID_QUIN_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
+-	[LPASS_CLK_ID_QUIN_TDM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
+-	[LPASS_CLK_ID_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
+-	[LPASS_CLK_ID_MCLK_2] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
+-	[LPASS_CLK_ID_MCLK_3] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
+-	[LPASS_CLK_ID_MCLK_4] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
+-	[LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE] =
+-		Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
+-	[LPASS_CLK_ID_INT_MCLK_0] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
+-	[LPASS_CLK_ID_INT_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
+-	[LPASS_CLK_ID_WSA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
+-	[LPASS_CLK_ID_WSA_CORE_NPL_MCLK] =
+-				Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
+-	[LPASS_CLK_ID_VA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
+-	[LPASS_CLK_ID_TX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
+-	[LPASS_CLK_ID_TX_CORE_NPL_MCLK] =
+-			Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
+-	[LPASS_CLK_ID_RX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
+-	[LPASS_CLK_ID_RX_CORE_NPL_MCLK] =
+-				Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
+-	[LPASS_CLK_ID_VA_CORE_2X_MCLK] =
+-				Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
+-	[LPASS_HW_AVTIMER_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
+-						 Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
+-						 "LPASS_AVTIMER_MACRO"),
+-	[LPASS_HW_MACRO_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
+-						Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
+-						"LPASS_HW_MACRO"),
+-	[LPASS_HW_DCODEC_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
+-					Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
+-					"LPASS_HW_DCODEC"),
++static const struct q6afe_clk_init q6afe_clks[] = {
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
++	Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
++	Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
++	Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
++	Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
++	Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
++	Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
++	Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
++	Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
++	Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
++	Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
++		       Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
++		       "LPASS_AVTIMER_MACRO"),
++	Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
++		       Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
++		       "LPASS_HW_MACRO"),
++	Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
++		       Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
++		       "LPASS_HW_DCODEC"),
+ };
+ 
+ static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
+@@ -207,7 +194,7 @@ static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
+ 	unsigned int idx = clkspec->args[0];
+ 	unsigned int attr = clkspec->args[1];
+ 
+-	if (idx >= cc->num_clks || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
++	if (idx >= Q6AFE_MAX_CLK_ID || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
+ 		dev_err(cc->dev, "Invalid clk specifier (%d, %d)\n", idx, attr);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+@@ -230,20 +217,36 @@ static int q6afe_clock_dev_probe(struct platform_device *pdev)
+ 	if (!cc)
+ 		return -ENOMEM;
+ 
+-	cc->clks = &q6afe_clks[0];
+-	cc->num_clks = ARRAY_SIZE(q6afe_clks);
++	cc->dev = dev;
+ 	for (i = 0; i < ARRAY_SIZE(q6afe_clks); i++) {
+-		if (!q6afe_clks[i])
+-			continue;
++		unsigned int id = q6afe_clks[i].clk_id;
++		struct clk_init_data init = {
++			.name =  q6afe_clks[i].name,
++		};
++		struct q6afe_clk *clk;
++
++		clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
++		if (!clk)
++			return -ENOMEM;
++
++		clk->dev = dev;
++		clk->afe_clk_id = q6afe_clks[i].afe_clk_id;
++		clk->rate = q6afe_clks[i].rate;
++		clk->hw.init = &init;
++
++		if (clk->rate)
++			init.ops = &clk_q6afe_ops;
++		else
++			init.ops = &clk_vote_q6afe_ops;
+ 
+-		q6afe_clks[i]->dev = dev;
++		cc->clks[id] = clk;
+ 
+-		ret = devm_clk_hw_register(dev, &q6afe_clks[i]->hw);
++		ret = devm_clk_hw_register(dev, &clk->hw);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
+-	ret = of_clk_add_hw_provider(dev->of_node, q6afe_of_clk_hw_get, cc);
++	ret = devm_of_clk_add_hw_provider(dev, q6afe_of_clk_hw_get, cc);
+ 	if (ret)
+ 		return ret;
+ 
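The q6afe-clocks rework above is a rebind fix: the old table embedded live clk_hw objects (with hw.init pointing into file-scope compound literals) in static data, so unbinding and rebinding the driver re-registered structures still carrying state from the previous registration. The new code keeps only a const description table and allocates fresh per-clock state on every probe; clk_init_data can sit on the stack because the clk core copies what it needs during registration, and devm_of_clk_add_hw_provider() tears the provider down automatically on unbind. A generic sketch of the pattern, with hypothetical my_* names rather than the q6afe ones:

#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

struct my_clk {
	struct clk_hw hw;
	unsigned long rate;
};

extern const struct clk_ops my_rate_ops;	/* assumed defined elsewhere */
extern const struct clk_ops my_vote_ops;	/* assumed defined elsewhere */

static const struct { const char *name; unsigned long rate; } my_inits[] = {
	{ "bus_clk",   19200000 },
	{ "core_vote", 0 },		/* rate 0 => vote-style clock */
};

static int my_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(my_inits); i++) {
		/* fine on the stack: the clk core copies the init data */
		struct clk_init_data init = { .name = my_inits[i].name };
		struct my_clk *clk;

		clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
		if (!clk)
			return -ENOMEM;

		clk->rate = my_inits[i].rate;
		init.ops = clk->rate ? &my_rate_ops : &my_vote_ops;
		clk->hw.init = &init;

		ret = devm_clk_hw_register(dev, &clk->hw);
		if (ret)
			return ret;
	}
	return 0;
}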
+diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
+index cad1cd1bfdf0e..4327b72162ecd 100644
+--- a/sound/soc/qcom/qdsp6/q6afe.c
++++ b/sound/soc/qcom/qdsp6/q6afe.c
+@@ -1681,7 +1681,7 @@ int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
+ EXPORT_SYMBOL(q6afe_unvote_lpass_core_hw);
+ 
+ int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
+-			     char *client_name, uint32_t *client_handle)
++			     const char *client_name, uint32_t *client_handle)
+ {
+ 	struct q6afe *afe = dev_get_drvdata(dev->parent);
+ 	struct afe_cmd_remote_lpass_core_hw_vote_request *vote_cfg;
+diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
+index 22e10269aa109..3845b56c0ed36 100644
+--- a/sound/soc/qcom/qdsp6/q6afe.h
++++ b/sound/soc/qcom/qdsp6/q6afe.h
+@@ -236,7 +236,7 @@ int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
+ int q6afe_set_lpass_clock(struct device *dev, int clk_id, int clk_src,
+ 			  int clk_root, unsigned int freq);
+ int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
+-			     char *client_name, uint32_t *client_handle);
++			     const char *client_name, uint32_t *client_handle);
+ int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
+ 			       uint32_t client_handle);
+ #endif /* __Q6AFE_H__ */
+diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
+index 9300fef9bf269..125e07f65d2b5 100644
+--- a/sound/soc/samsung/tm2_wm5110.c
++++ b/sound/soc/samsung/tm2_wm5110.c
+@@ -553,7 +553,7 @@ static int tm2_probe(struct platform_device *pdev)
+ 
+ 		ret = of_parse_phandle_with_args(dev->of_node, "i2s-controller",
+ 						 cells_name, i, &args);
+-		if (!args.np) {
++		if (ret) {
+ 			dev_err(dev, "i2s-controller property parse error: %d\n", i);
+ 			ret = -EINVAL;
+ 			goto dai_node_put;
+diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
+index 6740df541508f..3d22c1be6f3d5 100644
+--- a/sound/soc/tegra/tegra30_i2s.c
++++ b/sound/soc/tegra/tegra30_i2s.c
+@@ -58,8 +58,18 @@ static int tegra30_i2s_runtime_resume(struct device *dev)
+ 	}
+ 
+ 	regcache_cache_only(i2s->regmap, false);
++	regcache_mark_dirty(i2s->regmap);
++
++	ret = regcache_sync(i2s->regmap);
++	if (ret)
++		goto disable_clocks;
+ 
+ 	return 0;
++
++disable_clocks:
++	clk_disable_unprepare(i2s->clk_i2s);
++
++	return ret;
+ }
+ 
+ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
+@@ -551,37 +561,11 @@ static int tegra30_i2s_platform_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_PM_SLEEP
+-static int tegra30_i2s_suspend(struct device *dev)
+-{
+-	struct tegra30_i2s *i2s = dev_get_drvdata(dev);
+-
+-	regcache_mark_dirty(i2s->regmap);
+-
+-	return 0;
+-}
+-
+-static int tegra30_i2s_resume(struct device *dev)
+-{
+-	struct tegra30_i2s *i2s = dev_get_drvdata(dev);
+-	int ret;
+-
+-	ret = pm_runtime_get_sync(dev);
+-	if (ret < 0) {
+-		pm_runtime_put(dev);
+-		return ret;
+-	}
+-	ret = regcache_sync(i2s->regmap);
+-	pm_runtime_put(dev);
+-
+-	return ret;
+-}
+-#endif
+-
+ static const struct dev_pm_ops tegra30_i2s_pm_ops = {
+ 	SET_RUNTIME_PM_OPS(tegra30_i2s_runtime_suspend,
+ 			   tegra30_i2s_runtime_resume, NULL)
+-	SET_SYSTEM_SLEEP_PM_OPS(tegra30_i2s_suspend, tegra30_i2s_resume)
++	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
++				pm_runtime_force_resume)
+ };
+ 
+ static struct platform_driver tegra30_i2s_driver = {
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 0826a437f8fca..7b7526d3a56e8 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -181,9 +181,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
+ 				ctrlif, interface);
+ 			return -EINVAL;
+ 		}
+-		usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
+-
+-		return 0;
++		return usb_driver_claim_interface(&usb_audio_driver, iface,
++						  USB_AUDIO_IFACE_UNUSED);
+ 	}
+ 
+ 	if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
+@@ -203,7 +202,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
+ 
+ 	if (! snd_usb_parse_audio_interface(chip, interface)) {
+ 		usb_set_interface(dev, interface, 0); /* reset the current interface */
+-		usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
++		return usb_driver_claim_interface(&usb_audio_driver, iface,
++						  USB_AUDIO_IFACE_UNUSED);
+ 	}
+ 
+ 	return 0;
+@@ -862,7 +862,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
+ 	struct snd_card *card;
+ 	struct list_head *p;
+ 
+-	if (chip == (void *)-1L)
++	if (chip == USB_AUDIO_IFACE_UNUSED)
+ 		return;
+ 
+ 	card = chip->card;
+@@ -992,7 +992,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+ 	struct usb_mixer_interface *mixer;
+ 	struct list_head *p;
+ 
+-	if (chip == (void *)-1L)
++	if (chip == USB_AUDIO_IFACE_UNUSED)
+ 		return 0;
+ 
+ 	if (!chip->num_suspended_intf++) {
+@@ -1022,7 +1022,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+ 	struct list_head *p;
+ 	int err = 0;
+ 
+-	if (chip == (void *)-1L)
++	if (chip == USB_AUDIO_IFACE_UNUSED)
+ 		return 0;
+ 
+ 	atomic_inc(&chip->active); /* avoid autopm */
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 0c23fa6d8525d..cd46ca7cd28de 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1332,7 +1332,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
+ 
+  error:
+ 	snd_usbmidi_in_endpoint_delete(ep);
+-	return -ENOMEM;
++	return err;
+ }
+ 
+ /*
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 176437a441e6c..7c6e83eee71dc 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -55,8 +55,12 @@ static int create_composite_quirk(struct snd_usb_audio *chip,
+ 		if (!iface)
+ 			continue;
+ 		if (quirk->ifnum != probed_ifnum &&
+-		    !usb_interface_claimed(iface))
+-			usb_driver_claim_interface(driver, iface, (void *)-1L);
++		    !usb_interface_claimed(iface)) {
++			err = usb_driver_claim_interface(driver, iface,
++							 USB_AUDIO_IFACE_UNUSED);
++			if (err < 0)
++				return err;
++		}
+ 	}
+ 
+ 	return 0;
+@@ -426,8 +430,12 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip,
+ 			continue;
+ 
+ 		err = create_autodetect_quirk(chip, iface, driver);
+-		if (err >= 0)
+-			usb_driver_claim_interface(driver, iface, (void *)-1L);
++		if (err >= 0) {
++			err = usb_driver_claim_interface(driver, iface,
++							 USB_AUDIO_IFACE_UNUSED);
++			if (err < 0)
++				return err;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 60b9dd7df6bb7..8794c8658ab96 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -61,6 +61,8 @@ struct snd_usb_audio {
+ 	struct media_intf_devnode *ctl_intf_media_devnode;
+ };
+ 
++#define USB_AUDIO_IFACE_UNUSED	((void *)-1L)
++
+ #define usb_audio_err(chip, fmt, args...) \
+ 	dev_err(&(chip)->dev->dev, fmt, ##args)
+ #define usb_audio_warn(chip, fmt, args...) \
+diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
+index fe9e7b3a4b503..1326fff3629b1 100644
+--- a/tools/bpf/bpftool/btf.c
++++ b/tools/bpf/bpftool/btf.c
+@@ -538,6 +538,7 @@ static int do_dump(int argc, char **argv)
+ 			NEXT_ARG();
+ 			if (argc < 1) {
+ 				p_err("expecting value for 'format' option\n");
++				err = -EINVAL;
+ 				goto done;
+ 			}
+ 			if (strcmp(*argv, "c") == 0) {
+@@ -547,11 +548,13 @@ static int do_dump(int argc, char **argv)
+ 			} else {
+ 				p_err("unrecognized format specifier: '%s', possible values: raw, c",
+ 				      *argv);
++				err = -EINVAL;
+ 				goto done;
+ 			}
+ 			NEXT_ARG();
+ 		} else {
+ 			p_err("unrecognized option: '%s'", *argv);
++			err = -EINVAL;
+ 			goto done;
+ 		}
+ 	}
+diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
+index b86f450e6fce2..d9afb730136a4 100644
+--- a/tools/bpf/bpftool/main.c
++++ b/tools/bpf/bpftool/main.c
+@@ -276,7 +276,7 @@ static int do_batch(int argc, char **argv)
+ 	int n_argc;
+ 	FILE *fp;
+ 	char *cp;
+-	int err;
++	int err = 0;
+ 	int i;
+ 
+ 	if (argc < 2) {
+@@ -370,7 +370,6 @@ static int do_batch(int argc, char **argv)
+ 	} else {
+ 		if (!json_output)
+ 			printf("processed %d commands\n", lines);
+-		err = 0;
+ 	}
+ err_close:
+ 	if (fp != stdin)
+diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
+index b400364ee054e..09ae0381205b6 100644
+--- a/tools/bpf/bpftool/map.c
++++ b/tools/bpf/bpftool/map.c
+@@ -100,7 +100,7 @@ static int do_dump_btf(const struct btf_dumper *d,
+ 		       void *value)
+ {
+ 	__u32 value_id;
+-	int ret;
++	int ret = 0;
+ 
+ 	/* start of key-value pair */
+ 	jsonw_start_object(d->jw);
+diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
+index 53b3e199fb254..09ebe3db5f2f8 100644
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -88,11 +88,19 @@ enum bpf_enum_value_kind {
+ 	const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
+ 	unsigned long long val;						      \
+ 									      \
++	/* This is a so-called barrier_var() operation that makes the	      \
++	 * specified variable "a black box" for the optimizing compiler.     \
++	 * It forces the compiler to perform the BYTE_OFFSET relocation on p \
++	 * and use its calculated value in the switch below, instead of      \
++	 * applying the same relocation 4 times per individual memory load.  \
++	 */								      \
++	asm volatile("" : "=r"(p) : "0"(p));				      \
++									      \
+ 	switch (__CORE_RELO(s, field, BYTE_SIZE)) {			      \
+-	case 1: val = *(const unsigned char *)p;			      \
+-	case 2: val = *(const unsigned short *)p;			      \
+-	case 4: val = *(const unsigned int *)p;				      \
+-	case 8: val = *(const unsigned long long *)p;			      \
++	case 1: val = *(const unsigned char *)p; break;			      \
++	case 2: val = *(const unsigned short *)p; break;		      \
++	case 4: val = *(const unsigned int *)p; break;			      \
++	case 8: val = *(const unsigned long long *)p; break;		      \
+ 	}								      \
+ 	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
+ 	if (__CORE_RELO(s, field, SIGNED))				      \
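Two independent fixes land in the bpf_core_read.h hunk above: every switch arm gains the break it was missing (previously all cases fell through, so val always ended up holding the 8-byte load), and the empty asm statement pins p so the compiler performs the CO-RE relocation once instead of once per load. The asm idiom deserves a standalone look; below is a sketch of the same construct the comment calls barrier_var(), in an ordinary C helper (load_sized() itself is hypothetical):

/* A no-op at run time: the "0" constraint ties the input to the output
 * register, but the compiler must now treat var as opaque, so it cannot
 * re-derive (or re-relocate) the value at each later use. */
#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))

static unsigned long long load_sized(const void *p, int sz)
{
	unsigned long long val = 0;

	barrier_var(p);
	switch (sz) {
	case 1: val = *(const unsigned char *)p; break;
	case 2: val = *(const unsigned short *)p; break;
	case 4: val = *(const unsigned int *)p; break;
	case 8: val = *(const unsigned long long *)p; break;
	}
	return val;
}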
+diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
+index f9ef37707888f..1c2e91ee041d8 100644
+--- a/tools/lib/bpf/bpf_tracing.h
++++ b/tools/lib/bpf/bpf_tracing.h
+@@ -413,20 +413,38 @@ typeof(name(0)) name(struct pt_regs *ctx)				    \
+ }									    \
+ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
+ 
++#define ___bpf_fill0(arr, p, x) do {} while (0)
++#define ___bpf_fill1(arr, p, x) arr[p] = x
++#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
++#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
++#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
++#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
++#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
++#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
++#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
++#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
++#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
++#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
++#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
++#define ___bpf_fill(arr, args...) \
++	___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
++
+ /*
+  * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
+  * in a structure.
+  */
+-#define BPF_SEQ_PRINTF(seq, fmt, args...)				    \
+-	({								    \
+-		_Pragma("GCC diagnostic push")				    \
+-		_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	    \
+-		static const char ___fmt[] = fmt;			    \
+-		unsigned long long ___param[] = { args };		    \
+-		_Pragma("GCC diagnostic pop")				    \
+-		int ___ret = bpf_seq_printf(seq, ___fmt, sizeof(___fmt),    \
+-					    ___param, sizeof(___param));    \
+-		___ret;							    \
+-	})
++#define BPF_SEQ_PRINTF(seq, fmt, args...)			\
++({								\
++	static const char ___fmt[] = fmt;			\
++	unsigned long long ___param[___bpf_narg(args)];		\
++								\
++	_Pragma("GCC diagnostic push")				\
++	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
++	___bpf_fill(___param, args);				\
++	_Pragma("GCC diagnostic pop")				\
++								\
++	bpf_seq_printf(seq, ___fmt, sizeof(___fmt),		\
++		       ___param, sizeof(___param));		\
++})
+ 
+ #endif
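The rewritten BPF_SEQ_PRINTF above no longer builds ___param[] from a brace initializer; the recursive ___bpf_fill macros assign each variadic argument into its own slot, which sizes the array exactly to the argument count and keeps the -Wint-conversion pragma scoped to those assignments. For three arguments the recursion unrolls like this (the ___bpf_narg()/___bpf_apply() helpers are defined elsewhere in this header):

/*
 *   ___bpf_fill(___param, a, b, c)
 * expands via ___bpf_apply(___bpf_fill, 3) to
 *   ___bpf_fill3(___param, 0, a, b, c)
 * and then unrolls one slot per step:
 *   ___param[0] = a; ___bpf_fill2(___param, 0 + 1, b, c)
 *   ___param[0] = a; ___param[1] = b; ___bpf_fill1(___param, 1 + 1, c)
 *   ___param[0] = a; ___param[1] = b; ___param[2] = c;
 */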
+diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
+index 1237bcd1dd17e..5b8a6ea44b38b 100644
+--- a/tools/lib/bpf/btf.h
++++ b/tools/lib/bpf/btf.h
+@@ -173,6 +173,7 @@ struct btf_dump_emit_type_decl_opts {
+ 	int indent_level;
+ 	/* strip all the const/volatile/restrict mods */
+ 	bool strip_mods;
++	size_t :0;
+ };
+ #define btf_dump_emit_type_decl_opts__last_field strip_mods
+ 
+diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
+index 3c35eb401931f..3d690d4e785c3 100644
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -507,6 +507,7 @@ struct xdp_link_info {
+ struct bpf_xdp_set_link_opts {
+ 	size_t sz;
+ 	int old_fd;
++	size_t :0;
+ };
+ #define bpf_xdp_set_link_opts__last_field old_fd
+ 
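The bare size_t :0; members added to btf_dump_emit_type_decl_opts and bpf_xdp_set_link_opts deserve a note. These opts structs are versioned through their leading sz field, and libbpf checks that every byte between the last field a given library version knows about and the caller's declared sz is zero. Without explicit padding, the trailing pad bytes (4 of them after old_fd) are not guaranteed to be zeroed by a designated initializer, so a perfectly valid caller could trip that check; the anonymous zero-width bitfield makes the padding part of the declared layout, which common compilers then zero along with the rest. A minimal sketch of the scheme, where opts_valid() is a stand-in for libbpf's internal validation rather than its real code:

#include <stdbool.h>
#include <stddef.h>

struct xdp_opts {
	size_t sz;		/* caller sets sizeof(struct xdp_opts) */
	int old_fd;
	size_t :0;		/* make the trailing padding explicit */
};

/* Bytes past the last field this library version knows about must be
 * zero, otherwise a newer caller is relying on a field we would ignore. */
static bool opts_valid(const void *opts, size_t known_sz, size_t user_sz)
{
	const unsigned char *p = opts;
	size_t i;

	for (i = known_sz; i < user_sz; i++)
		if (p[i])
			return false;
	return true;
}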
+diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
+index d82054225fcc0..4d0c02ba3f7d3 100644
+--- a/tools/lib/perf/include/perf/event.h
++++ b/tools/lib/perf/include/perf/event.h
+@@ -8,6 +8,8 @@
+ #include <linux/bpf.h>
+ #include <sys/types.h> /* pid_t */
+ 
++#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
++
+ struct perf_record_mmap {
+ 	struct perf_event_header header;
+ 	__u32			 pid, tid;
+@@ -346,8 +348,9 @@ struct perf_record_time_conv {
+ 	__u64			 time_zero;
+ 	__u64			 time_cycles;
+ 	__u64			 time_mask;
+-	bool			 cap_user_time_zero;
+-	bool			 cap_user_time_short;
++	__u8			 cap_user_time_zero;
++	__u8			 cap_user_time_short;
++	__u8			 reserved[6];	/* For alignment */
+ };
+ 
+ struct perf_record_header_feature {
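The event_contains() macro added above enables size-based feature detection: every perf record starts with a header whose size field was written by the producing tool, so a consumer can tell whether a later-added member is actually present by comparing that size against the member's offset. A self-contained illustration with hypothetical record types:

#include <linux/perf_event.h>	/* struct perf_event_header */
#include <linux/types.h>
#include <stddef.h>

#define event_contains(obj, mem) \
	((obj).header.size > offsetof(typeof(obj), mem))

struct old_rec { struct perf_event_header header; __u64 a; };
struct new_rec { struct perf_event_header header; __u64 a, b; };

static __u64 read_b(const struct new_rec *r)
{
	/* r may really be an old_rec written by an older tool; only
	 * dereference b when header.size covers its offset. */
	return event_contains(*r, b) ? r->b : 0;
}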
+diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
+index 4ea7ec4f496e8..008f1683e5407 100644
+--- a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
++++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
+@@ -275,7 +275,7 @@
+   {
+     "EventName": "l2_pf_hit_l2",
+     "EventCode": "0x70",
+-    "BriefDescription": "L2 prefetch hit in L2.",
++    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
+     "UMask": "0xff"
+   },
+   {
+diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
+index 2cfe2d2f3bfdd..3c954543d1ae6 100644
+--- a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
++++ b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
+@@ -79,10 +79,10 @@
+     "UMask": "0x70"
+   },
+   {
+-    "MetricName": "l2_cache_hits_from_l2_hwpf",
++    "EventName": "l2_cache_hits_from_l2_hwpf",
++    "EventCode": "0x70",
+     "BriefDescription": "L2 Cache Hits from L2 HWPF",
+-    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+-    "MetricGroup": "l2_cache"
++    "UMask": "0xff"
+   },
+   {
+     "EventName": "l3_accesses",
+diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
+index f61b982f83ca3..8ba84a48188dd 100644
+--- a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
++++ b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
+@@ -205,7 +205,7 @@
+   {
+     "EventName": "l2_pf_hit_l2",
+     "EventCode": "0x70",
+-    "BriefDescription": "L2 prefetch hit in L2.",
++    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
+     "UMask": "0xff"
+   },
+   {
+diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
+index 2ef91e25e6613..1c624cee9ef48 100644
+--- a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
++++ b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
+@@ -79,10 +79,10 @@
+     "UMask": "0x70"
+   },
+   {
+-    "MetricName": "l2_cache_hits_from_l2_hwpf",
++    "EventName": "l2_cache_hits_from_l2_hwpf",
++    "EventCode": "0x70",
+     "BriefDescription": "L2 Cache Hits from L2 HWPF",
+-    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+-    "MetricGroup": "l2_cache"
++    "UMask": "0xff"
+   },
+   {
+     "EventName": "l3_accesses",
+diff --git a/tools/perf/trace/beauty/fsconfig.sh b/tools/perf/trace/beauty/fsconfig.sh
+index 83fb24df05c9f..bc6ef7bb7a5f9 100755
+--- a/tools/perf/trace/beauty/fsconfig.sh
++++ b/tools/perf/trace/beauty/fsconfig.sh
+@@ -10,8 +10,7 @@ fi
+ linux_mount=${linux_header_dir}/mount.h
+ 
+ printf "static const char *fsconfig_cmds[] = {\n"
+-regex='^[[:space:]]*+FSCONFIG_([[:alnum:]_]+)[[:space:]]*=[[:space:]]*([[:digit:]]+)[[:space:]]*,[[:space:]]*.*'
+-egrep $regex ${linux_mount} | \
+-	sed -r "s/$regex/\2 \1/g"	| \
+-	xargs printf "\t[%s] = \"%s\",\n"
++ms='[[:space:]]*'
++sed -nr "s/^${ms}FSCONFIG_([[:alnum:]_]+)${ms}=${ms}([[:digit:]]+)${ms},.*/\t[\2] = \"\1\",/p" \
++	${linux_mount}
+ printf "};\n"
+diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
+index 9760d8e7b3860..917a9c707371a 100644
+--- a/tools/perf/util/jitdump.c
++++ b/tools/perf/util/jitdump.c
+@@ -396,21 +396,31 @@ static pid_t jr_entry_tid(struct jit_buf_desc *jd, union jr_entry *jr)
+ 
+ static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
+ {
+-	struct perf_tsc_conversion tc;
++	struct perf_tsc_conversion tc = { .time_shift = 0, };
++	struct perf_record_time_conv *time_conv = &jd->session->time_conv;
+ 
+ 	if (!jd->use_arch_timestamp)
+ 		return timestamp;
+ 
+-	tc.time_shift	       = jd->session->time_conv.time_shift;
+-	tc.time_mult	       = jd->session->time_conv.time_mult;
+-	tc.time_zero	       = jd->session->time_conv.time_zero;
+-	tc.time_cycles	       = jd->session->time_conv.time_cycles;
+-	tc.time_mask	       = jd->session->time_conv.time_mask;
+-	tc.cap_user_time_zero  = jd->session->time_conv.cap_user_time_zero;
+-	tc.cap_user_time_short = jd->session->time_conv.cap_user_time_short;
++	tc.time_shift = time_conv->time_shift;
++	tc.time_mult  = time_conv->time_mult;
++	tc.time_zero  = time_conv->time_zero;
+ 
+-	if (!tc.cap_user_time_zero)
+-		return 0;
++	/*
++	 * The event TIME_CONV was extended with the fields starting at
++	 * "time_cycles" when cap_user_time_short support was added.  For
++	 * backward compatibility, check the event size and assign these
++	 * extended fields only if they are contained in the event.
++	 */
++	if (event_contains(*time_conv, time_cycles)) {
++		tc.time_cycles	       = time_conv->time_cycles;
++		tc.time_mask	       = time_conv->time_mask;
++		tc.cap_user_time_zero  = time_conv->cap_user_time_zero;
++		tc.cap_user_time_short = time_conv->cap_user_time_short;
++
++		if (!tc.cap_user_time_zero)
++			return 0;
++	}
+ 
+ 	return tsc_to_perf_time(timestamp, &tc);
+ }
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 859832a824967..e9d4e6f4bdf3e 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -949,6 +949,19 @@ static void perf_event__stat_round_swap(union perf_event *event,
+ 	event->stat_round.time = bswap_64(event->stat_round.time);
+ }
+ 
++static void perf_event__time_conv_swap(union perf_event *event,
++				       bool sample_id_all __maybe_unused)
++{
++	event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
++	event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
++	event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);
++
++	if (event_contains(event->time_conv, time_cycles)) {
++		event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
++		event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
++	}
++}
++
+ typedef void (*perf_event__swap_op)(union perf_event *event,
+ 				    bool sample_id_all);
+ 
+@@ -985,7 +998,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
+ 	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
+ 	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
+ 	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
+-	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
++	[PERF_RECORD_TIME_CONV]		  = perf_event__time_conv_swap,
+ 	[PERF_RECORD_HEADER_MAX]	  = NULL,
+ };
+ 
+diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
+index 35c936ce33efa..2664fb65e47ad 100644
+--- a/tools/perf/util/symbol_fprintf.c
++++ b/tools/perf/util/symbol_fprintf.c
+@@ -68,7 +68,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
+ 
+ 	for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
+ 		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
+-		fprintf(fp, "%s\n", pos->sym.name);
++		ret += fprintf(fp, "%s\n", pos->sym.name);
+ 	}
+ 
+ 	return ret;
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 490c9a496fe28..0026970214748 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -4822,33 +4822,12 @@ double discover_bclk(unsigned int family, unsigned int model)
+  * below this value, including the Digital Thermal Sensor (DTS),
+  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
+  */
+-int read_tcc_activation_temp()
++int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+ {
+ 	unsigned long long msr;
+-	unsigned int tcc, target_c, offset_c;
+-
+-	/* Temperature Target MSR is Nehalem and newer only */
+-	if (!do_nhm_platform_info)
+-		return 0;
+-
+-	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
+-		return 0;
+-
+-	target_c = (msr >> 16) & 0xFF;
+-
+-	offset_c = (msr >> 24) & 0xF;
+-
+-	tcc = target_c - offset_c;
+-
+-	if (!quiet)
+-		fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
+-			base_cpu, msr, tcc, target_c, offset_c);
+-
+-	return tcc;
+-}
++	unsigned int target_c_local;
++	int cpu;
+ 
+-int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+-{
+ 	/* tcc_activation_temp is used only for dts or ptm */
+ 	if (!(do_dts || do_ptm))
+ 		return 0;
+@@ -4857,18 +4836,43 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
+ 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+ 		return 0;
+ 
++	cpu = t->cpu_id;
++	if (cpu_migrate(cpu)) {
++		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
++		return -1;
++	}
++
+ 	if (tcc_activation_temp_override != 0) {
+ 		tcc_activation_temp = tcc_activation_temp_override;
+-		fprintf(outf, "Using cmdline TCC Target (%d C)\n", tcc_activation_temp);
++		fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
++			cpu, tcc_activation_temp);
+ 		return 0;
+ 	}
+ 
+-	tcc_activation_temp = read_tcc_activation_temp();
+-	if (tcc_activation_temp)
+-		return 0;
++	/* Temperature Target MSR is Nehalem and newer only */
++	if (!do_nhm_platform_info)
++		goto guess;
++
++	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
++		goto guess;
++
++	target_c_local = (msr >> 16) & 0xFF;
++
++	if (!quiet)
++		fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
++			cpu, msr, target_c_local);
++
++	if (!target_c_local)
++		goto guess;
++
++	tcc_activation_temp = target_c_local;
++
++	return 0;
+ 
++guess:
+ 	tcc_activation_temp = TJMAX_DEFAULT;
+-	fprintf(outf, "Guessing tjMax %d C, Please use -T to specify\n", tcc_activation_temp);
++	fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
++		cpu, tcc_activation_temp);
+ 
+ 	return 0;
+ }
+diff --git a/tools/spi/Makefile b/tools/spi/Makefile
+index ada881afb489a..0aa6dbd31fb8d 100644
+--- a/tools/spi/Makefile
++++ b/tools/spi/Makefile
+@@ -25,11 +25,12 @@ include $(srctree)/tools/build/Makefile.include
+ #
+ # We need the following to be outside of kernel tree
+ #
+-$(OUTPUT)include/linux/spi/spidev.h: ../../include/uapi/linux/spi/spidev.h
++$(OUTPUT)include/linux/spi: ../../include/uapi/linux/spi
+ 	mkdir -p $(OUTPUT)include/linux/spi 2>&1 || true
+ 	ln -sf $(CURDIR)/../../include/uapi/linux/spi/spidev.h $@
++	ln -sf $(CURDIR)/../../include/uapi/linux/spi/spi.h $@
+ 
+-prepare: $(OUTPUT)include/linux/spi/spidev.h
++prepare: $(OUTPUT)include/linux/spi
+ 
+ #
+ # spidev_test
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index 044bfdcf5b74f..76a325862119b 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -221,7 +221,7 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile)                \
+ 		    DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
+ endif
+ 
+-$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
++$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
+ ifeq ($(VMLINUX_H),)
+ 	$(call msg,GEN,,$@)
+ 	$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+@@ -346,7 +346,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o:				\
+ 
+ $(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h:			\
+ 		      $(TRUNNER_OUTPUT)/%.o				\
+-		      | $(BPFTOOL) $(TRUNNER_OUTPUT)
++		      $(BPFTOOL)					\
++		      | $(TRUNNER_OUTPUT)
+ 	$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
+ 	$(Q)$$(BPFTOOL) gen skeleton $$< > $$@
+ endif
+diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+index 06eb956ff7bbd..4b517d76257d1 100644
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -210,11 +210,6 @@ static int duration = 0;
+ 	.bpf_obj_file = "test_core_reloc_existence.o",			\
+ 	.btf_src_file = "btf__core_reloc_" #name ".o"			\
+ 
+-#define FIELD_EXISTS_ERR_CASE(name) {					\
+-	FIELD_EXISTS_CASE_COMMON(name),					\
+-	.fails = true,							\
+-}
+-
+ #define BITFIELDS_CASE_COMMON(objfile, test_name_prefix,  name)		\
+ 	.case_name = test_name_prefix#name,				\
+ 	.bpf_obj_file = objfile,					\
+@@ -222,7 +217,7 @@ static int duration = 0;
+ 
+ #define BITFIELDS_CASE(name, ...) {					\
+ 	BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o",	\
+-			      "direct:", name),				\
++			      "probed:", name),				\
+ 	.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,	\
+ 	.input_len = sizeof(struct core_reloc_##name),			\
+ 	.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)	\
+@@ -230,7 +225,7 @@ static int duration = 0;
+ 	.output_len = sizeof(struct core_reloc_bitfields_output),	\
+ }, {									\
+ 	BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o",	\
+-			      "probed:", name),				\
++			      "direct:", name),				\
+ 	.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,	\
+ 	.input_len = sizeof(struct core_reloc_##name),			\
+ 	.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)	\
+@@ -550,8 +545,7 @@ static struct core_reloc_test_case test_cases[] = {
+ 	ARRAYS_ERR_CASE(arrays___err_too_small),
+ 	ARRAYS_ERR_CASE(arrays___err_too_shallow),
+ 	ARRAYS_ERR_CASE(arrays___err_non_array),
+-	ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
+-	ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
++	ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
+ 	ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
+ 
+ 	/* enum/ptr/int handling scenarios */
+@@ -642,13 +636,25 @@ static struct core_reloc_test_case test_cases[] = {
+ 		},
+ 		.output_len = sizeof(struct core_reloc_existence_output),
+ 	},
+-
+-	FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
+-	FIELD_EXISTS_ERR_CASE(existence__err_int_type),
+-	FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
+-	FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
+-	FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
+-	FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
++	{
++		FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
++		.input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
++		},
++		.input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
++		.output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
++			.a_exists = 0,
++			.b_exists = 0,
++			.c_exists = 0,
++			.arr_exists = 0,
++			.s_exists = 0,
++			.a_value = 0xff000001u,
++			.b_value = 0xff000002u,
++			.c_value = 0xff000003u,
++			.arr_value = 0xff000004u,
++			.s_value = 0xff000005u,
++		},
++		.output_len = sizeof(struct core_reloc_existence_output),
++	},
+ 
+ 	/* bitfield relocation checks */
+ 	BITFIELDS_CASE(bitfields, {
+@@ -857,13 +863,20 @@ void test_core_reloc(void)
+ 			  "prog '%s' not found\n", probe_name))
+ 			goto cleanup;
+ 
++
++		if (test_case->btf_src_file) {
++			err = access(test_case->btf_src_file, R_OK);
++			if (!ASSERT_OK(err, "btf_src_file"))
++				goto cleanup;
++		}
++
+ 		load_attr.obj = obj;
+ 		load_attr.log_level = 0;
+ 		load_attr.target_btf_path = test_case->btf_src_file;
+ 		err = bpf_object__load_xattr(&load_attr);
+ 		if (err) {
+ 			if (!test_case->fails)
+-				CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
++				ASSERT_OK(err, "obj_load");
+ 			goto cleanup;
+ 		}
+ 
+@@ -902,10 +915,8 @@ void test_core_reloc(void)
+ 			goto cleanup;
+ 		}
+ 
+-		if (test_case->fails) {
+-			CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
++		if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
+ 			goto cleanup;
+-		}
+ 
+ 		equal = memcmp(data->out, test_case->output,
+ 			       test_case->output_len) == 0;
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
+deleted file mode 100644
+index dd0ffa518f366..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
+deleted file mode 100644
+index bc83372088ad0..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
+deleted file mode 100644
+index 917bec41be081..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_int_kind x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
+deleted file mode 100644
+index 6ec7e6ec1c915..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_int_sz x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
+deleted file mode 100644
+index 7bbcacf2b0d17..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_int_type x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
+deleted file mode 100644
+index f384dd38ec709..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_struct_type x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
+new file mode 100644
+index 0000000000000..d14b496190c3d
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_existence___wrong_field_defs x) {}
+diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
+index 9a28508501213..664eea1013aab 100644
+--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
++++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
+@@ -700,27 +700,11 @@ struct core_reloc_existence___minimal {
+ 	int a;
+ };
+ 
+-struct core_reloc_existence___err_wrong_int_sz {
+-	short a;
+-};
+-
+-struct core_reloc_existence___err_wrong_int_type {
++struct core_reloc_existence___wrong_field_defs {
++	void *a;
+ 	int b[1];
+-};
+-
+-struct core_reloc_existence___err_wrong_int_kind {
+ 	struct{ int x; } c;
+-};
+-
+-struct core_reloc_existence___err_wrong_arr_kind {
+ 	int arr;
+-};
+-
+-struct core_reloc_existence___err_wrong_arr_value_type {
+-	short arr[1];
+-};
+-
+-struct core_reloc_existence___err_wrong_struct_type {
+ 	int s;
+ };
+ 
+diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
+index 1b138cd2b187d..1b1c798e92489 100644
+--- a/tools/testing/selftests/bpf/verifier/array_access.c
++++ b/tools/testing/selftests/bpf/verifier/array_access.c
+@@ -186,7 +186,7 @@
+ 	},
+ 	.fixup_map_hash_48b = { 3 },
+ 	.errstr_unpriv = "R0 leaks addr",
+-	.errstr = "invalid access to map value, value_size=48 off=44 size=8",
++	.errstr = "R0 unbounded memory access",
+ 	.result_unpriv = REJECT,
+ 	.result = REJECT,
+ 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
+index f813ffefc07ec..65f43a7ce9c93 100644
+--- a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
+@@ -55,10 +55,6 @@ port_test()
+ 	      | jq '.[][][] | select(.name=="physical_ports") |.["occ"]')
+ 
+ 	[[ $occ -eq $max_ports ]]
+-	if [[ $should_fail -eq 0 ]]; then
+-		check_err $? "Mismatch ports number: Expected $max_ports, got $occ."
+-	else
+-		check_err_fail $should_fail $? "Reached more ports than expected"
+-	fi
++	check_err_fail $should_fail $? "Attempt to create $max_ports ports (actual result $occ)"
+ 
+ }
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
+index cc0f07e72cf22..aa74be9f47c85 100644
+--- a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
+@@ -98,11 +98,7 @@ __tc_flower_test()
+ 			jq -r '[ .[] | select(.kind == "flower") |
+ 			.options | .in_hw ]' | jq .[] | wc -l)
+ 	[[ $((offload_count - 1)) -eq $count ]]
+-	if [[ $should_fail -eq 0 ]]; then
+-		check_err $? "Offload mismatch"
+-	else
+-		check_err_fail $should_fail $? "Offload more than expacted"
+-	fi
++	check_err_fail $should_fail $? "Attempt to offload $count rules (actual result $((offload_count - 1)))"
+ }
+ 
+ tc_flower_test()
+diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
+index bb2752d78fe3a..81edbd23d371c 100644
+--- a/tools/testing/selftests/kvm/dirty_log_test.c
++++ b/tools/testing/selftests/kvm/dirty_log_test.c
+@@ -17,6 +17,7 @@
+ #include <linux/bitmap.h>
+ #include <linux/bitops.h>
+ #include <asm/barrier.h>
++#include <linux/atomic.h>
+ 
+ #include "kvm_util.h"
+ #include "test_util.h"
+@@ -137,12 +138,20 @@ static uint64_t host_clear_count;
+ static uint64_t host_track_next_count;
+ 
+ /* Whether dirty ring reset is requested, or finished */
+-static sem_t dirty_ring_vcpu_stop;
+-static sem_t dirty_ring_vcpu_cont;
++static sem_t sem_vcpu_stop;
++static sem_t sem_vcpu_cont;
++/*
++ * This is only set by the main thread, and only cleared by the vcpu
++ * thread.  It is used to request that the vcpu thread stop at the next
++ * GUEST_SYNC, since GUEST_SYNC is the only place where both the "dirty
++ * bit" and the "dirty data" are guaranteed to match.  E.g., SIG_IPI won't
++ * guarantee that if the vcpu is interrupted after setting the dirty bit
++ * but before the data is written.
++ */
++static atomic_t vcpu_sync_stop_requested;
+ /*
+  * This is updated by the vcpu thread to tell the host whether it's a
+  * ring-full event.  It should only be read until a sem_wait() of
+- * dirty_ring_vcpu_stop and before vcpu continues to run.
++ * sem_vcpu_stop and before vcpu continues to run.
+  */
+ static bool dirty_ring_vcpu_ring_full;
+ /*
+@@ -234,6 +243,17 @@ static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+ 	kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
+ }
+ 
++/* Should only be called after a GUEST_SYNC */
++static void vcpu_handle_sync_stop(void)
++{
++	if (atomic_read(&vcpu_sync_stop_requested)) {
++		/* It means main thread is sleeping waiting */
++		atomic_set(&vcpu_sync_stop_requested, false);
++		sem_post(&sem_vcpu_stop);
++		sem_wait_until(&sem_vcpu_cont);
++	}
++}
++
+ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+ {
+ 	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+@@ -244,6 +264,8 @@ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+ 	TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
+ 		    "Invalid guest sync status: exit_reason=%s\n",
+ 		    exit_reason_str(run->exit_reason));
++
++	vcpu_handle_sync_stop();
+ }
+ 
+ static bool dirty_ring_supported(void)
+@@ -301,13 +323,13 @@ static void dirty_ring_wait_vcpu(void)
+ {
+ 	/* This makes sure that hardware PML cache flushed */
+ 	vcpu_kick();
+-	sem_wait_until(&dirty_ring_vcpu_stop);
++	sem_wait_until(&sem_vcpu_stop);
+ }
+ 
+ static void dirty_ring_continue_vcpu(void)
+ {
+ 	pr_info("Notifying vcpu to continue\n");
+-	sem_post(&dirty_ring_vcpu_cont);
++	sem_post(&sem_vcpu_cont);
+ }
+ 
+ static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
+@@ -361,11 +383,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+ 		/* Update the flag first before pause */
+ 		WRITE_ONCE(dirty_ring_vcpu_ring_full,
+ 			   run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
+-		sem_post(&dirty_ring_vcpu_stop);
++		sem_post(&sem_vcpu_stop);
+ 		pr_info("vcpu stops because %s...\n",
+ 			dirty_ring_vcpu_ring_full ?
+ 			"dirty ring is full" : "vcpu is kicked out");
+-		sem_wait_until(&dirty_ring_vcpu_cont);
++		sem_wait_until(&sem_vcpu_cont);
+ 		pr_info("vcpu continues now.\n");
+ 	} else {
+ 		TEST_ASSERT(false, "Invalid guest sync status: "
+@@ -377,7 +399,7 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+ static void dirty_ring_before_vcpu_join(void)
+ {
+ 	/* Kick another round of vcpu just to make sure it will quit */
+-	sem_post(&dirty_ring_vcpu_cont);
++	sem_post(&sem_vcpu_cont);
+ }
+ 
+ struct log_mode {
+@@ -505,9 +527,8 @@ static void *vcpu_worker(void *data)
+ 	 */
+ 	sigmask->len = 8;
+ 	pthread_sigmask(0, NULL, sigset);
++	sigdelset(sigset, SIG_IPI);
+ 	vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
+-	sigaddset(sigset, SIG_IPI);
+-	pthread_sigmask(SIG_BLOCK, sigset, NULL);
+ 
+ 	sigemptyset(sigset);
+ 	sigaddset(sigset, SIG_IPI);
+@@ -768,7 +789,25 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+ 		usleep(p->interval * 1000);
+ 		log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
+ 					     bmap, host_num_pages);
++
++		/*
++		 * See vcpu_sync_stop_requested definition for details on why
++		 * we need to stop the vcpu when verifying data.
++		 */
++		atomic_set(&vcpu_sync_stop_requested, true);
++		sem_wait_until(&sem_vcpu_stop);
++		/*
++		 * NOTE: for dirty ring, it's possible that we didn't stop at
++		 * GUEST_SYNC but instead stopped because the ring is full;
++		 * that's okay too, because a full ring means we're only
++		 * missing the flush of the last page, and since we handle
++		 * the last page specially, verification will succeed anyway.
++		 */
++		assert(host_log_mode == LOG_MODE_DIRTY_RING ||
++		       atomic_read(&vcpu_sync_stop_requested) == false);
+ 		vm_dirty_log_verify(mode, bmap);
++		sem_post(&sem_vcpu_cont);
++
+ 		iteration++;
+ 		sync_global_to_guest(vm, iteration);
+ 	}
+@@ -818,9 +857,10 @@ int main(int argc, char *argv[])
+ 		.interval = TEST_HOST_LOOP_INTERVAL,
+ 	};
+ 	int opt, i;
++	sigset_t sigset;
+ 
+-	sem_init(&dirty_ring_vcpu_stop, 0, 0);
+-	sem_init(&dirty_ring_vcpu_cont, 0, 0);
++	sem_init(&sem_vcpu_stop, 0, 0);
++	sem_init(&sem_vcpu_cont, 0, 0);
+ 
+ 	guest_modes_append_default();
+ 
+@@ -876,6 +916,11 @@ int main(int argc, char *argv[])
+ 
+ 	srandom(time(0));
+ 
++	/* Ensure that vCPU threads start with SIG_IPI blocked.  */
++	sigemptyset(&sigset);
++	sigaddset(&sigset, SIG_IPI);
++	pthread_sigmask(SIG_BLOCK, &sigset, NULL);
++
+ 	if (host_log_mode_option == LOG_MODE_ALL) {
+ 		/* Run each log mode */
+ 		for (i = 0; i < LOG_MODE_NUM; i++) {
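The new synchronization scheme in dirty_log_test is worth restating outside the harness: the main thread may only verify page contents while the vcpu is parked at a GUEST_SYNC point, because that is the one spot where dirty bits and dirty data agree. Below is a condensed userspace sketch of just that handshake (function names hypothetical, KVM plumbing elided, and sem_init(&sem, 0, 0) assumed at startup):

#include <semaphore.h>
#include <stdatomic.h>

static sem_t sem_vcpu_stop, sem_vcpu_cont;
static atomic_bool stop_requested;

/* vcpu thread: called at every GUEST_SYNC exit */
static void vcpu_at_sync_point(void)
{
	if (atomic_load(&stop_requested)) {
		atomic_store(&stop_requested, false);
		sem_post(&sem_vcpu_stop);	/* "parked here" */
		sem_wait(&sem_vcpu_cont);	/* wait for the verifier */
	}
}

/* main thread: once per verification iteration */
static void main_verify_pages(void)
{
	atomic_store(&stop_requested, true);
	sem_wait(&sem_vcpu_stop);
	/* ... collect the dirty log and verify page data here ... */
	sem_post(&sem_vcpu_cont);
}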
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index a5ce26d548e4f..be17462fe1467 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -74,7 +74,8 @@ ifdef building_out_of_srctree
+ 		rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+ 	fi
+ 	@if [ "X$(TEST_PROGS)" != "X" ]; then \
+-		$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS)) ; \
++		$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
++				  $(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
+ 	else \
+ 		$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS)); \
+ 	fi
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+index c02291e9841e3..880e3ab9d088d 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+@@ -271,7 +271,7 @@ test_span_gre_fdb_roaming()
+ 
+ 	while ((RET == 0)); do
+ 		bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
+-		bridge fdb add dev $swp2 $h3mac vlan 555 master
++		bridge fdb add dev $swp2 $h3mac vlan 555 master static
+ 		sleep 1
+ 		fail_test_span_gre_dir $tundev ingress
+ 
+diff --git a/tools/testing/selftests/x86/thunks_32.S b/tools/testing/selftests/x86/thunks_32.S
+index a71d92da8f466..f3f56e681e9fb 100644
+--- a/tools/testing/selftests/x86/thunks_32.S
++++ b/tools/testing/selftests/x86/thunks_32.S
+@@ -45,3 +45,5 @@ call64_from_32:
+ 	ret
+ 
+ .size call64_from_32, .-call64_from_32
++
++.section .note.GNU-stack,"",%progbits
+diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
+index 62bd908ecd580..f08f5e82460b1 100644
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -174,21 +174,36 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
+ 					   struct kvm_coalesced_mmio_zone *zone)
+ {
+ 	struct kvm_coalesced_mmio_dev *dev, *tmp;
++	int r;
+ 
+ 	if (zone->pio != 1 && zone->pio != 0)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&kvm->slots_lock);
+ 
+-	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
++	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
+ 		if (zone->pio == dev->zone.pio &&
+ 		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+-			kvm_io_bus_unregister_dev(kvm,
++			r = kvm_io_bus_unregister_dev(kvm,
+ 				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
+ 			kvm_iodevice_destructor(&dev->dev);
++
++			/*
++			 * On failure, unregister destroys all devices on the
++			 * bus _except_ the target device, i.e. coalesced_zones
++			 * has been modified.  No need to restart the walk as
++			 * there aren't any zones left.
++			 */
++			if (r)
++				break;
+ 		}
++	}
+ 
+ 	mutex_unlock(&kvm->slots_lock);
+ 
++	/*
++	 * Ignore the result of kvm_io_bus_unregister_dev(); from userspace's
++	 * perspective, the coalesced MMIO is most definitely unregistered.
++	 */
+ 	return 0;
+ }
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 383df23514b93..ab1fa6f92c825 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -4486,15 +4486,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ }
+ 
+ /* Caller must hold slots_lock. */
+-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+-			       struct kvm_io_device *dev)
++int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++			      struct kvm_io_device *dev)
+ {
+ 	int i, j;
+ 	struct kvm_io_bus *new_bus, *bus;
+ 
+ 	bus = kvm_get_bus(kvm, bus_idx);
+ 	if (!bus)
+-		return;
++		return 0;
+ 
+ 	for (i = 0; i < bus->dev_count; i++)
+ 		if (bus->range[i].dev == dev) {
+@@ -4502,7 +4502,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ 		}
+ 
+ 	if (i == bus->dev_count)
+-		return;
++		return 0;
+ 
+ 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
+ 			  GFP_KERNEL_ACCOUNT);
+@@ -4511,7 +4511,13 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ 		new_bus->dev_count--;
+ 		memcpy(new_bus->range + i, bus->range + i + 1,
+ 				flex_array_size(new_bus, range, new_bus->dev_count - i));
+-	} else {
++	}
++
++	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
++	synchronize_srcu_expedited(&kvm->srcu);
++
++	/* Destroy the old bus _after_ installing the (null) bus. */
++	if (!new_bus) {
+ 		pr_err("kvm: failed to shrink bus, removing it completely\n");
+ 		for (j = 0; j < bus->dev_count; j++) {
+ 			if (j == i)
+@@ -4520,10 +4526,8 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ 		}
+ 	}
+ 
+-	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+-	synchronize_srcu_expedited(&kvm->srcu);
+ 	kfree(bus);
+-	return;
++	return new_bus ? 0 : -ENOMEM;
+ }
+ 
+ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-05-12 12:30 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-05-12 12:30 UTC (permalink / raw
  To: gentoo-commits

commit:     c9d5ee0d0a4467cf833043032dceaf8d526cef79
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 12 12:30:11 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 12 12:30:11 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c9d5ee0d

Linux patch 5.12.3

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1002_linux-5.12.3.patch | 16867 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 16871 insertions(+)

diff --git a/0000_README b/0000_README
index d1c77ca..00f2c30 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-5.12.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.2
 
+Patch:  1002_linux-5.12.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-5.12.3.patch b/1002_linux-5.12.3.patch
new file mode 100644
index 0000000..4a0a039
--- /dev/null
+++ b/1002_linux-5.12.3.patch
@@ -0,0 +1,16867 @@
+diff --git a/Makefile b/Makefile
+index 5077411a1874b..53a4b1cb7bb06 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+@@ -775,16 +775,16 @@ KBUILD_CFLAGS += -Wno-gnu
+ KBUILD_CFLAGS += -mno-global-merge
+ else
+ 
+-# These warnings generated too much noise in a regular build.
+-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+-KBUILD_CFLAGS += -Wno-unused-but-set-variable
+-
+ # Warn about unmarked fall-throughs in switch statement.
+ # Disabled for clang while comment to attribute conversion happens and
+ # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
+ KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
+ endif
+ 
++# These warnings generated too much noise in a regular build.
++# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
++KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
++
+ KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+ ifdef CONFIG_FRAME_POINTER
+ KBUILD_CFLAGS	+= -fno-omit-frame-pointer -fno-optimize-sibling-calls
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index fd94e27ba4fa3..c1f8047686219 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -118,8 +118,8 @@ asflags-y := -DZIMAGE
+ 
+ # Supply kernel BSS size to the decompressor via a linker symbol.
+ KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
+-		sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
+-		       -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
++		sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
++		       -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
+ LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
+ # Supply ZRELADDR to the decompressor via a linker symbol.
+ ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
+index 775ceb3acb6c0..edca66c232c15 100644
+--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
++++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
+@@ -8,6 +8,7 @@
+  */
+ /dts-v1/;
+ #include "sam9x60.dtsi"
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 	model = "Microchip SAM9X60-EK";
+@@ -84,7 +85,7 @@
+ 		sw1 {
+ 			label = "SW1";
+ 			gpios = <&pioD 18 GPIO_ACTIVE_LOW>;
+-			linux,code=<0x104>;
++			linux,code=<KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+index 84e1180f3e89d..a9e6fee55a2a8 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+@@ -11,6 +11,7 @@
+ #include "at91-sama5d27_som1.dtsi"
+ #include <dt-bindings/mfd/atmel-flexcom.h>
+ #include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 	model = "Atmel SAMA5D27 SOM1 EK";
+@@ -466,7 +467,7 @@
+ 		pb4 {
+ 			label = "USER";
+ 			gpios = <&pioA PIN_PA29 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+index 180a08765cb85..ff83967fd0082 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+@@ -8,6 +8,7 @@
+  */
+ /dts-v1/;
+ #include "at91-sama5d27_wlsom1.dtsi"
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 	model = "Microchip SAMA5D27 WLSOM1 EK";
+@@ -35,7 +36,7 @@
+ 		sw4 {
+ 			label = "USER BUTTON";
+ 			gpios = <&pioA PIN_PB2 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+index 46722a163184e..bd64721fa23ca 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+@@ -12,6 +12,7 @@
+ #include "sama5d2.dtsi"
+ #include "sama5d2-pinfunc.h"
+ #include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
+ #include <dt-bindings/mfd/atmel-flexcom.h>
+ 
+ / {
+@@ -51,7 +52,7 @@
+ 		sw4 {
+ 			label = "USER_PB1";
+ 			gpios = <&pioA PIN_PD0 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+index 8de57d164acd3..dfd150eb0fd86 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+@@ -11,6 +11,7 @@
+ #include "sama5d2-pinfunc.h"
+ #include <dt-bindings/mfd/atmel-flexcom.h>
+ #include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
+ #include <dt-bindings/pinctrl/at91.h>
+ 
+ / {
+@@ -402,7 +403,7 @@
+ 		bp1 {
+ 			label = "PB_USER";
+ 			gpios = <&pioA PIN_PA10 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+index 4e7cf21f124c0..509c732a0d8b4 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+@@ -10,6 +10,7 @@
+ #include "sama5d2-pinfunc.h"
+ #include <dt-bindings/mfd/atmel-flexcom.h>
+ #include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
+ #include <dt-bindings/regulator/active-semi,8945a-regulator.h>
+ 
+ / {
+@@ -712,7 +713,7 @@
+ 		bp1 {
+ 			label = "PB_USER";
+ 			gpios = <&pioA PIN_PB9 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+index 5179258f92470..9c55a921263bd 100644
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -7,6 +7,7 @@
+  */
+ /dts-v1/;
+ #include "sama5d36.dtsi"
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 	model = "SAMA5D3 Xplained";
+@@ -354,7 +355,7 @@
+ 		bp3 {
+ 			label = "PB_USER";
+ 			gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
+index d3446e42b5983..ce96345d28a39 100644
+--- a/arch/arm/boot/dts/at91sam9260ek.dts
++++ b/arch/arm/boot/dts/at91sam9260ek.dts
+@@ -7,6 +7,7 @@
+  */
+ /dts-v1/;
+ #include "at91sam9260.dtsi"
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 	model = "Atmel at91sam9260ek";
+@@ -156,7 +157,7 @@
+ 		btn4 {
+ 			label = "Button 4";
+ 			gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+index 6e6e672c0b86d..87bb39060e8be 100644
+--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
++++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+@@ -5,6 +5,7 @@
+  * Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+  */
+ #include "at91sam9g20.dtsi"
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 
+@@ -234,7 +235,7 @@
+ 		btn4 {
+ 			label = "Button 4";
+ 			gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
+index 6a96655d86260..8ed403767540e 100644
+--- a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
++++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
+index 3b0029e61b4c6..667b118ba4ee1 100644
+--- a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
++++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
+index 90f57bad6b243..ff31ce45831a7 100644
+--- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
++++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	spi {
+diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+index fed75e6ab58ca..61c7b137607e5 100644
+--- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
++++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+@@ -22,8 +22,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
+index 79542e18915c5..4c60eda296d97 100644
+--- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
++++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
+index 51c64f0b25603..9ca6d1b2590d4 100644
+--- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
++++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
+index c29950b43a953..0e273c598732f 100644
+--- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
++++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
+index 2f2d2b0a6893c..d857751ec5076 100644
+--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
++++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	spi {
+diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
+index 0e349e39f6081..8b1a05a0f1a11 100644
+--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
++++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	spi {
+diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
+index 8f1e565c3db45..6c6bb7b17d27a 100644
+--- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
++++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
+index ce888b1835d1f..d29e7f80ea6aa 100644
+--- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
++++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
+index ed8619b54d692..38fbefdf2e4e4 100644
+--- a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
++++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	gpio-keys {
+diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
+index 1f87993eae1d1..7989a53597d4f 100644
+--- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
++++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
+index 6c6199a53d091..87b655be674c5 100644
+--- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
++++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
+@@ -32,8 +32,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+index 911c65fbf2510..e635a15041dd8 100644
+--- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
++++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	nand: nand@18028000 {
+diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+index 3725f2b0d60bd..4b24b25389b5f 100644
+--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
++++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	gpio-keys {
+diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
+index 50f7cd08cfbbc..a6dc99955e191 100644
+--- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
++++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
+index bcc420f85b566..ff98837bc0db0 100644
+--- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
++++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
+index 4f8d777ae18de..452b8d0ab180e 100644
+--- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
++++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
+index e17e9a17fb008..b76bfe6efcd4a 100644
+--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
++++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
+index 60cc87ecc7ece..32d5a50578ec1 100644
+--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
++++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
+index f42a1703f4ab1..42097a4c2659f 100644
+--- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
++++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
+index ac3a4483dcb3f..a2566ad4619c4 100644
+--- a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
++++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
+@@ -15,8 +15,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	gpio-keys {
+diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
+index cb3677f0a1cbb..b580397ede833 100644
+--- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
++++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
+@@ -8,37 +8,43 @@
+ / {
+ 	soc {
+ 		i2c@80128000 {
+-			/* Marked:
+-			 * 129
+-			 * M35
+-			 * L3GD20
+-			 */
+-			l3gd20@6a {
+-				/* Gyroscope */
+-				compatible = "st,l3gd20";
+-				status = "disabled";
++			accelerometer@19 {
++				compatible = "st,lsm303dlhc-accel";
+ 				st,drdy-int-pin = <1>;
+-				drive-open-drain;
+-				reg = <0x6a>; // 0x6a or 0x6b
++				reg = <0x19>;
+ 				vdd-supply = <&ab8500_ldo_aux1_reg>;
+ 				vddio-supply = <&db8500_vsmps2_reg>;
++				interrupt-parent = <&gpio2>;
++				interrupts = <18 IRQ_TYPE_EDGE_RISING>,
++					     <19 IRQ_TYPE_EDGE_RISING>;
++				pinctrl-names = "default";
++				pinctrl-0 = <&accel_tvk_mode>;
+ 			};
+-			/*
+-			 * Marked:
+-			 * 2122
+-			 * C3H
+-			 * DQEEE
+-			 * LIS3DH?
+-			 */
+-			lis3dh@18 {
+-				/* Accelerometer */
+-				compatible = "st,lis3dh-accel";
++			magnetometer@1e {
++				compatible = "st,lsm303dlm-magn";
+ 				st,drdy-int-pin = <1>;
+-				reg = <0x18>;
++				reg = <0x1e>;
+ 				vdd-supply = <&ab8500_ldo_aux1_reg>;
+ 				vddio-supply = <&db8500_vsmps2_reg>;
++				// This interrupt is not properly working with the driver
++				// interrupt-parent = <&gpio1>;
++				// interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+ 				pinctrl-names = "default";
+-				pinctrl-0 = <&accel_tvk_mode>;
++				pinctrl-0 = <&magn_tvk_mode>;
++			};
++			gyroscope@68 {
++				/* Gyroscope */
++				compatible = "st,l3g4200d-gyro";
++				reg = <0x68>;
++				vdd-supply = <&ab8500_ldo_aux1_reg>;
++				vddio-supply = <&db8500_vsmps2_reg>;
++			};
++			pressure@5c {
++				/* Barometer/pressure sensor */
++				compatible = "st,lps001wp-press";
++				reg = <0x5c>;
++				vdd-supply = <&ab8500_ldo_aux1_reg>;
++				vddio-supply = <&db8500_vsmps2_reg>;
+ 			};
+ 		};
+ 
+@@ -54,5 +60,26 @@
+ 				};
+ 			};
+ 		};
++
++		pinctrl {
++			accelerometer {
++				accel_tvk_mode: accel_tvk {
++					/* Accelerometer interrupt lines 1 & 2 */
++					tvk_cfg {
++						pins = "GPIO82_C1", "GPIO83_D3";
++						ste,config = <&gpio_in_pd>;
++					};
++				};
++			};
++			magnetometer {
++				magn_tvk_mode: magn_tvk {
++					/* GPIO 32 used for DRDY, pull this down */
++					tvk_cfg {
++						pins = "GPIO32_V2";
++						ste,config = <&gpio_in_pd>;
++					};
++				};
++			};
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+index d3b99535d755e..f9c0f6884cc1e 100644
+--- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
++++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+@@ -448,7 +448,7 @@
+ 
+ 			reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>;
+ 
+-			avdd-supply = <&vdd_3v3_sys>;
++			vdda-supply = <&vdd_3v3_sys>;
+ 			vdd-supply  = <&vdd_3v3_sys>;
+ 		};
+ 
+diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S
+index be18af52e7dc9..b697fa5d059a2 100644
+--- a/arch/arm/crypto/curve25519-core.S
++++ b/arch/arm/crypto/curve25519-core.S
+@@ -10,8 +10,8 @@
+ #include <linux/linkage.h>
+ 
+ .text
+-.fpu neon
+ .arch armv7-a
++.fpu neon
+ .align 4
+ 
+ ENTRY(curve25519_neon)
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
+index 0d38327043f88..cd3c3edd48fa3 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
+@@ -28,6 +28,10 @@
+ 	ti,termination-current = <144000>;  /* uA */
+ };
+ 
++&buck3_reg {
++	regulator-always-on;
++};
++
+ &proximity {
+ 	proximity-near-level = <25>;
+ };
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index 7a2df148c6a31..456dcd4a7793f 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -156,7 +156,8 @@
+ 			};
+ 
+ 			nb_periph_clk: nb-periph-clk@13000 {
+-				compatible = "marvell,armada-3700-periph-clock-nb";
++				compatible = "marvell,armada-3700-periph-clock-nb",
++					     "syscon";
+ 				reg = <0x13000 0x100>;
+ 				clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
+ 				<&tbg 3>, <&xtalclk>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+index 7fa870e4386a3..ecb37a7e68705 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+@@ -1235,7 +1235,7 @@
+ 				 <&mmsys CLK_MM_DSI1_DIGITAL>,
+ 				 <&mipi_tx1>;
+ 			clock-names = "engine", "digital", "hs";
+-			phy = <&mipi_tx1>;
++			phys = <&mipi_tx1>;
+ 			phy-names = "dphy";
+ 			status = "disabled";
+ 		};
+diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
+index 61dbb4c838ef7..a5e61e09ea927 100644
+--- a/arch/arm64/kernel/vdso/vdso.lds.S
++++ b/arch/arm64/kernel/vdso/vdso.lds.S
+@@ -31,6 +31,13 @@ SECTIONS
+ 	.gnu.version_d	: { *(.gnu.version_d) }
+ 	.gnu.version_r	: { *(.gnu.version_r) }
+ 
++	/*
++	 * Discard .note.gnu.property sections which are unused and have
++	 * different alignment requirement from vDSO note sections.
++	 */
++	/DISCARD/	: {
++		*(.note.GNU-stack .note.gnu.property)
++	}
+ 	.note		: { *(.note.*) }		:text	:note
+ 
+ 	. = ALIGN(16);
+@@ -48,7 +55,6 @@ SECTIONS
+ 	PROVIDE(end = .);
+ 
+ 	/DISCARD/	: {
+-		*(.note.GNU-stack)
+ 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
+ 		*(.bss .sbss .dynbss .dynsbss)
+ 		*(.eh_frame .eh_frame_hdr)
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index 652ce85f9410a..4bc45d3ed8b0e 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
+ static inline void arch_unmap(struct mm_struct *mm,
+ 			      unsigned long start, unsigned long end)
+ {
+-	unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
++	unsigned long vdso_base = (unsigned long)mm->context.vdso;
+ 
+ 	if (start <= vdso_base && vdso_base < end)
+ 		mm->context.vdso = NULL;
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index da103e92c1126..37d0b8c76a59d 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -441,6 +441,7 @@
+ #define   LPCR_VRMA_LP1		ASM_CONST(0x0000800000000000)
+ #define   LPCR_RMLS		0x1C000000	/* Implementation dependent RMO limit sel */
+ #define   LPCR_RMLS_SH		26
++#define   LPCR_HAIL		ASM_CONST(0x0000000004000000)   /* HV AIL (ISAv3.1) */
+ #define   LPCR_ILE		ASM_CONST(0x0000000002000000)   /* !HV irqs set MSR:LE */
+ #define   LPCR_AIL		ASM_CONST(0x0000000001800000)	/* Alternate interrupt location */
+ #define   LPCR_AIL_0		ASM_CONST(0x0000000000000000)	/* MMU off exception offset 0x0 */
+diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
+index cc79856896a19..4ba87de32be00 100644
+--- a/arch/powerpc/include/uapi/asm/errno.h
++++ b/arch/powerpc/include/uapi/asm/errno.h
+@@ -2,6 +2,7 @@
+ #ifndef _ASM_POWERPC_ERRNO_H
+ #define _ASM_POWERPC_ERRNO_H
+ 
++#undef	EDEADLOCK
+ #include <asm-generic/errno.h>
+ 
+ #undef	EDEADLOCK
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index cd60bc1c87011..7040e430a1249 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -362,14 +362,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
+ 	pa = pte_pfn(*ptep);
+ 
+ 	/* On radix we can do hugepage mappings for io, so handle that */
+-	if (hugepage_shift) {
+-		pa <<= hugepage_shift;
+-		pa |= token & ((1ul << hugepage_shift) - 1);
+-	} else {
+-		pa <<= PAGE_SHIFT;
+-		pa |= token & (PAGE_SIZE - 1);
+-	}
++	if (!hugepage_shift)
++		hugepage_shift = PAGE_SHIFT;
+ 
++	pa <<= PAGE_SHIFT;
++	pa |= token & ((1ul << hugepage_shift) - 1);
+ 	return pa;
+ }
+ 
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 560ed8b975e77..830fee91b2d99 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -232,10 +232,23 @@ static void cpu_ready_for_interrupts(void)
+ 	 * If we are not in hypervisor mode the job is done once for
+ 	 * the whole partition in configure_exceptions().
+ 	 */
+-	if (cpu_has_feature(CPU_FTR_HVMODE) &&
+-	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
++	if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ 		unsigned long lpcr = mfspr(SPRN_LPCR);
+-		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
++		unsigned long new_lpcr = lpcr;
++
++		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
++			/* P10 DD1 does not have HAIL */
++			if (pvr_version_is(PVR_POWER10) &&
++					(mfspr(SPRN_PVR) & 0xf00) == 0x100)
++				new_lpcr |= LPCR_AIL_3;
++			else
++				new_lpcr |= LPCR_HAIL;
++		} else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
++			new_lpcr |= LPCR_AIL_3;
++		}
++
++		if (new_lpcr != lpcr)
++			mtspr(SPRN_LPCR, new_lpcr);
+ 	}
+ 
+ 	/*
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index e839a906fdf23..b14907209822e 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -55,10 +55,10 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
+ {
+ 	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+ 
+-	if (new_size != text_size + PAGE_SIZE)
++	if (new_size != text_size)
+ 		return -EINVAL;
+ 
+-	current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;
++	current->mm->context.vdso = (void __user *)new_vma->vm_start;
+ 
+ 	return 0;
+ }
+@@ -73,6 +73,10 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
+ 	return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
+ }
+ 
++static struct vm_special_mapping vvar_spec __ro_after_init = {
++	.name = "[vvar]",
++};
++
+ static struct vm_special_mapping vdso32_spec __ro_after_init = {
+ 	.name = "[vdso]",
+ 	.mremap = vdso32_mremap,
+@@ -89,11 +93,11 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = {
+  */
+ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+-	struct mm_struct *mm = current->mm;
++	unsigned long vdso_size, vdso_base, mappings_size;
+ 	struct vm_special_mapping *vdso_spec;
++	unsigned long vvar_size = PAGE_SIZE;
++	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
+-	unsigned long vdso_size;
+-	unsigned long vdso_base;
+ 
+ 	if (is_32bit_task()) {
+ 		vdso_spec = &vdso32_spec;
+@@ -110,8 +114,8 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
+ 		vdso_base = 0;
+ 	}
+ 
+-	/* Add a page to the vdso size for the data page */
+-	vdso_size += PAGE_SIZE;
++	mappings_size = vdso_size + vvar_size;
++	mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
+ 
+ 	/*
+ 	 * pick a base address for the vDSO in process space. We try to put it
+@@ -119,9 +123,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
+ 	 * and end up putting it elsewhere.
+ 	 * Add enough to the size so that the result can be aligned.
+ 	 */
+-	vdso_base = get_unmapped_area(NULL, vdso_base,
+-				      vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+-				      0, 0);
++	vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
+ 	if (IS_ERR_VALUE(vdso_base))
+ 		return vdso_base;
+ 
+@@ -133,7 +135,13 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
+ 	 * install_special_mapping or the perf counter mmap tracking code
+ 	 * will fail to recognise it as a vDSO.
+ 	 */
+-	mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;
++	mm->context.vdso = (void __user *)vdso_base + vvar_size;
++
++	vma = _install_special_mapping(mm, vdso_base, vvar_size,
++				       VM_READ | VM_MAYREAD | VM_IO |
++				       VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
++	if (IS_ERR(vma))
++		return PTR_ERR(vma);
+ 
+ 	/*
+ 	 * our vma flags don't have VM_WRITE so by default, the process isn't
+@@ -145,9 +153,12 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
+ 	 * It's fine to use that for setting breakpoints in the vDSO code
+ 	 * pages though.
+ 	 */
+-	vma = _install_special_mapping(mm, vdso_base, vdso_size,
++	vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
+ 				       VM_READ | VM_EXEC | VM_MAYREAD |
+ 				       VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
++	if (IS_ERR(vma))
++		do_munmap(mm, vdso_base, vvar_size, NULL);
++
+ 	return PTR_ERR_OR_ZERO(vma);
+ }
+ 
+@@ -249,11 +260,22 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
+ 	if (!pagelist)
+ 		panic("%s: Cannot allocate page list for VDSO", __func__);
+ 
+-	pagelist[0] = virt_to_page(vdso_data);
+-
+ 	for (i = 0; i < pages; i++)
+-		pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);
++		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
++
++	return pagelist;
++}
++
++static struct page ** __init vvar_setup_pages(void)
++{
++	struct page **pagelist;
+ 
++	/* .pages is NULL-terminated */
++	pagelist = kcalloc(2, sizeof(struct page *), GFP_KERNEL);
++	if (!pagelist)
++		panic("%s: Cannot allocate page list for VVAR", __func__);
++
++	pagelist[0] = virt_to_page(vdso_data);
+ 	return pagelist;
+ }
+ 
+@@ -295,6 +317,8 @@ static int __init vdso_init(void)
+ 	if (IS_ENABLED(CONFIG_PPC64))
+ 		vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
+ 
++	vvar_spec.pages = vvar_setup_pages();
++
+ 	smp_wmb();
+ 
+ 	return 0;
+diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
+index 02b9e4d0dc40b..a8a7cb71086b3 100644
+--- a/arch/powerpc/kexec/file_load_64.c
++++ b/arch/powerpc/kexec/file_load_64.c
+@@ -960,6 +960,93 @@ unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
+ 	return fdt_size;
+ }
+ 
++/**
++ * add_node_props - Reads node properties from device node structure and add
++ *                  them to fdt.
++ * @fdt:            Flattened device tree of the kernel
++ * @node_offset:    offset of the node to add a property at
++ * @dn:             device node pointer
++ *
++ * Returns 0 on success, negative errno on error.
++ */
++static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
++{
++	int ret = 0;
++	struct property *pp;
++
++	if (!dn)
++		return -EINVAL;
++
++	for_each_property_of_node(dn, pp) {
++		ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
++		if (ret < 0) {
++			pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
++			return ret;
++		}
++	}
++	return ret;
++}
++
++/**
++ * update_cpus_node - Update cpus node of flattened device tree using of_root
++ *                    device node.
++ * @fdt:              Flattened device tree of the kernel.
++ *
++ * Returns 0 on success, negative errno on error.
++ */
++static int update_cpus_node(void *fdt)
++{
++	struct device_node *cpus_node, *dn;
++	int cpus_offset, cpus_subnode_offset, ret = 0;
++
++	cpus_offset = fdt_path_offset(fdt, "/cpus");
++	if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
++		pr_err("Malformed device tree: error reading /cpus node: %s\n",
++		       fdt_strerror(cpus_offset));
++		return cpus_offset;
++	}
++
++	if (cpus_offset > 0) {
++		ret = fdt_del_node(fdt, cpus_offset);
++		if (ret < 0) {
++			pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
++			return -EINVAL;
++		}
++	}
++
++	/* Add cpus node to fdt */
++	cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
++	if (cpus_offset < 0) {
++		pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
++		return -EINVAL;
++	}
++
++	/* Add cpus node properties */
++	cpus_node = of_find_node_by_path("/cpus");
++	ret = add_node_props(fdt, cpus_offset, cpus_node);
++	of_node_put(cpus_node);
++	if (ret < 0)
++		return ret;
++
++	/* Loop through all subnodes of cpus and add them to fdt */
++	for_each_node_by_type(dn, "cpu") {
++		cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
++		if (cpus_subnode_offset < 0) {
++			pr_err("Unable to add %s subnode: %s\n", dn->full_name,
++			       fdt_strerror(cpus_subnode_offset));
++			ret = cpus_subnode_offset;
++			goto out;
++		}
++
++		ret = add_node_props(fdt, cpus_subnode_offset, dn);
++		if (ret < 0)
++			goto out;
++	}
++out:
++	of_node_put(dn);
++	return ret;
++}
++
+ /**
+  * setup_new_fdt_ppc64 - Update the flattend device-tree of the kernel
+  *                       being loaded.
+@@ -1020,6 +1107,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
+ 		}
+ 	}
+ 
++	/* Update cpus nodes information to account hotplug CPUs. */
++	ret =  update_cpus_node(fdt);
++	if (ret < 0)
++		goto out;
++
+ 	/* Update memory reserve map */
+ 	ret = get_reserved_memory_ranges(&rmem);
+ 	if (ret)
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
+index e452158a18d77..c3e31fef0be1c 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
+@@ -8,6 +8,7 @@
+  */
+ 
+ #include <linux/kvm_host.h>
++#include <linux/pkeys.h>
+ 
+ #include <asm/kvm_ppc.h>
+ #include <asm/kvm_book3s.h>
+@@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+ 	else
+ 		kvmppc_mmu_flush_icache(pfn);
+ 
++	rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
+ 	rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
+ 
+ 	/*
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index d4efc182662a8..248f7c9e36fce 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -5,6 +5,9 @@
+ 
+ ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)
+ 
++CFLAGS_code-patching.o += -fno-stack-protector
++CFLAGS_feature-fixups.o += -fno-stack-protector
++
+ CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
+ 
+diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
+index 7e2c78e2ca6b0..d71f7c49a721a 100644
+--- a/arch/riscv/kernel/probes/kprobes.c
++++ b/arch/riscv/kernel/probes/kprobes.c
+@@ -260,8 +260,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
+ 
+ 		if (kcb->kprobe_status == KPROBE_REENTER)
+ 			restore_previous_kprobe(kcb);
+-		else
++		else {
++			kprobes_restore_local_irqflag(kcb, regs);
+ 			reset_current_kprobe();
++		}
+ 
+ 		break;
+ 	case KPROBE_HIT_ACTIVE:
+diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
+index 7b947728d57ef..56007c763902a 100644
+--- a/arch/s390/crypto/arch_random.c
++++ b/arch/s390/crypto/arch_random.c
+@@ -54,6 +54,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
+ 
+ bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
+ {
++	/* max hunk is ARCH_RNG_BUF_SIZE */
++	if (nbytes > ARCH_RNG_BUF_SIZE)
++		return false;
++
+ 	/* lock rng buffer */
+ 	if (!spin_trylock(&arch_rng_lock))
+ 		return false;
+diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
+index d9215c7106f0d..8fc52679543d6 100644
+--- a/arch/s390/include/asm/qdio.h
++++ b/arch/s390/include/asm/qdio.h
+@@ -246,21 +246,8 @@ struct slsb {
+ 	u8 val[QDIO_MAX_BUFFERS_PER_Q];
+ } __attribute__ ((packed, aligned(256)));
+ 
+-/**
+- * struct qdio_outbuf_state - SBAL related asynchronous operation information
+- *   (for communication with upper layer programs)
+- *   (only required for use with completion queues)
+- * @user: pointer to upper layer program's state information related to SBAL
+- *        (stored in user1 data of QAOB)
+- */
+-struct qdio_outbuf_state {
+-	void *user;
+-};
+-
+-#define CHSC_AC1_INITIATE_INPUTQ	0x80
+-
+-
+ /* qdio adapter-characteristics-1 flag */
++#define CHSC_AC1_INITIATE_INPUTQ	0x80
+ #define AC1_SIGA_INPUT_NEEDED		0x40	/* process input queues */
+ #define AC1_SIGA_OUTPUT_NEEDED		0x20	/* process output queues */
+ #define AC1_SIGA_SYNC_NEEDED		0x10	/* ask hypervisor to sync */
+@@ -338,7 +325,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
+  * @int_parm: interruption parameter
+  * @input_sbal_addr_array:  per-queue array, each element points to 128 SBALs
+  * @output_sbal_addr_array: per-queue array, each element points to 128 SBALs
+- * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
+  */
+ struct qdio_initialize {
+ 	unsigned char q_format;
+@@ -357,7 +343,6 @@ struct qdio_initialize {
+ 	unsigned long int_parm;
+ 	struct qdio_buffer ***input_sbal_addr_array;
+ 	struct qdio_buffer ***output_sbal_addr_array;
+-	struct qdio_outbuf_state *output_sbal_state_array;
+ };
+ 
+ #define QDIO_STATE_INACTIVE		0x00000002 /* after qdio_cleanup */
+@@ -378,9 +363,10 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
+ extern int qdio_establish(struct ccw_device *cdev,
+ 			  struct qdio_initialize *init_data);
+ extern int qdio_activate(struct ccw_device *);
++extern struct qaob *qdio_allocate_aob(void);
+ extern void qdio_release_aob(struct qaob *);
+-extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
+-		   unsigned int);
++extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
++		   unsigned int bufnr, unsigned int count, struct qaob *aob);
+ extern int qdio_start_irq(struct ccw_device *cdev);
+ extern int qdio_stop_irq(struct ccw_device *cdev);
+ extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
+diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
+index a7eab7be4db05..5412efe328f80 100644
+--- a/arch/s390/kernel/dis.c
++++ b/arch/s390/kernel/dis.c
+@@ -563,7 +563,7 @@ void show_code(struct pt_regs *regs)
+ 
+ void print_fn_code(unsigned char *code, unsigned long len)
+ {
+-	char buffer[64], *ptr;
++	char buffer[128], *ptr;
+ 	int opsize, i;
+ 
+ 	while (len) {
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 2792879d398ee..268b7d5c98354 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1406,7 +1406,7 @@ config HIGHMEM4G
+ 
+ config HIGHMEM64G
+ 	bool "64GB"
+-	depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
++	depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
+ 	select X86_PAE
+ 	help
+ 	  Select this if you have a 32-bit processor and more than 4
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 9a85eae37b173..78faf9c7e3aed 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -33,6 +33,7 @@ REALMODE_CFLAGS += -ffreestanding
+ REALMODE_CFLAGS += -fno-stack-protector
+ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
+ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
++REALMODE_CFLAGS += $(CLANG_FLAGS)
+ export REALMODE_CFLAGS
+ 
+ # BITS is used as extension for files which are available in a 32 bit
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index e0bc3988c3faa..6e5522aebbbd4 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -46,6 +46,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
+ # Disable relocation relaxation in case the link is not PIE.
+ KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
+ KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
++KBUILD_CFLAGS += $(CLANG_FLAGS)
+ 
+ # sev-es.c indirectly inludes inat-table.h which is generated during
+ # compilation and stored in $(objtree). Add the directory to the includes so
+diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
+index aa561795efd16..a6dea4e8a082f 100644
+--- a/arch/x86/boot/compressed/mem_encrypt.S
++++ b/arch/x86/boot/compressed/mem_encrypt.S
+@@ -23,12 +23,6 @@ SYM_FUNC_START(get_sev_encryption_bit)
+ 	push	%ecx
+ 	push	%edx
+ 
+-	/* Check if running under a hypervisor */
+-	movl	$1, %eax
+-	cpuid
+-	bt	$31, %ecx		/* Check the hypervisor bit */
+-	jnc	.Lno_sev
+-
+ 	movl	$0x80000000, %eax	/* CPUID to check the highest leaf */
+ 	cpuid
+ 	cmpl	$0x8000001f, %eax	/* See if 0x8000001f is available */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index ab640abe26b68..1e576cc831c15 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1850,7 +1850,7 @@ static inline void setup_getcpu(int cpu)
+ 	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
+ 	struct desc_struct d = { };
+ 
+-	if (boot_cpu_has(X86_FEATURE_RDTSCP))
++	if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
+ 		write_rdtscp_aux(cpudata);
+ 
+ 	/* Store CPU and node number in limit. */
+diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
+index cdc04d0912423..387b716698187 100644
+--- a/arch/x86/kernel/sev-es-shared.c
++++ b/arch/x86/kernel/sev-es-shared.c
+@@ -186,7 +186,6 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+ 	 * make it accessible to the hypervisor.
+ 	 *
+ 	 * In particular, check for:
+-	 *	- Hypervisor CPUID bit
+ 	 *	- Availability of CPUID leaf 0x8000001f
+ 	 *	- SEV CPUID bit.
+ 	 *
+@@ -194,10 +193,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+ 	 * can't be checked here.
+ 	 */
+ 
+-	if ((fn == 1 && !(regs->cx & BIT(31))))
+-		/* Hypervisor bit */
+-		goto fail;
+-	else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
++	if (fn == 0x80000000 && (regs->ax < 0x8000001f))
+ 		/* SEV leaf check */
+ 		goto fail;
+ 	else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index 6c5eb6f3f14f4..a19374d261013 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -503,14 +503,10 @@ void __init sme_enable(struct boot_params *bp)
+ 
+ #define AMD_SME_BIT	BIT(0)
+ #define AMD_SEV_BIT	BIT(1)
+-	/*
+-	 * Set the feature mask (SME or SEV) based on whether we are
+-	 * running under a hypervisor.
+-	 */
+-	eax = 1;
+-	ecx = 0;
+-	native_cpuid(&eax, &ebx, &ecx, &edx);
+-	feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
++
++	/* Check the SEV MSR whether SEV or SME is enabled */
++	sev_status   = __rdmsr(MSR_AMD64_SEV);
++	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+ 
+ 	/*
+ 	 * Check for the SME/SEV feature:
+@@ -530,19 +526,26 @@ void __init sme_enable(struct boot_params *bp)
+ 
+ 	/* Check if memory encryption is enabled */
+ 	if (feature_mask == AMD_SME_BIT) {
++		/*
++		 * No SME if Hypervisor bit is set. This check is here to
++		 * prevent a guest from trying to enable SME. For running as a
++		 * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
++		 * might be other hypervisors which emulate that MSR as non-zero
++		 * or even pass it through to the guest.
++		 * A malicious hypervisor can still trick a guest into this
++		 * path, but there is no way to protect against that.
++		 */
++		eax = 1;
++		ecx = 0;
++		native_cpuid(&eax, &ebx, &ecx, &edx);
++		if (ecx & BIT(31))
++			return;
++
+ 		/* For SME, check the SYSCFG MSR */
+ 		msr = __rdmsr(MSR_K8_SYSCFG);
+ 		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ 			return;
+ 	} else {
+-		/* For SEV, check the SEV MSR */
+-		msr = __rdmsr(MSR_AMD64_SEV);
+-		if (!(msr & MSR_AMD64_SEV_ENABLED))
+-			return;
+-
+-		/* Save SEV_STATUS to avoid reading MSR again */
+-		sev_status = msr;
+-
+ 		/* SEV state cannot be controlled by a command line option */
+ 		sme_me_mask = me_mask;
+ 		sev_enabled = true;
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 95586137194ee..20ba5db0f61cd 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -1012,7 +1012,7 @@ static void
+ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ 		      struct bfq_io_cq *bic, bool bfq_already_existing)
+ {
+-	unsigned int old_wr_coeff = bfqq->wr_coeff;
++	unsigned int old_wr_coeff = 1;
+ 	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
+ 
+ 	if (bic->saved_has_short_ttime)
+@@ -1033,7 +1033,13 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ 	bfqq->ttime = bic->saved_ttime;
+ 	bfqq->io_start_time = bic->saved_io_start_time;
+ 	bfqq->tot_idle_time = bic->saved_tot_idle_time;
+-	bfqq->wr_coeff = bic->saved_wr_coeff;
++	/*
++	 * Restore weight coefficient only if low_latency is on
++	 */
++	if (bfqd->low_latency) {
++		old_wr_coeff = bfqq->wr_coeff;
++		bfqq->wr_coeff = bic->saved_wr_coeff;
++	}
+ 	bfqq->service_from_wr = bic->saved_service_from_wr;
+ 	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
+ 	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
+diff --git a/crypto/api.c b/crypto/api.c
+index ed08cbd5b9d3f..c4eda56cff891 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
+ {
+ 	struct crypto_alg *alg;
+ 
+-	if (unlikely(!mem))
++	if (IS_ERR_OR_NULL(mem))
+ 		return;
+ 
+ 	alg = tfm->__crt_alg;
+diff --git a/crypto/rng.c b/crypto/rng.c
+index a888d84b524a4..fea082b25fe4b 100644
+--- a/crypto/rng.c
++++ b/crypto/rng.c
+@@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+ 	u8 *buf = NULL;
+ 	int err;
+ 
+-	crypto_stats_get(alg);
+ 	if (!seed && slen) {
+ 		buf = kmalloc(slen, GFP_KERNEL);
+-		if (!buf) {
+-			crypto_alg_put(alg);
++		if (!buf)
+ 			return -ENOMEM;
+-		}
+ 
+ 		err = get_random_bytes_wait(buf, slen);
+-		if (err) {
+-			crypto_alg_put(alg);
++		if (err)
+ 			goto out;
+-		}
+ 		seed = buf;
+ 	}
+ 
++	crypto_stats_get(alg);
+ 	err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
+ 	crypto_stats_rng_seed(alg, err);
+ out:
+diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
+index f2d0e5915dab5..0a0a982f9c28d 100644
+--- a/drivers/acpi/arm64/gtdt.c
++++ b/drivers/acpi/arm64/gtdt.c
+@@ -329,7 +329,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
+ 					int index)
+ {
+ 	struct platform_device *pdev;
+-	int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
++	int irq;
+ 
+ 	/*
+ 	 * According to SBSA specification the size of refresh and control
+@@ -338,7 +338,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
+ 	struct resource res[] = {
+ 		DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
+ 		DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
+-		DEFINE_RES_IRQ(irq),
++		{},
+ 	};
+ 	int nr_res = ARRAY_SIZE(res);
+ 
+@@ -348,10 +348,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
+ 
+ 	if (!(wd->refresh_frame_address && wd->control_frame_address)) {
+ 		pr_err(FW_BUG "failed to get the Watchdog base address.\n");
+-		acpi_unregister_gsi(wd->timer_interrupt);
+ 		return -EINVAL;
+ 	}
+ 
++	irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
++	res[2] = (struct resource)DEFINE_RES_IRQ(irq);
+ 	if (irq <= 0) {
+ 		pr_warn("failed to map the Watchdog interrupt.\n");
+ 		nr_res--;
+@@ -364,7 +365,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
+ 	 */
+ 	pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
+ 	if (IS_ERR(pdev)) {
+-		acpi_unregister_gsi(wd->timer_interrupt);
++		if (irq > 0)
++			acpi_unregister_gsi(wd->timer_interrupt);
+ 		return PTR_ERR(pdev);
+ 	}
+ 
+diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
+index 7b54dc95d36b3..4058e02410917 100644
+--- a/drivers/acpi/custom_method.c
++++ b/drivers/acpi/custom_method.c
+@@ -42,6 +42,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ 				   sizeof(struct acpi_table_header)))
+ 			return -EFAULT;
+ 		uncopied_bytes = max_size = table.length;
++		/* make sure the buf is not allocated */
++		kfree(buf);
+ 		buf = kzalloc(max_size, GFP_KERNEL);
+ 		if (!buf)
+ 			return -ENOMEM;
+@@ -55,6 +57,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ 	    (*ppos + count < count) ||
+ 	    (count > uncopied_bytes)) {
+ 		kfree(buf);
++		buf = NULL;
+ 		return -EINVAL;
+ 	}
+ 
+@@ -76,7 +79,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ 		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+ 	}
+ 
+-	kfree(buf);
+ 	return count;
+ }
+ 
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 00ba8e5a1ccc0..33192a8f687d6 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1772,6 +1772,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+ 
+ #ifdef CONFIG_ARM64
++	if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
++	    pdev->device == 0xa235 &&
++	    pdev->revision < 0x30)
++		hpriv->flags |= AHCI_HFLAG_NO_SXS;
++
+ 	if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
+ 		hpriv->irq_handler = ahci_thunderx_irq_handler;
+ #endif
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 98b8baa47dc5e..d1f284f0c83d9 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -242,6 +242,7 @@ enum {
+ 							suspend/resume */
+ 	AHCI_HFLAG_IGN_NOTSUPP_POWER_ON	= (1 << 27), /* ignore -EOPNOTSUPP
+ 							from phy_power_on() */
++	AHCI_HFLAG_NO_SXS		= (1 << 28), /* SXS not supported */
+ 
+ 	/* ap->flags bits */
+ 
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index ea5bf5f4cbed5..fec2e9754aed2 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -493,6 +493,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+ 		cap |= HOST_CAP_ALPM;
+ 	}
+ 
++	if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
++		dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
++		cap &= ~HOST_CAP_SXS;
++	}
++
+ 	if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
+ 		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ 			 port_map, hpriv->force_port_map);
+diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
+index d4aa6bfc95557..526c77cd7a506 100644
+--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
++++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
+@@ -432,10 +432,14 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
+ 	 * i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because
+ 	 * of sysfs link already was removed already.
+ 	 */
+-	if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) {
+-		sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
++	if (dev->blk_symlink_name) {
++		if (try_module_get(THIS_MODULE)) {
++			sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
++			module_put(THIS_MODULE);
++		}
++		/* It should be freed always. */
+ 		kfree(dev->blk_symlink_name);
+-		module_put(THIS_MODULE);
++		dev->blk_symlink_name = NULL;
+ 	}
+ }
+ 
+diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
+index a6a68d44f517c..677770f32843f 100644
+--- a/drivers/block/rnbd/rnbd-srv.c
++++ b/drivers/block/rnbd/rnbd-srv.c
+@@ -341,7 +341,9 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
+ 	struct rnbd_srv_session	*sess = sess_dev->sess;
+ 
+ 	sess_dev->keep_id = true;
+-	mutex_lock(&sess->lock);
++	/* It is already started to close by client's close message. */
++	if (!mutex_trylock(&sess->lock))
++		return;
+ 	rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ 	mutex_unlock(&sess->lock);
+ }
+diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
+index be4eebb0971bc..08b7f4a06bfcc 100644
+--- a/drivers/bus/mhi/core/init.c
++++ b/drivers/bus/mhi/core/init.c
+@@ -508,8 +508,6 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+ 
+ 	/* Setup wake db */
+ 	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+-	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
+-	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
+ 	mhi_cntrl->wake_set = false;
+ 
+ 	/* Setup channel db address for each channel in tre_ring */
+@@ -552,6 +550,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ 	struct mhi_ring *buf_ring;
+ 	struct mhi_ring *tre_ring;
+ 	struct mhi_chan_ctxt *chan_ctxt;
++	u32 tmp;
+ 
+ 	buf_ring = &mhi_chan->buf_ring;
+ 	tre_ring = &mhi_chan->tre_ring;
+@@ -565,7 +564,19 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ 	vfree(buf_ring->base);
+ 
+ 	buf_ring->base = tre_ring->base = NULL;
++	tre_ring->ctxt_wp = NULL;
+ 	chan_ctxt->rbase = 0;
++	chan_ctxt->rlen = 0;
++	chan_ctxt->rp = 0;
++	chan_ctxt->wp = 0;
++
++	tmp = chan_ctxt->chcfg;
++	tmp &= ~CHAN_CTX_CHSTATE_MASK;
++	tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
++	chan_ctxt->chcfg = tmp;
++
++	/* Update to all cores */
++	smp_wmb();
+ }
+ 
+ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+@@ -863,12 +874,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ 	u32 soc_info;
+ 	int ret, i;
+ 
+-	if (!mhi_cntrl)
+-		return -EINVAL;
+-
+-	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
++	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
++	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
+ 	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
+-	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
++	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq)
+ 		return -EINVAL;
+ 
+ 	ret = parse_config(mhi_cntrl, config);
+@@ -890,8 +899,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ 	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
+ 	init_waitqueue_head(&mhi_cntrl->state_event);
+ 
+-	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
+-				("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
++	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
+ 	if (!mhi_cntrl->hiprio_wq) {
+ 		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
+ 		ret = -ENOMEM;
+@@ -1296,7 +1304,8 @@ static int mhi_driver_remove(struct device *dev)
+ 
+ 		mutex_lock(&mhi_chan->mutex);
+ 
+-		if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
++		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
++		     ch_state[dir] == MHI_CH_STATE_STOP) &&
+ 		    !mhi_chan->offload_ch)
+ 			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+ 
+diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
+index 4e0131b94056b..61c37b23dd718 100644
+--- a/drivers/bus/mhi/core/main.c
++++ b/drivers/bus/mhi/core/main.c
+@@ -242,10 +242,17 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+ 	smp_wmb();
+ }
+ 
++static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
++{
++	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
++}
++
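is_valid_ring_ptr() guards every device-supplied ring pointer before it is turned into a host virtual address. A self-contained sketch of the check plus the translation it protects; the field names follow the ring layout used here, but ring_to_virtual() is a model of mhi_to_virtual(), not a quote of it:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct ring {
	uint64_t iommu_base;   /* DMA address of the ring */
	size_t   len;          /* ring size in bytes */
	void    *base;         /* host virtual base */
};

static bool is_valid_ring_ptr(const struct ring *r, uint64_t addr)
{
	return addr >= r->iommu_base && addr < r->iommu_base + r->len;
}

/* Only translate after validating, so a corrupt device pointer can
 * never index outside the ring buffer. */
static void *ring_to_virtual(const struct ring *r, uint64_t addr)
{
	if (!is_valid_ring_ptr(r, addr))
		return NULL;
	return (char *)r->base + (addr - r->iommu_base);
}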
+ int mhi_destroy_device(struct device *dev, void *data)
+ {
++	struct mhi_chan *ul_chan, *dl_chan;
+ 	struct mhi_device *mhi_dev;
+ 	struct mhi_controller *mhi_cntrl;
++	enum mhi_ee_type ee = MHI_EE_MAX;
+ 
+ 	if (dev->bus != &mhi_bus_type)
+ 		return 0;
+@@ -257,6 +264,17 @@ int mhi_destroy_device(struct device *dev, void *data)
+ 	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ 		return 0;
+ 
++	ul_chan = mhi_dev->ul_chan;
++	dl_chan = mhi_dev->dl_chan;
++
++	/*
++	 * If an execution environment is specified, remove only those devices
++	 * that were started in it, based on the ee_mask of the channels, as we
++	 * move on to a different execution environment.
++	 */
++	if (data)
++		ee = *(enum mhi_ee_type *)data;
++
+ 	/*
+ 	 * For the suspend and resume case, this function will get called
+ 	 * without mhi_unregister_controller(). Hence, we need to drop the
+@@ -264,11 +282,19 @@ int mhi_destroy_device(struct device *dev, void *data)
+ 	 * be sure that there will be no instances of mhi_dev left after
+ 	 * this.
+ 	 */
+-	if (mhi_dev->ul_chan)
+-		put_device(&mhi_dev->ul_chan->mhi_dev->dev);
++	if (ul_chan) {
++		if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
++			return 0;
+ 
+-	if (mhi_dev->dl_chan)
+-		put_device(&mhi_dev->dl_chan->mhi_dev->dev);
++		put_device(&ul_chan->mhi_dev->dev);
++	}
++
++	if (dl_chan) {
++		if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
++			return 0;
++
++		put_device(&dl_chan->mhi_dev->dev);
++	}
+ 
+ 	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
+ 		 mhi_dev->name);
+@@ -383,7 +409,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
+ 	struct mhi_event_ctxt *er_ctxt =
+ 		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ 	struct mhi_ring *ev_ring = &mhi_event->ring;
+-	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++	dma_addr_t ptr = er_ctxt->rp;
++	void *dev_rp;
++
++	if (!is_valid_ring_ptr(ev_ring, ptr)) {
++		dev_err(&mhi_cntrl->mhi_dev->dev,
++			"Event ring rp points outside of the event ring\n");
++		return IRQ_HANDLED;
++	}
++
++	dev_rp = mhi_to_virtual(ev_ring, ptr);
+ 
+ 	/* Only proceed if event ring has pending events */
+ 	if (ev_ring->rp == dev_rp)
+@@ -409,7 +444,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	enum mhi_state state = MHI_STATE_MAX;
+ 	enum mhi_pm_state pm_state = 0;
+-	enum mhi_ee_type ee = 0;
++	enum mhi_ee_type ee = MHI_EE_MAX;
+ 
+ 	write_lock_irq(&mhi_cntrl->pm_lock);
+ 	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+@@ -418,8 +453,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+ 	}
+ 
+ 	state = mhi_get_mhi_state(mhi_cntrl);
+-	ee = mhi_cntrl->ee;
+-	mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
++	ee = mhi_get_exec_env(mhi_cntrl);
+ 	dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
+ 		TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
+ 		TO_MHI_STATE_STR(state));
+@@ -431,27 +465,30 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+ 	}
+ 	write_unlock_irq(&mhi_cntrl->pm_lock);
+ 
+-	 /* If device supports RDDM don't bother processing SYS error */
+-	if (mhi_cntrl->rddm_image) {
+-		/* host may be performing a device power down already */
+-		if (!mhi_is_active(mhi_cntrl))
+-			goto exit_intvec;
++	if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
++		goto exit_intvec;
+ 
+-		if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
++	switch (ee) {
++	case MHI_EE_RDDM:
++		/* proceed if power down is not already in progress */
++		if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
+ 			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
++			mhi_cntrl->ee = ee;
+ 			wake_up_all(&mhi_cntrl->state_event);
+ 		}
+-		goto exit_intvec;
+-	}
+-
+-	if (pm_state == MHI_PM_SYS_ERR_DETECT) {
++		break;
++	case MHI_EE_PBL:
++	case MHI_EE_EDL:
++	case MHI_EE_PTHRU:
++		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
++		mhi_cntrl->ee = ee;
+ 		wake_up_all(&mhi_cntrl->state_event);
+-
+-		/* For fatal errors, we let controller decide next step */
+-		if (MHI_IN_PBL(ee))
+-			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+-		else
+-			mhi_pm_sys_err_handler(mhi_cntrl);
++		mhi_pm_sys_err_handler(mhi_cntrl);
++		break;
++	default:
++		wake_up_all(&mhi_cntrl->state_event);
++		mhi_pm_sys_err_handler(mhi_cntrl);
++		break;
+ 	}
+ 
+ exit_intvec:
+@@ -536,6 +573,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ 		struct mhi_buf_info *buf_info;
+ 		u16 xfer_len;
+ 
++		if (!is_valid_ring_ptr(tre_ring, ptr)) {
++			dev_err(&mhi_cntrl->mhi_dev->dev,
++				"Event element points outside of the tre ring\n");
++			break;
++		}
+ 		/* Get the TRB this event points to */
+ 		ev_tre = mhi_to_virtual(tre_ring, ptr);
+ 
+@@ -570,8 +612,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ 			/* notify client */
+ 			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ 
+-			if (mhi_chan->dir == DMA_TO_DEVICE)
++			if (mhi_chan->dir == DMA_TO_DEVICE) {
+ 				atomic_dec(&mhi_cntrl->pending_pkts);
++				/* Release the reference got from mhi_queue() */
++				mhi_cntrl->runtime_put(mhi_cntrl);
++			}
+ 
+ 			/*
+ 			 * Recycle the buffer if buffer is pre-allocated,
+@@ -695,6 +740,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
+ 	struct mhi_chan *mhi_chan;
+ 	u32 chan;
+ 
++	if (!is_valid_ring_ptr(mhi_ring, ptr)) {
++		dev_err(&mhi_cntrl->mhi_dev->dev,
++			"Event element points outside of the cmd ring\n");
++		return;
++	}
++
+ 	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
+ 
+ 	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
+@@ -719,6 +770,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	u32 chan;
+ 	int count = 0;
++	dma_addr_t ptr = er_ctxt->rp;
+ 
+ 	/*
+ 	 * This is a quick check to avoid unnecessary event processing
+@@ -728,7 +780,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ 		return -EIO;
+ 
+-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++	if (!is_valid_ring_ptr(ev_ring, ptr)) {
++		dev_err(&mhi_cntrl->mhi_dev->dev,
++			"Event ring rp points outside of the event ring\n");
++		return -EIO;
++	}
++
++	dev_rp = mhi_to_virtual(ev_ring, ptr);
+ 	local_rp = ev_ring->rp;
+ 
+ 	while (dev_rp != local_rp) {
+@@ -834,6 +892,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ 			 */
+ 			if (chan < mhi_cntrl->max_chan) {
+ 				mhi_chan = &mhi_cntrl->mhi_chan[chan];
++				if (!mhi_chan->configured)
++					break;
+ 				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ 				event_quota--;
+ 			}
+@@ -845,7 +905,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ 
+ 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ 		local_rp = ev_ring->rp;
+-		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++
++		ptr = er_ctxt->rp;
++		if (!is_valid_ring_ptr(ev_ring, ptr)) {
++			dev_err(&mhi_cntrl->mhi_dev->dev,
++				"Event ring rp points outside of the event ring\n");
++			return -EIO;
++		}
++
++		dev_rp = mhi_to_virtual(ev_ring, ptr);
+ 		count++;
+ 	}
+ 
+@@ -868,11 +936,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ 	int count = 0;
+ 	u32 chan;
+ 	struct mhi_chan *mhi_chan;
++	dma_addr_t ptr = er_ctxt->rp;
+ 
+ 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ 		return -EIO;
+ 
+-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++	if (!is_valid_ring_ptr(ev_ring, ptr)) {
++		dev_err(&mhi_cntrl->mhi_dev->dev,
++			"Event ring rp points outside of the event ring\n");
++		return -EIO;
++	}
++
++	dev_rp = mhi_to_virtual(ev_ring, ptr);
+ 	local_rp = ev_ring->rp;
+ 
+ 	while (dev_rp != local_rp && event_quota > 0) {
+@@ -886,7 +961,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ 		 * Only process the event ring elements whose channel
+ 		 * ID is within the maximum supported range.
+ 		 */
+-		if (chan < mhi_cntrl->max_chan) {
++		if (chan < mhi_cntrl->max_chan &&
++		    mhi_cntrl->mhi_chan[chan].configured) {
+ 			mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ 
+ 			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+@@ -900,7 +976,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ 
+ 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ 		local_rp = ev_ring->rp;
+-		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++
++		ptr = er_ctxt->rp;
++		if (!is_valid_ring_ptr(ev_ring, ptr)) {
++			dev_err(&mhi_cntrl->mhi_dev->dev,
++				"Event ring rp points outside of the event ring\n");
++			return -EIO;
++		}
++
++		dev_rp = mhi_to_virtual(ev_ring, ptr);
+ 		count++;
+ 	}
+ 	read_lock_bh(&mhi_cntrl->pm_lock);
+@@ -1004,9 +1088,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ 	if (unlikely(ret))
+ 		goto exit_unlock;
+ 
+-	/* trigger M3 exit if necessary */
+-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+-		mhi_trigger_resume(mhi_cntrl);
++	/* The packet is queued; take a usage ref to exit M3 if necessary.
++	 * For a host->device buffer the balanced put is done on buffer
++	 * completion; for a device->host buffer it is done after ringing the DB.
++	 */
++	mhi_cntrl->runtime_get(mhi_cntrl);
+ 
+ 	/* Assert dev_wake (to exit/prevent M1/M2)*/
+ 	mhi_cntrl->wake_toggle(mhi_cntrl);
+@@ -1014,12 +1100,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ 	if (mhi_chan->dir == DMA_TO_DEVICE)
+ 		atomic_inc(&mhi_cntrl->pending_pkts);
+ 
+-	if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+-		ret = -EIO;
+-		goto exit_unlock;
+-	}
++	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
++		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ 
+-	mhi_ring_chan_db(mhi_cntrl, mhi_chan);
++	if (dir == DMA_FROM_DEVICE)
++		mhi_cntrl->runtime_put(mhi_cntrl);
+ 
+ exit_unlock:
+ 	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
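The mhi_queue() hunks pair exactly one runtime-PM reference with each queued packet: host->device (TX) buffers drop it in the completion path, device->host (RX) buffers drop it right after the doorbell is rung. A counter-based sketch of that invariant (all names assumed, the counter models the device usage count):

#include <assert.h>

static int pm_refs;                 /* models the runtime-PM usage count */

static void runtime_get(void) { pm_refs++; }
static void runtime_put(void) { assert(pm_refs > 0); pm_refs--; }

static void queue_tx(void) { runtime_get(); /* put happens on completion */ }
static void tx_complete(void) { runtime_put(); }

static void queue_rx(void)
{
	runtime_get();
	/* ... ring the channel doorbell ... */
	runtime_put();              /* RX drops the ref immediately */
}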
+@@ -1365,6 +1450,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+ 	struct mhi_ring *ev_ring;
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	unsigned long flags;
++	dma_addr_t ptr;
+ 
+ 	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
+ 
+@@ -1372,7 +1458,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+ 
+ 	/* mark all stale events related to channel as STALE event */
+ 	spin_lock_irqsave(&mhi_event->lock, flags);
+-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++
++	ptr = er_ctxt->rp;
++	if (!is_valid_ring_ptr(ev_ring, ptr)) {
++		dev_err(&mhi_cntrl->mhi_dev->dev,
++			"Event ring rp points outside of the event ring\n");
++		dev_rp = ev_ring->rp;
++	} else {
++		dev_rp = mhi_to_virtual(ev_ring, ptr);
++	}
+ 
+ 	local_rp = ev_ring->rp;
+ 	while (dev_rp != local_rp) {
+@@ -1403,8 +1497,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
+ 	while (tre_ring->rp != tre_ring->wp) {
+ 		struct mhi_buf_info *buf_info = buf_ring->rp;
+ 
+-		if (mhi_chan->dir == DMA_TO_DEVICE)
++		if (mhi_chan->dir == DMA_TO_DEVICE) {
+ 			atomic_dec(&mhi_cntrl->pending_pkts);
++			/* Release the reference got from mhi_queue() */
++			mhi_cntrl->runtime_put(mhi_cntrl);
++		}
+ 
+ 		if (!buf_info->pre_mapped)
+ 			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
+index 681960c72d2a8..277704af7eb6f 100644
+--- a/drivers/bus/mhi/core/pm.c
++++ b/drivers/bus/mhi/core/pm.c
+@@ -377,24 +377,28 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
+ {
+ 	struct mhi_event *mhi_event;
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
++	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
+ 	int i, ret;
+ 
+ 	dev_dbg(dev, "Processing Mission Mode transition\n");
+ 
+ 	write_lock_irq(&mhi_cntrl->pm_lock);
+ 	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+-		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
++		ee = mhi_get_exec_env(mhi_cntrl);
+ 
+-	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
++	if (!MHI_IN_MISSION_MODE(ee)) {
+ 		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ 		write_unlock_irq(&mhi_cntrl->pm_lock);
+ 		wake_up_all(&mhi_cntrl->state_event);
+ 		return -EIO;
+ 	}
++	mhi_cntrl->ee = ee;
+ 	write_unlock_irq(&mhi_cntrl->pm_lock);
+ 
+ 	wake_up_all(&mhi_cntrl->state_event);
+ 
++	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
++			      mhi_destroy_device);
+ 	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+ 
+ 	/* Force MHI to be in M0 state before continuing */
+@@ -1092,7 +1096,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
+ 							   &val) ||
+ 					!val,
+ 				msecs_to_jiffies(mhi_cntrl->timeout_ms));
+-		if (ret) {
++		if (!ret) {
+ 			ret = -EIO;
+ 			dev_info(dev, "Failed to reset MHI due to syserr state\n");
+ 			goto error_bhi_offset;
+diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
+index 20673a4b4a3c6..ef549c695b55c 100644
+--- a/drivers/bus/mhi/pci_generic.c
++++ b/drivers/bus/mhi/pci_generic.c
+@@ -230,6 +230,21 @@ static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
+ 	}
+ }
+ 
++static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
++{
++	/* no-op */
++}
++
++static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
++{
++	/* no-op */
++}
++
++static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
++{
++	/* no-op */
++}
++
+ static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+@@ -433,6 +448,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	mhi_cntrl->status_cb = mhi_pci_status_cb;
+ 	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
+ 	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
++	mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
++	mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
++	mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+ 
+ 	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+ 	if (err)
+@@ -498,6 +516,12 @@ static void mhi_pci_remove(struct pci_dev *pdev)
+ 	mhi_unregister_controller(mhi_cntrl);
+ }
+ 
++static void mhi_pci_shutdown(struct pci_dev *pdev)
++{
++	mhi_pci_remove(pdev);
++	pci_set_power_state(pdev, PCI_D3hot);
++}
++
+ static void mhi_pci_reset_prepare(struct pci_dev *pdev)
+ {
+ 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+@@ -668,6 +692,7 @@ static struct pci_driver mhi_pci_driver = {
+ 	.id_table	= mhi_pci_id_table,
+ 	.probe		= mhi_pci_probe,
+ 	.remove		= mhi_pci_remove,
++	.shutdown	= mhi_pci_shutdown,
+ 	.err_handler	= &mhi_pci_err_handler,
+ 	.driver.pm	= &mhi_pci_pm_ops
+ };
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 3d74f237f005b..9e535336689fd 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -635,6 +635,51 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
+ 	return 0;
+ }
+ 
++/* Interconnect instances to probe before l4_per instances */
++static struct resource early_bus_ranges[] = {
++	/* am3/4 l4_wkup */
++	{ .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
++	/* omap4/5 and dra7 l4_cfg */
++	{ .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
++	/* omap4 l4_wkup */
++	{ .start = 0x4a300000, .end = 0x4a300000 + 0x30000,  },
++	/* omap5 and dra7 l4_wkup without dra7 dcan segment */
++	{ .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000,  },
++};
++
++static atomic_t sysc_defer = ATOMIC_INIT(10);
++
++/**
++ * sysc_defer_non_critical - defer non-critical interconnect probing
++ * @ddata: device driver data
++ *
++ * We want to probe l4_cfg and l4_wkup interconnect instances before any
++ * l4_per instances as l4_per instances depend on resources on l4_cfg and
++ * l4_wkup interconnects.
++ */
++static int sysc_defer_non_critical(struct sysc *ddata)
++{
++	struct resource *res;
++	int i;
++
++	if (!atomic_read(&sysc_defer))
++		return 0;
++
++	for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
++		res = &early_bus_ranges[i];
++		if (ddata->module_pa >= res->start &&
++		    ddata->module_pa <= res->end) {
++			atomic_set(&sysc_defer, 0);
++
++			return 0;
++		}
++	}
++
++	atomic_dec_if_positive(&sysc_defer);
++
++	return -EPROBE_DEFER;
++}
++
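sysc_defer_non_critical() leans on the driver core's deferred-probe mechanism: returning -EPROBE_DEFER requeues the device, so the critical l4_cfg/l4_wkup ranges get probed before any l4_per instance. A simplified model of the countdown it implements; the retry loop here is illustrative, since the kernel's version is event-driven rather than polled:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int budget = 10;             /* mirrors the sysc_defer countdown */

static int probe_one(bool critical)
{
	if (budget == 0)
		return 0;               /* deferral window closed, probe all */
	if (critical) {
		budget = 0;             /* a critical bus probed: stop deferring */
		return 0;
	}
	budget--;
	return -EPROBE_DEFER;       /* ask the core to retry later */
}

int main(void)
{
	while (probe_one(false) == -EPROBE_DEFER)
		printf("deferred, budget now %d\n", budget);
	return 0;
}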
+ static struct device_node *stdout_path;
+ 
+ static void sysc_init_stdout_path(struct sysc *ddata)
+@@ -863,6 +908,10 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
+ 	if (error)
+ 		return error;
+ 
++	error = sysc_defer_non_critical(ddata);
++	if (error)
++		return error;
++
+ 	sysc_check_children(ddata);
+ 
+ 	error = sysc_parse_registers(ddata);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 0fe9e200e4c84..5d6acfecd919b 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -819,7 +819,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
+ 
+ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+ {
+-	memcpy(&crng->state[0], "expand 32-byte k", 16);
++	chacha_init_consts(crng->state);
+ 	_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ 	crng_init_try_arch(crng);
+ 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+@@ -827,7 +827,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+ 
+ static void __init crng_initialize_primary(struct crng_state *crng)
+ {
+-	memcpy(&crng->state[0], "expand 32-byte k", 16);
++	chacha_init_consts(crng->state);
+ 	_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
+ 	if (crng_init_try_arch_early(crng) && trust_cpu) {
+ 		invalidate_batched_entropy();
+diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
+index 3633ed70f48fa..1b18ce5ebab1e 100644
+--- a/drivers/char/tpm/eventlog/acpi.c
++++ b/drivers/char/tpm/eventlog/acpi.c
+@@ -41,6 +41,27 @@ struct acpi_tcpa {
+ 	};
+ };
+ 
++/* Check that the given log is indeed a TPM2 log. */
++static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
++{
++	struct tcg_efi_specid_event_head *efispecid;
++	struct tcg_pcr_event *event_header;
++	int n;
++
++	if (len < sizeof(*event_header))
++		return false;
++	len -= sizeof(*event_header);
++	event_header = bios_event_log;
++
++	if (len < sizeof(*efispecid))
++		return false;
++	efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
++
++	n = memcmp(efispecid->signature, TCG_SPECID_SIG,
++		   sizeof(TCG_SPECID_SIG));
++	return n == 0;
++}
++
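tpm_is_tpm2_log() is a bounds-checked header probe: consume the fixed event header, then verify that the spec-ID signature a TPM2 (crypto-agile) log carries is present. A generic sketch of the same two-step check; the struct layouts are placeholders rather than the real tcg_pcr_event definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct hdr  { uint32_t pcr, type, digest[5], event_size; };
struct spec { char signature[16]; };

#define SPEC_SIG "Spec ID Event03"   /* stands in for TCG_SPECID_SIG */

static bool looks_like_tpm2_log(const void *log, size_t len)
{
	if (len < sizeof(struct hdr))         /* room for the event header? */
		return false;
	len -= sizeof(struct hdr);
	if (len < sizeof(struct spec))        /* room for the spec-ID event? */
		return false;
	const struct spec *s =
		(const struct spec *)((const char *)log + sizeof(struct hdr));
	return memcmp(s->signature, SPEC_SIG, sizeof(SPEC_SIG)) == 0;
}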
+ /* read binary bios log */
+ int tpm_read_log_acpi(struct tpm_chip *chip)
+ {
+@@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 	struct acpi_table_tpm2 *tbl;
+ 	struct acpi_tpm2_phy *tpm2_phy;
+ 	int format;
++	int ret;
+ 
+ 	log = &chip->log;
+ 
+@@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 
+ 	log->bios_event_log_end = log->bios_event_log + len;
+ 
++	ret = -EIO;
+ 	virt = acpi_os_map_iomem(start, len);
+ 	if (!virt)
+ 		goto err;
+@@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 	memcpy_fromio(log->bios_event_log, virt, len);
+ 
+ 	acpi_os_unmap_iomem(virt, len);
++
++	if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
++	    !tpm_is_tpm2_log(log->bios_event_log, len)) {
++		/* try EFI log next */
++		ret = -ENODEV;
++		goto err;
++	}
++
+ 	return format;
+ 
+ err:
+ 	kfree(log->bios_event_log);
+ 	log->bios_event_log = NULL;
+-	return -EIO;
++	return ret;
+ 
+ }
+diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
+index 7460f230bae4c..8512ec76d5260 100644
+--- a/drivers/char/tpm/eventlog/common.c
++++ b/drivers/char/tpm/eventlog/common.c
+@@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
+ 	int log_version;
+ 	int rc = 0;
+ 
++	if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
++		return;
++
+ 	rc = tpm_read_log(chip);
+ 	if (rc < 0)
+ 		return;
+diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
+index 35229e5143cac..e6cb9d525e30c 100644
+--- a/drivers/char/tpm/eventlog/efi.c
++++ b/drivers/char/tpm/eventlog/efi.c
+@@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
+ {
+ 
+ 	struct efi_tcg2_final_events_table *final_tbl = NULL;
++	int final_events_log_size = efi_tpm_final_log_size;
+ 	struct linux_efi_tpm_eventlog *log_tbl;
+ 	struct tpm_bios_log *log;
+ 	u32 log_size;
+@@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
+ 	ret = tpm_log_version;
+ 
+ 	if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+-	    efi_tpm_final_log_size == 0 ||
++	    final_events_log_size == 0 ||
+ 	    tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ 		goto out;
+ 
+ 	final_tbl = memremap(efi.tpm_final_log,
+-			     sizeof(*final_tbl) + efi_tpm_final_log_size,
++			     sizeof(*final_tbl) + final_events_log_size,
+ 			     MEMREMAP_WB);
+ 	if (!final_tbl) {
+ 		pr_err("Could not map UEFI TPM final log\n");
+@@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
+ 		goto out;
+ 	}
+ 
+-	efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
++	/*
++	 * The 'final events log' size excludes the 'final events preboot log'
++	 * at its beginning.
++	 */
++	final_events_log_size -= log_tbl->final_events_preboot_size;
+ 
++	/*
++	 * Allocate memory for the 'combined log' where we will append the
++	 * 'final events log' to.
++	 */
+ 	tmp = krealloc(log->bios_event_log,
+-		       log_size + efi_tpm_final_log_size,
++		       log_size + final_events_log_size,
+ 		       GFP_KERNEL);
+ 	if (!tmp) {
+ 		kfree(log->bios_event_log);
+@@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
+ 	log->bios_event_log = tmp;
+ 
+ 	/*
+-	 * Copy any of the final events log that didn't also end up in the
+-	 * main log. Events can be logged in both if events are generated
++	 * Append any of the 'final events log' that didn't also end up in the
++	 * 'main log'. Events can be logged in both if events are generated
+ 	 * between GetEventLog() and ExitBootServices().
+ 	 */
+ 	memcpy((void *)log->bios_event_log + log_size,
+ 	       final_tbl->events + log_tbl->final_events_preboot_size,
+-	       efi_tpm_final_log_size);
++	       final_events_log_size);
++	/*
++	 * The size of the 'combined log' is the size of the 'main log' plus
++	 * the size of the 'final events log'.
++	 */
+ 	log->bios_event_log_end = log->bios_event_log +
+-		log_size + efi_tpm_final_log_size;
++		log_size + final_events_log_size;
+ 
+ out:
+ 	memunmap(final_tbl);
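The renaming above makes the log arithmetic explicit: combined = main + (final - preboot), with the preboot prefix skipped both when sizing the buffer and when copying. A tiny worked sketch of the append, with realloc/memcpy modelling the kernel's krealloc/memcpy:

#include <stdlib.h>
#include <string.h>

/* Append the tail of the final-events log (past its preboot prefix)
 * onto the main log; returns the new buffer or NULL on failure. */
static void *append_final_log(void *main_log, size_t main_sz,
			      const char *final_log, size_t final_sz,
			      size_t preboot_sz)
{
	size_t tail = final_sz - preboot_sz; /* caller ensures final >= preboot */
	char *combined = realloc(main_log, main_sz + tail);

	if (!combined)
		return NULL;
	memcpy(combined + main_sz, final_log + preboot_sz, tail);
	return combined;
}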
+diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
+index cd5df91036142..d62778884208c 100644
+--- a/drivers/clk/socfpga/clk-gate-a10.c
++++ b/drivers/clk/socfpga/clk-gate-a10.c
+@@ -146,6 +146,7 @@ static void __init __socfpga_gate_init(struct device_node *node,
+ 		if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) {
+ 			pr_err("%s: failed to find altr,sys-mgr regmap!\n",
+ 					__func__);
++			kfree(socfpga_clk);
+ 			return;
+ 		}
+ 	}
+diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
+index 42e7e43b8fcd9..b1e2b697b21bd 100644
+--- a/drivers/clocksource/dw_apb_timer_of.c
++++ b/drivers/clocksource/dw_apb_timer_of.c
+@@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np,
+ 		return 0;
+ 
+ 	timer_clk = of_clk_get_by_name(np, "timer");
+-	if (IS_ERR(timer_clk))
+-		return PTR_ERR(timer_clk);
++	if (IS_ERR(timer_clk)) {
++		ret = PTR_ERR(timer_clk);
++		goto out_pclk_disable;
++	}
+ 
+ 	ret = clk_prepare_enable(timer_clk);
+ 	if (ret)
+-		return ret;
++		goto out_timer_clk_put;
+ 
+ 	*rate = clk_get_rate(timer_clk);
+-	if (!(*rate))
+-		return -EINVAL;
++	if (!(*rate)) {
++		ret = -EINVAL;
++		goto out_timer_clk_disable;
++	}
+ 
+ 	return 0;
++
++out_timer_clk_disable:
++	clk_disable_unprepare(timer_clk);
++out_timer_clk_put:
++	clk_put(timer_clk);
++out_pclk_disable:
++	if (!IS_ERR(pclk)) {
++		clk_disable_unprepare(pclk);
++		clk_put(pclk);
++	}
++	iounmap(*base);
++	return ret;
+ }
+ 
+ static int __init add_clockevent(struct device_node *event_timer)
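The dw_apb fix above is the classic kernel unwind ladder: each acquisition gets a label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A compact sketch of the shape (resource names are illustrative):

#include <stdio.h>

static int acquire(const char *what) { printf("get %s\n", what); return 0; }
static void release(const char *what) { printf("put %s\n", what); }

static int setup(void)
{
	int ret;

	ret = acquire("pclk");
	if (ret)
		return ret;
	ret = acquire("timer_clk");
	if (ret)
		goto out_pclk;           /* undo only what succeeded */
	ret = acquire("rate");
	if (ret)
		goto out_timer_clk;
	return 0;

out_timer_clk:
	release("timer_clk");
out_pclk:
	release("pclk");
	return ret;
}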
+diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
+index 191966dc8d023..29c5e83500d33 100644
+--- a/drivers/cpuidle/cpuidle-tegra.c
++++ b/drivers/cpuidle/cpuidle-tegra.c
+@@ -135,13 +135,13 @@ static int tegra_cpuidle_c7_enter(void)
+ {
+ 	int err;
+ 
+-	if (tegra_cpuidle_using_firmware()) {
+-		err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
+-		if (err)
+-			return err;
++	err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
++	if (err && err != -ENOSYS)
++		return err;
+ 
+-		return call_firmware_op(do_idle, 0);
+-	}
++	err = call_firmware_op(do_idle, 0);
++	if (err != -ENOSYS)
++		return err;
+ 
+ 	return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend);
+ }
+diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+index c2e6f5ed1d797..dec79fa3ebafb 100644
+--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+@@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
+ 				    sizeof(struct sun4i_cipher_req_ctx) +
+ 				    crypto_skcipher_reqsize(op->fallback_tfm));
+ 
+-	err = pm_runtime_get_sync(op->ss->dev);
++	err = pm_runtime_resume_and_get(op->ss->dev);
+ 	if (err < 0)
+ 		goto error_pm;
+ 
+diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+index 709905ec46806..02a2d34845f24 100644
+--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
++++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+@@ -459,7 +459,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
+ 	 * this info could be useful
+ 	 */
+ 
+-	err = pm_runtime_get_sync(ss->dev);
++	err = pm_runtime_resume_and_get(ss->dev);
+ 	if (err < 0)
+ 		goto error_pm;
+ 
+diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+index c1b4585e9bbc7..d28292762b324 100644
+--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
++++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+@@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
+ 	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+ 	op->ss = algt->ss;
+ 
+-	err = pm_runtime_get_sync(op->ss->dev);
++	err = pm_runtime_resume_and_get(op->ss->dev);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
+index 443160a114bb0..491fcb7b81b40 100644
+--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
++++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
+@@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ 	algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
+ 	ss = algt->ss;
+ 
+-	err = pm_runtime_get_sync(ss->dev);
++	err = pm_runtime_resume_and_get(ss->dev);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+index 158422ff5695c..00194d1d9ae69 100644
+--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+@@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
+ 	if (err)
+ 		goto error_alg;
+ 
+-	err = pm_runtime_get_sync(ce->dev);
++	err = pm_runtime_resume_and_get(ce->dev);
+ 	if (err < 0)
+ 		goto error_alg;
+ 
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+index ed2a69f82e1c1..7c355bc2fb066 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+@@ -351,7 +351,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
+ 	op->enginectx.op.prepare_request = NULL;
+ 	op->enginectx.op.unprepare_request = NULL;
+ 
+-	err = pm_runtime_get_sync(op->ss->dev);
++	err = pm_runtime_resume_and_get(op->ss->dev);
+ 	if (err < 0) {
+ 		dev_err(op->ss->dev, "pm error %d\n", err);
+ 		goto error_pm;
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+index e0ddc684798dc..80e89066dbd1a 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+@@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
+ 	if (err)
+ 		goto error_alg;
+ 
+-	err = pm_runtime_get_sync(ss->dev);
++	err = pm_runtime_resume_and_get(ss->dev);
+ 	if (err < 0)
+ 		goto error_alg;
+ 
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index 2eaa516b32311..8adcbb3271267 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -546,7 +546,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
+ 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ 	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ 	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+-		dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
++		pr_err("get error skcipher iv size!\n");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
+index a45bdcf3026df..0dd4c6b157de9 100644
+--- a/drivers/crypto/omap-aes.c
++++ b/drivers/crypto/omap-aes.c
+@@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
+ 		dd->err = 0;
+ 	}
+ 
+-	err = pm_runtime_get_sync(dd->dev);
++	err = pm_runtime_resume_and_get(dd->dev);
+ 	if (err < 0) {
+-		pm_runtime_put_noidle(dd->dev);
+ 		dev_err(dd->dev, "failed to get sync: %d\n", err);
+ 		return err;
+ 	}
+@@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
+ 	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+ 
+ 	pm_runtime_enable(dev);
+-	err = pm_runtime_get_sync(dev);
++	err = pm_runtime_resume_and_get(dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "%s: failed to get_sync(%d)\n",
+ 			__func__, err);
+@@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
+ 
+ static int omap_aes_resume(struct device *dev)
+ {
+-	pm_runtime_get_sync(dev);
++	pm_runtime_resume_and_get(dev);
+ 	return 0;
+ }
+ #endif
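The recurring pm_runtime_get_sync() -> pm_runtime_resume_and_get() conversions in this run of hunks all fix the same leak: get_sync() raises the usage count even when the resume fails, so every error path used to need a pm_runtime_put_noidle(). A sketch of why the wrapper is safer, with a plain counter standing in for the device usage count:

/* Model of the two runtime-PM helpers; 'fail' simulates a resume error. */
static int usage_count;

static int get_sync(int fail)
{
	usage_count++;                   /* count raised even on failure */
	return fail ? -1 : 0;
}

static int resume_and_get(int fail)
{
	int ret = get_sync(fail);

	if (ret < 0)
		usage_count--;           /* drop the ref so callers need no
					  * put_noidle() on the error path */
	return ret;
}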
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index ff78c73c47e38..ea1c6899290d7 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -719,7 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ 	struct qat_alg_buf_list *bufl;
+ 	struct qat_alg_buf_list *buflout = NULL;
+ 	dma_addr_t blp;
+-	dma_addr_t bloutp = 0;
++	dma_addr_t bloutp;
+ 	struct scatterlist *sg;
+ 	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+ 
+@@ -731,6 +731,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ 	if (unlikely(!bufl))
+ 		return -ENOMEM;
+ 
++	for_each_sg(sgl, sg, n, i)
++		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
++
+ 	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+ 	if (unlikely(dma_mapping_error(dev, blp)))
+ 		goto err_in;
+@@ -764,10 +767,14 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ 				       dev_to_node(&GET_DEV(inst->accel_dev)));
+ 		if (unlikely(!buflout))
+ 			goto err_in;
++
++		bufers = buflout->bufers;
++		for_each_sg(sglout, sg, n, i)
++			bufers[i].addr = DMA_MAPPING_ERROR;
++
+ 		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+ 		if (unlikely(dma_mapping_error(dev, bloutp)))
+ 			goto err_out;
+-		bufers = buflout->bufers;
+ 		for_each_sg(sglout, sg, n, i) {
+ 			int y = sg_nctr;
+ 
+diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
+index f300b0a5958a5..d7b1628fb4848 100644
+--- a/drivers/crypto/sa2ul.c
++++ b/drivers/crypto/sa2ul.c
+@@ -2350,7 +2350,7 @@ static int sa_ul_probe(struct platform_device *pdev)
+ 	dev_set_drvdata(sa_k3_dev, dev_data);
+ 
+ 	pm_runtime_enable(dev);
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
+ 			ret);
+diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
+index 2a4793176c713..7389a0536ff02 100644
+--- a/drivers/crypto/stm32/stm32-cryp.c
++++ b/drivers/crypto/stm32/stm32-cryp.c
+@@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
+ 	int ret;
+ 	u32 cfg, hw_mode;
+ 
+-	pm_runtime_get_sync(cryp->dev);
++	pm_runtime_resume_and_get(cryp->dev);
+ 
+ 	/* Disable interrupt */
+ 	stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+@@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
+ 	if (!cryp)
+ 		return -ENODEV;
+ 
+-	ret = pm_runtime_get_sync(cryp->dev);
++	ret = pm_runtime_resume_and_get(cryp->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
+index 7ac0573ef6630..389de9e3302d5 100644
+--- a/drivers/crypto/stm32/stm32-hash.c
++++ b/drivers/crypto/stm32/stm32-hash.c
+@@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
+ static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
+ 			      struct stm32_hash_request_ctx *rctx)
+ {
+-	pm_runtime_get_sync(hdev->dev);
++	pm_runtime_resume_and_get(hdev->dev);
+ 
+ 	if (!(HASH_FLAGS_INIT & hdev->flags)) {
+ 		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
+@@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
+ 	u32 *preg;
+ 	unsigned int i;
+ 
+-	pm_runtime_get_sync(hdev->dev);
++	pm_runtime_resume_and_get(hdev->dev);
+ 
+ 	while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
+ 		cpu_relax();
+@@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
+ 
+ 	preg = rctx->hw_context;
+ 
+-	pm_runtime_get_sync(hdev->dev);
++	pm_runtime_resume_and_get(hdev->dev);
+ 
+ 	stm32_hash_write(hdev, HASH_IMR, *preg++);
+ 	stm32_hash_write(hdev, HASH_STR, *preg++);
+@@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
+ 	if (!hdev)
+ 		return -ENODEV;
+ 
+-	ret = pm_runtime_get_sync(hdev->dev);
++	ret = pm_runtime_resume_and_get(hdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
+index aae82db542a5e..76aacbac5869d 100644
+--- a/drivers/extcon/extcon-arizona.c
++++ b/drivers/extcon/extcon-arizona.c
+@@ -601,7 +601,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
+ 	struct arizona *arizona = info->arizona;
+ 	int id_gpio = arizona->pdata.hpdet_id_gpio;
+ 	unsigned int report = EXTCON_JACK_HEADPHONE;
+-	int ret, reading;
++	int ret, reading, state;
+ 	bool mic = false;
+ 
+ 	mutex_lock(&info->lock);
+@@ -614,12 +614,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
+ 	}
+ 
+ 	/* If the cable was removed while measuring, ignore the result */
+-	ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
+-	if (ret < 0) {
+-		dev_err(arizona->dev, "Failed to check cable state: %d\n",
+-			ret);
++	state = extcon_get_state(info->edev, EXTCON_MECHANICAL);
++	if (state < 0) {
++		dev_err(arizona->dev, "Failed to check cable state: %d\n", state);
+ 		goto out;
+-	} else if (!ret) {
++	} else if (!state) {
+ 		dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
+ 		goto done;
+ 	}
+@@ -667,7 +666,7 @@ done:
+ 		gpio_set_value_cansleep(id_gpio, 0);
+ 
+ 	/* If we have a mic then reenable MICDET */
+-	if (mic || info->mic)
++	if (state && (mic || info->mic))
+ 		arizona_start_mic(info);
+ 
+ 	if (info->hpdet_active) {
+@@ -675,7 +674,9 @@ done:
+ 		info->hpdet_active = false;
+ 	}
+ 
+-	info->hpdet_done = true;
++	/* Do not set hpdet_done when the cable has been unplugged */
++	if (state)
++		info->hpdet_done = true;
+ 
+ out:
+ 	mutex_unlock(&info->lock);
+@@ -1759,25 +1760,6 @@ static int arizona_extcon_remove(struct platform_device *pdev)
+ 	bool change;
+ 	int ret;
+ 
+-	ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
+-				       ARIZONA_MICD_ENA, 0,
+-				       &change);
+-	if (ret < 0) {
+-		dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
+-			ret);
+-	} else if (change) {
+-		regulator_disable(info->micvdd);
+-		pm_runtime_put(info->dev);
+-	}
+-
+-	gpiod_put(info->micd_pol_gpio);
+-
+-	pm_runtime_disable(&pdev->dev);
+-
+-	regmap_update_bits(arizona->regmap,
+-			   ARIZONA_MICD_CLAMP_CONTROL,
+-			   ARIZONA_MICD_CLAMP_MODE_MASK, 0);
+-
+ 	if (info->micd_clamp) {
+ 		jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
+ 		jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
+@@ -1793,10 +1775,31 @@ static int arizona_extcon_remove(struct platform_device *pdev)
+ 	arizona_free_irq(arizona, jack_irq_rise, info);
+ 	arizona_free_irq(arizona, jack_irq_fall, info);
+ 	cancel_delayed_work_sync(&info->hpdet_work);
++	cancel_delayed_work_sync(&info->micd_detect_work);
++	cancel_delayed_work_sync(&info->micd_timeout_work);
++
++	ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
++				       ARIZONA_MICD_ENA, 0,
++				       &change);
++	if (ret < 0) {
++		dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
++			ret);
++	} else if (change) {
++		regulator_disable(info->micvdd);
++		pm_runtime_put(info->dev);
++	}
++
++	regmap_update_bits(arizona->regmap,
++			   ARIZONA_MICD_CLAMP_CONTROL,
++			   ARIZONA_MICD_CLAMP_MODE_MASK, 0);
+ 	regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
+ 			   ARIZONA_JD1_ENA, 0);
+ 	arizona_clk32k_disable(arizona);
+ 
++	gpiod_put(info->micd_pol_gpio);
++
++	pm_runtime_disable(&pdev->dev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index c23466e05e60d..d0537573501e9 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -13,7 +13,8 @@ cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ \
+ 				   -Wno-pointer-sign \
+ 				   $(call cc-disable-warning, address-of-packed-member) \
+ 				   $(call cc-disable-warning, gnu) \
+-				   -fno-asynchronous-unwind-tables
++				   -fno-asynchronous-unwind-tables \
++				   $(CLANG_FLAGS)
+ 
+ # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+ # disable the stackleak plugin
+diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
+index 04e47e266f26d..b44523ea8c91a 100644
+--- a/drivers/fpga/dfl-pci.c
++++ b/drivers/fpga/dfl-pci.c
+@@ -69,14 +69,16 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
+ }
+ 
+ /* PCI Device ID */
+-#define PCIE_DEVICE_ID_PF_INT_5_X	0xBCBD
+-#define PCIE_DEVICE_ID_PF_INT_6_X	0xBCC0
+-#define PCIE_DEVICE_ID_PF_DSC_1_X	0x09C4
+-#define PCIE_DEVICE_ID_INTEL_PAC_N3000	0x0B30
++#define PCIE_DEVICE_ID_PF_INT_5_X		0xBCBD
++#define PCIE_DEVICE_ID_PF_INT_6_X		0xBCC0
++#define PCIE_DEVICE_ID_PF_DSC_1_X		0x09C4
++#define PCIE_DEVICE_ID_INTEL_PAC_N3000		0x0B30
++#define PCIE_DEVICE_ID_INTEL_PAC_D5005		0x0B2B
+ /* VF Device */
+-#define PCIE_DEVICE_ID_VF_INT_5_X	0xBCBF
+-#define PCIE_DEVICE_ID_VF_INT_6_X	0xBCC1
+-#define PCIE_DEVICE_ID_VF_DSC_1_X	0x09C5
++#define PCIE_DEVICE_ID_VF_INT_5_X		0xBCBF
++#define PCIE_DEVICE_ID_VF_INT_6_X		0xBCC1
++#define PCIE_DEVICE_ID_VF_DSC_1_X		0x09C5
++#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF	0x0B2C
+ 
+ static struct pci_device_id cci_pcie_id_tbl[] = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
+@@ -86,6 +88,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
+ 	{0,}
+ };
+ MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 8a5a8ff5d3629..5eee251e3335b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3613,6 +3613,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
+ {
+ 	dev_info(adev->dev, "amdgpu: finishing device.\n");
+ 	flush_delayed_work(&adev->delayed_init_work);
++	ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ 	adev->shutdown = true;
+ 
+ 	kfree(adev->pci_state);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index d56f4023ebb31..7e8e46c39dbd3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -533,6 +533,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+ 
+ 		if (!ring || !ring->fence_drv.initialized)
+ 			continue;
++		if (!ring->no_scheduler)
++			drm_sched_fini(&ring->sched);
+ 		r = amdgpu_fence_wait_empty(ring);
+ 		if (r) {
+ 			/* no need to trigger GPU reset as we are unloading */
+@@ -541,8 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+ 		if (ring->fence_drv.irq_src)
+ 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ 				       ring->fence_drv.irq_type);
+-		if (!ring->no_scheduler)
+-			drm_sched_fini(&ring->sched);
++
+ 		del_timer_sync(&ring->fence_drv.fallback_timer);
+ 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+ 			dma_fence_put(ring->fence_drv.fences[j]);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index afbbec82a289c..9be945d8e72f8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -535,7 +535,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+ 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+ 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+ 
+-			if (!src)
++			if (!src || !src->funcs || !src->funcs->set)
+ 				continue;
+ 			for (k = 0; k < src->num_types; k++)
+ 				amdgpu_irq_update(adev, src, k);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 5efa331e3ee82..383c178cf0746 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -942,7 +942,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+ 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ 
+ 	/* double check that we don't free the table twice */
+-	if (!ttm->sg->sgl)
++	if (!ttm->sg || !ttm->sg->sgl)
+ 		return;
+ 
+ 	/* unmap the pages mapped to the device */
+@@ -1162,13 +1162,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+ 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ 	int r;
+ 
+-	if (!gtt->bound)
+-		return;
+-
+ 	/* if the pages have userptr pinning then clear that first */
+ 	if (gtt->userptr)
+ 		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
+ 
++	if (!gtt->bound)
++		return;
++
+ 	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
+ 		return;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index e2ed4689118a9..c6dbc08016045 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ 		if ((adev->asic_type == CHIP_POLARIS10 ||
+ 		     adev->asic_type == CHIP_POLARIS11) &&
+ 		    (adev->uvd.fw_version < FW_1_66_16))
+-			DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
++			DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
+ 				  version_major, version_minor);
+ 	} else {
+ 		unsigned int enc_major, enc_minor, dec_minor;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+index 659b385b27b5a..4d3a24fdeb9ca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+@@ -468,15 +468,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
+ }
+ 
+ 
++/*
++ * NOTE psp_xgmi_node_info.num_hops layout is as follows:
++ * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
++ * num_hops[5:3] = reserved
++ * num_hops[2:0] = number of hops
++ */
+ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
+ 		struct amdgpu_device *peer_adev)
+ {
+ 	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
++	uint8_t num_hops_mask = 0x7;
+ 	int i;
+ 
+ 	for (i = 0 ; i < top->num_nodes; ++i)
+ 		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
+-			return top->nodes[i].num_hops;
++			return top->nodes[i].num_hops & num_hops_mask;
+ 	return	-EINVAL;
+ }
+ 
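The xgmi fix masks num_hops down to its low bits because, per the NOTE above, bits [7:6] encode the link type. A sketch of unpacking that byte; the shift and mask values follow the layout in the comment:

#include <stdint.h>
#include <stdio.h>

#define HOPS_MASK  0x07u                 /* num_hops[2:0] */
#define LINK_SHIFT 6                     /* num_hops[7:6] = link type */

static unsigned hops(uint8_t num_hops)      { return num_hops & HOPS_MASK; }
static unsigned link_type(uint8_t num_hops) { return num_hops >> LINK_SHIFT; }

int main(void)
{
	uint8_t v = (1u << LINK_SHIFT) | 3;  /* xGMI3 link, 3 hops */
	printf("hops=%u link=%u\n", hops(v), link_type(v));
	return 0;
}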
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index 88626d83e07be..ca8efa5c6978d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -220,10 +220,8 @@ static int vega10_ih_enable_ring(struct amdgpu_device *adev,
+ 	tmp = vega10_ih_rb_cntl(ih, tmp);
+ 	if (ih == &adev->irq.ih)
+ 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+-	if (ih == &adev->irq.ih1) {
+-		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
++	if (ih == &adev->irq.ih1)
+ 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+-	}
+ 	if (amdgpu_sriov_vf(adev)) {
+ 		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ 			dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+@@ -265,7 +263,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
+ 	u32 ih_chicken;
+ 	int ret;
+ 	int i;
+-	u32 tmp;
+ 
+ 	/* disable irqs */
+ 	ret = vega10_ih_toggle_interrupts(adev, false);
+@@ -291,15 +288,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
+ 		}
+ 	}
+ 
+-	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
+-	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
+-			    CLIENT18_IS_STORM_CLIENT, 1);
+-	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
+-
+-	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
+-	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
+-	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
+-
+ 	pci_set_master(adev->pdev);
+ 
+ 	/* enable interrupts */
+@@ -345,11 +333,17 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
+ 	u32 wptr, tmp;
+ 	struct amdgpu_ih_regs *ih_regs;
+ 
+-	wptr = le32_to_cpu(*ih->wptr_cpu);
+-	ih_regs = &ih->ih_regs;
++	if (ih == &adev->irq.ih) {
++		/* Only ring0 supports writeback. On other rings fall back
++		 * to register-based code with overflow checking below.
++		 */
++		wptr = le32_to_cpu(*ih->wptr_cpu);
+ 
+-	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+-		goto out;
++		if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
++			goto out;
++	}
++
++	ih_regs = &ih->ih_regs;
+ 
+ 	/* Double check that the overflow wasn't already cleared. */
+ 	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
+@@ -440,15 +434,11 @@ static int vega10_ih_self_irq(struct amdgpu_device *adev,
+ 			      struct amdgpu_irq_src *source,
+ 			      struct amdgpu_iv_entry *entry)
+ {
+-	uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+-
+ 	switch (entry->ring_id) {
+ 	case 1:
+-		*adev->irq.ih1.wptr_cpu = wptr;
+ 		schedule_work(&adev->irq.ih1_work);
+ 		break;
+ 	case 2:
+-		*adev->irq.ih2.wptr_cpu = wptr;
+ 		schedule_work(&adev->irq.ih2_work);
+ 		break;
+ 	default: break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+index 5a3c867d58811..86dcf448e0c22 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+@@ -104,6 +104,8 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ 
+ 	tmp = RREG32(ih_regs->ih_rb_cntl);
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
++
+ 	/* enable_intr field is only valid in ring0 */
+ 	if (ih == &adev->irq.ih)
+ 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+@@ -220,10 +222,8 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
+ 	tmp = vega20_ih_rb_cntl(ih, tmp);
+ 	if (ih == &adev->irq.ih)
+ 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+-	if (ih == &adev->irq.ih1) {
+-		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
++	if (ih == &adev->irq.ih1)
+ 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+-	}
+ 	if (amdgpu_sriov_vf(adev)) {
+ 		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ 			dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+@@ -297,7 +297,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
+ 	u32 ih_chicken;
+ 	int ret;
+ 	int i;
+-	u32 tmp;
+ 
+ 	/* disable irqs */
+ 	ret = vega20_ih_toggle_interrupts(adev, false);
+@@ -326,15 +325,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
+ 		}
+ 	}
+ 
+-	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
+-	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
+-			    CLIENT18_IS_STORM_CLIENT, 1);
+-	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
+-
+-	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
+-	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
+-	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
+-
+ 	pci_set_master(adev->pdev);
+ 
+ 	/* enable interrupts */
+@@ -380,11 +370,17 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
+ 	u32 wptr, tmp;
+ 	struct amdgpu_ih_regs *ih_regs;
+ 
+-	wptr = le32_to_cpu(*ih->wptr_cpu);
+-	ih_regs = &ih->ih_regs;
++	if (ih == &adev->irq.ih) {
++		/* Only ring0 supports writeback. On other rings fall back
++		 * to register-based code with overflow checking below.
++		 */
++		wptr = le32_to_cpu(*ih->wptr_cpu);
+ 
+-	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+-		goto out;
++		if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
++			goto out;
++	}
++
++	ih_regs = &ih->ih_regs;
+ 
+ 	/* Double check that the overflow wasn't already cleared. */
+ 	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
+@@ -476,15 +472,11 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
+ 			      struct amdgpu_irq_src *source,
+ 			      struct amdgpu_iv_entry *entry)
+ {
+-	uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+-
+ 	switch (entry->ring_id) {
+ 	case 1:
+-		*adev->irq.ih1.wptr_cpu = wptr;
+ 		schedule_work(&adev->irq.ih1_work);
+ 		break;
+ 	case 2:
+-		*adev->irq.ih2.wptr_cpu = wptr;
+ 		schedule_work(&adev->irq.ih2_work);
+ 		break;
+ 	default: break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+index 511712c2e382d..673d5e34f213c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+@@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
+ 
+ 	return single_open(file, show, NULL);
+ }
++static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
++{
++	seq_printf(m, "echo gpu_id > hang_hws\n");
++	return 0;
++}
+ 
+ static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
+ 	const char __user *user_buf, size_t size, loff_t *ppos)
+@@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
+ 	debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
+ 			    kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
+ 	debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
+-			    NULL, &kfd_debugfs_hang_hws_fops);
++			    kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
+ }
+ 
+ void kfd_debugfs_fini(void)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 4598a9a581251..a4266c4bca135 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1128,6 +1128,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
+ 
+ static int initialize_cpsch(struct device_queue_manager *dqm)
+ {
++	uint64_t num_sdma_queues;
++	uint64_t num_xgmi_sdma_queues;
++
+ 	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
+ 
+ 	mutex_init(&dqm->lock_hidden);
+@@ -1136,8 +1139,18 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+ 	dqm->active_cp_queue_count = 0;
+ 	dqm->gws_queue_count = 0;
+ 	dqm->active_runlist = false;
+-	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
+-	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
++
++	num_sdma_queues = get_num_sdma_queues(dqm);
++	if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
++		dqm->sdma_bitmap = ULLONG_MAX;
++	else
++		dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
++
++	num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
++	if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
++		dqm->xgmi_sdma_bitmap = ULLONG_MAX;
++	else
++		dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
+ 
+ 	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
+ 
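The initialize_cpsch() hunk sidesteps undefined behaviour when building an n-bit all-ones mask: ~0ULL >> (64 - n) shifts by 64 when n == 0, and BIT_ULL(n) - 1 shifts by 64 when n == 64, and both are UB in C. A sketch of the guarded form:

#include <stdint.h>

/* Return a mask with the low n bits set, for any n in [0, 64].
 * Both naive forms are UB at an edge: ~0ull >> (64 - n) at n == 0,
 * and (1ull << n) - 1 at n == 64. */
static uint64_t low_bits(unsigned n)
{
	if (n >= 64)
		return UINT64_MAX;
	return (UINT64_C(1) << n) - 1;
}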
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index d699a5cf6c118..29ca1708458c1 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1191,6 +1191,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+ 	if (adev->dm.dc)
+ 		dc_deinit_callbacks(adev->dm.dc);
+ #endif
++
++#if defined(CONFIG_DRM_AMD_DC_DCN)
++	if (adev->dm.vblank_workqueue) {
++		adev->dm.vblank_workqueue->dm = NULL;
++		kfree(adev->dm.vblank_workqueue);
++		adev->dm.vblank_workqueue = NULL;
++	}
++#endif
++
+ 	if (adev->dm.dc->ctx->dmub_srv) {
+ 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ 		adev->dm.dc->ctx->dmub_srv = NULL;
+@@ -5863,6 +5872,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 
+ 	} while (stream == NULL && requested_bpc >= 6);
+ 
++	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
++		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
++
++		aconnector->force_yuv420_output = true;
++		stream = create_validate_stream_for_sink(aconnector, drm_mode,
++						dm_state, old_stream);
++		aconnector->force_yuv420_output = false;
++	}
++
+ 	return stream;
+ }
+ 
+@@ -7417,10 +7435,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ 	int x, y;
+ 	int xorigin = 0, yorigin = 0;
+ 
+-	position->enable = false;
+-	position->x = 0;
+-	position->y = 0;
+-
+ 	if (!crtc || !plane->state->fb)
+ 		return 0;
+ 
+@@ -7467,7 +7481,7 @@ static void handle_cursor_update(struct drm_plane *plane,
+ 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
+ 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ 	uint64_t address = afb ? afb->address : 0;
+-	struct dc_cursor_position position;
++	struct dc_cursor_position position = {0};
+ 	struct dc_cursor_attributes attributes;
+ 	int ret;
+ 
+@@ -9383,7 +9397,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 	}
+ 
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+-	if (adev->asic_type >= CHIP_NAVI10) {
++	if (dc_resource_is_dsc_encoding_supported(dc)) {
+ 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ 				ret = add_affected_mst_dsc_crtcs(state, crtc);
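The two cursor hunks above move the zeroing of struct dc_cursor_position out of get_cursor_position() and into its caller via a designated initializer, so every early return still leaves the caller holding a fully defined struct. A small sketch of that pattern, using stand-in demo_* names rather than the real dc types:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_position {
        bool enable;
        int x, y;
    };

    static int demo_get_position(struct demo_position *pos, bool have_fb)
    {
        if (!have_fb)
            return 0;  /* early return: *pos was already zeroed by the caller */
        pos->enable = true;
        pos->x = 100;
        pos->y = 50;
        return 0;
    }

    int main(void)
    {
        struct demo_position pos = {0};  /* the caller owns the zeroing now */

        demo_get_position(&pos, false);
        printf("enable=%d x=%d y=%d\n", pos.enable, pos.x, pos.y);
        return 0;
    }
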
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 8bfe901cf2374..52cc817052802 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -68,18 +68,6 @@ struct common_irq_params {
+ 	enum dc_irq_source irq_src;
+ };
+ 
+-/**
+- * struct irq_list_head - Linked-list for low context IRQ handlers.
+- *
+- * @head: The list_head within &struct handler_data
+- * @work: A work_struct containing the deferred handler work
+- */
+-struct irq_list_head {
+-	struct list_head head;
+-	/* In case this interrupt needs post-processing, 'work' will be queued*/
+-	struct work_struct work;
+-};
+-
+ /**
+  * struct dm_compressor_info - Buffer info used by frame buffer compression
+  * @cpu_addr: MMIO cpu addr
+@@ -293,7 +281,7 @@ struct amdgpu_display_manager {
+ 	 * Note that handlers are called in the same order as they were
+ 	 * registered (FIFO).
+ 	 */
+-	struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
++	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+ 
+ 	/**
+ 	 * @irq_handler_list_high_tab:
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 360952129b6d5..29139b34dbe2b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -150,7 +150,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
+  *
+  * --- to get dp configuration
+  *
+- * cat link_settings
++ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
+  *
+  * It will list current, verified, reported, preferred dp configuration.
+  * current -- for current video mode
+@@ -163,7 +163,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
+  * echo <lane_count>  <link_rate> > link_settings
+  *
+  * for example, to force to  2 lane, 2.7GHz,
+- * echo 4 0xa > link_settings
++ * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
+  *
+  * spread_spectrum could not be changed dynamically.
+  *
+@@ -171,7 +171,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
+  * done. please check link settings after force operation to see if HW get
+  * programming.
+  *
+- * cat link_settings
++ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
+  *
+  * check current and preferred settings.
+  *
+@@ -255,7 +255,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ 	int max_param_num = 2;
+ 	uint8_t param_nums = 0;
+ 	long param[2];
+-	bool valid_input = false;
++	bool valid_input = true;
+ 
+ 	if (size == 0)
+ 		return -EINVAL;
+@@ -282,9 +282,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ 	case LANE_COUNT_ONE:
+ 	case LANE_COUNT_TWO:
+ 	case LANE_COUNT_FOUR:
+-		valid_input = true;
+ 		break;
+ 	default:
++		valid_input = false;
+ 		break;
+ 	}
+ 
+@@ -294,9 +294,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ 	case LINK_RATE_RBR2:
+ 	case LINK_RATE_HIGH2:
+ 	case LINK_RATE_HIGH3:
+-		valid_input = true;
+ 		break;
+ 	default:
++		valid_input = false;
+ 		break;
+ 	}
+ 
+@@ -310,10 +310,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ 	 * spread spectrum will not be changed
+ 	 */
+ 	prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
++	prefer_link_settings.use_link_rate_set = false;
+ 	prefer_link_settings.lane_count = param[0];
+ 	prefer_link_settings.link_rate = param[1];
+ 
+-	dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
++	dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
+ 
+ 	kfree(wr_buf);
+ 	return size;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+index e0000c180ed1f..8ce10d0973c5b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+@@ -82,6 +82,7 @@ struct amdgpu_dm_irq_handler_data {
+ 	struct amdgpu_display_manager *dm;
+ 	/* DAL irq source which registered for this interrupt. */
+ 	enum dc_irq_source irq_source;
++	struct work_struct work;
+ };
+ 
+ #define DM_IRQ_TABLE_LOCK(adev, flags) \
+@@ -111,20 +112,10 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
+  */
+ static void dm_irq_work_func(struct work_struct *work)
+ {
+-	struct irq_list_head *irq_list_head =
+-		container_of(work, struct irq_list_head, work);
+-	struct list_head *handler_list = &irq_list_head->head;
+-	struct amdgpu_dm_irq_handler_data *handler_data;
+-
+-	list_for_each_entry(handler_data, handler_list, list) {
+-		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+-				handler_data->irq_source);
++	struct amdgpu_dm_irq_handler_data *handler_data =
++		container_of(work, struct amdgpu_dm_irq_handler_data, work);
+ 
+-		DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
+-			handler_data->irq_source);
+-
+-		handler_data->handler(handler_data->handler_arg);
+-	}
++	handler_data->handler(handler_data->handler_arg);
+ 
+ 	/* Call a DAL subcomponent which registered for interrupt notification
+ 	 * at INTERRUPT_LOW_IRQ_CONTEXT.
+@@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
+ 		break;
+ 	case INTERRUPT_LOW_IRQ_CONTEXT:
+ 	default:
+-		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
++		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
+ 		break;
+ 	}
+ 
+@@ -290,7 +281,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+ 		break;
+ 	case INTERRUPT_LOW_IRQ_CONTEXT:
+ 	default:
+-		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
++		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
++		INIT_WORK(&handler_data->work, dm_irq_work_func);
+ 		break;
+ 	}
+ 
+@@ -372,7 +364,7 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
+ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
+ {
+ 	int src;
+-	struct irq_list_head *lh;
++	struct list_head *lh;
+ 
+ 	DRM_DEBUG_KMS("DM_IRQ\n");
+ 
+@@ -381,9 +373,7 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
+ 	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+ 		/* low context handler list init */
+ 		lh = &adev->dm.irq_handler_list_low_tab[src];
+-		INIT_LIST_HEAD(&lh->head);
+-		INIT_WORK(&lh->work, dm_irq_work_func);
+-
++		INIT_LIST_HEAD(lh);
+ 		/* high context handler init */
+ 		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
+ 	}
+@@ -400,8 +390,11 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
+ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
+ {
+ 	int src;
+-	struct irq_list_head *lh;
++	struct list_head *lh;
++	struct list_head *entry, *tmp;
++	struct amdgpu_dm_irq_handler_data *handler;
+ 	unsigned long irq_table_flags;
++
+ 	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
+ 	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+ 		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+@@ -410,7 +403,16 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
+ 		 * (because no code can schedule a new one). */
+ 		lh = &adev->dm.irq_handler_list_low_tab[src];
+ 		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+-		flush_work(&lh->work);
++
++		if (!list_empty(lh)) {
++			list_for_each_safe(entry, tmp, lh) {
++				handler = list_entry(
++					entry,
++					struct amdgpu_dm_irq_handler_data,
++					list);
++				flush_work(&handler->work);
++			}
++		}
+ 	}
+ }
+ 
+@@ -420,6 +422,8 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+ 	struct list_head *hnd_list_h;
+ 	struct list_head *hnd_list_l;
+ 	unsigned long irq_table_flags;
++	struct list_head *entry, *tmp;
++	struct amdgpu_dm_irq_handler_data *handler;
+ 
+ 	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ 
+@@ -430,14 +434,22 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+ 	 * will be disabled from manage_dm_interrupts on disable CRTC.
+ 	 */
+ 	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
++		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
+ 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ 			dc_interrupt_set(adev->dm.dc, src, false);
+ 
+ 		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+-		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
+ 
++		if (!list_empty(hnd_list_l)) {
++			list_for_each_safe(entry, tmp, hnd_list_l) {
++				handler = list_entry(
++					entry,
++					struct amdgpu_dm_irq_handler_data,
++					list);
++				flush_work(&handler->work);
++			}
++		}
+ 		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ 	}
+ 
+@@ -457,7 +469,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+ 
+ 	/* re-enable short pulse interrupts HW interrupt */
+ 	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
++		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
+ 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ 			dc_interrupt_set(adev->dm.dc, src, true);
+@@ -483,7 +495,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+ 	 * will be enabled from manage_dm_interrupts on enable CRTC.
+ 	 */
+ 	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
+-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
++		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
+ 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ 			dc_interrupt_set(adev->dm.dc, src, true);
+@@ -500,22 +512,53 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
+ 					enum dc_irq_source irq_source)
+ {
+-	unsigned long irq_table_flags;
+-	struct work_struct *work = NULL;
++	struct  list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
++	struct  amdgpu_dm_irq_handler_data *handler_data;
++	bool    work_queued = false;
+ 
+-	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
++	if (list_empty(handler_list))
++		return;
++
++	list_for_each_entry(handler_data, handler_list, list) {
++		if (!queue_work(system_highpri_wq, &handler_data->work)) {
++			continue;
++		} else {
++			work_queued = true;
++			break;
++		}
++	}
+ 
+-	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
+-		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
++	if (!work_queued) {
++		struct  amdgpu_dm_irq_handler_data *handler_data_add;
++		/* get the amdgpu_dm_irq_handler_data of the first item in handler_list */
++		handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
+ 
+-	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
++		/* allocate a new amdgpu_dm_irq_handler_data */
++		handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
++		if (!handler_data_add) {
++			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
++			return;
++		}
+ 
+-	if (work) {
+-		if (!schedule_work(work))
+-			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
+-						irq_source);
+-	}
++		/* copy members from the existing handler_data into the new one */
++		handler_data_add->handler       = handler_data->handler;
++		handler_data_add->handler_arg   = handler_data->handler_arg;
++		handler_data_add->dm            = handler_data->dm;
++		handler_data_add->irq_source    = irq_source;
+ 
++		list_add_tail(&handler_data_add->list, handler_list);
++
++		INIT_WORK(&handler_data_add->work, dm_irq_work_func);
++
++		if (queue_work(system_highpri_wq, &handler_data_add->work))
++			DRM_DEBUG("Queued work for handling interrupt from "
++				  "display for IRQ source %d\n",
++				  irq_source);
++		else
++			DRM_ERROR("Failed to queue work for handling interrupt "
++				  "from display for IRQ source %d\n",
++				  irq_source);
++	}
+ }
+ 
+ /*
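The interrupt rework above replaces the single shared work_struct per IRQ source with one work item embedded in each amdgpu_dm_irq_handler_data, cloning a handler entry when its own work item is still queued so that a second interrupt is not silently dropped while the first is being processed. Below is a module-style sketch of the per-handler ownership pattern only; the demo_* names are hypothetical and the cloning logic is deliberately omitted:

    #include <linux/module.h>
    #include <linux/workqueue.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_handler {
        struct list_head list;
        struct work_struct work;  /* one work item per handler */
        void (*fn)(void *arg);
        void *arg;
    };

    static LIST_HEAD(demo_handlers);

    static void demo_work_func(struct work_struct *work)
    {
        struct demo_handler *h = container_of(work, struct demo_handler, work);

        h->fn(h->arg);  /* deferred handler body */
    }

    static void demo_say_hello(void *arg)
    {
        pr_info("demo: handler %s ran\n", (const char *)arg);
    }

    static int __init demo_init(void)
    {
        struct demo_handler *h = kzalloc(sizeof(*h), GFP_KERNEL);

        if (!h)
            return -ENOMEM;
        h->fn = demo_say_hello;
        h->arg = "A";
        INIT_WORK(&h->work, demo_work_func);
        list_add_tail(&h->list, &demo_handlers);

        /* Interrupt-path analogue: each handler has its own work item,
         * so queueing one never blocks or replaces another. */
        queue_work(system_highpri_wq, &h->work);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        struct demo_handler *h, *tmp;

        list_for_each_entry_safe(h, tmp, &demo_handlers, list) {
            flush_work(&h->work);  /* mirrors the per-handler flush in fini */
            list_del(&h->list);
            kfree(h);
        }
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
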
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+index 995ffbbf64e7c..1ee27f2f28f1d 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+@@ -217,6 +217,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
+ 		if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+ 			dcn3_clk_mgr_destroy(clk_mgr);
+ 		}
++		if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
++			dcn3_clk_mgr_destroy(clk_mgr);
++		}
+ 		break;
+ 
+ 	case FAMILY_VGH:
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+index c7e5a64e06afb..81ea5d3a1947b 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+@@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	bool force_reset = false;
+ 	bool update_uclk = false;
+ 	bool p_state_change_support;
++	int total_plane_count;
+ 
+ 	if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
+ 		return;
+@@ -292,7 +293,8 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
+ 		clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
+ 
+ 	clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
+-	p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
++	total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
++	p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ 	if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ 		clk_mgr_base->clks.p_state_change_support = p_state_change_support;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 8f8a13c7cf73d..c0b827d162684 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2398,7 +2398,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ 					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
+ 						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+ 
+-					dc->hwss.optimize_bandwidth(dc, dc->current_state);
++					dc->optimized_required = true;
++
+ 				} else {
+ 					if (dc->optimize_seamless_boot_streams == 0)
+ 						dc->hwss.prepare_bandwidth(dc, dc->current_state);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+index 277484cf853e4..d4be5954d7aa3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+@@ -99,7 +99,6 @@ struct dce110_aux_registers {
+ 	AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ 	AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ 	AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+-	AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ 	AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ 	AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ 	AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+index 69e34bef274c1..febccb35ddade 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+@@ -81,13 +81,18 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
+ {
+ 	struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+ 	uint32_t raw_state;
++	enum dmub_status status = DMUB_STATUS_INVALID;
+ 
+ 	// Send gpint command and wait for ack
+-	dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
+-
+-	dmub_srv_get_gpint_response(srv, &raw_state);
+-
+-	*state = convert_psr_state(raw_state);
++	status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
++
++	if (status == DMUB_STATUS_OK) {
++		// GPINT was executed, get response
++		dmub_srv_get_gpint_response(srv, &raw_state);
++		*state = convert_psr_state(raw_state);
++	} else
++		// Return invalid state when GPINT times out
++		*state = 0xFF;
+ }
+ 
+ /*
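The PSR hunk above stops trusting the GPINT response unless dmub_srv_send_gpint_command() actually returned DMUB_STATUS_OK; on a timeout the state becomes an 0xFF sentinel instead of whatever stale value the previous query left in the response register. The check-before-read shape, reduced to a standalone sketch with stand-in demo_* names:

    #include <stdint.h>
    #include <stdio.h>

    enum demo_status { DEMO_OK, DEMO_TIMEOUT };

    static enum demo_status demo_send(uint32_t *resp)
    {
        (void)resp;  /* pretend the firmware never acked */
        return DEMO_TIMEOUT;
    }

    static uint32_t demo_get_state(void)
    {
        uint32_t raw = 0;

        if (demo_send(&raw) == DEMO_OK)
            return raw;  /* the response is only valid after an ack */
        return 0xFF;     /* sentinel mirroring the hunk above */
    }

    int main(void)
    {
        printf("state=0x%x\n", demo_get_state());  /* state=0xff */
        return 0;
    }
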
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 06dc1e2e8383a..07c8d2e2c09c2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -848,7 +848,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ 
+ 					cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
+ 					cmd.mall.cursor_copy_dst.quad_part =
+-							plane->address.grph.cursor_cache_addr.quad_part;
++							(plane->address.grph.cursor_cache_addr.quad_part + 2047) & ~2047;
+ 					cmd.mall.cursor_width = cursor_attr.width;
+ 					cmd.mall.cursor_height = cursor_attr.height;
+ 					cmd.mall.cursor_pitch = cursor_attr.pitch;
+@@ -858,8 +858,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ 					dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ 
+ 					/* Use copied cursor, and it's okay to not switch back */
+-					cursor_attr.address.quad_part =
+-							plane->address.grph.cursor_cache_addr.quad_part;
++					cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
+ 					dc_stream_set_cursor_attributes(stream, &cursor_attr);
+ 				}
+ 
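The cursor-cache hunk rounds the destination address up to the next 2 KiB boundary with (addr + 2047) & ~2047, then reuses that aligned destination when repointing the cursor, presumably because the DMUB copy requires 2 KiB alignment. The general power-of-two round-up, as a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* align must be a power of two for the mask trick to hold */
    static uint64_t align_up(uint64_t addr, uint64_t align)
    {
        return (addr + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)align_up(0x12345, 2048)); /* 0x12800 */
        return 0;
    }
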
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+index 3e6f76096119c..a7598356f37d2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+@@ -143,16 +143,18 @@ static void mpc3_power_on_ogam_lut(
+ {
+ 	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ 
+-	if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
+-		// Force power on
+-		REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
+-		// Wait for confirmation when powering on
+-		if (power_on)
+-			REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
+-	} else {
+-		REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
+-				MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
+-	}
++	/*
++	 * Powering on: force memory active so the LUT can be updated.
++	 * Powering off: allow entering memory low power mode
++	 *
++	 * Memory low power mode is controlled during MPC OGAM LUT init.
++	 */
++	REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id],
++		   MPCC_OGAM_MEM_PWR_DIS, power_on != 0);
++
++	/* Wait for memory to be powered on - we won't be able to write to it otherwise. */
++	if (power_on)
++		REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
+ }
+ 
+ static void mpc3_configure_ogam_lut(
+@@ -1427,7 +1429,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
+ 	.acquire_rmu = mpcc3_acquire_rmu,
+ 	.program_3dlut = mpc3_program_3dlut,
+ 	.release_rmu = mpcc3_release_rmu,
+-	.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
++	.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
+ 	.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ 
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index fb7f1dea3c467..71e2d5e025710 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -181,7 +181,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
+ 		},
+ 	.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
+ 	.num_states = 1,
+-	.sr_exit_time_us = 12,
++	.sr_exit_time_us = 15.5,
+ 	.sr_enter_plus_exit_time_us = 20,
+ 	.urgent_latency_us = 4.0,
+ 	.urgent_latency_pixel_data_only_us = 4.0,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+index 4b659b63f75b3..d03b1975e4178 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+@@ -164,7 +164,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
+ 
+ 		.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
+ 		.num_states = 1,
+-		.sr_exit_time_us = 12,
++		.sr_exit_time_us = 15.5,
+ 		.sr_enter_plus_exit_time_us = 20,
+ 		.urgent_latency_us = 4.0,
+ 		.urgent_latency_pixel_data_only_us = 4.0,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+index 0f3f510fd83b8..9729cf292e849 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+@@ -3437,6 +3437,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 			mode_lib->vba.DCCEnabledInAnyPlane = true;
+ 		}
+ 	}
++	mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
+ 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ 		locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
+ 				mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+index 210c96cd5b03b..51098c2c98548 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+@@ -3544,6 +3544,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ 			mode_lib->vba.DCCEnabledInAnyPlane = true;
+ 		}
+ 	}
++	mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
+ 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ 		locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
+ 				mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+index 72423dc425dc0..799bae229e679 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ 	if (surf_linear) {
+ 		log2_swath_height_l = 0;
+ 		log2_swath_height_c = 0;
+-	} else if (!surf_vert) {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ 	} else {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
++		unsigned int swath_height_l;
++		unsigned int swath_height_c;
++
++		if (!surf_vert) {
++			swath_height_l = rq_param->misc.rq_l.blk256_height;
++			swath_height_c = rq_param->misc.rq_c.blk256_height;
++		} else {
++			swath_height_l = rq_param->misc.rq_l.blk256_width;
++			swath_height_c = rq_param->misc.rq_c.blk256_width;
++		}
++
++		if (swath_height_l > 0)
++			log2_swath_height_l = dml_log2(swath_height_l);
++
++		if (req128_l && log2_swath_height_l > 0)
++			log2_swath_height_l -= 1;
++
++		if (swath_height_c > 0)
++			log2_swath_height_c = dml_log2(swath_height_c);
++
++		if (req128_c && log2_swath_height_c > 0)
++			log2_swath_height_c -= 1;
+ 	}
++
+ 	rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ 	rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+ 
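The swath-height hunk above (repeated below for the 20v2, 21, 30 and dml1 variants of the same calculator) guards two failure modes in the old one-liner: a zero block dimension reaching dml_log2(), and the req128 subtraction underflowing so that the later 1 << log2_swath_height shifts by a negative count. A standalone sketch of the clamped computation, with the hypothetical ilog2u standing in for dml_log2:

    #include <stdio.h>

    static unsigned int ilog2u(unsigned int v)  /* floor(log2(v)), v > 0 */
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    static unsigned int swath_log2(unsigned int blk256, int req128)
    {
        unsigned int l = 0;

        if (blk256 > 0)
            l = ilog2u(blk256);
        if (req128 && l > 0)
            l--;  /* halve the swath, but never below one line */
        return l;
    }

    int main(void)
    {
        printf("%u\n", 1u << swath_log2(0, 1));  /* 1; shifted by -1 before the fix */
        printf("%u\n", 1u << swath_log2(8, 1));  /* 4 */
        return 0;
    }
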
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+index 9c78446c3a9d8..6a6d5970d1d58 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ 	if (surf_linear) {
+ 		log2_swath_height_l = 0;
+ 		log2_swath_height_c = 0;
+-	} else if (!surf_vert) {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ 	} else {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
++		unsigned int swath_height_l;
++		unsigned int swath_height_c;
++
++		if (!surf_vert) {
++			swath_height_l = rq_param->misc.rq_l.blk256_height;
++			swath_height_c = rq_param->misc.rq_c.blk256_height;
++		} else {
++			swath_height_l = rq_param->misc.rq_l.blk256_width;
++			swath_height_c = rq_param->misc.rq_c.blk256_width;
++		}
++
++		if (swath_height_l > 0)
++			log2_swath_height_l = dml_log2(swath_height_l);
++
++		if (req128_l && log2_swath_height_l > 0)
++			log2_swath_height_l -= 1;
++
++		if (swath_height_c > 0)
++			log2_swath_height_c = dml_log2(swath_height_c);
++
++		if (req128_c && log2_swath_height_c > 0)
++			log2_swath_height_c -= 1;
+ 	}
++
+ 	rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ 	rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+index edd41d3582910..dc1c81a6e3771 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+@@ -277,13 +277,31 @@ static void handle_det_buf_split(
+ 	if (surf_linear) {
+ 		log2_swath_height_l = 0;
+ 		log2_swath_height_c = 0;
+-	} else if (!surf_vert) {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ 	} else {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
++		unsigned int swath_height_l;
++		unsigned int swath_height_c;
++
++		if (!surf_vert) {
++			swath_height_l = rq_param->misc.rq_l.blk256_height;
++			swath_height_c = rq_param->misc.rq_c.blk256_height;
++		} else {
++			swath_height_l = rq_param->misc.rq_l.blk256_width;
++			swath_height_c = rq_param->misc.rq_c.blk256_width;
++		}
++
++		if (swath_height_l > 0)
++			log2_swath_height_l = dml_log2(swath_height_l);
++
++		if (req128_l && log2_swath_height_l > 0)
++			log2_swath_height_l -= 1;
++
++		if (swath_height_c > 0)
++			log2_swath_height_c = dml_log2(swath_height_c);
++
++		if (req128_c && log2_swath_height_c > 0)
++			log2_swath_height_c -= 1;
+ 	}
++
+ 	rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ 	rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+index 0f14f205ebe59..04601a767a8f1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+@@ -237,13 +237,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ 	if (surf_linear) {
+ 		log2_swath_height_l = 0;
+ 		log2_swath_height_c = 0;
+-	} else if (!surf_vert) {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ 	} else {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
++		unsigned int swath_height_l;
++		unsigned int swath_height_c;
++
++		if (!surf_vert) {
++			swath_height_l = rq_param->misc.rq_l.blk256_height;
++			swath_height_c = rq_param->misc.rq_c.blk256_height;
++		} else {
++			swath_height_l = rq_param->misc.rq_l.blk256_width;
++			swath_height_c = rq_param->misc.rq_c.blk256_width;
++		}
++
++		if (swath_height_l > 0)
++			log2_swath_height_l = dml_log2(swath_height_l);
++
++		if (req128_l && log2_swath_height_l > 0)
++			log2_swath_height_l -= 1;
++
++		if (swath_height_c > 0)
++			log2_swath_height_c = dml_log2(swath_height_c);
++
++		if (req128_c && log2_swath_height_c > 0)
++			log2_swath_height_c -= 1;
+ 	}
++
+ 	rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ 	rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+index 4c3e9cc301679..414da64f57340 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+@@ -344,13 +344,31 @@ static void handle_det_buf_split(
+ 	if (surf_linear) {
+ 		log2_swath_height_l = 0;
+ 		log2_swath_height_c = 0;
+-	} else if (!surf_vert) {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ 	} else {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
++		unsigned int swath_height_l;
++		unsigned int swath_height_c;
++
++		if (!surf_vert) {
++			swath_height_l = rq_param->misc.rq_l.blk256_height;
++			swath_height_c = rq_param->misc.rq_c.blk256_height;
++		} else {
++			swath_height_l = rq_param->misc.rq_l.blk256_width;
++			swath_height_c = rq_param->misc.rq_c.blk256_width;
++		}
++
++		if (swath_height_l > 0)
++			log2_swath_height_l = dml_log2(swath_height_l);
++
++		if (req128_l && log2_swath_height_l > 0)
++			log2_swath_height_l -= 1;
++
++		if (swath_height_c > 0)
++			log2_swath_height_c = dml_log2(swath_height_c);
++
++		if (req128_c && log2_swath_height_c > 0)
++			log2_swath_height_c -= 1;
+ 	}
++
+ 	rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ 	rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+index ed05a30d1139d..e2a56a7f3d7a0 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1526,20 +1526,6 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ 
+ 		smu10_data->gfx_actual_soft_min_freq = min_freq;
+ 		smu10_data->gfx_actual_soft_max_freq = max_freq;
+-
+-		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+-					PPSMC_MSG_SetHardMinGfxClk,
+-					min_freq,
+-					NULL);
+-		if (ret)
+-			return ret;
+-
+-		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+-					PPSMC_MSG_SetSoftMaxGfxClk,
+-					max_freq,
+-					NULL);
+-		if (ret)
+-			return ret;
+ 	} else if (type == PP_OD_COMMIT_DPM_TABLE) {
+ 		if (size != 0) {
+ 			pr_err("Input parameter number not correct\n");
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index 599ec97266018..959143eff6510 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -5160,7 +5160,7 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ 
+ out:
+ 	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+-						1 << power_profile_mode,
++						(!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
+ 						NULL);
+ 	hwmgr->power_profile_mode = power_profile_mode;
+ 
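The vega10 hunk corrects the workload mask sent to the SMC: judging from the change, profile mode N maps to firmware bit N - 1, and mode 0 (the bootup default) selects no bit at all, so the old 1 << power_profile_mode was off by one and could never express the empty mask. A sketch of the assumed mapping:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t workload_mask(unsigned int mode)
    {
        return mode ? 1u << (mode - 1) : 0;  /* mode 0 selects no profile bit */
    }

    int main(void)
    {
        printf("mode 0 -> 0x%x\n", workload_mask(0));  /* 0x0 */
        printf("mode 1 -> 0x%x\n", workload_mask(1));  /* 0x1, was 0x2 before the fix */
        return 0;
    }
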
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index cd905e41080e5..42c4dbe3e3624 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -279,35 +279,25 @@ static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_
+ 	if (smu->adev->in_suspend)
+ 		return;
+ 
+-	/*
+-	 * mclk, fclk and socclk are interdependent
+-	 * on each other
+-	 */
+ 	if (clk == SMU_MCLK) {
+-		/* reset clock dependency */
+ 		smu->user_dpm_profile.clk_dependency = 0;
+-		/* set mclk dependent clocks(fclk and socclk) */
+ 		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
+ 	} else if (clk == SMU_FCLK) {
+-		/* give priority to mclk, if mclk dependent clocks are set */
++		/* MCLK takes precedence over FCLK */
+ 		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
+ 			return;
+ 
+-		/* reset clock dependency */
+ 		smu->user_dpm_profile.clk_dependency = 0;
+-		/* set fclk dependent clocks(mclk and socclk) */
+ 		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
+ 	} else if (clk == SMU_SOCCLK) {
+-		/* give priority to mclk, if mclk dependent clocks are set */
++		/* MCLK takes precedence over SOCCLK */
+ 		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
+ 			return;
+ 
+-		/* reset clock dependency */
+ 		smu->user_dpm_profile.clk_dependency = 0;
+-		/* set socclk dependent clocks(mclk and fclk) */
+ 		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
+ 	} else
+-		/* add clk dependencies here, if any */
++		/* Add clk dependencies here, if any */
+ 		return;
+ }
+ 
+@@ -331,7 +321,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
+ 		return;
+ 
+ 	/* Enable restore flag */
+-	smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE;
++	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
+ 
+ 	/* set the user dpm power limit */
+ 	if (smu->user_dpm_profile.power_limit) {
+@@ -354,8 +344,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
+ 				ret = smu_force_clk_levels(smu, clk_type,
+ 						smu->user_dpm_profile.clk_mask[clk_type]);
+ 				if (ret)
+-					dev_err(smu->adev->dev, "Failed to set clock type = %d\n",
+-							clk_type);
++					dev_err(smu->adev->dev,
++						"Failed to set clock type = %d\n", clk_type);
+ 			}
+ 		}
+ 	}
+@@ -1777,7 +1767,7 @@ int smu_force_clk_levels(struct smu_context *smu,
+ 
+ 	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
+ 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+-		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) {
++		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+ 			smu->user_dpm_profile.clk_mask[clk_type] = mask;
+ 			smu_set_user_clk_dependencies(smu, clk_type);
+ 		}
+@@ -2034,7 +2024,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
+ 	if (smu->ppt_funcs->set_fan_speed_percent) {
+ 		percent = speed * 100 / smu->fan_max_rpm;
+ 		ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
+-		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
++		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
+ 			smu->user_dpm_profile.fan_speed_percent = percent;
+ 	}
+ 
+@@ -2104,7 +2094,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+ 
+ 	if (smu->ppt_funcs->set_power_limit) {
+ 		ret = smu->ppt_funcs->set_power_limit(smu, limit);
+-		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
++		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
+ 			smu->user_dpm_profile.power_limit = limit;
+ 	}
+ 
+@@ -2285,7 +2275,7 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
+ 
+ 	if (smu->ppt_funcs->set_fan_control_mode) {
+ 		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+-		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
++		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
+ 			smu->user_dpm_profile.fan_mode = value;
+ 	}
+ 
+@@ -2293,7 +2283,7 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
+ 
+ 	/* reset user dpm fan speed */
+ 	if (!ret && value != AMD_FAN_CTRL_MANUAL &&
+-			smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
++			!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
+ 		smu->user_dpm_profile.fan_speed_percent = 0;
+ 
+ 	return ret;
+@@ -2335,7 +2325,7 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+ 		if (speed > 100)
+ 			speed = 100;
+ 		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+-		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
++		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
+ 			smu->user_dpm_profile.fan_speed_percent = speed;
+ 	}
+ 
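The remaining amdgpu_smu.c hunks treat user_dpm_profile.flags as what it is, a bit field: the restore path now ORs SMU_DPM_USER_PROFILE_RESTORE in rather than overwriting the whole word, and every reader tests the bit with & instead of comparing the full value, which would misfire as soon as any other flag is set. A two-flag sketch of why that matters:

    #include <stdio.h>

    #define RESTORE (1u << 0)
    #define OTHER   (1u << 1)

    int main(void)
    {
        unsigned int flags = OTHER;

        flags |= RESTORE;          /* keeps OTHER intact; plain = would clear it */
        if (!(flags & RESTORE))    /* test the bit, not whole-word equality */
            printf("would wrongly record user settings during restore\n");
        printf("flags=0x%x\n", flags);  /* 0x3 */
        return 0;
    }
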
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+index 101eaa20db9bd..a80f551771b93 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+@@ -1462,7 +1462,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
+ 					long input[], uint32_t size)
+ {
+ 	int ret = 0;
+-	int i;
+ 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ 
+ 	if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
+@@ -1535,43 +1534,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
+ 			smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+ 			smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ 			smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+-
+-			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+-									smu->gfx_actual_hard_min_freq, NULL);
+-			if (ret) {
+-				dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
+-				return ret;
+-			}
+-
+-			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+-									smu->gfx_actual_soft_max_freq, NULL);
+-			if (ret) {
+-				dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
+-				return ret;
+-			}
+-
+-			if (smu->adev->pm.fw_version < 0x43f1b00) {
+-				dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
+-				break;
+-			}
+-
+-			for (i = 0; i < smu->cpu_core_num; i++) {
+-				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+-								      (i << 20) | smu->cpu_actual_soft_min_freq,
+-								      NULL);
+-				if (ret) {
+-					dev_err(smu->adev->dev, "Set hard min cclk failed!");
+-					return ret;
+-				}
+-
+-				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+-								      (i << 20) | smu->cpu_actual_soft_max_freq,
+-								      NULL);
+-				if (ret) {
+-					dev_err(smu->adev->dev, "Set soft max cclk failed!");
+-					return ret;
+-				}
+-			}
+ 		}
+ 		break;
+ 	case PP_OD_COMMIT_DPM_TABLE:
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+index 5493388fcb10c..dbe6d0caddb78 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+@@ -389,24 +389,6 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
+ 		}
+ 		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ 		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+-
+-		ret = smu_cmn_send_smc_msg_with_param(smu,
+-								SMU_MSG_SetHardMinGfxClk,
+-								smu->gfx_actual_hard_min_freq,
+-								NULL);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
+-			return ret;
+-		}
+-
+-		ret = smu_cmn_send_smc_msg_with_param(smu,
+-								SMU_MSG_SetSoftMaxGfxClk,
+-								smu->gfx_actual_soft_max_freq,
+-								NULL);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
+-			return ret;
+-		}
+ 		break;
+ 	case PP_OD_COMMIT_DPM_TABLE:
+ 		if (size != 0) {
+diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
+index 3bc383d5bf73d..49a1d7f3539c2 100644
+--- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
++++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
+@@ -13,9 +13,6 @@
+ #define has_bit(nr, mask)	(BIT(nr) & (mask))
+ #define has_bits(bits, mask)	(((bits) & (mask)) == (bits))
+ 
+-#define dp_for_each_set_bit(bit, mask) \
+-	for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
+-
+ #define dp_wait_cond(__cond, __tries, __min_range, __max_range)	\
+ ({							\
+ 	int num_tries = __tries;			\
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+index 719a79728e24f..06c595378dda2 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+@@ -46,8 +46,9 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
+ {
+ 	struct komeda_component *c;
+ 	int i;
++	unsigned long avail_comps = pipe->avail_comps;
+ 
+-	dp_for_each_set_bit(i, pipe->avail_comps) {
++	for_each_set_bit(i, &avail_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, i);
+ 		komeda_component_destroy(mdev, c);
+ 	}
+@@ -247,6 +248,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
+ {
+ 	struct komeda_component *c;
+ 	int id;
++	unsigned long avail_comps = pipe->avail_comps;
+ 
+ 	DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
+ 		 pipe->id, pipe->n_layers, pipe->n_scalers,
+@@ -258,7 +260,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
+ 		 pipe->of_output_links[1] ?
+ 		 pipe->of_output_links[1]->full_name : "none");
+ 
+-	dp_for_each_set_bit(id, pipe->avail_comps) {
++	for_each_set_bit(id, &avail_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 
+ 		komeda_component_dump(c);
+@@ -270,8 +272,9 @@ static void komeda_component_verify_inputs(struct komeda_component *c)
+ 	struct komeda_pipeline *pipe = c->pipeline;
+ 	struct komeda_component *input;
+ 	int id;
++	unsigned long supported_inputs = c->supported_inputs;
+ 
+-	dp_for_each_set_bit(id, c->supported_inputs) {
++	for_each_set_bit(id, &supported_inputs, 32) {
+ 		input = komeda_pipeline_get_component(pipe, id);
+ 		if (!input) {
+ 			c->supported_inputs &= ~(BIT(id));
+@@ -302,8 +305,9 @@ static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
+ 	struct komeda_component *c;
+ 	struct komeda_layer *layer;
+ 	int i, id;
++	unsigned long avail_comps = pipe->avail_comps;
+ 
+-	dp_for_each_set_bit(id, pipe->avail_comps) {
++	for_each_set_bit(id, &avail_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 		komeda_component_verify_inputs(c);
+ 	}
+@@ -355,13 +359,15 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
+ {
+ 	struct komeda_component *c;
+ 	u32 id;
++	unsigned long avail_comps;
+ 
+ 	seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
+ 
+ 	if (pipe->funcs && pipe->funcs->dump_register)
+ 		pipe->funcs->dump_register(pipe, sf);
+ 
+-	dp_for_each_set_bit(id, pipe->avail_comps) {
++	avail_comps = pipe->avail_comps;
++	for_each_set_bit(id, &avail_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 
+ 		seq_printf(sf, "\n------%s------\n", c->name);
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+index 5c085116de3f8..e672b9cffee3c 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+@@ -1231,14 +1231,15 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ 	struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
+ 	struct komeda_component_state *c_st;
+ 	struct komeda_component *c;
+-	u32 disabling_comps, id;
++	u32 id;
++	unsigned long disabling_comps;
+ 
+ 	WARN_ON(!old);
+ 
+ 	disabling_comps = (~new->active_comps) & old->active_comps;
+ 
+ 	/* unbound all disabling component */
+-	dp_for_each_set_bit(id, disabling_comps) {
++	for_each_set_bit(id, &disabling_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 		c_st = komeda_component_get_state_and_set_user(c,
+ 				drm_st, NULL, new->crtc);
+@@ -1286,7 +1287,8 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ 	struct komeda_pipeline_state *old;
+ 	struct komeda_component *c;
+ 	struct komeda_component_state *c_st;
+-	u32 id, disabling_comps = 0;
++	u32 id;
++	unsigned long disabling_comps;
+ 
+ 	old = komeda_pipeline_get_old_state(pipe, old_state);
+ 
+@@ -1296,10 +1298,10 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ 		disabling_comps = old->active_comps &
+ 				  pipe->standalone_disabled_comps;
+ 
+-	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
++	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
+ 			 pipe->id, old->active_comps, disabling_comps);
+ 
+-	dp_for_each_set_bit(id, disabling_comps) {
++	for_each_set_bit(id, &disabling_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 		c_st = priv_to_comp_st(c->obj.state);
+ 
+@@ -1330,16 +1332,17 @@ void komeda_pipeline_update(struct komeda_pipeline *pipe,
+ 	struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
+ 	struct komeda_pipeline_state *old;
+ 	struct komeda_component *c;
+-	u32 id, changed_comps = 0;
++	u32 id;
++	unsigned long changed_comps;
+ 
+ 	old = komeda_pipeline_get_old_state(pipe, old_state);
+ 
+ 	changed_comps = new->active_comps | old->active_comps;
+ 
+-	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
++	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
+ 			 pipe->id, new->active_comps, changed_comps);
+ 
+-	dp_for_each_set_bit(id, changed_comps) {
++	for_each_set_bit(id, &changed_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 
+ 		if (new->active_comps & BIT(c->id))
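The komeda changes drop dp_for_each_set_bit(), whose cast of a u32 field's address to unsigned long * made for_each_set_bit() load a full 8-byte unsigned long on 64-bit kernels, reading past the field into neighbouring memory. Copying the mask into a real unsigned long first, as every call site now does, keeps the walker inside a correctly sized object. A standalone sketch of the safe shape:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t avail_comps = 0x8005;
        unsigned long mask = avail_comps;  /* widen once, into a local of the right type */

        for (unsigned int bit = 0; bit < 32; bit++)
            if (mask & (1ul << bit))
                printf("component %u present\n", bit);  /* bits 0, 2, 15 */
        return 0;
    }
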
+diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
+index ea8164e7a6dc6..01837bea18c25 100644
+--- a/drivers/gpu/drm/ast/ast_drv.c
++++ b/drivers/gpu/drm/ast/ast_drv.c
+@@ -30,6 +30,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ 
++#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fb_helper.h>
+@@ -138,6 +139,7 @@ static void ast_pci_remove(struct pci_dev *pdev)
+ 	struct drm_device *dev = pci_get_drvdata(pdev);
+ 
+ 	drm_dev_unregister(dev);
++	drm_atomic_helper_shutdown(dev);
+ }
+ 
+ static int ast_drm_freeze(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 988b270fea5eb..758c69aa7232c 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -688,7 +688,7 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
+ 	unsigned int offset_x, offset_y;
+ 
+ 	offset_x = AST_MAX_HWC_WIDTH - fb->width;
+-	offset_y = AST_MAX_HWC_WIDTH - fb->height;
++	offset_y = AST_MAX_HWC_HEIGHT - fb->height;
+ 
+ 	if (state->fb != old_state->fb) {
+ 		/* A new cursor image was installed. */
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 58f5dc2f6dd52..f6bdec7fa9253 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data onegx1_pro = {
++	.width = 1200,
++	.height = 1920,
++	.bios_dates = (const char * const []){ "12/17/2020", NULL },
++	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
+ 	.width = 720,
+ 	.height = 1280,
+@@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
++	}, {	/* OneGX1 Pro */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
++		},
++		.driver_data = (void *)&onegx1_pro,
+ 	}, {	/* VIOS LTH17 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 4b4d8d034782f..4ba20f959a71a 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2993,7 +2993,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
+ 
+ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ 				   const char *name,
+-				   const u16 wm[8])
++				   const u16 wm[])
+ {
+ 	int level, max_level = ilk_wm_max_level(dev_priv);
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index 91cf46f840257..3d55e153fa9c3 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -246,7 +246,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
+ }
+ 
+ struct a6xx_gmu_oob_bits {
+-	int set, ack, set_new, ack_new;
++	int set, ack, set_new, ack_new, clear, clear_new;
+ 	const char *name;
+ };
+ 
+@@ -260,6 +260,8 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
+ 		.ack = 24,
+ 		.set_new = 30,
+ 		.ack_new = 31,
++		.clear = 24,
++		.clear_new = 31,
+ 	},
+ 
+ 	[GMU_OOB_PERFCOUNTER_SET] = {
+@@ -268,18 +270,22 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
+ 		.ack = 25,
+ 		.set_new = 28,
+ 		.ack_new = 30,
++		.clear = 25,
++		.clear_new = 29,
+ 	},
+ 
+ 	[GMU_OOB_BOOT_SLUMBER] = {
+ 		.name = "BOOT_SLUMBER",
+ 		.set = 22,
+ 		.ack = 30,
++		.clear = 30,
+ 	},
+ 
+ 	[GMU_OOB_DCVS_SET] = {
+ 		.name = "GPU_DCVS",
+ 		.set = 23,
+ 		.ack = 31,
++		.clear = 31,
+ 	},
+ };
+ 
+@@ -335,9 +341,9 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+ 		return;
+ 
+ 	if (gmu->legacy)
+-		bit = a6xx_gmu_oob_bits[state].ack;
++		bit = a6xx_gmu_oob_bits[state].clear;
+ 	else
+-		bit = a6xx_gmu_oob_bits[state].ack_new;
++		bit = a6xx_gmu_oob_bits[state].clear_new;
+ 
+ 	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
+ }
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+index ff2c1d583c792..0392d4dfe270a 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+@@ -20,7 +20,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ {
+ 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ 	struct device *dev = encoder->dev->dev;
+-	u32 total_lines_x100, vclks_line, cfg;
++	u32 total_lines, vclks_line, cfg;
+ 	long vsync_clk_speed;
+ 	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ 	int pp_id = mixer->pp;
+@@ -30,8 +30,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ 		return -EINVAL;
+ 	}
+ 
+-	total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
+-	if (!total_lines_x100) {
++	total_lines = mode->vtotal * drm_mode_vrefresh(mode);
++	if (!total_lines) {
+ 		DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ 			      __func__, mode->vtotal, drm_mode_vrefresh(mode));
+ 		return -EINVAL;
+@@ -43,15 +43,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ 							vsync_clk_speed);
+ 		return -EINVAL;
+ 	}
+-	vclks_line = vsync_clk_speed * 100 / total_lines_x100;
++	vclks_line = vsync_clk_speed / total_lines;
+ 
+ 	cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
+ 		| MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
+ 	cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
+ 
++	/*
++	 * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
++	 * the vsync_clk equating to roughly half the desired panel refresh rate.
++	 * This is only necessary as stability fallback if interrupts from the
++	 * panel arrive too late or not at all, but is currently used by default
++	 * because these panel interrupts are not wired up yet.
++	 */
+ 	mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
+ 	mdp5_write(mdp5_kms,
+-		REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
++		REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
++
+ 	mdp5_write(mdp5_kms,
+ 		REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
+ 	mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
+diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
+index 5b8fe32022b5f..e1c90fa47411f 100644
+--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
++++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
+@@ -34,8 +34,8 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
+ 
+ 	dp_usbpd->hpd_high = hpd;
+ 
+-	if (!hpd_priv->dp_cb && !hpd_priv->dp_cb->configure
+-				&& !hpd_priv->dp_cb->disconnect) {
++	if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
++				|| !hpd_priv->dp_cb->disconnect) {
+ 		pr_err("hpd dp_cb not initialized\n");
+ 		return -EINVAL;
+ 	}
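The dp_hpd guard fix is a De Morgan correction: the old !a && !a->b && !a->c dereferenced dp_cb even when it was NULL and rejected the callbacks only if all three checks failed, while the intended condition, now written !a || !a->b || !a->c, bails out as soon as any piece is missing. The corrected predicate as a standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct cb {
        void (*configure)(void);
        void (*disconnect)(void);
    };

    static bool valid(const struct cb *cb)
    {
        return cb && cb->configure && cb->disconnect;  /* every part required */
    }

    int main(void)
    {
        struct cb half = { .configure = NULL, .disconnect = NULL };

        printf("%d %d\n", valid(NULL), valid(&half));  /* 0 0 */
        return 0;
    }
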
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 10738e04c09b8..56e0c6c625e9a 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -1228,6 +1228,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
+ 
+ void qxl_modeset_fini(struct qxl_device *qdev)
+ {
++	if (qdev->dumb_shadow_bo) {
++		drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
++		qdev->dumb_shadow_bo = NULL;
++	}
+ 	qxl_destroy_monitors_object(qdev);
+ 	drm_mode_config_cleanup(&qdev->ddev);
+ }
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
+index 1864467f10639..1b09bbe980551 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.c
++++ b/drivers/gpu/drm/qxl/qxl_drv.c
+@@ -144,6 +144,8 @@ static void qxl_drm_release(struct drm_device *dev)
+ 	 * reordering qxl_modeset_fini() + qxl_device_fini() calls is
+ 	 * non-trivial though.
+ 	 */
++	if (!dev->registered)
++		return;
+ 	qxl_modeset_fini(qdev);
+ 	qxl_device_fini(qdev);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 9b81786782de7..499ce55e34ccf 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -384,6 +384,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
+ 	}
+ #endif
+ 	man = ttm_manager_type(bdev, TTM_PL_VRAM);
++	if (!man)
++		return 0;
+ 	return ttm_resource_manager_evict_all(bdev, man);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 78893bea85aed..c0258d213a72f 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -485,13 +485,14 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt
+ 	struct radeon_ttm_tt *gtt = (void *)ttm;
+ 	struct radeon_device *rdev = radeon_get_rdev(bdev);
+ 
++	if (gtt->userptr)
++		radeon_ttm_tt_unpin_userptr(bdev, ttm);
++
+ 	if (!gtt->bound)
+ 		return;
+ 
+ 	radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
+ 
+-	if (gtt->userptr)
+-		radeon_ttm_tt_unpin_userptr(bdev, ttm);
+ 	gtt->bound = false;
+ }
+ 
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index 23eb6d772e405..669f2ee395154 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -174,7 +174,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ 		if (!sync_file) {
+ 			dma_fence_put(&out_fence->f);
+ 			ret = -ENOMEM;
+-			goto out_memdup;
++			goto out_unresv;
+ 		}
+ 
+ 		exbuf->fence_fd = out_fence_fd;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
+index d69a5b6da5532..4ff1ec28e630d 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_object.c
++++ b/drivers/gpu/drm/virtio/virtgpu_object.c
+@@ -248,6 +248,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
+ 
+ 	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ 	if (ret != 0) {
++		virtio_gpu_array_put_free(objs);
+ 		virtio_gpu_free_object(&shmem_obj->base);
+ 		return ret;
+ 	}
+diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
+index 0443b7deeaef6..758d8a98d96b3 100644
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -18,7 +18,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+ 
+ 	ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+ 					  output->period_ns);
+-	WARN_ON(ret_overrun != 1);
++	if (ret_overrun != 1)
++		pr_warn("%s: vblank timer overrun\n", __func__);
+ 
+ 	spin_lock(&output->lock);
+ 	ret = drm_crtc_handle_vblank(crtc);
+diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
+index f72803a023910..28509b02a0b56 100644
+--- a/drivers/hwtracing/intel_th/gth.c
++++ b/drivers/hwtracing/intel_th/gth.c
+@@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
+ 	output->active = false;
+ 
+ 	for_each_set_bit(master, gth->output[output->port].master,
+-			 TH_CONFIGURABLE_MASTERS) {
++			 TH_CONFIGURABLE_MASTERS + 1) {
+ 		gth_master_set(gth, master, -1);
+ 	}
+ 	spin_unlock(&gth->gth_lock);
+@@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
+ 	othdev->output.port = -1;
+ 	othdev->output.active = false;
+ 	gth->output[port].output = NULL;
+-	for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
++	for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
+ 		if (gth->master[master] == port)
+ 			gth->master[master] = -1;
+ 	spin_unlock(&gth->gth_lock);
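Both hunks above fix the same off-by-one: the driver numbers masters 0 through TH_CONFIGURABLE_MASTERS inclusive (note the pre-existing `<=` loop), so a bitmap walk bounded by TH_CONFIGURABLE_MASTERS bits never visits the last master. A tiny sketch of why an inclusive maximum needs a +1 sized iteration (MAX_MASTER is a stand-in):

#include <stdio.h>

#define MAX_MASTER 256 /* stand-in for TH_CONFIGURABLE_MASTERS: an inclusive maximum, not a count */

int main(void)
{
	int owner[MAX_MASTER + 1]; /* masters 0..MAX_MASTER inclusive need MAX+1 slots */
	int master;

	for (master = 0; master < MAX_MASTER + 1; master++)
		owner[master] = -1;

	/* A 'master < MAX_MASTER' bound would silently skip owner[MAX_MASTER]. */
	printf("last master reset to %d\n", owner[MAX_MASTER]);
	return 0;
}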
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 251e75c9ba9d0..817cdb29bbd89 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -273,11 +273,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Alder Lake-M */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{
+ 		/* Alder Lake CPU */
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Rocket Lake CPU */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{ 0 },
+ };
+ 
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index d8fccf048bf44..30576a5f2f045 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -87,7 +87,7 @@ static bool ili210x_touchdata_to_coords(const u8 *touchdata,
+ 					unsigned int *x, unsigned int *y,
+ 					unsigned int *z)
+ {
+-	if (touchdata[0] & BIT(finger))
++	if (!(touchdata[0] & BIT(finger)))
+ 		return false;
+ 
+ 	*x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
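The unfixed test above returned false exactly when the finger's presence bit was set, i.e. it reported touches inverted; the corrected form bails out only when the bit is clear. A tiny userspace sketch of the polarity (BIT() is redefined locally):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* Return true only when the per-finger presence bit is set. */
static bool finger_present(uint8_t status, unsigned int finger)
{
	/*
	 * The bail-out condition must be '!(status & BIT(finger))';
	 * testing 'status & BIT(finger)' there inverts the logic.
	 */
	return !!(status & BIT(finger));
}

int main(void)
{
	uint8_t status = BIT(0) | BIT(2); /* fingers 0 and 2 down */

	for (unsigned int f = 0; f < 4; f++)
		printf("finger %u: %s\n", f, finger_present(status, f) ? "down" : "up");
	return 0;
}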
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index ee0932307d646..881c9f2a5c7d9 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -2289,6 +2289,41 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
+ 	return level;
+ }
+ 
++/*
++ * Ensure that old small page tables are removed to make room for superpage(s).
++ * We're going to add new large pages, so make sure we don't remove their parent
++ * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
++ */
++static void switch_to_super_page(struct dmar_domain *domain,
++				 unsigned long start_pfn,
++				 unsigned long end_pfn, int level)
++{
++	unsigned long lvl_pages = lvl_to_nr_pages(level);
++	struct dma_pte *pte = NULL;
++	int i;
++
++	while (start_pfn <= end_pfn) {
++		if (!pte)
++			pte = pfn_to_dma_pte(domain, start_pfn, &level);
++
++		if (dma_pte_present(pte)) {
++			dma_pte_free_pagetable(domain, start_pfn,
++					       start_pfn + lvl_pages - 1,
++					       level + 1);
++
++			for_each_domain_iommu(i, domain)
++				iommu_flush_iotlb_psi(g_iommus[i], domain,
++						      start_pfn, lvl_pages,
++						      0, 0);
++		}
++
++		pte++;
++		start_pfn += lvl_pages;
++		if (first_pte_in_page(pte))
++			pte = NULL;
++	}
++}
++
+ static int
+ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 		 unsigned long phys_pfn, unsigned long nr_pages, int prot)
+@@ -2329,22 +2364,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 				return -ENOMEM;
+ 			/* It is large page*/
+ 			if (largepage_lvl > 1) {
+-				unsigned long nr_superpages, end_pfn;
++				unsigned long end_pfn;
+ 
+ 				pteval |= DMA_PTE_LARGE_PAGE;
+-				lvl_pages = lvl_to_nr_pages(largepage_lvl);
+-
+-				nr_superpages = nr_pages / lvl_pages;
+-				end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
+-
+-				/*
+-				 * Ensure that old small page tables are
+-				 * removed to make room for superpage(s).
+-				 * We're adding new large pages, so make sure
+-				 * we don't remove their parent tables.
+-				 */
+-				dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
+-						       largepage_lvl + 1);
++				end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
++				switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
+ 			} else {
+ 				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+ 			}
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index eb0ee356a6294..00404024d7cd5 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
+ 
+ 	irqnr = gic_read_iar();
+ 
++	/* Check for special IDs first */
++	if ((irqnr >= 1020 && irqnr <= 1023))
++		return;
++
+ 	if (gic_supports_nmi() &&
+ 	    unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
+ 		gic_handle_nmi(irqnr, regs);
+@@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
+ 		gic_arch_enable_irqs();
+ 	}
+ 
+-	/* Check for special IDs first */
+-	if ((irqnr >= 1020 && irqnr <= 1023))
+-		return;
+-
+ 	if (static_branch_likely(&supports_deactivate_key))
+ 		gic_write_eoir(irqnr);
+ 	else
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 46b5d542b8fe6..362c887d33b3d 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4039,6 +4039,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ 				r = -EINVAL;
+ 				ti->error = "Invalid bitmap_flush_interval argument";
++				goto bad;
+ 			}
+ 			ic->bitmap_flush_interval = msecs_to_jiffies(val);
+ 		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
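Before the added `goto bad` above, the constructor recorded the error string and -EINVAL but fell through and consumed the out-of-range value anyway. A self-contained sketch of the validate-then-bail shape (the bound and names are illustrative):

#include <stdio.h>

#define EINVAL 22
#define MAX_INTERVAL_MS 3600000u /* illustrative upper bound */

/* Validate-then-use: a bad argument must jump to the error exit,
 * otherwise execution falls through and the bogus value is consumed. */
static int set_flush_interval(unsigned long val, unsigned long *out_ms)
{
	int r = 0;

	if (val >= MAX_INTERVAL_MS) {
		r = -EINVAL;
		goto bad;	/* the fix: without this, *out_ms is set anyway */
	}
	*out_ms = val;
bad:
	return r;
}

int main(void)
{
	unsigned long ms = 0;

	printf("%d\n", set_flush_interval(1000, &ms));			/* 0 */
	printf("%d (ms still %lu)\n", set_flush_interval(~0ul, &ms), ms); /* -22, ms untouched */
	return 0;
}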
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index cab12b2251bac..91461b6904c1d 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -1868,6 +1868,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
+ 	return rs->md.new_level != rs->md.level;
+ }
+ 
++/* True if layout is set to reshape. */
++static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
++{
++	return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
++	       rs->md.new_layout != rs->md.layout ||
++	       rs->md.new_chunk_sectors != rs->md.chunk_sectors;
++}
++
+ /* True if @rs is requested to reshape by ctr */
+ static bool rs_reshape_requested(struct raid_set *rs)
+ {
+@@ -1880,9 +1888,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
+ 	if (rs_is_raid0(rs))
+ 		return false;
+ 
+-	change = mddev->new_layout != mddev->layout ||
+-		 mddev->new_chunk_sectors != mddev->chunk_sectors ||
+-		 rs->delta_disks;
++	change = rs_is_layout_change(rs, false);
+ 
+ 	/* Historical case to support raid1 reshape without delta disks */
+ 	if (rs_is_raid1(rs)) {
+@@ -2817,7 +2823,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
+ }
+ 
+ /*
+- *
++ * Reshape:
+  * - change raid layout
+  * - change chunk size
+  * - add disks
+@@ -2926,6 +2932,20 @@ static int rs_setup_reshape(struct raid_set *rs)
+ 	return r;
+ }
+ 
++/*
++ * If the md resync thread has updated the superblock with the max reshape
++ * position at the end of a reshape but has not (yet) reset the layout
++ * configuration changes, reset the latter here.
++ */
++static void rs_reset_inconclusive_reshape(struct raid_set *rs)
++{
++	if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
++		rs_set_cur(rs);
++		rs->md.delta_disks = 0;
++		rs->md.reshape_backwards = 0;
++	}
++}
++
+ /*
+  * Enable/disable discard support on RAID set depending on
+  * RAID level and discard properties of underlying RAID members.
+@@ -3212,11 +3232,14 @@ size_check:
+ 	if (r)
+ 		goto bad;
+ 
++	/* Catch any inconclusive reshape superblock content. */
++	rs_reset_inconclusive_reshape(rs);
++
+ 	/* Start raid set read-only and assumed clean to change in raid_resume() */
+ 	rs->md.ro = 1;
+ 	rs->md.in_sync = 1;
+ 
+-	/* Keep array frozen */
++	/* Keep array frozen until resume. */
+ 	set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
+ 
+ 	/* Has to be held on running the array */
+@@ -3230,7 +3253,6 @@ size_check:
+ 	}
+ 
+ 	r = md_start(&rs->md);
+-
+ 	if (r) {
+ 		ti->error = "Failed to start raid array";
+ 		mddev_unlock(&rs->md);
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 13b4385f4d5a9..9c3bc3711b335 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -569,6 +569,7 @@ out_tag_set:
+ 	blk_mq_free_tag_set(md->tag_set);
+ out_kfree_tag_set:
+ 	kfree(md->tag_set);
++	md->tag_set = NULL;
+ 
+ 	return err;
+ }
+@@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
+ 	if (md->tag_set) {
+ 		blk_mq_free_tag_set(md->tag_set);
+ 		kfree(md->tag_set);
++		md->tag_set = NULL;
+ 	}
+ }
+ 
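Freeing md->tag_set without clearing it leaves a dangling pointer, and dm_mq_cleanup_mapped_device() guards on that same pointer, so a cleanup call after a failed init could double-free it. Nulling after kfree() makes repeated cleanup idempotent. A userspace sketch of the idiom (struct names are illustrative):

#include <stdlib.h>

struct mapped_dev {
	int *tag_set;
};

static void cleanup_tag_set(struct mapped_dev *md)
{
	if (md->tag_set) {
		free(md->tag_set);
		/*
		 * Clear the pointer so a later cleanup pass sees NULL
		 * instead of a dangling pointer and cannot double-free.
		 */
		md->tag_set = NULL;
	}
}

int main(void)
{
	struct mapped_dev md = { .tag_set = malloc(sizeof(int)) };

	cleanup_tag_set(&md);
	cleanup_tag_set(&md); /* safe: second call is a no-op */
	return 0;
}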
+diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
+index fe073d92f01e6..70cfdea27efdf 100644
+--- a/drivers/md/persistent-data/dm-btree-internal.h
++++ b/drivers/md/persistent-data/dm-btree-internal.h
+@@ -34,12 +34,12 @@ struct node_header {
+ 	__le32 max_entries;
+ 	__le32 value_size;
+ 	__le32 padding;
+-} __packed;
++} __attribute__((packed, aligned(8)));
+ 
+ struct btree_node {
+ 	struct node_header header;
+ 	__le64 keys[];
+-} __packed;
++} __attribute__((packed, aligned(8)));
+ 
+ 
+ /*
+diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
+index d8b4125e338ca..a213bf11738fb 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.c
++++ b/drivers/md/persistent-data/dm-space-map-common.c
+@@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ 	 */
+ 	begin = do_div(index_begin, ll->entries_per_block);
+ 	end = do_div(end, ll->entries_per_block);
++	if (end == 0)
++		end = ll->entries_per_block;
+ 
+ 	for (i = index_begin; i < index_end; i++, begin = 0) {
+ 		struct dm_block *blk;
+diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
+index 8de63ce39bdd5..87e17909ef521 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.h
++++ b/drivers/md/persistent-data/dm-space-map-common.h
+@@ -33,7 +33,7 @@ struct disk_index_entry {
+ 	__le64 blocknr;
+ 	__le32 nr_free;
+ 	__le32 none_free_before;
+-} __packed;
++} __attribute__ ((packed, aligned(8)));
+ 
+ 
+ #define MAX_METADATA_BITMAPS 255
+@@ -43,7 +43,7 @@ struct disk_metadata_index {
+ 	__le64 blocknr;
+ 
+ 	struct disk_index_entry index[MAX_METADATA_BITMAPS];
+-} __packed;
++} __attribute__ ((packed, aligned(8)));
+ 
+ struct ll_disk;
+ 
+@@ -86,7 +86,7 @@ struct disk_sm_root {
+ 	__le64 nr_allocated;
+ 	__le64 bitmap_root;
+ 	__le64 ref_count_root;
+-} __packed;
++} __attribute__ ((packed, aligned(8)));
+ 
+ #define ENTRIES_PER_BYTE 4
+ 
+@@ -94,7 +94,7 @@ struct disk_bitmap_header {
+ 	__le32 csum;
+ 	__le32 not_used;
+ 	__le64 blocknr;
+-} __packed;
++} __attribute__ ((packed, aligned(8)));
+ 
+ enum allocation_event {
+ 	SM_NONE,
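`__packed` alone drops these structures' alignment to 1: the byte layout of the on-disk formats is unchanged, but the compiler may fall back to byte-wise accesses on strict-alignment CPUs and in-memory alignment assumptions no longer hold. Adding aligned(8) keeps the padding-free layout while restoring natural 8-byte alignment, which the sketch below checks at compile time (field names are illustrative):

#include <stdint.h>
#include <stdalign.h>

struct hdr_packed {
	uint32_t csum;
	uint32_t flags;
	uint64_t blocknr;
} __attribute__((packed));

struct hdr_packed_aligned {
	uint32_t csum;
	uint32_t flags;
	uint64_t blocknr;
} __attribute__((packed, aligned(8)));

/* Same byte-exact, padding-free layout in both cases... */
_Static_assert(sizeof(struct hdr_packed) == 16, "no padding");
_Static_assert(sizeof(struct hdr_packed_aligned) == 16, "no padding");
/* ...but only the second keeps natural 8-byte alignment. */
_Static_assert(alignof(struct hdr_packed) == 1, "packed drops alignment");
_Static_assert(alignof(struct hdr_packed_aligned) == 8, "aligned(8) restores it");

int main(void) { return 0; }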
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index d2378765dc154..ced076ba560e1 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -478,6 +478,8 @@ static void raid1_end_write_request(struct bio *bio)
+ 		if (!test_bit(Faulty, &rdev->flags))
+ 			set_bit(R1BIO_WriteError, &r1_bio->state);
+ 		else {
++			/* Fail the request */
++			set_bit(R1BIO_Degraded, &r1_bio->state);
+ 			/* Finished with this branch */
+ 			r1_bio->bios[mirror] = NULL;
+ 			to_put = bio;
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 5ff7bedee2477..3862ddc86ec48 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -241,6 +241,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev)
+ 
+ 	if (dvbdev->adapter->conn) {
+ 		media_device_unregister_entity(dvbdev->adapter->conn);
++		kfree(dvbdev->adapter->conn);
+ 		dvbdev->adapter->conn = NULL;
+ 		kfree(dvbdev->adapter->conn_pads);
+ 		dvbdev->adapter->conn_pads = NULL;
+diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
+index a3161d7090153..ab7883cff8b22 100644
+--- a/drivers/media/i2c/adv7511-v4l2.c
++++ b/drivers/media/i2c/adv7511-v4l2.c
+@@ -1964,7 +1964,7 @@ static int adv7511_remove(struct i2c_client *client)
+ 
+ 	adv7511_set_isr(sd, false);
+ 	adv7511_init_setup(sd);
+-	cancel_delayed_work(&state->edid_handler);
++	cancel_delayed_work_sync(&state->edid_handler);
+ 	i2c_unregister_device(state->i2c_edid);
+ 	i2c_unregister_device(state->i2c_cec);
+ 	i2c_unregister_device(state->i2c_pktmem);
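cancel_delayed_work() only removes a pending work item; a handler that is already executing keeps running and can touch driver state after the lines above have unregistered the I2C clients. cancel_delayed_work_sync() additionally waits for a running handler to return, which is why the same substitution repeats for adv7604, adv7842, tc358743 and tda1997x below. A kernel-style sketch of the teardown ordering (struct and function names are illustrative, not the actual adv7511 code):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_state {
	struct delayed_work edid_handler;
	/* ...resources the work handler dereferences... */
};

static void demo_remove(struct demo_state *state)
{
	/*
	 * cancel_delayed_work() returns immediately even if the handler is
	 * mid-flight; the _sync variant waits for it, so nothing freed
	 * below can still be in use by edid_handler.
	 */
	cancel_delayed_work_sync(&state->edid_handler);
	kfree(state);
}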
+diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
+index 09004d928d11f..d1f58795794fd 100644
+--- a/drivers/media/i2c/adv7604.c
++++ b/drivers/media/i2c/adv7604.c
+@@ -3616,7 +3616,7 @@ static int adv76xx_remove(struct i2c_client *client)
+ 	io_write(sd, 0x6e, 0);
+ 	io_write(sd, 0x73, 0);
+ 
+-	cancel_delayed_work(&state->delayed_work_enable_hotplug);
++	cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+ 	v4l2_async_unregister_subdev(sd);
+ 	media_entity_cleanup(&sd->entity);
+ 	adv76xx_unregister_clients(to_state(sd));
+diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
+index 0855f648416d1..f7d2b6cd3008b 100644
+--- a/drivers/media/i2c/adv7842.c
++++ b/drivers/media/i2c/adv7842.c
+@@ -3586,7 +3586,7 @@ static int adv7842_remove(struct i2c_client *client)
+ 	struct adv7842_state *state = to_state(sd);
+ 
+ 	adv7842_irq_enable(sd, false);
+-	cancel_delayed_work(&state->delayed_work_enable_hotplug);
++	cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+ 	v4l2_device_unregister_subdev(sd);
+ 	media_entity_cleanup(&sd->entity);
+ 	adv7842_unregister_clients(sd);
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index 831b5b54fd78c..1b309bb743c7b 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -2193,7 +2193,7 @@ static int tc358743_remove(struct i2c_client *client)
+ 		del_timer_sync(&state->timer);
+ 		flush_work(&state->work_i2c_poll);
+ 	}
+-	cancel_delayed_work(&state->delayed_work_enable_hotplug);
++	cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+ 	cec_unregister_adapter(state->cec_adap);
+ 	v4l2_async_unregister_subdev(sd);
+ 	v4l2_device_unregister_subdev(sd);
+diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
+index a09bf0a39d058..89bb7e6dc7a42 100644
+--- a/drivers/media/i2c/tda1997x.c
++++ b/drivers/media/i2c/tda1997x.c
+@@ -2804,7 +2804,7 @@ static int tda1997x_remove(struct i2c_client *client)
+ 	media_entity_cleanup(&sd->entity);
+ 	v4l2_ctrl_handler_free(&state->hdl);
+ 	regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
+-	cancel_delayed_work(&state->delayed_work_enable_hpd);
++	cancel_delayed_work_sync(&state->delayed_work_enable_hpd);
+ 	mutex_destroy(&state->page_lock);
+ 	mutex_destroy(&state->lock);
+ 
+diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
+index 22f55a7840a62..d0ca260ecf700 100644
+--- a/drivers/media/pci/cx23885/cx23885-core.c
++++ b/drivers/media/pci/cx23885/cx23885-core.c
+@@ -2077,6 +2077,15 @@ static struct {
+ 	 * 0x1423 is the PCI ID for the IOMMU found on Kaveri
+ 	 */
+ 	{ PCI_VENDOR_ID_AMD, 0x1423 },
++	/* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
++	 */
++	{ PCI_VENDOR_ID_AMD, 0x1481 },
++	/* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
++	 */
++	{ PCI_VENDOR_ID_AMD, 0x1419 },
++	/* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
++	 */
++	{ PCI_VENDOR_ID_ATI, 0x5a23 },
+ };
+ 
+ static bool cx23885_does_need_dma_reset(void)
+diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
+index 11e1eb6a6809e..1d1d32e043f16 100644
+--- a/drivers/media/pci/saa7164/saa7164-encoder.c
++++ b/drivers/media/pci/saa7164/saa7164-encoder.c
+@@ -1008,7 +1008,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
+ 		printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
+ 			__func__, result);
+ 		result = -ENOMEM;
+-		goto failed;
++		goto fail_pci;
+ 	}
+ 
+ 	/* Establish encoder defaults here */
+@@ -1062,7 +1062,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
+ 			  100000, ENCODER_DEF_BITRATE);
+ 	if (hdl->error) {
+ 		result = hdl->error;
+-		goto failed;
++		goto fail_hdl;
+ 	}
+ 
+ 	port->std = V4L2_STD_NTSC_M;
+@@ -1080,7 +1080,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
+ 		printk(KERN_INFO "%s: can't allocate mpeg device\n",
+ 			dev->name);
+ 		result = -ENOMEM;
+-		goto failed;
++		goto fail_hdl;
+ 	}
+ 
+ 	port->v4l_device->ctrl_handler = hdl;
+@@ -1091,10 +1091,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
+ 	if (result < 0) {
+ 		printk(KERN_INFO "%s: can't register mpeg device\n",
+ 			dev->name);
+-		/* TODO: We're going to leak here if we don't dealloc
+-		 The buffers above. The unreg function can't deal wit it.
+-		*/
+-		goto failed;
++		goto fail_reg;
+ 	}
+ 
+ 	printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
+@@ -1116,9 +1113,14 @@ int saa7164_encoder_register(struct saa7164_port *port)
+ 
+ 	saa7164_api_set_encoder(port);
+ 	saa7164_api_get_encoder(port);
++	return 0;
+ 
+-	result = 0;
+-failed:
++fail_reg:
++	video_device_release(port->v4l_device);
++	port->v4l_device = NULL;
++fail_hdl:
++	v4l2_ctrl_handler_free(hdl);
++fail_pci:
+ 	return result;
+ }
+ 
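The old single `failed:` label above could not know how far initialisation had progressed, so the control handler and the allocated video device leaked on the later error branches. The rewrite gives each acquired resource its own label and lets the labels fall through in reverse acquisition order. A self-contained sketch of the shape (the stubs are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define ENOMEM 12
#define EINVAL 22

/* Stubs so the sketch compiles standalone; illustrative only. */
static int config_ok(void) { return 1; }
static int register_device(void *vdev) { (void)vdev; return -1; /* force failure */ }

static int demo_register(void)
{
	void *hdl = NULL, *vdev = NULL;
	int result;

	if (!config_ok()) {
		result = -ENOMEM;
		goto fail_cfg;		/* nothing acquired yet */
	}

	hdl = malloc(16);		/* stage 1: control handler */
	if (!hdl) {
		result = -ENOMEM;
		goto fail_cfg;
	}

	vdev = malloc(16);		/* stage 2: video device */
	if (!vdev) {
		result = -ENOMEM;
		goto fail_hdl;		/* undo stage 1 only */
	}

	if (register_device(vdev) < 0) {
		result = -EINVAL;
		goto fail_reg;		/* undo stage 2, then fall into stage 1 */
	}
	return 0;

fail_reg:				/* labels run in reverse acquisition order */
	free(vdev);
fail_hdl:
	free(hdl);
fail_cfg:
	return result;
}

int main(void)
{
	printf("demo_register() = %d\n", demo_register()); /* -22, nothing leaked */
	return 0;
}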
+diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
+index 4dd98f94a91ed..27bb785136319 100644
+--- a/drivers/media/pci/sta2x11/Kconfig
++++ b/drivers/media/pci/sta2x11/Kconfig
+@@ -3,6 +3,7 @@ config STA2X11_VIP
+ 	tristate "STA2X11 VIP Video For Linux"
+ 	depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS && I2C
+ 	depends on STA2X11 || COMPILE_TEST
++	select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEOBUF2_DMA_CONTIG
+ 	select MEDIA_CONTROLLER
+diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
+index 995e95272e511..e600764dce968 100644
+--- a/drivers/media/platform/coda/coda-common.c
++++ b/drivers/media/platform/coda/coda-common.c
+@@ -2062,7 +2062,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
+ 	if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
+ 		ctx->params.gop_size = 1;
+ 	ctx->gopcounter = ctx->params.gop_size - 1;
+-	v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
++	/* Only decoders have this control */
++	if (ctx->mb_err_cnt_ctrl)
++		v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
+ 
+ 	ret = ctx->ops->start_streaming(ctx);
+ 	if (ctx->inst_type == CODA_INST_DECODER) {
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index f9896c121fd89..d2842f496b474 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -218,7 +218,6 @@ static int venus_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	core->dev = dev;
+-	platform_set_drvdata(pdev, core);
+ 
+ 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	core->base = devm_ioremap_resource(dev, r);
+@@ -248,7 +247,7 @@ static int venus_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 
+ 	if (core->pm_ops->core_get) {
+-		ret = core->pm_ops->core_get(dev);
++		ret = core->pm_ops->core_get(core);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -273,6 +272,12 @@ static int venus_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_core_put;
+ 
++	ret = v4l2_device_register(dev, &core->v4l2_dev);
++	if (ret)
++		goto err_core_deinit;
++
++	platform_set_drvdata(pdev, core);
++
+ 	pm_runtime_enable(dev);
+ 
+ 	ret = pm_runtime_get_sync(dev);
+@@ -307,10 +312,6 @@ static int venus_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_venus_shutdown;
+ 
+-	ret = v4l2_device_register(dev, &core->v4l2_dev);
+-	if (ret)
+-		goto err_core_deinit;
+-
+ 	ret = pm_runtime_put_sync(dev);
+ 	if (ret) {
+ 		pm_runtime_get_noresume(dev);
+@@ -323,8 +324,6 @@ static int venus_probe(struct platform_device *pdev)
+ 
+ err_dev_unregister:
+ 	v4l2_device_unregister(&core->v4l2_dev);
+-err_core_deinit:
+-	hfi_core_deinit(core, false);
+ err_venus_shutdown:
+ 	venus_shutdown(core);
+ err_runtime_disable:
+@@ -332,9 +331,11 @@ err_runtime_disable:
+ 	pm_runtime_set_suspended(dev);
+ 	pm_runtime_disable(dev);
+ 	hfi_destroy(core);
++err_core_deinit:
++	hfi_core_deinit(core, false);
+ err_core_put:
+ 	if (core->pm_ops->core_put)
+-		core->pm_ops->core_put(dev);
++		core->pm_ops->core_put(core);
+ 	return ret;
+ }
+ 
+@@ -360,7 +361,9 @@ static int venus_remove(struct platform_device *pdev)
+ 	pm_runtime_disable(dev);
+ 
+ 	if (pm_ops->core_put)
+-		pm_ops->core_put(dev);
++		pm_ops->core_put(core);
++
++	v4l2_device_unregister(&core->v4l2_dev);
+ 
+ 	hfi_destroy(core);
+ 
+@@ -368,6 +371,7 @@ static int venus_remove(struct platform_device *pdev)
+ 	icc_put(core->cpucfg_path);
+ 
+ 	v4l2_device_unregister(&core->v4l2_dev);
++
+ 	mutex_destroy(&core->pm_lock);
+ 	mutex_destroy(&core->lock);
+ 	venus_dbgfs_deinit(core);
+@@ -396,7 +400,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
+ 		return ret;
+ 
+ 	if (pm_ops->core_power) {
+-		ret = pm_ops->core_power(dev, POWER_OFF);
++		ret = pm_ops->core_power(core, POWER_OFF);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -414,7 +418,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
+ err_video_path:
+ 	icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0);
+ err_cpucfg_path:
+-	pm_ops->core_power(dev, POWER_ON);
++	pm_ops->core_power(core, POWER_ON);
+ 
+ 	return ret;
+ }
+@@ -434,7 +438,7 @@ static __maybe_unused int venus_runtime_resume(struct device *dev)
+ 		return ret;
+ 
+ 	if (pm_ops->core_power) {
+-		ret = pm_ops->core_power(dev, POWER_ON);
++		ret = pm_ops->core_power(core, POWER_ON);
+ 		if (ret)
+ 			return ret;
+ 	}
+diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
+index 4f75658344690..558510a8dfc82 100644
+--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
++++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
+@@ -1039,6 +1039,18 @@ static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
+ 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp);
+ 		break;
+ 	}
++	case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
++		struct hfi_uncompressed_plane_actual_info *in = pdata;
++		struct hfi_uncompressed_plane_actual_info *info = prop_data;
++
++		info->buffer_type = in->buffer_type;
++		info->num_planes = in->num_planes;
++		info->plane_format[0] = in->plane_format[0];
++		if (in->num_planes > 1)
++			info->plane_format[1] = in->plane_format[1];
++		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
++		break;
++	}
+ 
+ 	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
+ 	case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+@@ -1205,18 +1217,6 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
+ 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
+ 		break;
+ 	}
+-	case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
+-		struct hfi_uncompressed_plane_actual_info *in = pdata;
+-		struct hfi_uncompressed_plane_actual_info *info = prop_data;
+-
+-		info->buffer_type = in->buffer_type;
+-		info->num_planes = in->num_planes;
+-		info->plane_format[0] = in->plane_format[0];
+-		if (in->num_planes > 1)
+-			info->plane_format[1] = in->plane_format[1];
+-		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
+-		break;
+-	}
+ 	case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
+ 	case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
+ 	case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE:
+diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
+index 7263c0c326954..5b8389b982993 100644
+--- a/drivers/media/platform/qcom/venus/hfi_parser.c
++++ b/drivers/media/platform/qcom/venus/hfi_parser.c
+@@ -235,13 +235,13 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
+ 	u32 enc_codecs, dec_codecs, count = 0;
+ 	unsigned int entries;
+ 
+-	if (inst)
+-		return 0;
+-
+ 	plat = hfi_platform_get(core->res->hfi_version);
+ 	if (!plat)
+ 		return -EINVAL;
+ 
++	if (inst)
++		return 0;
++
+ 	if (plat->codecs)
+ 		plat->codecs(&enc_codecs, &dec_codecs, &count);
+ 
+@@ -277,8 +277,10 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
+ 
+ 	parser_init(inst, &codecs, &domain);
+ 
+-	core->codecs_count = 0;
+-	memset(core->caps, 0, sizeof(core->caps));
++	if (core->res->hfi_version > HFI_VERSION_1XX) {
++		core->codecs_count = 0;
++		memset(core->caps, 0, sizeof(core->caps));
++	}
+ 
+ 	while (words_count) {
+ 		data = word + 1;
+diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
+index 43c4e3d9e2818..95b4d40ff6a5e 100644
+--- a/drivers/media/platform/qcom/venus/pm_helpers.c
++++ b/drivers/media/platform/qcom/venus/pm_helpers.c
+@@ -277,16 +277,28 @@ set_freq:
+ 	return 0;
+ }
+ 
+-static int core_get_v1(struct device *dev)
++static int core_get_v1(struct venus_core *core)
+ {
+-	struct venus_core *core = dev_get_drvdata(dev);
++	int ret;
++
++	ret = core_clks_get(core);
++	if (ret)
++		return ret;
+ 
+-	return core_clks_get(core);
++	core->opp_table = dev_pm_opp_set_clkname(core->dev, "core");
++	if (IS_ERR(core->opp_table))
++		return PTR_ERR(core->opp_table);
++
++	return 0;
+ }
+ 
+-static int core_power_v1(struct device *dev, int on)
++static void core_put_v1(struct venus_core *core)
++{
++	dev_pm_opp_put_clkname(core->opp_table);
++}
++
++static int core_power_v1(struct venus_core *core, int on)
+ {
+-	struct venus_core *core = dev_get_drvdata(dev);
+ 	int ret = 0;
+ 
+ 	if (on == POWER_ON)
+@@ -299,6 +311,7 @@ static int core_power_v1(struct device *dev, int on)
+ 
+ static const struct venus_pm_ops pm_ops_v1 = {
+ 	.core_get = core_get_v1,
++	.core_put = core_put_v1,
+ 	.core_power = core_power_v1,
+ 	.load_scale = load_scale_v1,
+ };
+@@ -371,6 +384,7 @@ static int venc_power_v3(struct device *dev, int on)
+ 
+ static const struct venus_pm_ops pm_ops_v3 = {
+ 	.core_get = core_get_v1,
++	.core_put = core_put_v1,
+ 	.core_power = core_power_v1,
+ 	.vdec_get = vdec_get_v3,
+ 	.vdec_power = vdec_power_v3,
+@@ -753,12 +767,12 @@ static int venc_power_v4(struct device *dev, int on)
+ 	return ret;
+ }
+ 
+-static int vcodec_domains_get(struct device *dev)
++static int vcodec_domains_get(struct venus_core *core)
+ {
+ 	int ret;
+ 	struct opp_table *opp_table;
+ 	struct device **opp_virt_dev;
+-	struct venus_core *core = dev_get_drvdata(dev);
++	struct device *dev = core->dev;
+ 	const struct venus_resources *res = core->res;
+ 	struct device *pd;
+ 	unsigned int i;
+@@ -809,9 +823,8 @@ opp_attach_err:
+ 	return ret;
+ }
+ 
+-static void vcodec_domains_put(struct device *dev)
++static void vcodec_domains_put(struct venus_core *core)
+ {
+-	struct venus_core *core = dev_get_drvdata(dev);
+ 	const struct venus_resources *res = core->res;
+ 	unsigned int i;
+ 
+@@ -834,9 +847,9 @@ skip_pmdomains:
+ 	dev_pm_opp_detach_genpd(core->opp_table);
+ }
+ 
+-static int core_get_v4(struct device *dev)
++static int core_get_v4(struct venus_core *core)
+ {
+-	struct venus_core *core = dev_get_drvdata(dev);
++	struct device *dev = core->dev;
+ 	const struct venus_resources *res = core->res;
+ 	int ret;
+ 
+@@ -875,7 +888,7 @@ static int core_get_v4(struct device *dev)
+ 		}
+ 	}
+ 
+-	ret = vcodec_domains_get(dev);
++	ret = vcodec_domains_get(core);
+ 	if (ret) {
+ 		if (core->has_opp_table)
+ 			dev_pm_opp_of_remove_table(dev);
+@@ -886,14 +899,14 @@ static int core_get_v4(struct device *dev)
+ 	return 0;
+ }
+ 
+-static void core_put_v4(struct device *dev)
++static void core_put_v4(struct venus_core *core)
+ {
+-	struct venus_core *core = dev_get_drvdata(dev);
++	struct device *dev = core->dev;
+ 
+ 	if (legacy_binding)
+ 		return;
+ 
+-	vcodec_domains_put(dev);
++	vcodec_domains_put(core);
+ 
+ 	if (core->has_opp_table)
+ 		dev_pm_opp_of_remove_table(dev);
+@@ -901,9 +914,9 @@ static void core_put_v4(struct device *dev)
+ 
+ }
+ 
+-static int core_power_v4(struct device *dev, int on)
++static int core_power_v4(struct venus_core *core, int on)
+ {
+-	struct venus_core *core = dev_get_drvdata(dev);
++	struct device *dev = core->dev;
+ 	struct device *pmctrl = core->pmdomains[0];
+ 	int ret = 0;
+ 
+diff --git a/drivers/media/platform/qcom/venus/pm_helpers.h b/drivers/media/platform/qcom/venus/pm_helpers.h
+index aa2f6afa23544..a492c50c5543c 100644
+--- a/drivers/media/platform/qcom/venus/pm_helpers.h
++++ b/drivers/media/platform/qcom/venus/pm_helpers.h
+@@ -4,14 +4,15 @@
+ #define __VENUS_PM_HELPERS_H__
+ 
+ struct device;
++struct venus_core;
+ 
+ #define POWER_ON	1
+ #define POWER_OFF	0
+ 
+ struct venus_pm_ops {
+-	int (*core_get)(struct device *dev);
+-	void (*core_put)(struct device *dev);
+-	int (*core_power)(struct device *dev, int on);
++	int (*core_get)(struct venus_core *core);
++	void (*core_put)(struct venus_core *core);
++	int (*core_power)(struct venus_core *core, int on);
+ 
+ 	int (*vdec_get)(struct device *dev);
+ 	void (*vdec_put)(struct device *dev);
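Threading `struct venus_core *` through the ops table removes the `dev_get_drvdata()` lookup from every callback, which also matters for the probe reordering earlier in this patch: the callbacks no longer depend on `platform_set_drvdata()` having run. A minimal userspace sketch of the refactor (all names are illustrative):

#include <stdio.h>

struct demo_core {
	const char *name;
	int powered;
};

struct demo_pm_ops {
	/* was: int (*core_power)(struct device *dev, int on); */
	int (*core_power)(struct demo_core *core, int on);
};

static int demo_power(struct demo_core *core, int on)
{
	/*
	 * No dev_get_drvdata() here: the context is handed in directly,
	 * so the callback works even before drvdata is set in probe.
	 */
	core->powered = on;
	printf("%s: power %d\n", core->name, on);
	return 0;
}

static const struct demo_pm_ops pm_ops = { .core_power = demo_power };

int main(void)
{
	struct demo_core core = { .name = "venus-demo" };

	return pm_ops.core_power(&core, 1);
}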
+diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
+index a52b800551737..abef0037bf55b 100644
+--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
++++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
+@@ -359,7 +359,7 @@ int venc_ctrl_init(struct venus_inst *inst)
+ 		V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ 		~((1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
+ 		(1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)),
+-		V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
++		V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
+ 
+ 	v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ 		V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
+index 2b270093009c7..a27f638df11c6 100644
+--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
++++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
+@@ -480,7 +480,7 @@ static int regs_show(struct seq_file *s, void *data)
+ 	int ret;
+ 	unsigned int i;
+ 
+-	ret = pm_runtime_get_sync(bdisp->dev);
++	ret = pm_runtime_resume_and_get(bdisp->dev);
+ 	if (ret < 0) {
+ 		seq_puts(s, "Cannot wake up IP\n");
+ 		return 0;
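pm_runtime_get_sync() bumps the device usage count even when the resume fails, so returning straight from the error branch used to leak a reference; pm_runtime_resume_and_get() drops the count itself on failure (the symmetric fix appears for sun8i-di and arizona-irq below). A kernel-style sketch of the two calling conventions (demo_wakeup() is illustrative):

#include <linux/pm_runtime.h>

static int demo_wakeup(struct device *dev)
{
	int ret;

	/*
	 * Old pattern: the usage count is incremented even on failure,
	 * so the error path needed an explicit put:
	 *
	 *	ret = pm_runtime_get_sync(dev);
	 *	if (ret < 0) {
	 *		pm_runtime_put_noidle(dev);
	 *		return ret;
	 *	}
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* count already dropped on failure */

	/* ... use the device ... */
	pm_runtime_put(dev);
	return 0;
}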
+diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+index ed863bf5ea804..671e4a928993d 100644
+--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
++++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+@@ -589,7 +589,7 @@ static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
+ 	int ret;
+ 
+ 	if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+-		ret = pm_runtime_get_sync(dev);
++		ret = pm_runtime_resume_and_get(dev);
+ 		if (ret < 0) {
+ 			dev_err(dev, "Failed to enable module\n");
+ 
+diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
+index 0c6229592e132..e5c4a6941d26b 100644
+--- a/drivers/media/rc/ite-cir.c
++++ b/drivers/media/rc/ite-cir.c
+@@ -276,8 +276,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
+ 	/* read the interrupt flags */
+ 	iflags = dev->params.get_irq_causes(dev);
+ 
++	/* Check for RX overflow */
++	if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
++		dev_warn(&dev->rdev->dev, "receive overflow\n");
++		ir_raw_event_reset(dev->rdev);
++	}
++
+ 	/* check for the receive interrupt */
+-	if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
++	if (iflags & ITE_IRQ_RX_FIFO) {
+ 		/* read the FIFO bytes */
+ 		rx_bytes =
+ 			dev->params.get_rx_bytes(dev, rx_buf,
+diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
+index 0dc65ef3aa14d..ca0ebf6ad9ccf 100644
+--- a/drivers/media/test-drivers/vivid/vivid-core.c
++++ b/drivers/media/test-drivers/vivid/vivid-core.c
+@@ -205,13 +205,13 @@ static const u8 vivid_hdmi_edid[256] = {
+ 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
+ 
+-	0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
++	0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f,
+ 	0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
+ 	0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
+ 	0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
+ 	0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
+ 	0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
+-	0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
++	0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3,
+ 	0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
+ 	0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
+ 	0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
+@@ -220,7 +220,7 @@ static const u8 vivid_hdmi_edid[256] = {
+ 	0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
+ 	0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
+ 	0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
+-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
++	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
+ };
+ 
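The byte tweaks above are not arbitrary: every 128-byte EDID block ends in a checksum chosen so the whole block sums to 0 mod 256. The two payload edits in this CTA extension block change the sum by +0x01 and -0x20, so the final byte must rise by 0x1f, from 0x63 to 0x82. A small sketch of recomputing such a checksum byte:

#include <stdint.h>
#include <stdio.h>

/* Recompute the final byte of a 128-byte EDID block so that all
 * 128 bytes sum to 0 (mod 256). */
static uint8_t edid_block_checksum(const uint8_t *block)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < 127; i++)
		sum += block[i];
	return (uint8_t)(256 - (sum & 0xff));
}

int main(void)
{
	uint8_t block[128] = { 0x02, 0x03 /* ... payload ... */ };

	block[127] = edid_block_checksum(block);
	printf("checksum byte: 0x%02x\n", block[127]);
	return 0;
}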
+ static int vidioc_querycap(struct file *file, void  *priv,
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+index c1a7634e27b43..28e1fd64dd3c2 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+@@ -79,11 +79,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+ 			}
+ 		}
+ 
+-		if ((ret = dvb_usb_adapter_stream_init(adap)) ||
+-			(ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) ||
+-			(ret = dvb_usb_adapter_frontend_init(adap))) {
++		ret = dvb_usb_adapter_stream_init(adap);
++		if (ret)
+ 			return ret;
+-		}
++
++		ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
++		if (ret)
++			goto dvb_init_err;
++
++		ret = dvb_usb_adapter_frontend_init(adap);
++		if (ret)
++			goto frontend_init_err;
+ 
+ 		/* use exclusive FE lock if there is multiple shared FEs */
+ 		if (adap->fe_adap[1].fe)
+@@ -103,6 +109,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+ 	}
+ 
+ 	return 0;
++
++frontend_init_err:
++	dvb_usb_adapter_dvb_exit(adap);
++dvb_init_err:
++	dvb_usb_adapter_stream_exit(adap);
++	return ret;
+ }
+ 
+ static int dvb_usb_adapter_exit(struct dvb_usb_device *d)
+@@ -158,22 +170,20 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
+ 
+ 		if (d->props.priv_init != NULL) {
+ 			ret = d->props.priv_init(d);
+-			if (ret != 0) {
+-				kfree(d->priv);
+-				d->priv = NULL;
+-				return ret;
+-			}
++			if (ret != 0)
++				goto err_priv_init;
+ 		}
+ 	}
+ 
+ 	/* check the capabilities and set appropriate variables */
+ 	dvb_usb_device_power_ctrl(d, 1);
+ 
+-	if ((ret = dvb_usb_i2c_init(d)) ||
+-		(ret = dvb_usb_adapter_init(d, adapter_nums))) {
+-		dvb_usb_exit(d);
+-		return ret;
+-	}
++	ret = dvb_usb_i2c_init(d);
++	if (ret)
++		goto err_i2c_init;
++	ret = dvb_usb_adapter_init(d, adapter_nums);
++	if (ret)
++		goto err_adapter_init;
+ 
+ 	if ((ret = dvb_usb_remote_init(d)))
+ 		err("could not initialize remote control.");
+@@ -181,6 +191,17 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
+ 	dvb_usb_device_power_ctrl(d, 0);
+ 
+ 	return 0;
++
++err_adapter_init:
++	dvb_usb_adapter_exit(d);
++err_i2c_init:
++	dvb_usb_i2c_exit(d);
++	if (d->priv && d->props.priv_destroy)
++		d->props.priv_destroy(d);
++err_priv_init:
++	kfree(d->priv);
++	d->priv = NULL;
++	return ret;
+ }
+ 
+ /* determine the name and the state of the just found USB device */
+@@ -255,41 +276,50 @@ int dvb_usb_device_init(struct usb_interface *intf,
+ 	if (du != NULL)
+ 		*du = NULL;
+ 
+-	if ((desc = dvb_usb_find_device(udev, props, &cold)) == NULL) {
++	d = kzalloc(sizeof(*d), GFP_KERNEL);
++	if (!d) {
++		err("no memory for 'struct dvb_usb_device'");
++		return -ENOMEM;
++	}
++
++	memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
++
++	desc = dvb_usb_find_device(udev, &d->props, &cold);
++	if (!desc) {
+ 		deb_err("something went very wrong, device was not found in current device list - let's see what comes next.\n");
+-		return -ENODEV;
++		ret = -ENODEV;
++		goto error;
+ 	}
+ 
+ 	if (cold) {
+ 		info("found a '%s' in cold state, will try to load a firmware", desc->name);
+ 		ret = dvb_usb_download_firmware(udev, props);
+ 		if (!props->no_reconnect || ret != 0)
+-			return ret;
++			goto error;
+ 	}
+ 
+ 	info("found a '%s' in warm state.", desc->name);
+-	d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
+-	if (d == NULL) {
+-		err("no memory for 'struct dvb_usb_device'");
+-		return -ENOMEM;
+-	}
+-
+ 	d->udev = udev;
+-	memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
+ 	d->desc = desc;
+ 	d->owner = owner;
+ 
+ 	usb_set_intfdata(intf, d);
+ 
+-	if (du != NULL)
++	ret = dvb_usb_init(d, adapter_nums);
++	if (ret) {
++		info("%s error while loading driver (%d)", desc->name, ret);
++		goto error;
++	}
++
++	if (du)
+ 		*du = d;
+ 
+-	ret = dvb_usb_init(d, adapter_nums);
++	info("%s successfully initialized and connected.", desc->name);
++	return 0;
+ 
+-	if (ret == 0)
+-		info("%s successfully initialized and connected.", desc->name);
+-	else
+-		info("%s error while loading driver (%d)", desc->name, ret);
++ error:
++	usb_set_intfdata(intf, NULL);
++	kfree(d);
+ 	return ret;
+ }
+ EXPORT_SYMBOL(dvb_usb_device_init);
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
+index 741be0e694471..2b8ad2bde8a48 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb.h
++++ b/drivers/media/usb/dvb-usb/dvb-usb.h
+@@ -487,7 +487,7 @@ extern int __must_check
+ dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
+ 
+ /* commonly used remote control parsing */
+-extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
++extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[5], u32 *, int *);
+ 
+ /* commonly used firmware download types and function */
+ struct hexline {
+diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
+index 5264242796370..471bd74667e3c 100644
+--- a/drivers/media/usb/em28xx/em28xx-dvb.c
++++ b/drivers/media/usb/em28xx/em28xx-dvb.c
+@@ -2010,6 +2010,7 @@ ret:
+ 	return result;
+ 
+ out_free:
++	em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
+ 	kfree(dvb);
+ 	dev->dvb = NULL;
+ 	goto ret;
+diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
+index 158c8e28ed2cc..47d8f28bfdfc2 100644
+--- a/drivers/media/usb/gspca/gspca.c
++++ b/drivers/media/usb/gspca/gspca.c
+@@ -1576,6 +1576,8 @@ out:
+ #endif
+ 	v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
+ 	v4l2_device_unregister(&gspca_dev->v4l2_dev);
++	if (sd_desc->probe_error)
++		sd_desc->probe_error(gspca_dev);
+ 	kfree(gspca_dev->usb_buf);
+ 	kfree(gspca_dev);
+ 	return ret;
+diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
+index b0ced2e140064..a6554d5e9e1a5 100644
+--- a/drivers/media/usb/gspca/gspca.h
++++ b/drivers/media/usb/gspca/gspca.h
+@@ -105,6 +105,7 @@ struct sd_desc {
+ 	cam_cf_op config;	/* called on probe */
+ 	cam_op init;		/* called on probe and resume */
+ 	cam_op init_controls;	/* called on probe */
++	cam_v_op probe_error;	/* called if probe failed, do cleanup here */
+ 	cam_op start;		/* called on stream on after URBs creation */
+ 	cam_pkt_op pkt_scan;
+ /* optional operations */
+diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
+index 97799cfb832e3..9491110709718 100644
+--- a/drivers/media/usb/gspca/sq905.c
++++ b/drivers/media/usb/gspca/sq905.c
+@@ -158,7 +158,7 @@ static int
+ sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
+ {
+ 	int ret;
+-	int act_len;
++	int act_len = 0;
+ 
+ 	gspca_dev->usb_buf[0] = '\0';
+ 	if (need_lock)
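usb_bulk_msg() can fail before it ever writes the transferred-length output parameter, and the code following this hunk compares act_len against the expected size, so without the initializer the error path read an indeterminate stack value. A userspace sketch of the pattern (demo_transfer() is a made-up stand-in):

#include <stdio.h>

/* Stand-in for a transfer call that only writes *actual_len on
 * success, like usb_bulk_msg() failing early. */
static int demo_transfer(int fail, int *actual_len)
{
	if (fail)
		return -5; /* -EIO: *actual_len left untouched */
	*actual_len = 64;
	return 0;
}

int main(void)
{
	int act_len = 0; /* initialise: the error check below still reads it */
	int ret = demo_transfer(1, &act_len);

	if (ret < 0 || act_len != 64)
		printf("short/failed transfer (ret=%d, act_len=%d)\n", ret, act_len);
	return 0;
}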
+diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
+index 95673fc0a99c5..d9bc2aacc8851 100644
+--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
++++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
+@@ -529,12 +529,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
+ static int stv06xx_config(struct gspca_dev *gspca_dev,
+ 			  const struct usb_device_id *id);
+ 
++static void stv06xx_probe_error(struct gspca_dev *gspca_dev)
++{
++	struct sd *sd = (struct sd *)gspca_dev;
++
++	kfree(sd->sensor_priv);
++	sd->sensor_priv = NULL;
++}
++
+ /* sub-driver description */
+ static const struct sd_desc sd_desc = {
+ 	.name = MODULE_NAME,
+ 	.config = stv06xx_config,
+ 	.init = stv06xx_init,
+ 	.init_controls = stv06xx_init_controls,
++	.probe_error = stv06xx_probe_error,
+ 	.start = stv06xx_start,
+ 	.stopN = stv06xx_stopN,
+ 	.pkt_scan = stv06xx_pkt_scan,
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 30ef2a3110f72..9a791d8ef200d 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1712,10 +1712,35 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
+ 			if (forward->bNrInPins != 1) {
+ 				uvc_dbg(chain->dev, DESCR,
+ 					"Extension unit %d has more than 1 input pin\n",
+-					entity->id);
++					forward->id);
+ 				return -EINVAL;
+ 			}
+ 
++			/*
++			 * Some devices reference an output terminal as the
++			 * source of extension units. This is incorrect, as
++			 * output terminals only have an input pin, and thus
++			 * can't be connected to any entity in the forward
++			 * direction. The resulting topology would cause issues
++			 * when registering the media controller graph. To
++			 * avoid this problem, connect the extension unit to
++			 * the source of the output terminal instead.
++			 */
++			if (UVC_ENTITY_IS_OTERM(entity)) {
++				struct uvc_entity *source;
++
++				source = uvc_entity_by_id(chain->dev,
++							  entity->baSourceID[0]);
++				if (!source) {
++					uvc_dbg(chain->dev, DESCR,
++						"Can't connect extension unit %u in chain\n",
++						forward->id);
++					break;
++				}
++
++				forward->baSourceID[0] = source->id;
++			}
++
+ 			list_add_tail(&forward->chain, &chain->entities);
+ 			if (!found)
+ 				uvc_dbg_cont(PROBE, " (->");
+@@ -1735,6 +1760,13 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
+ 				return -EINVAL;
+ 			}
+ 
++			if (UVC_ENTITY_IS_OTERM(entity)) {
++				uvc_dbg(chain->dev, DESCR,
++					"Unsupported connection between output terminals %u and %u\n",
++					entity->id, forward->id);
++				break;
++			}
++
+ 			list_add_tail(&forward->chain, &chain->entities);
+ 			if (!found)
+ 				uvc_dbg_cont(PROBE, " (->");
+diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
+index d29b861367ea7..1ef611e083237 100644
+--- a/drivers/media/usb/zr364xx/zr364xx.c
++++ b/drivers/media/usb/zr364xx/zr364xx.c
+@@ -1430,7 +1430,7 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	if (hdl->error) {
+ 		err = hdl->error;
+ 		dev_err(&udev->dev, "couldn't register control\n");
+-		goto unregister;
++		goto free_hdlr_and_unreg_dev;
+ 	}
+ 	/* save the init method used by this camera */
+ 	cam->method = id->driver_info;
+@@ -1503,7 +1503,7 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	if (!cam->read_endpoint) {
+ 		err = -ENOMEM;
+ 		dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
+-		goto unregister;
++		goto free_hdlr_and_unreg_dev;
+ 	}
+ 
+ 	/* v4l */
+@@ -1515,7 +1515,7 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	/* load zr364xx board specific */
+ 	err = zr364xx_board_init(cam);
+ 	if (err)
+-		goto unregister;
++		goto free_hdlr_and_unreg_dev;
+ 	err = v4l2_ctrl_handler_setup(hdl);
+ 	if (err)
+ 		goto board_uninit;
+@@ -1533,7 +1533,7 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
+ 	if (err) {
+ 		dev_err(&udev->dev, "video_register_device failed\n");
+-		goto free_handler;
++		goto board_uninit;
+ 	}
+ 	cam->v4l2_dev.release = zr364xx_release;
+ 
+@@ -1541,11 +1541,10 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 		 video_device_node_name(&cam->vdev));
+ 	return 0;
+ 
+-free_handler:
+-	v4l2_ctrl_handler_free(hdl);
+ board_uninit:
+ 	zr364xx_board_uninit(cam);
+-unregister:
++free_hdlr_and_unreg_dev:
++	v4l2_ctrl_handler_free(hdl);
+ 	v4l2_device_unregister(&cam->v4l2_dev);
+ free_cam:
+ 	kfree(cam);
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
+index 016cf6204cbb8..4f0209695f132 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
+@@ -1675,6 +1675,8 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ 		p_fwht_params->version = V4L2_FWHT_VERSION;
+ 		p_fwht_params->width = 1280;
+ 		p_fwht_params->height = 720;
++		p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
++			(2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
+ 		break;
+ 	}
+ }
+@@ -2395,7 +2397,16 @@ static void new_to_req(struct v4l2_ctrl_ref *ref)
+ 	if (!ref)
+ 		return;
+ 	ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
+-	ref->req = ref;
++	ref->valid_p_req = true;
++}
++
++/* Copy the current value to the request value */
++static void cur_to_req(struct v4l2_ctrl_ref *ref)
++{
++	if (!ref)
++		return;
++	ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
++	ref->valid_p_req = true;
+ }
+ 
+ /* Copy the request value to the new value */
+@@ -2403,8 +2414,8 @@ static void req_to_new(struct v4l2_ctrl_ref *ref)
+ {
+ 	if (!ref)
+ 		return;
+-	if (ref->req)
+-		ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
++	if (ref->valid_p_req)
++		ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
+ 	else
+ 		ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
+ }
+@@ -3571,39 +3582,8 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
+ 	struct v4l2_ctrl_handler *hdl =
+ 		container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ 	struct v4l2_ctrl_handler *main_hdl = obj->priv;
+-	struct v4l2_ctrl_handler *prev_hdl = NULL;
+-	struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
+ 
+ 	mutex_lock(main_hdl->lock);
+-	if (list_empty(&main_hdl->requests_queued))
+-		goto queue;
+-
+-	prev_hdl = list_last_entry(&main_hdl->requests_queued,
+-				   struct v4l2_ctrl_handler, requests_queued);
+-	/*
+-	 * Note: prev_hdl and hdl must contain the same list of control
+-	 * references, so if any differences are detected then that is a
+-	 * driver bug and the WARN_ON is triggered.
+-	 */
+-	mutex_lock(prev_hdl->lock);
+-	ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
+-					 struct v4l2_ctrl_ref, node);
+-	list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
+-		if (ref_ctrl->req)
+-			continue;
+-		while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
+-			/* Should never happen, but just in case... */
+-			if (list_is_last(&ref_ctrl_prev->node,
+-					 &prev_hdl->ctrl_refs))
+-				break;
+-			ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
+-		}
+-		if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
+-			break;
+-		ref_ctrl->req = ref_ctrl_prev->req;
+-	}
+-	mutex_unlock(prev_hdl->lock);
+-queue:
+ 	list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
+ 	hdl->request_is_queued = true;
+ 	mutex_unlock(main_hdl->lock);
+@@ -3660,7 +3640,7 @@ v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+ {
+ 	struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+ 
+-	return (ref && ref->req == ref) ? ref->ctrl : NULL;
++	return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
+ 
+@@ -3846,7 +3826,13 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
+ 	return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
+ }
+ 
+-/* Get extended controls. Allocates the helpers array if needed. */
++/*
++ * Get extended controls. Allocates the helpers array if needed.
++ *
++ * Note that v4l2_g_ext_ctrls_common() with 'which' set to
++ * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
++ * completed, and in that case valid_p_req is true for all controls.
++ */
+ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ 				   struct v4l2_ext_controls *cs,
+ 				   struct video_device *vdev)
+@@ -3855,9 +3841,10 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ 	struct v4l2_ctrl_helper *helpers = helper;
+ 	int ret;
+ 	int i, j;
+-	bool def_value;
++	bool is_default, is_request;
+ 
+-	def_value = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
++	is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
++	is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
+ 
+ 	cs->error_idx = cs->count;
+ 	cs->which = V4L2_CTRL_ID2WHICH(cs->which);
+@@ -3883,11 +3870,9 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ 			ret = -EACCES;
+ 
+ 	for (i = 0; !ret && i < cs->count; i++) {
+-		int (*ctrl_to_user)(struct v4l2_ext_control *c,
+-				    struct v4l2_ctrl *ctrl);
+ 		struct v4l2_ctrl *master;
+-
+-		ctrl_to_user = def_value ? def_to_user : cur_to_user;
++		bool is_volatile = false;
++		u32 idx = i;
+ 
+ 		if (helpers[i].mref == NULL)
+ 			continue;
+@@ -3897,31 +3882,48 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ 
+ 		v4l2_ctrl_lock(master);
+ 
+-		/* g_volatile_ctrl will update the new control values */
+-		if (!def_value &&
++		/*
++		 * g_volatile_ctrl will update the new control values.
++		 * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
++		 * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
++		 * it is v4l2_ctrl_request_complete() that copies the
++		 * volatile controls at the time of request completion
++		 * to the request, so you don't want to do that again.
++		 */
++		if (!is_default && !is_request &&
+ 		    ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
+ 		    (master->has_volatiles && !is_cur_manual(master)))) {
+ 			for (j = 0; j < master->ncontrols; j++)
+ 				cur_to_new(master->cluster[j]);
+ 			ret = call_op(master, g_volatile_ctrl);
+-			ctrl_to_user = new_to_user;
++			is_volatile = true;
+ 		}
+-		/* If OK, then copy the current (for non-volatile controls)
+-		   or the new (for volatile controls) control values to the
+-		   caller */
+-		if (!ret) {
+-			u32 idx = i;
+ 
+-			do {
+-				if (helpers[idx].ref->req)
+-					ret = req_to_user(cs->controls + idx,
+-						helpers[idx].ref->req);
+-				else
+-					ret = ctrl_to_user(cs->controls + idx,
+-						helpers[idx].ref->ctrl);
+-				idx = helpers[idx].next;
+-			} while (!ret && idx);
++		if (ret) {
++			v4l2_ctrl_unlock(master);
++			break;
+ 		}
++
++		/*
++		 * Copy the default value (if is_default is true), the
++		 * request value (if is_request is true and p_req is valid),
++		 * the new volatile value (if is_volatile is true) or the
++		 * current value.
++		 */
++		do {
++			struct v4l2_ctrl_ref *ref = helpers[idx].ref;
++
++			if (is_default)
++				ret = def_to_user(cs->controls + idx, ref->ctrl);
++			else if (is_request && ref->valid_p_req)
++				ret = req_to_user(cs->controls + idx, ref);
++			else if (is_volatile)
++				ret = new_to_user(cs->controls + idx, ref->ctrl);
++			else
++				ret = cur_to_user(cs->controls + idx, ref->ctrl);
++			idx = helpers[idx].next;
++		} while (!ret && idx);
++
+ 		v4l2_ctrl_unlock(master);
+ 	}
+ 
+@@ -4564,8 +4566,6 @@ void v4l2_ctrl_request_complete(struct media_request *req,
+ 		unsigned int i;
+ 
+ 		if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+-			ref->req = ref;
+-
+ 			v4l2_ctrl_lock(master);
+ 			/* g_volatile_ctrl will update the current control values */
+ 			for (i = 0; i < master->ncontrols; i++)
+@@ -4575,21 +4575,12 @@ void v4l2_ctrl_request_complete(struct media_request *req,
+ 			v4l2_ctrl_unlock(master);
+ 			continue;
+ 		}
+-		if (ref->req == ref)
++		if (ref->valid_p_req)
+ 			continue;
+ 
++		/* Copy the current control value into the request */
+ 		v4l2_ctrl_lock(ctrl);
+-		if (ref->req) {
+-			ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
+-		} else {
+-			ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
+-			/*
+-			 * Set ref->req to ensure that when userspace wants to
+-			 * obtain the controls of this request it will take
+-			 * this value and not the current value of the control.
+-			 */
+-			ref->req = ref;
+-		}
++		cur_to_req(ref);
+ 		v4l2_ctrl_unlock(ctrl);
+ 	}
+ 
+@@ -4653,7 +4644,7 @@ int v4l2_ctrl_request_setup(struct media_request *req,
+ 				struct v4l2_ctrl_ref *r =
+ 					find_ref(hdl, master->cluster[i]->id);
+ 
+-				if (r->req && r == r->req) {
++				if (r->valid_p_req) {
+ 					have_new_data = true;
+ 					break;
+ 				}
+diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
+index 077d9ab112b71..d919ae9691e23 100644
+--- a/drivers/mfd/arizona-irq.c
++++ b/drivers/mfd/arizona-irq.c
+@@ -100,7 +100,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
+ 	unsigned int val;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(arizona->dev);
++	ret = pm_runtime_resume_and_get(arizona->dev);
+ 	if (ret < 0) {
+ 		dev_err(arizona->dev, "Failed to resume device: %d\n", ret);
+ 		return IRQ_NONE;
+diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
+index 3781d0bb77865..783a14af18e26 100644
+--- a/drivers/mfd/da9063-i2c.c
++++ b/drivers/mfd/da9063-i2c.c
+@@ -442,6 +442,16 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
+ 		return ret;
+ 	}
+ 
++	/* If SMBus is not available and only I2C is possible, enter I2C mode */
++	if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) {
++		ret = regmap_clear_bits(da9063->regmap, DA9063_REG_CONFIG_J,
++					DA9063_TWOWIRE_TO);
++		if (ret < 0) {
++			dev_err(da9063->dev, "Failed to set Two-Wire Bus Mode.\n");
++			return -EIO;
++		}
++	}
++
+ 	return da9063_device_init(da9063, i2c->irq);
+ }
+ 
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index d666e24fbe0e0..a4c06ef673943 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -572,6 +572,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 		main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
+ 	}
+ 
++	/*
++	 * Make sure to update CACHE_CTRL in case it was changed. The cache
++	 * will get turned back on if the card is re-initialized, e.g.
++	 * suspend/resume or hw reset in recovery.
++	 */
++	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
++	    (cmd.opcode == MMC_SWITCH)) {
++		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
++
++		card->ext_csd.cache_ctrl = value;
++	}
++
+ 	/*
+ 	 * According to the SD specs, some commands require a delay after
+ 	 * issuing the command.
+@@ -2224,6 +2236,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
+ 	case MMC_ISSUE_ASYNC:
+ 		switch (req_op(req)) {
+ 		case REQ_OP_FLUSH:
++			if (!mmc_cache_enabled(host)) {
++				blk_mq_end_request(req, BLK_STS_OK);
++				return MMC_REQ_FINISHED;
++			}
+ 			ret = mmc_blk_cqe_issue_flush(mq, req);
+ 			break;
+ 		case REQ_OP_READ:
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 1136b859ddd86..e30c4e88e4043 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -1207,7 +1207,7 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
+ 
+ 	err = mmc_wait_for_cmd(host, &cmd, 0);
+ 	if (err)
+-		return err;
++		goto power_cycle;
+ 
+ 	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
+ 		return -EIO;
+@@ -2369,80 +2369,6 @@ void mmc_stop_host(struct mmc_host *host)
+ 	mmc_release_host(host);
+ }
+ 
+-#ifdef CONFIG_PM_SLEEP
+-/* Do the card removal on suspend if card is assumed removeable
+- * Do that in pm notifier while userspace isn't yet frozen, so we will be able
+-   to sync the card.
+-*/
+-static int mmc_pm_notify(struct notifier_block *notify_block,
+-			unsigned long mode, void *unused)
+-{
+-	struct mmc_host *host = container_of(
+-		notify_block, struct mmc_host, pm_notify);
+-	unsigned long flags;
+-	int err = 0;
+-
+-	switch (mode) {
+-	case PM_HIBERNATION_PREPARE:
+-	case PM_SUSPEND_PREPARE:
+-	case PM_RESTORE_PREPARE:
+-		spin_lock_irqsave(&host->lock, flags);
+-		host->rescan_disable = 1;
+-		spin_unlock_irqrestore(&host->lock, flags);
+-		cancel_delayed_work_sync(&host->detect);
+-
+-		if (!host->bus_ops)
+-			break;
+-
+-		/* Validate prerequisites for suspend */
+-		if (host->bus_ops->pre_suspend)
+-			err = host->bus_ops->pre_suspend(host);
+-		if (!err)
+-			break;
+-
+-		if (!mmc_card_is_removable(host)) {
+-			dev_warn(mmc_dev(host),
+-				 "pre_suspend failed for non-removable host: "
+-				 "%d\n", err);
+-			/* Avoid removing non-removable hosts */
+-			break;
+-		}
+-
+-		/* Calling bus_ops->remove() with a claimed host can deadlock */
+-		host->bus_ops->remove(host);
+-		mmc_claim_host(host);
+-		mmc_detach_bus(host);
+-		mmc_power_off(host);
+-		mmc_release_host(host);
+-		host->pm_flags = 0;
+-		break;
+-
+-	case PM_POST_SUSPEND:
+-	case PM_POST_HIBERNATION:
+-	case PM_POST_RESTORE:
+-
+-		spin_lock_irqsave(&host->lock, flags);
+-		host->rescan_disable = 0;
+-		spin_unlock_irqrestore(&host->lock, flags);
+-		_mmc_detect_change(host, 0, false);
+-
+-	}
+-
+-	return 0;
+-}
+-
+-void mmc_register_pm_notifier(struct mmc_host *host)
+-{
+-	host->pm_notify.notifier_call = mmc_pm_notify;
+-	register_pm_notifier(&host->pm_notify);
+-}
+-
+-void mmc_unregister_pm_notifier(struct mmc_host *host)
+-{
+-	unregister_pm_notifier(&host->pm_notify);
+-}
+-#endif
+-
+ static int __init mmc_init(void)
+ {
+ 	int ret;
+diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
+index 575ac0257af2f..db3c9c68875d8 100644
+--- a/drivers/mmc/core/core.h
++++ b/drivers/mmc/core/core.h
+@@ -29,6 +29,7 @@ struct mmc_bus_ops {
+ 	int (*shutdown)(struct mmc_host *);
+ 	int (*hw_reset)(struct mmc_host *);
+ 	int (*sw_reset)(struct mmc_host *);
++	bool (*cache_enabled)(struct mmc_host *);
+ };
+ 
+ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
+@@ -93,14 +94,6 @@ int mmc_execute_tuning(struct mmc_card *card);
+ int mmc_hs200_to_hs400(struct mmc_card *card);
+ int mmc_hs400_to_hs200(struct mmc_card *card);
+ 
+-#ifdef CONFIG_PM_SLEEP
+-void mmc_register_pm_notifier(struct mmc_host *host);
+-void mmc_unregister_pm_notifier(struct mmc_host *host);
+-#else
+-static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
+-static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
+-#endif
+-
+ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
+ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
+ 
+@@ -171,4 +164,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ 		host->ops->post_req(host, mrq, err);
+ }
+ 
++static inline bool mmc_cache_enabled(struct mmc_host *host)
++{
++	if (host->bus_ops->cache_enabled)
++		return host->bus_ops->cache_enabled(host);
++
++	return false;
++}
++
+ #endif
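The core.h hunk is the heart of the cache series: an optional cache_enabled bus op plus an inline accessor that supplies a safe default when a bus type (e.g. SDIO) does not implement the hook. The shape of that pattern in isolation, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

struct widget;

struct widget_ops {
        /* Optional; bus types without a cache leave this NULL. */
        bool (*cache_enabled)(struct widget *w);
};

struct widget {
        const struct widget_ops *ops;
};

/* One accessor centralizes the NULL check and the default policy,
 * so callers never dereference a missing op. */
static inline bool widget_cache_enabled(struct widget *w)
{
        if (w->ops && w->ops->cache_enabled)
                return w->ops->cache_enabled(w);

        return false;
}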
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index 9b89a91b6b476..fe05b3645fe98 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -35,6 +35,42 @@
+ 
+ static DEFINE_IDA(mmc_host_ida);
+ 
++#ifdef CONFIG_PM_SLEEP
++static int mmc_host_class_prepare(struct device *dev)
++{
++	struct mmc_host *host = cls_dev_to_mmc_host(dev);
++
++	/*
++	 * It's safe to access the bus_ops pointer, as both userspace and the
++	 * workqueue for detecting cards are frozen at this point.
++	 */
++	if (!host->bus_ops)
++		return 0;
++
++	/* Validate conditions for system suspend. */
++	if (host->bus_ops->pre_suspend)
++		return host->bus_ops->pre_suspend(host);
++
++	return 0;
++}
++
++static void mmc_host_class_complete(struct device *dev)
++{
++	struct mmc_host *host = cls_dev_to_mmc_host(dev);
++
++	_mmc_detect_change(host, 0, false);
++}
++
++static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
++	.prepare = mmc_host_class_prepare,
++	.complete = mmc_host_class_complete,
++};
++
++#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
++#else
++#define MMC_HOST_CLASS_DEV_PM_OPS NULL
++#endif
++
+ static void mmc_host_classdev_release(struct device *dev)
+ {
+ 	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+@@ -46,6 +82,7 @@ static void mmc_host_classdev_release(struct device *dev)
+ static struct class mmc_host_class = {
+ 	.name		= "mmc_host",
+ 	.dev_release	= mmc_host_classdev_release,
++	.pm		= MMC_HOST_CLASS_DEV_PM_OPS,
+ };
+ 
+ int mmc_register_host_class(void)
+@@ -538,8 +575,6 @@ int mmc_add_host(struct mmc_host *host)
+ #endif
+ 
+ 	mmc_start_host(host);
+-	mmc_register_pm_notifier(host);
+-
+ 	return 0;
+ }
+ 
+@@ -555,7 +590,6 @@ EXPORT_SYMBOL(mmc_add_host);
+  */
+ void mmc_remove_host(struct mmc_host *host)
+ {
+-	mmc_unregister_pm_notifier(host);
+ 	mmc_stop_host(host);
+ 
+ #ifdef CONFIG_DEBUG_FS
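host.c replaces the hand-rolled PM notifier with .prepare/.complete callbacks hung off the device class, letting the PM core order them against the rest of the suspend sequence instead of relying on notifier timing. A stripped-down sketch of the wiring, with hypothetical foo names:

#include <linux/device.h>
#include <linux/pm.h>

static int foo_class_prepare(struct device *dev)
{
        /* Runs before the suspend callbacks; a nonzero return
         * aborts the system-wide transition. */
        return 0;
}

static void foo_class_complete(struct device *dev)
{
        /* Runs after resume; a good place to re-arm detection. */
}

static const struct dev_pm_ops foo_class_pm_ops = {
        .prepare  = foo_class_prepare,
        .complete = foo_class_complete,
};

static struct class foo_class = {
        .name = "foo",
        .pm   = &foo_class_pm_ops,
};

Every device registered in the class inherits these callbacks, which is exactly what the old notifier emulated by hand.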
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 8741271d39712..4d2b4b0da93cd 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -2029,6 +2029,12 @@ static void mmc_detect(struct mmc_host *host)
+ 	}
+ }
+ 
++static bool _mmc_cache_enabled(struct mmc_host *host)
++{
++	return host->card->ext_csd.cache_size > 0 &&
++	       host->card->ext_csd.cache_ctrl & 1;
++}
++
+ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
+ {
+ 	int err = 0;
+@@ -2208,6 +2214,7 @@ static const struct mmc_bus_ops mmc_ops = {
+ 	.alive = mmc_alive,
+ 	.shutdown = mmc_shutdown,
+ 	.hw_reset = _mmc_hw_reset,
++	.cache_enabled = _mmc_cache_enabled,
+ };
+ 
+ /*
+diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
+index 265d95ec82ce1..c458f6b626a2f 100644
+--- a/drivers/mmc/core/mmc_ops.c
++++ b/drivers/mmc/core/mmc_ops.c
+@@ -988,9 +988,7 @@ int mmc_flush_cache(struct mmc_card *card)
+ {
+ 	int err = 0;
+ 
+-	if (mmc_card_mmc(card) &&
+-			(card->ext_csd.cache_size > 0) &&
+-			(card->ext_csd.cache_ctrl & 1)) {
++	if (mmc_cache_enabled(card->host)) {
+ 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ 				 EXT_CSD_FLUSH_CACHE, 1,
+ 				 MMC_CACHE_FLUSH_TIMEOUT_MS);
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 6fa51a6ed0585..2c48d65041013 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -135,6 +135,9 @@ static int mmc_decode_csd(struct mmc_card *card)
+ 			csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
+ 			csd->erase_size <<= csd->write_blkbits - 9;
+ 		}
++
++		if (UNSTUFF_BITS(resp, 13, 1))
++			mmc_card_set_readonly(card);
+ 		break;
+ 	case 1:
+ 		/*
+@@ -169,6 +172,9 @@ static int mmc_decode_csd(struct mmc_card *card)
+ 		csd->write_blkbits = 9;
+ 		csd->write_partial = 0;
+ 		csd->erase_size = 1;
++
++		if (UNSTUFF_BITS(resp, 13, 1))
++			mmc_card_set_readonly(card);
+ 		break;
+ 	default:
+ 		pr_err("%s: unrecognised CSD structure version %d\n",
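Both sd.c hunks test bit 13 of the 128-bit CSD (the permanent write-protect flag) via UNSTUFF_BITS(). A function form of that extraction, assuming the kernel's word order (resp[0] holds the most significant 32 bits):

#include <stdint.h>

/* Extract "size" bits starting at bit "start" from a 128-bit CSD
 * held as four 32-bit words, resp[0] most significant. */
static uint32_t unstuff_bits(const uint32_t resp[4], int start, int size)
{
        uint32_t mask = (size < 32 ? 1u << size : 0u) - 1;
        int off = 3 - start / 32;
        int shft = start & 31;
        uint32_t res = resp[off] >> shft;

        if (size + shft > 32)
                res |= resp[off - 1] << ((32 - shft) % 32);

        return res & mask;
}

/* Usage: nonzero means the card is permanently write-protected. */
/*   int perm_wp = unstuff_bits(resp, 13, 1); */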
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 0fda7784cab20..3eb94ac2712e7 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -985,21 +985,37 @@ out:
+  */
+ static int mmc_sdio_pre_suspend(struct mmc_host *host)
+ {
+-	int i, err = 0;
++	int i;
+ 
+ 	for (i = 0; i < host->card->sdio_funcs; i++) {
+ 		struct sdio_func *func = host->card->sdio_func[i];
+ 		if (func && sdio_func_present(func) && func->dev.driver) {
+ 			const struct dev_pm_ops *pmops = func->dev.driver->pm;
+-			if (!pmops || !pmops->suspend || !pmops->resume) {
++			if (!pmops || !pmops->suspend || !pmops->resume)
+ 				/* force removal of entire card in that case */
+-				err = -ENOSYS;
+-				break;
+-			}
++				goto remove;
+ 		}
+ 	}
+ 
+-	return err;
++	return 0;
++
++remove:
++	if (!mmc_card_is_removable(host)) {
++		dev_warn(mmc_dev(host),
++			 "missing suspend/resume ops for non-removable SDIO card\n");
++		/* Don't remove a non-removable card - we can't re-detect it. */
++		return 0;
++	}
++
++	/* Remove the SDIO card and let it be re-detected later on. */
++	mmc_sdio_remove(host);
++	mmc_claim_host(host);
++	mmc_detach_bus(host);
++	mmc_power_off(host);
++	mmc_release_host(host);
++	host->pm_flags = 0;
++
++	return 0;
+ }
+ 
+ /*
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index f9780c65ebe98..f24623aac2dbe 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -199,7 +199,6 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host,
+ 	if (dma64) {
+ 		dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
+ 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+-		cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ 	}
+ 
+ 	ret = cqhci_init(cq_host, host->mmc, dma64);
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index a20459744d213..94327988da914 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1488,7 +1488,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ 
+ 	mmc_of_parse_voltage(np, &host->ocr_mask);
+ 
+-	if (esdhc_is_usdhc(imx_data)) {
++	if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pinctrl)) {
+ 		imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ 						ESDHC_PINCTRL_STATE_100MHZ);
+ 		imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 9552708846ca3..bf04a08eeba13 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -516,6 +516,7 @@ struct intel_host {
+ 	int	drv_strength;
+ 	bool	d3_retune;
+ 	bool	rpm_retune_ok;
++	bool	needs_pwr_off;
+ 	u32	glk_rx_ctrl1;
+ 	u32	glk_tun_val;
+ 	u32	active_ltr;
+@@ -643,9 +644,25 @@ out:
+ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
+ 				  unsigned short vdd)
+ {
++	struct sdhci_pci_slot *slot = sdhci_priv(host);
++	struct intel_host *intel_host = sdhci_pci_priv(slot);
+ 	int cntr;
+ 	u8 reg;
+ 
++	/*
++	 * Bus power may control card power, but a full reset still may not
++	 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
++	 * That might be needed to initialize correctly, if the card was left
++	 * powered on previously.
++	 */
++	if (intel_host->needs_pwr_off) {
++		intel_host->needs_pwr_off = false;
++		if (mode != MMC_POWER_OFF) {
++			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
++			usleep_range(10000, 12500);
++		}
++	}
++
+ 	sdhci_set_power(host, mode, vdd);
+ 
+ 	if (mode == MMC_POWER_OFF)
+@@ -1135,6 +1152,14 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+ 	return 0;
+ }
+ 
++static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
++{
++	struct intel_host *intel_host = sdhci_pci_priv(slot);
++	u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
++
++	intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
++}
++
+ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	byt_probe_slot(slot);
+@@ -1152,6 +1177,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+ 	    slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
+ 		slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
+ 
++	byt_needs_pwr_off(slot);
++
+ 	return 0;
+ }
+ 
+@@ -1903,6 +1930,8 @@ static const struct pci_device_id pci_ids[] = {
+ 	SDHCI_PCI_DEVICE(INTEL, CMLH_SD,   intel_byt_sd),
+ 	SDHCI_PCI_DEVICE(INTEL, JSL_EMMC,  intel_glk_emmc),
+ 	SDHCI_PCI_DEVICE(INTEL, JSL_SD,    intel_byt_sd),
++	SDHCI_PCI_DEVICE(INTEL, LKF_EMMC,  intel_glk_emmc),
++	SDHCI_PCI_DEVICE(INTEL, LKF_SD,    intel_byt_sd),
+ 	SDHCI_PCI_DEVICE(O2, 8120,     o2),
+ 	SDHCI_PCI_DEVICE(O2, 8220,     o2),
+ 	SDHCI_PCI_DEVICE(O2, 8221,     o2),
+diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
+index d0ed232af0eb8..8f90c4163bb5c 100644
+--- a/drivers/mmc/host/sdhci-pci.h
++++ b/drivers/mmc/host/sdhci-pci.h
+@@ -57,6 +57,8 @@
+ #define PCI_DEVICE_ID_INTEL_CMLH_SD	0x06f5
+ #define PCI_DEVICE_ID_INTEL_JSL_EMMC	0x4dc4
+ #define PCI_DEVICE_ID_INTEL_JSL_SD	0x4df8
++#define PCI_DEVICE_ID_INTEL_LKF_EMMC	0x98c4
++#define PCI_DEVICE_ID_INTEL_LKF_SD	0x98f8
+ 
+ #define PCI_DEVICE_ID_SYSKONNECT_8000	0x8000
+ #define PCI_DEVICE_ID_VIA_95D0		0x95d0
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
+index 41d193fa77bbf..8ea9132ebca4e 100644
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -119,6 +119,10 @@
+ /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
+ #define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
+ 
++#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
++					 SDHCI_TRNS_BLK_CNT_EN | \
++					 SDHCI_TRNS_DMA)
++
+ struct sdhci_tegra_soc_data {
+ 	const struct sdhci_pltfm_data *pdata;
+ 	u64 dma_mask;
+@@ -1156,6 +1160,7 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
+ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
+ {
+ 	struct mmc_host *mmc = cq_host->mmc;
++	struct sdhci_host *host = mmc_priv(mmc);
+ 	u8 ctrl;
+ 	ktime_t timeout;
+ 	bool timed_out;
+@@ -1170,6 +1175,7 @@ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
+ 	 */
+ 	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
+ 	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
++		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
+ 		sdhci_cqe_enable(mmc);
+ 		writel(val, cq_host->mmio + reg);
+ 		timeout = ktime_add_us(ktime_get(), 50);
+@@ -1205,6 +1211,7 @@ static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
+ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
+ {
+ 	struct cqhci_host *cq_host = mmc->cqe_private;
++	struct sdhci_host *host = mmc_priv(mmc);
+ 	u32 val;
+ 
+ 	/*
+@@ -1218,6 +1225,7 @@ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
+ 		if (val & CQHCI_ENABLE)
+ 			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
+ 				     CQHCI_CFG);
++		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
+ 		sdhci_cqe_enable(mmc);
+ 		if (val & CQHCI_ENABLE)
+ 			cqhci_writel(cq_host, val, CQHCI_CFG);
+@@ -1281,12 +1289,36 @@ static void tegra_sdhci_set_timeout(struct sdhci_host *host,
+ 	__sdhci_set_timeout(host, cmd);
+ }
+ 
++static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
++{
++	struct cqhci_host *cq_host = mmc->cqe_private;
++	u32 reg;
++
++	reg = cqhci_readl(cq_host, CQHCI_CFG);
++	reg |= CQHCI_ENABLE;
++	cqhci_writel(cq_host, reg, CQHCI_CFG);
++}
++
++static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
++{
++	struct cqhci_host *cq_host = mmc->cqe_private;
++	struct sdhci_host *host = mmc_priv(mmc);
++	u32 reg;
++
++	reg = cqhci_readl(cq_host, CQHCI_CFG);
++	reg &= ~CQHCI_ENABLE;
++	cqhci_writel(cq_host, reg, CQHCI_CFG);
++	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
++}
++
+ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
+ 	.write_l    = tegra_cqhci_writel,
+ 	.enable	= sdhci_tegra_cqe_enable,
+ 	.disable = sdhci_cqe_disable,
+ 	.dumpregs = sdhci_tegra_dumpregs,
+ 	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
++	.pre_enable = sdhci_tegra_cqe_pre_enable,
++	.post_disable = sdhci_tegra_cqe_post_disable,
+ };
+ 
+ static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 2d73407ee52ec..a9e20818ff3a3 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2996,6 +2996,37 @@ static bool sdhci_request_done(struct sdhci_host *host)
+ 		return true;
+ 	}
+ 
++	/*
++	 * The controller needs a reset of internal state machines
++	 * upon error conditions.
++	 */
++	if (sdhci_needs_reset(host, mrq)) {
++		/*
++		 * Do not finish until command and data lines are available for
++		 * reset. Note there can only be one other mrq, so it cannot
++		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
++		 * would both be null.
++		 */
++		if (host->cmd || host->data_cmd) {
++			spin_unlock_irqrestore(&host->lock, flags);
++			return true;
++		}
++
++		/* Some controllers need this kick or reset won't work here */
++		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
++			/* This is to force an update */
++			host->ops->set_clock(host, host->clock);
++
++		/*
++		 * Spec says we should do both at the same time, but Ricoh
++		 * controllers do not like that.
++		 */
++		sdhci_do_reset(host, SDHCI_RESET_CMD);
++		sdhci_do_reset(host, SDHCI_RESET_DATA);
++
++		host->pending_reset = false;
++	}
++
+ 	/*
+ 	 * Always unmap the data buffers if they were mapped by
+ 	 * sdhci_prepare_data() whenever we finish with a request.
+@@ -3059,35 +3090,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
+ 		}
+ 	}
+ 
+-	/*
+-	 * The controller needs a reset of internal state machines
+-	 * upon error conditions.
+-	 */
+-	if (sdhci_needs_reset(host, mrq)) {
+-		/*
+-		 * Do not finish until command and data lines are available for
+-		 * reset. Note there can only be one other mrq, so it cannot
+-		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
+-		 * would both be null.
+-		 */
+-		if (host->cmd || host->data_cmd) {
+-			spin_unlock_irqrestore(&host->lock, flags);
+-			return true;
+-		}
+-
+-		/* Some controllers need this kick or reset won't work here */
+-		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
+-			/* This is to force an update */
+-			host->ops->set_clock(host, host->clock);
+-
+-		/* Spec says we should do both at the same time, but Ricoh
+-		   controllers do not like that. */
+-		sdhci_do_reset(host, SDHCI_RESET_CMD);
+-		sdhci_do_reset(host, SDHCI_RESET_DATA);
+-
+-		host->pending_reset = false;
+-	}
+-
+ 	host->mrqs_done[i] = NULL;
+ 
+ 	spin_unlock_irqrestore(&host->lock, flags);
+diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
+index 2413b6750cec3..ccbf9885a52be 100644
+--- a/drivers/mmc/host/uniphier-sd.c
++++ b/drivers/mmc/host/uniphier-sd.c
+@@ -635,7 +635,7 @@ static int uniphier_sd_probe(struct platform_device *pdev)
+ 
+ 	ret = tmio_mmc_host_probe(host);
+ 	if (ret)
+-		goto free_host;
++		goto disable_clk;
+ 
+ 	ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
+ 			       dev_name(dev), host);
+@@ -646,6 +646,8 @@ static int uniphier_sd_probe(struct platform_device *pdev)
+ 
+ remove_host:
+ 	tmio_mmc_host_remove(host);
++disable_clk:
++	uniphier_sd_clk_disable(host);
+ free_host:
+ 	tmio_mmc_host_free(host);
+ 
+@@ -658,6 +660,7 @@ static int uniphier_sd_remove(struct platform_device *pdev)
+ 
+ 	tmio_mmc_host_remove(host);
+ 	uniphier_sd_clk_disable(host);
++	tmio_mmc_host_free(host);
+ 
+ 	return 0;
+ }
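The uniphier-sd fix is a classic error-unwinding bug: the clock was enabled before tmio_mmc_host_probe(), but the failure path jumped past the disable. The rule the new disable_clk label restores: undo acquisitions in exact reverse order, one label per step. Schematically, with hypothetical acquire/release helpers:

int acquire_a(void);
int acquire_b(void);
int acquire_c(void);
void release_a(void);
void release_b(void);

static int probe_sketch(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                return ret;

        ret = acquire_b();
        if (ret)
                goto err_a;

        ret = acquire_c();
        if (ret)
                goto err_b;     /* unwinds B, then falls through to A */

        return 0;

err_b:
        release_b();
err_a:
        release_a();
        return ret;
}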
+diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
+index a35450002284c..58782cfaf71cf 100644
+--- a/drivers/mtd/maps/physmap-bt1-rom.c
++++ b/drivers/mtd/maps/physmap-bt1-rom.c
+@@ -79,7 +79,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map,
+ 	if (shift) {
+ 		chunk = min_t(ssize_t, 4 - shift, len);
+ 		data = readl_relaxed(src - shift);
+-		memcpy(to, &data + shift, chunk);
++		memcpy(to, (char *)&data + shift, chunk);
+ 		src += chunk;
+ 		to += chunk;
+ 		len -= chunk;
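The bt1-rom fix is pure pointer arithmetic: data is a u32, so &data + shift advances by shift * sizeof(u32) bytes and reads past the variable; casting to char * first makes shift a byte offset into the word. Demonstrated standalone:

#include <stdint.h>
#include <string.h>

/* Copy "chunk" bytes of a 32-bit word, starting "shift" bytes in
 * (shift is 1..3, chunk at most 4 - shift). */
static void copy_unaligned_head(void *to, uint32_t data,
                                int shift, size_t chunk)
{
        /* WRONG: &data + shift steps in whole uint32_t units,
         * i.e. 4 * shift bytes - out of bounds for shift >= 1. */
        /* memcpy(to, &data + shift, chunk); */

        /* RIGHT: byte-granular offset into the word. */
        memcpy(to, (const char *)&data + shift, chunk);
}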
+diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
+index e6ceec8f50dce..8aab1017b4600 100644
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -883,10 +883,12 @@ static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
+ 							  NULL, 0,
+ 							  chip->ecc.strength);
+ 
+-		if (ret >= 0)
++		if (ret >= 0) {
++			mtd->ecc_stats.corrected += ret;
+ 			max_bitflips = max(ret, max_bitflips);
+-		else
++		} else {
+ 			mtd->ecc_stats.failed++;
++		}
+ 
+ 		databuf += chip->ecc.size;
+ 		eccbuf += chip->ecc.bytes;
+diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
+index 61d932c1b7180..17f63f95f4a28 100644
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -1263,12 +1263,14 @@ static const struct spi_device_id spinand_ids[] = {
+ 	{ .name = "spi-nand" },
+ 	{ /* sentinel */ },
+ };
++MODULE_DEVICE_TABLE(spi, spinand_ids);
+ 
+ #ifdef CONFIG_OF
+ static const struct of_device_id spinand_of_ids[] = {
+ 	{ .compatible = "spi-nand" },
+ 	{ /* sentinel */ },
+ };
++MODULE_DEVICE_TABLE(of, spinand_of_ids);
+ #endif
+ 
+ static struct spi_mem_driver spinand_drv = {
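The spinand hunks only add MODULE_DEVICE_TABLE() invocations, but without them the built module exports no modalias strings, so udev never autoloads it when a matching SPI or DT device appears. The minimal shape for an OF-matched driver (compatible string hypothetical):

#include <linux/module.h>
#include <linux/mod_devicetable.h>

static const struct of_device_id foo_of_ids[] = {
        { .compatible = "vendor,foo" },        /* hypothetical */
        { /* sentinel */ },
};
/* Emits the alias list that depmod records and udev matches on
 * hotplug, enabling automatic module loading. */
MODULE_DEVICE_TABLE(of, foo_of_ids);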
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 0522304f52fad..72bc1342c3ff8 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -3301,6 +3301,37 @@ static void spi_nor_resume(struct mtd_info *mtd)
+ 		dev_err(dev, "resume() failed\n");
+ }
+ 
++static int spi_nor_get_device(struct mtd_info *mtd)
++{
++	struct mtd_info *master = mtd_get_master(mtd);
++	struct spi_nor *nor = mtd_to_spi_nor(master);
++	struct device *dev;
++
++	if (nor->spimem)
++		dev = nor->spimem->spi->controller->dev.parent;
++	else
++		dev = nor->dev;
++
++	if (!try_module_get(dev->driver->owner))
++		return -ENODEV;
++
++	return 0;
++}
++
++static void spi_nor_put_device(struct mtd_info *mtd)
++{
++	struct mtd_info *master = mtd_get_master(mtd);
++	struct spi_nor *nor = mtd_to_spi_nor(master);
++	struct device *dev;
++
++	if (nor->spimem)
++		dev = nor->spimem->spi->controller->dev.parent;
++	else
++		dev = nor->dev;
++
++	module_put(dev->driver->owner);
++}
++
+ void spi_nor_restore(struct spi_nor *nor)
+ {
+ 	/* restore the addressing mode */
+@@ -3495,6 +3526,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
+ 	mtd->_read = spi_nor_read;
+ 	mtd->_suspend = spi_nor_suspend;
+ 	mtd->_resume = spi_nor_resume;
++	mtd->_get_device = spi_nor_get_device;
++	mtd->_put_device = spi_nor_put_device;
+ 
+ 	if (nor->params->locking_ops) {
+ 		mtd->_lock = spi_nor_lock;
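The new _get_device/_put_device hooks pin the SPI controller driver's module while the flash is in use, so it cannot be unloaded underneath an open MTD. Reduced to its core, the pinning pattern looks roughly like this:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>

/* Pin the module backing dev's driver. try_module_get() fails if
 * the module is already being unloaded, hence the error return. */
static int pin_driver_module(struct device *dev)
{
        if (!try_module_get(dev->driver->owner))
                return -ENODEV;

        return 0;
}

/* Must balance every successful pin_driver_module(). */
static void unpin_driver_module(struct device *dev)
{
        module_put(dev->driver->owner);
}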
+diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
+index 9203abaac2297..662b212787d4d 100644
+--- a/drivers/mtd/spi-nor/macronix.c
++++ b/drivers/mtd/spi-nor/macronix.c
+@@ -73,9 +73,6 @@ static const struct flash_info macronix_parts[] = {
+ 			      SECT_4K | SPI_NOR_DUAL_READ |
+ 			      SPI_NOR_QUAD_READ) },
+ 	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+-	{ "mx25l51245g", INFO(0xc2201a, 0, 64 * 1024, 1024,
+-			      SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+-			      SPI_NOR_4B_OPCODES) },
+ 	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
+ 			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ 			      SPI_NOR_4B_OPCODES) },
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+index 7846a21555ef8..1f6bc0c7e91dd 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+@@ -535,6 +535,16 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ 	u16 erif_index = 0;
+ 	int err;
+ 
++	/* Add the eRIF */
++	if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
++		erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
++		err = mr->mr_ops->route_erif_add(mlxsw_sp,
++						 rve->mr_route->route_priv,
++						 erif_index);
++		if (err)
++			return err;
++	}
++
+ 	/* Update the route action, as the new eVIF can be a tunnel or a pimreg
+ 	 * device which will require updating the action.
+ 	 */
+@@ -544,17 +554,7 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ 						      rve->mr_route->route_priv,
+ 						      route_action);
+ 		if (err)
+-			return err;
+-	}
+-
+-	/* Add the eRIF */
+-	if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
+-		erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
+-		err = mr->mr_ops->route_erif_add(mlxsw_sp,
+-						 rve->mr_route->route_priv,
+-						 erif_index);
+-		if (err)
+-			goto err_route_erif_add;
++			goto err_route_action_update;
+ 	}
+ 
+ 	/* Update the minimum MTU */
+@@ -572,14 +572,14 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ 	return 0;
+ 
+ err_route_min_mtu_update:
+-	if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
+-		mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
+-					   erif_index);
+-err_route_erif_add:
+ 	if (route_action != rve->mr_route->route_action)
+ 		mr->mr_ops->route_action_update(mlxsw_sp,
+ 						rve->mr_route->route_priv,
+ 						rve->mr_route->route_action);
++err_route_action_update:
++	if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
++		mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
++					   erif_index);
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
+index 1bfeee283ea90..a3ca406a35611 100644
+--- a/drivers/net/ethernet/sfc/efx_channels.c
++++ b/drivers/net/ethernet/sfc/efx_channels.c
+@@ -914,6 +914,8 @@ int efx_set_channels(struct efx_nic *efx)
+ 			}
+ 		}
+ 	}
++	if (xdp_queue_number)
++		efx->xdp_tx_queue_count = xdp_queue_number;
+ 
+ 	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+ 	if (rc)
+diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
+index d75cf5ff5686a..49df02ecee912 100644
+--- a/drivers/net/ethernet/sfc/farch.c
++++ b/drivers/net/ethernet/sfc/farch.c
+@@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+ 		/* Transmit completion */
+ 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+-		tx_queue = efx_channel_get_tx_queue(
+-			channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
++		tx_queue = channel->tx_queue +
++				(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+ 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ 		/* Rewrite the FIFO write pointer */
+ 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+-		tx_queue = efx_channel_get_tx_queue(
+-			channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
++		tx_queue = channel->tx_queue +
++				(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+ 
+ 		netif_tx_lock(efx->net_dev);
+ 		efx_farch_notify_tx_desc(tx_queue);
+@@ -1081,16 +1081,16 @@ static void
+ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+ {
+ 	struct efx_tx_queue *tx_queue;
++	struct efx_channel *channel;
+ 	int qid;
+ 
+ 	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ 	if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
+-		tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
+-					    qid % EFX_MAX_TXQ_PER_CHANNEL);
+-		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
++		channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
++		tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
++		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
+ 			efx_farch_magic_event(tx_queue->channel,
+ 					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+-		}
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
+index fe0287b22a254..e0c502bc42707 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
++++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
+@@ -1513,7 +1513,7 @@ static int rsi_restore(struct device *dev)
+ }
+ static const struct dev_pm_ops rsi_pm_ops = {
+ 	.suspend = rsi_suspend,
+-	.resume = rsi_resume,
++	.resume_noirq = rsi_resume,
+ 	.freeze = rsi_freeze,
+ 	.thaw = rsi_thaw,
+ 	.restore = rsi_restore,
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index fe6b8aa90b534..81224447605b9 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -919,15 +919,21 @@ void nvmet_execute_async_event(struct nvmet_req *req)
+ void nvmet_execute_keep_alive(struct nvmet_req *req)
+ {
+ 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
++	u16 status = 0;
+ 
+ 	if (!nvmet_check_transfer_len(req, 0))
+ 		return;
+ 
++	if (!ctrl->kato) {
++		status = NVME_SC_KA_TIMEOUT_INVALID;
++		goto out;
++	}
++
+ 	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
+ 		ctrl->cntlid, ctrl->kato);
+-
+ 	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+-	nvmet_req_complete(req, 0);
++out:
++	nvmet_req_complete(req, status);
+ }
+ 
+ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
+diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
+index 682854e0e079d..4845d12e374ac 100644
+--- a/drivers/nvme/target/discovery.c
++++ b/drivers/nvme/target/discovery.c
+@@ -178,12 +178,14 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
+ 	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
+ 		req->error_loc =
+ 			offsetof(struct nvme_get_log_page_command, lid);
+-		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
++		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ 		goto out;
+ 	}
+ 
+ 	/* Spec requires dword aligned offsets */
+ 	if (offset & 0x3) {
++		req->error_loc =
++			offsetof(struct nvme_get_log_page_command, lpo);
+ 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ 		goto out;
+ 	}
+@@ -250,7 +252,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
+ 
+ 	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
+ 		req->error_loc = offsetof(struct nvme_identify, cns);
+-		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
++		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 1c25d83371510..8d028a88b375b 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -705,6 +705,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+ 		}
+ 	}
+ 
++	dw_pcie_iatu_detect(pci);
++
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ 	if (!res)
+ 		return -EINVAL;
+diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
+index 7e55b2b661821..24192b40e3a2a 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-host.c
++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
+@@ -398,6 +398,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
+ 		if (ret)
+ 			goto err_free_msi;
+ 	}
++	dw_pcie_iatu_detect(pci);
+ 
+ 	dw_pcie_setup_rc(pp);
+ 	dw_pcie_msi_init(pp);
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 004cb860e2667..a945f0c0e73dc 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -660,11 +660,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
+ 	pci->num_ob_windows = ob;
+ }
+ 
+-void dw_pcie_setup(struct dw_pcie *pci)
++void dw_pcie_iatu_detect(struct dw_pcie *pci)
+ {
+-	u32 val;
+ 	struct device *dev = pci->dev;
+-	struct device_node *np = dev->of_node;
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 
+ 	if (pci->version >= 0x480A || (!pci->version &&
+@@ -693,6 +691,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ 
+ 	dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
+ 		 pci->num_ob_windows, pci->num_ib_windows);
++}
++
++void dw_pcie_setup(struct dw_pcie *pci)
++{
++	u32 val;
++	struct device *dev = pci->dev;
++	struct device_node *np = dev->of_node;
+ 
+ 	if (pci->link_gen > 0)
+ 		dw_pcie_link_set_max_speed(pci, pci->link_gen);
+diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
+index 7247c8b01f048..7d6e9b7576be5 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.h
++++ b/drivers/pci/controller/dwc/pcie-designware.h
+@@ -306,6 +306,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+ 			 enum dw_pcie_region_type type);
+ void dw_pcie_setup(struct dw_pcie *pci);
++void dw_pcie_iatu_detect(struct dw_pcie *pci);
+ 
+ static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+ {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 16a17215f633d..e4d4e399004b4 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1870,20 +1870,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
+ 	int err;
+ 	int i, bars = 0;
+ 
+-	/*
+-	 * Power state could be unknown at this point, either due to a fresh
+-	 * boot or a device removal call.  So get the current power state
+-	 * so that things like MSI message writing will behave as expected
+-	 * (e.g. if the device really is in D0 at enable time).
+-	 */
+-	if (dev->pm_cap) {
+-		u16 pmcsr;
+-		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+-		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+-	}
+-
+-	if (atomic_inc_return(&dev->enable_cnt) > 1)
++	if (atomic_inc_return(&dev->enable_cnt) > 1) {
++		pci_update_current_state(dev, dev->current_state);
+ 		return 0;		/* already enabled */
++	}
+ 
+ 	bridge = pci_upstream_bridge(dev);
+ 	if (bridge)
+diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
+index 933bd8410fc2a..ef9676418c9f4 100644
+--- a/drivers/perf/arm_pmu_platform.c
++++ b/drivers/perf/arm_pmu_platform.c
+@@ -6,6 +6,7 @@
+  * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+  */
+ #define pr_fmt(fmt) "hw perfevents: " fmt
++#define dev_fmt pr_fmt
+ 
+ #include <linux/bug.h>
+ #include <linux/cpumask.h>
+@@ -100,10 +101,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
+ 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ 
+ 	num_irqs = platform_irq_count(pdev);
+-	if (num_irqs < 0) {
+-		pr_err("unable to count PMU IRQs\n");
+-		return num_irqs;
+-	}
++	if (num_irqs < 0)
++		return dev_err_probe(&pdev->dev, num_irqs, "unable to count PMU IRQs\n");
+ 
+ 	/*
+ 	 * In this case we have no idea which CPUs are covered by the PMU.
+@@ -236,7 +235,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
+ 
+ 	ret = armpmu_register(pmu);
+ 	if (ret)
+-		goto out_free;
++		goto out_free_irqs;
+ 
+ 	return 0;
+ 
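The arm_pmu hunk folds the error message and return into dev_err_probe(), which passes the error through unchanged but stays silent for -EPROBE_DEFER (the deferral reason is recorded for debugfs instead of spamming the log on every retry). Roughly:

#include <linux/device.h>
#include <linux/platform_device.h>

static int count_irqs_sketch(struct platform_device *pdev)
{
        int num_irqs = platform_irq_count(pdev);

        if (num_irqs < 0)
                /* dev_err() for real errors, quiet for -EPROBE_DEFER;
                 * always returns num_irqs. */
                return dev_err_probe(&pdev->dev, num_irqs,
                                     "unable to count PMU IRQs\n");

        return num_irqs;
}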
+diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
+index 9887f908f5401..812e5409d3595 100644
+--- a/drivers/phy/ti/phy-twl4030-usb.c
++++ b/drivers/phy/ti/phy-twl4030-usb.c
+@@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
+ 
+ 	usb_remove_phy(&twl->phy);
+ 	pm_runtime_get_sync(twl->dev);
+-	cancel_delayed_work(&twl->id_workaround_work);
++	cancel_delayed_work_sync(&twl->id_workaround_work);
+ 	device_remove_file(twl->dev, &dev_attr_vbus);
+ 
+ 	/* set transceiver mode to power on defaults */
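Several remove paths in this batch (twl4030, generic-adc-battery, s3c-adc-battery) move from cancel_delayed_work() to cancel_delayed_work_sync(). The plain cancel only stops a not-yet-running timer; an already-executing work item keeps running while remove() tears down the state it uses. The _sync variant waits it out:

#include <linux/workqueue.h>

struct foo {
        struct delayed_work work;        /* hypothetical driver state */
};

static void foo_teardown(struct foo *f)
{
        /* Cancels a pending timer AND waits for an in-flight work
         * function to return, so freeing f afterwards is safe.
         * Must not be called from the work item itself or while
         * holding locks the work function also takes. */
        cancel_delayed_work_sync(&f->work);
}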
+diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
+index f2746125b0777..3de0f767b7d17 100644
+--- a/drivers/pinctrl/pinctrl-ingenic.c
++++ b/drivers/pinctrl/pinctrl-ingenic.c
+@@ -667,7 +667,9 @@ static int jz4770_pwm_pwm7_pins[] = { 0x6b, };
+ static int jz4770_mac_rmii_pins[] = {
+ 	0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
+ };
+-static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
++static int jz4770_mac_mii_pins[] = {
++	0x7b, 0x7a, 0x7d, 0x7c, 0xa7, 0x24, 0xaf,
++};
+ 
+ static const struct group_desc jz4770_groups[] = {
+ 	INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data, 0),
+@@ -2107,26 +2109,48 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
+ 	enum pin_config_param param = pinconf_to_config_param(*config);
+ 	unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ 	unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+-	bool pull;
++	unsigned int bias;
++	bool pull, pullup, pulldown;
+ 
+-	if (jzpc->info->version >= ID_JZ4770)
+-		pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
+-	else
+-		pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
++	if (jzpc->info->version >= ID_X1830) {
++		unsigned int half = PINS_PER_GPIO_CHIP / 2;
++		unsigned int idxh = (pin % half) * 2;
++
++		if (idx < half)
++			regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
++					X1830_GPIO_PEL, &bias);
++		else
++			regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
++					X1830_GPIO_PEH, &bias);
++
++		bias = (bias >> idxh) & (GPIO_PULL_UP | GPIO_PULL_DOWN);
++
++		pullup = (bias == GPIO_PULL_UP) && (jzpc->info->pull_ups[offt] & BIT(idx));
++		pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
++
++	} else {
++		if (jzpc->info->version >= ID_JZ4770)
++			pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
++		else
++			pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
++
++		pullup = pull && (jzpc->info->pull_ups[offt] & BIT(idx));
++		pulldown = pull && (jzpc->info->pull_downs[offt] & BIT(idx));
++	}
+ 
+ 	switch (param) {
+ 	case PIN_CONFIG_BIAS_DISABLE:
+-		if (pull)
++		if (pullup || pulldown)
+ 			return -EINVAL;
+ 		break;
+ 
+ 	case PIN_CONFIG_BIAS_PULL_UP:
+-		if (!pull || !(jzpc->info->pull_ups[offt] & BIT(idx)))
++		if (!pullup)
+ 			return -EINVAL;
+ 		break;
+ 
+ 	case PIN_CONFIG_BIAS_PULL_DOWN:
+-		if (!pull || !(jzpc->info->pull_downs[offt] & BIT(idx)))
++		if (!pulldown)
+ 			return -EINVAL;
+ 		break;
+ 
+@@ -2144,7 +2168,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
+ 	if (jzpc->info->version >= ID_X1830) {
+ 		unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ 		unsigned int half = PINS_PER_GPIO_CHIP / 2;
+-		unsigned int idxh = pin % half * 2;
++		unsigned int idxh = (pin % half) * 2;
+ 		unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+ 
+ 		if (idx < half) {
+diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
+index b5888aeb4bcff..260d49dca1ad1 100644
+--- a/drivers/platform/x86/intel_pmc_core.c
++++ b/drivers/platform/x86/intel_pmc_core.c
+@@ -1186,9 +1186,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
+  * the platform BIOS enforces 24Mhz crystal to shutdown
+  * before PMC can assert SLP_S0#.
+  */
++static bool xtal_ignore;
+ static int quirk_xtal_ignore(const struct dmi_system_id *id)
+ {
+-	struct pmc_dev *pmcdev = &pmc;
++	xtal_ignore = true;
++	return 0;
++}
++
++static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
++{
+ 	u32 value;
+ 
+ 	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
+@@ -1197,7 +1203,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
+ 	/* Low Voltage Mode Enable */
+ 	value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
+ 	pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
+-	return 0;
+ }
+ 
+ static const struct dmi_system_id pmc_core_dmi_table[]  = {
+@@ -1212,6 +1217,14 @@ static const struct dmi_system_id pmc_core_dmi_table[]  = {
+ 	{}
+ };
+ 
++static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
++{
++	dmi_check_system(pmc_core_dmi_table);
++
++	if (xtal_ignore)
++		pmc_core_xtal_ignore(pmcdev);
++}
++
+ static int pmc_core_probe(struct platform_device *pdev)
+ {
+ 	static bool device_initialized;
+@@ -1253,7 +1266,7 @@ static int pmc_core_probe(struct platform_device *pdev)
+ 	mutex_init(&pmcdev->lock);
+ 	platform_set_drvdata(pdev, pmcdev);
+ 	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
+-	dmi_check_system(pmc_core_dmi_table);
++	pmc_core_do_dmi_quirks(pmcdev);
+ 
+ 	/*
+ 	 * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
+diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
+index a2a2d923e60cb..df1fc6c719f32 100644
+--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
++++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
+@@ -21,12 +21,16 @@
+ #define PUNIT_MAILBOX_BUSY_BIT		31
+ 
+ /*
+- * The average time to complete some commands is about 40us. The current
+- * count is enough to satisfy 40us. But when the firmware is very busy, this
+- * causes timeout occasionally.  So increase to deal with some worst case
+- * scenarios. Most of the command still complete in few us.
++ * The average time to complete mailbox commands is less than 40us. Most
++ * commands complete in a few microseconds, but the same firmware handles
++ * requests from all power management features, so flooding it with
++ * requests can delay a mailbox response by hundreds of microseconds.
++ * So define two timeouts: one for the average case and one for the
++ * worst case. If the firmware takes longer than average, just call
++ * cond_resched().
+  */
+-#define OS_MAILBOX_RETRY_COUNT		100
++#define OS_MAILBOX_TIMEOUT_AVG_US	40
++#define OS_MAILBOX_TIMEOUT_MAX_US	1000
+ 
+ struct isst_if_device {
+ 	struct mutex mutex;
+@@ -35,11 +39,13 @@ struct isst_if_device {
+ static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ 			    struct isst_if_mbox_cmd *mbox_cmd)
+ {
+-	u32 retries, data;
++	s64 tm_delta = 0;
++	ktime_t tm;
++	u32 data;
+ 	int ret;
+ 
+ 	/* Poll for rb bit == 0 */
+-	retries = OS_MAILBOX_RETRY_COUNT;
++	tm = ktime_get();
+ 	do {
+ 		ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+ 					    &data);
+@@ -48,11 +54,14 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ 
+ 		if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+ 			ret = -EBUSY;
++			tm_delta = ktime_us_delta(ktime_get(), tm);
++			if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
++				cond_resched();
+ 			continue;
+ 		}
+ 		ret = 0;
+ 		break;
+-	} while (--retries);
++	} while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -74,7 +83,8 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ 		return ret;
+ 
+ 	/* Poll for rb bit == 0 */
+-	retries = OS_MAILBOX_RETRY_COUNT;
++	tm_delta = 0;
++	tm = ktime_get();
+ 	do {
+ 		ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+ 					    &data);
+@@ -83,6 +93,9 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ 
+ 		if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+ 			ret = -EBUSY;
++			tm_delta = ktime_us_delta(ktime_get(), tm);
++			if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
++				cond_resched();
+ 			continue;
+ 		}
+ 
+@@ -96,7 +109,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ 		mbox_cmd->resp_data = data;
+ 		ret = 0;
+ 		break;
+-	} while (--retries);
++	} while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
+ 
+ 	return ret;
+ }
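The new ISST loop replaces a fixed retry count with a wall-clock budget: keep polling the busy bit, and once past the average-case budget start yielding with cond_resched() so a slow firmware does not monopolize the CPU. The skeleton, with a hypothetical read_busy() register accessor:

#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>

#define TIMEOUT_AVG_US        40
#define TIMEOUT_MAX_US        1000

int read_busy(bool *busy);        /* hypothetical register read */

static int poll_until_idle(void)
{
        ktime_t tm = ktime_get();
        s64 tm_delta = 0;
        bool busy;
        int ret;

        do {
                ret = read_busy(&busy);
                if (ret)
                        return ret;
                if (!busy)
                        return 0;

                ret = -EBUSY;
                tm_delta = ktime_us_delta(ktime_get(), tm);
                if (tm_delta > TIMEOUT_AVG_US)
                        cond_resched();        /* past the fast path: yield */
        } while (tm_delta < TIMEOUT_MAX_US);

        return ret;
}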
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index 4c4a7b1c64c57..0262109ac285e 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1661,27 +1661,6 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg)
+ 	return tval * 60;
+ }
+ 
+-/*
+- * Read an average power register.
+- * Return < 0 if something fails.
+- */
+-static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
+-{
+-	int tval;
+-
+-	tval = bq27xxx_read(di, BQ27XXX_REG_AP, false);
+-	if (tval < 0) {
+-		dev_err(di->dev, "error reading average power register  %02x: %d\n",
+-			BQ27XXX_REG_AP, tval);
+-		return tval;
+-	}
+-
+-	if (di->opts & BQ27XXX_O_ZERO)
+-		return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
+-	else
+-		return tval;
+-}
+-
+ /*
+  * Returns true if a battery over temperature condition is detected
+  */
+@@ -1769,8 +1748,6 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+ 		}
+ 		if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
+ 			cache.cycle_count = bq27xxx_battery_read_cyct(di);
+-		if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR)
+-			cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
+ 
+ 		/* We only have to read charge design full once */
+ 		if (di->charge_design_full <= 0)
+@@ -1833,6 +1810,32 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
+ 	return 0;
+ }
+ 
++/*
++ * Get the average power in µW
++ * Return < 0 if something fails.
++ */
++static int bq27xxx_battery_pwr_avg(struct bq27xxx_device_info *di,
++				   union power_supply_propval *val)
++{
++	int power;
++
++	power = bq27xxx_read(di, BQ27XXX_REG_AP, false);
++	if (power < 0) {
++		dev_err(di->dev,
++			"error reading average power register %02x: %d\n",
++			BQ27XXX_REG_AP, power);
++		return power;
++	}
++
++	if (di->opts & BQ27XXX_O_ZERO)
++		val->intval = (power * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
++	else
++		/* Other gauges return a signed value in units of 10mW */
++		val->intval = (int)((s16)power) * 10000;
++
++	return 0;
++}
++
+ static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
+ 				  union power_supply_propval *val)
+ {
+@@ -2020,7 +2023,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
+ 		ret = bq27xxx_simple_value(di->cache.energy, val);
+ 		break;
+ 	case POWER_SUPPLY_PROP_POWER_AVG:
+-		ret = bq27xxx_simple_value(di->cache.power_avg, val);
++		ret = bq27xxx_battery_pwr_avg(di, val);
+ 		break;
+ 	case POWER_SUPPLY_PROP_HEALTH:
+ 		ret = bq27xxx_simple_value(di->cache.health, val);
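The subtle part of the new bq27xxx_battery_pwr_avg() is (int)((s16)power) * 10000: the register is a signed 16-bit quantity (negative while discharging), so it must be sign-extended before scaling the 10 mW units to microwatts. A standalone check of why the cast matters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t raw = 0xFFF6;        /* -10 in 16-bit two's complement */

        int wrong = (int)raw * 10000;             /* 655260000: sign lost */
        int right = (int)(int16_t)raw * 10000;    /* -100000 uW = -0.1 W  */

        printf("wrong=%d right=%d\n", wrong, right);
        return 0;
}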
+diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
+index 6d5bcdb9f45d6..a3fc0084cda00 100644
+--- a/drivers/power/supply/cpcap-battery.c
++++ b/drivers/power/supply/cpcap-battery.c
+@@ -786,7 +786,7 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
+ 			break;
+ 	}
+ 
+-	if (!d)
++	if (list_entry_is_head(d, &ddata->irq_list, node))
+ 		return IRQ_NONE;
+ 
+ 	latest = cpcap_battery_latest(ddata);
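The cpcap-battery fix is a common list_for_each_entry() pitfall: when the loop runs to completion the cursor is never NULL - it holds the list head reinterpreted as an entry - so "if (!d)" can never fire. list_entry_is_head() is the correct exhaustion test. In outline:

#include <linux/list.h>

struct item {
        int irq;                        /* hypothetical payload */
        struct list_head node;
};

static struct item *find_item(struct list_head *items, int irq)
{
        struct item *d;

        list_for_each_entry(d, items, node)
                if (d->irq == irq)
                        break;

        /* Fell off the end: d now aliases the head, not a real
         * entry, and is certainly not NULL. */
        if (list_entry_is_head(d, items, node))
                return NULL;

        return d;
}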
+diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
+index 641dcad1133f7..2a8915c3e73ef 100644
+--- a/drivers/power/supply/cpcap-charger.c
++++ b/drivers/power/supply/cpcap-charger.c
+@@ -318,7 +318,7 @@ static int cpcap_charger_current_to_regval(int microamp)
+ 		return CPCAP_REG_CRM_ICHRG(0x0);
+ 	if (miliamp < 177)
+ 		return CPCAP_REG_CRM_ICHRG(0x1);
+-	if (miliamp > 1596)
++	if (miliamp >= 1596)
+ 		return CPCAP_REG_CRM_ICHRG(0xe);
+ 
+ 	res = microamp / 88666;
+@@ -668,6 +668,9 @@ static void cpcap_usb_detect(struct work_struct *work)
+ 		return;
+ 	}
+ 
++	/* Delay for 80ms to avoid vbus bouncing when usb cable is plugged in */
++	usleep_range(80000, 120000);
++
+ 	/* Throttle chrgcurr2 interrupt for charger done and retry */
+ 	switch (ddata->status) {
+ 	case POWER_SUPPLY_STATUS_CHARGING:
+diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
+index 0032069fbc2bb..66039c665dd1e 100644
+--- a/drivers/power/supply/generic-adc-battery.c
++++ b/drivers/power/supply/generic-adc-battery.c
+@@ -373,7 +373,7 @@ static int gab_remove(struct platform_device *pdev)
+ 	}
+ 
+ 	kfree(adc_bat->psy_desc.properties);
+-	cancel_delayed_work(&adc_bat->bat_work);
++	cancel_delayed_work_sync(&adc_bat->bat_work);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
+index e7931ffb7151d..397e5a03b7d9a 100644
+--- a/drivers/power/supply/lp8788-charger.c
++++ b/drivers/power/supply/lp8788-charger.c
+@@ -501,7 +501,7 @@ static int lp8788_set_irqs(struct platform_device *pdev,
+ 
+ 		ret = request_threaded_irq(virq, NULL,
+ 					lp8788_charger_irq_thread,
+-					0, name, pchg);
++					IRQF_ONESHOT, name, pchg);
+ 		if (ret)
+ 			break;
+ 	}
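The lp8788 change (and the pm2301/tps65090/tps65217 ones below) adds IRQF_ONESHOT. With a NULL primary handler, request_threaded_irq() needs the flag so the core keeps the line masked until the thread finishes; without it a level-triggered interrupt would refire endlessly, and current kernels refuse the registration outright. The canonical form:

#include <linux/interrupt.h>

static irqreturn_t foo_irq_thread(int irq, void *data)
{
        /* Sleeping work (I2C register reads etc.) belongs here. */
        return IRQ_HANDLED;
}

static int foo_request_irq(int irq, void *data)
{
        /* NULL hardirq handler => IRQF_ONESHOT is mandatory: the
         * line stays masked until foo_irq_thread() returns.
         * Without the flag the core returns -EINVAL. */
        return request_threaded_irq(irq, NULL, foo_irq_thread,
                                    IRQF_ONESHOT, "foo", data);
}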
+diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
+index ac06ecf7fc9ca..a3bfb9612b174 100644
+--- a/drivers/power/supply/pm2301_charger.c
++++ b/drivers/power/supply/pm2301_charger.c
+@@ -1089,7 +1089,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
+ 	ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
+ 				NULL,
+ 				pm2xxx_charger_irq[0].isr,
+-				pm2->pdata->irq_type,
++				pm2->pdata->irq_type | IRQF_ONESHOT,
+ 				pm2xxx_charger_irq[0].name, pm2);
+ 
+ 	if (ret != 0) {
+diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
+index a2addc24ee8b8..3e3a598f114d1 100644
+--- a/drivers/power/supply/s3c_adc_battery.c
++++ b/drivers/power/supply/s3c_adc_battery.c
+@@ -395,7 +395,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
+ 	if (main_bat.charge_finished)
+ 		free_irq(gpiod_to_irq(main_bat.charge_finished), NULL);
+ 
+-	cancel_delayed_work(&bat_work);
++	cancel_delayed_work_sync(&bat_work);
+ 
+ 	if (pdata->exit)
+ 		pdata->exit();
+diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
+index 6b0098e5a88b5..0990b2fa6cd8d 100644
+--- a/drivers/power/supply/tps65090-charger.c
++++ b/drivers/power/supply/tps65090-charger.c
+@@ -301,7 +301,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
+ 
+ 	if (irq != -ENXIO) {
+ 		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+-			tps65090_charger_isr, 0, "tps65090-charger", cdata);
++			tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata);
+ 		if (ret) {
+ 			dev_err(cdata->dev,
+ 				"Unable to register irq %d err %d\n", irq,
+diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
+index 814c2b81fdfec..ba33d1617e0b6 100644
+--- a/drivers/power/supply/tps65217_charger.c
++++ b/drivers/power/supply/tps65217_charger.c
+@@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
+ 	for (i = 0; i < NUM_CHARGER_IRQS; i++) {
+ 		ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
+ 						tps65217_charger_irq,
+-						0, "tps65217-charger",
++						IRQF_ONESHOT, "tps65217-charger",
+ 						charger);
+ 		if (ret) {
+ 			dev_err(charger->dev,
+diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
+index a2ede7d7897eb..08cbf688e14d3 100644
+--- a/drivers/regulator/da9121-regulator.c
++++ b/drivers/regulator/da9121-regulator.c
+@@ -40,6 +40,7 @@ struct da9121 {
+ 	unsigned int passive_delay;
+ 	int chip_irq;
+ 	int variant_id;
++	int subvariant_id;
+ };
+ 
+ /* Define ranges for different variants, enabling translation to/from
+@@ -812,7 +813,6 @@ static struct regmap_config da9121_2ch_regmap_config = {
+ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
+ {
+ 	u32 device_id;
+-	u8 chip_id = chip->variant_id;
+ 	u32 variant_id;
+ 	u8 variant_mrc, variant_vrc;
+ 	char *type;
+@@ -839,22 +839,34 @@ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
+ 
+ 	variant_vrc = variant_id & DA9121_MASK_OTP_VARIANT_ID_VRC;
+ 
+-	switch (variant_vrc) {
+-	case DA9121_VARIANT_VRC:
+-		type = "DA9121/DA9130";
+-		config_match = (chip_id == DA9121_TYPE_DA9121_DA9130);
++	switch (chip->subvariant_id) {
++	case DA9121_SUBTYPE_DA9121:
++		type = "DA9121";
++		config_match = (variant_vrc == DA9121_VARIANT_VRC);
+ 		break;
+-	case DA9220_VARIANT_VRC:
+-		type = "DA9220/DA9132";
+-		config_match = (chip_id == DA9121_TYPE_DA9220_DA9132);
++	case DA9121_SUBTYPE_DA9130:
++		type = "DA9130";
++		config_match = (variant_vrc == DA9130_VARIANT_VRC);
+ 		break;
+-	case DA9122_VARIANT_VRC:
+-		type = "DA9122/DA9131";
+-		config_match = (chip_id == DA9121_TYPE_DA9122_DA9131);
++	case DA9121_SUBTYPE_DA9220:
++		type = "DA9220";
++		config_match = (variant_vrc == DA9220_VARIANT_VRC);
+ 		break;
+-	case DA9217_VARIANT_VRC:
++	case DA9121_SUBTYPE_DA9132:
++		type = "DA9132";
++		config_match = (variant_vrc == DA9132_VARIANT_VRC);
++		break;
++	case DA9121_SUBTYPE_DA9122:
++		type = "DA9122";
++		config_match = (variant_vrc == DA9122_VARIANT_VRC);
++		break;
++	case DA9121_SUBTYPE_DA9131:
++		type = "DA9131";
++		config_match = (variant_vrc == DA9131_VARIANT_VRC);
++		break;
++	case DA9121_SUBTYPE_DA9217:
+ 		type = "DA9217";
+-		config_match = (chip_id == DA9121_TYPE_DA9217);
++		config_match = (variant_vrc == DA9217_VARIANT_VRC);
+ 		break;
+ 	default:
+ 		type = "Unknown";
+@@ -892,15 +904,27 @@ static int da9121_assign_chip_model(struct i2c_client *i2c,
+ 
+ 	chip->dev = &i2c->dev;
+ 
+-	switch (chip->variant_id) {
+-	case DA9121_TYPE_DA9121_DA9130:
+-		fallthrough;
+-	case DA9121_TYPE_DA9217:
++	/* Use configured subtype to select the regulator descriptor index and
++	 * register map, common to both consumer and automotive grade variants
++	 */
++	switch (chip->subvariant_id) {
++	case DA9121_SUBTYPE_DA9121:
++	case DA9121_SUBTYPE_DA9130:
++		chip->variant_id = DA9121_TYPE_DA9121_DA9130;
+ 		regmap = &da9121_1ch_regmap_config;
+ 		break;
+-	case DA9121_TYPE_DA9122_DA9131:
+-		fallthrough;
+-	case DA9121_TYPE_DA9220_DA9132:
++	case DA9121_SUBTYPE_DA9217:
++		chip->variant_id = DA9121_TYPE_DA9217;
++		regmap = &da9121_1ch_regmap_config;
++		break;
++	case DA9121_SUBTYPE_DA9122:
++	case DA9121_SUBTYPE_DA9131:
++		chip->variant_id = DA9121_TYPE_DA9122_DA9131;
++		regmap = &da9121_2ch_regmap_config;
++		break;
++	case DA9121_SUBTYPE_DA9220:
++	case DA9121_SUBTYPE_DA9132:
++		chip->variant_id = DA9121_TYPE_DA9220_DA9132;
+ 		regmap = &da9121_2ch_regmap_config;
+ 		break;
+ 	}
+@@ -975,13 +999,13 @@ regmap_error:
+ }
+ 
+ static const struct of_device_id da9121_dt_ids[] = {
+-	{ .compatible = "dlg,da9121", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
+-	{ .compatible = "dlg,da9130", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
+-	{ .compatible = "dlg,da9217", .data = (void *) DA9121_TYPE_DA9217 },
+-	{ .compatible = "dlg,da9122", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
+-	{ .compatible = "dlg,da9131", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
+-	{ .compatible = "dlg,da9220", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
+-	{ .compatible = "dlg,da9132", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
++	{ .compatible = "dlg,da9121", .data = (void *) DA9121_SUBTYPE_DA9121 },
++	{ .compatible = "dlg,da9130", .data = (void *) DA9121_SUBTYPE_DA9130 },
++	{ .compatible = "dlg,da9217", .data = (void *) DA9121_SUBTYPE_DA9217 },
++	{ .compatible = "dlg,da9122", .data = (void *) DA9121_SUBTYPE_DA9122 },
++	{ .compatible = "dlg,da9131", .data = (void *) DA9121_SUBTYPE_DA9131 },
++	{ .compatible = "dlg,da9220", .data = (void *) DA9121_SUBTYPE_DA9220 },
++	{ .compatible = "dlg,da9132", .data = (void *) DA9121_SUBTYPE_DA9132 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, da9121_dt_ids);
+@@ -1011,7 +1035,7 @@ static int da9121_i2c_probe(struct i2c_client *i2c,
+ 	}
+ 
+ 	chip->pdata = i2c->dev.platform_data;
+-	chip->variant_id = da9121_of_get_id(&i2c->dev);
++	chip->subvariant_id = da9121_of_get_id(&i2c->dev);
+ 
+ 	ret = da9121_assign_chip_model(i2c, chip);
+ 	if (ret < 0)
+diff --git a/drivers/regulator/da9121-regulator.h b/drivers/regulator/da9121-regulator.h
+index 3c34cb889ca87..357f416e17c1d 100644
+--- a/drivers/regulator/da9121-regulator.h
++++ b/drivers/regulator/da9121-regulator.h
+@@ -29,6 +29,16 @@ enum da9121_variant {
+ 	DA9121_TYPE_DA9217
+ };
+ 
++enum da9121_subvariant {
++	DA9121_SUBTYPE_DA9121,
++	DA9121_SUBTYPE_DA9130,
++	DA9121_SUBTYPE_DA9220,
++	DA9121_SUBTYPE_DA9132,
++	DA9121_SUBTYPE_DA9122,
++	DA9121_SUBTYPE_DA9131,
++	DA9121_SUBTYPE_DA9217
++};
++
+ /* Minimum, maximum and default polling millisecond periods are provided
+  * here as an example. It is expected that any final implementation will
+  * include a modification of these settings to match the required
+@@ -279,6 +289,9 @@ enum da9121_variant {
+ #define DA9220_VARIANT_VRC	0x0
+ #define DA9122_VARIANT_VRC	0x2
+ #define DA9217_VARIANT_VRC	0x7
++#define DA9130_VARIANT_VRC	0x0
++#define DA9131_VARIANT_VRC	0x1
++#define DA9132_VARIANT_VRC	0x2
+ 
+ /* DA9121_REG_OTP_CUSTOMER_ID */
+ 
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index 3f026021e95e4..84f659cafe766 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -1532,8 +1532,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
+ 	switch (action) {
+ 	case IO_SCH_ORPH_UNREG:
+ 	case IO_SCH_UNREG:
+-		if (!cdev)
+-			css_sch_device_unregister(sch);
++		css_sch_device_unregister(sch);
+ 		break;
+ 	case IO_SCH_ORPH_ATTACH:
+ 	case IO_SCH_UNREG_ATTACH:
+diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
+index 34bf2f197c714..0e0044d708440 100644
+--- a/drivers/s390/cio/qdio.h
++++ b/drivers/s390/cio/qdio.h
+@@ -181,12 +181,6 @@ struct qdio_input_q {
+ struct qdio_output_q {
+ 	/* PCIs are enabled for the queue */
+ 	int pci_out_enabled;
+-	/* cq: use asynchronous output buffers */
+-	int use_cq;
+-	/* cq: aobs used for particual SBAL */
+-	struct qaob **aobs;
+-	/* cq: sbal state related to asynchronous operation */
+-	struct qdio_outbuf_state *sbal_state;
+ 	/* timer to check for more outbound work */
+ 	struct timer_list timer;
+ 	/* tasklet to check for completions */
+@@ -379,12 +373,8 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
+ void qdio_shutdown_irq(struct qdio_irq *irq);
+ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
+ void qdio_free_queues(struct qdio_irq *irq_ptr);
+-void qdio_free_async_data(struct qdio_irq *irq_ptr);
+ int qdio_setup_init(void);
+ void qdio_setup_exit(void);
+-int qdio_enable_async_operation(struct qdio_output_q *q);
+-void qdio_disable_async_operation(struct qdio_output_q *q);
+-struct qaob *qdio_allocate_aob(void);
+ 
+ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ 			unsigned char *state);
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index 03a0116199089..307ce7ff5ca44 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -517,24 +517,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
+ 	return 1;
+ }
+ 
+-static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+-					int bufnr)
+-{
+-	unsigned long phys_aob = 0;
+-
+-	if (!q->aobs[bufnr]) {
+-		struct qaob *aob = qdio_allocate_aob();
+-		q->aobs[bufnr] = aob;
+-	}
+-	if (q->aobs[bufnr]) {
+-		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+-		phys_aob = virt_to_phys(q->aobs[bufnr]);
+-		WARN_ON_ONCE(phys_aob & 0xFF);
+-	}
+-
+-	return phys_aob;
+-}
+-
+ static inline int qdio_tasklet_schedule(struct qdio_q *q)
+ {
+ 	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
+@@ -548,7 +530,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
+ 					unsigned int *error)
+ {
+ 	unsigned char state = 0;
+-	unsigned int i;
+ 	int count;
+ 
+ 	q->timestamp = get_tod_clock_fast();
+@@ -570,10 +551,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
+ 
+ 	switch (state) {
+ 	case SLSB_P_OUTPUT_PENDING:
+-		/* detach the utilized QAOBs: */
+-		for (i = 0; i < count; i++)
+-			q->u.out.aobs[QDIO_BUFNR(start + i)] = NULL;
+-
+ 		*error = QDIO_ERROR_SLSB_PENDING;
+ 		fallthrough;
+ 	case SLSB_P_OUTPUT_EMPTY:
+@@ -999,7 +976,6 @@ int qdio_free(struct ccw_device *cdev)
+ 	cdev->private->qdio_data = NULL;
+ 	mutex_unlock(&irq_ptr->setup_mutex);
+ 
+-	qdio_free_async_data(irq_ptr);
+ 	qdio_free_queues(irq_ptr);
+ 	free_page((unsigned long) irq_ptr->qdr);
+ 	free_page(irq_ptr->chsc_page);
+@@ -1075,28 +1051,6 @@ err_dbf:
+ }
+ EXPORT_SYMBOL_GPL(qdio_allocate);
+ 
+-static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+-{
+-	struct qdio_q *q = irq_ptr->input_qs[0];
+-	int i, use_cq = 0;
+-
+-	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+-		use_cq = 1;
+-
+-	for_each_output_queue(irq_ptr, q, i) {
+-		if (use_cq) {
+-			if (multicast_outbound(q))
+-				continue;
+-			if (qdio_enable_async_operation(&q->u.out) < 0) {
+-				use_cq = 0;
+-				continue;
+-			}
+-		} else
+-			qdio_disable_async_operation(&q->u.out);
+-	}
+-	DBF_EVENT("use_cq:%d", use_cq);
+-}
+-
+ static void qdio_trace_init_data(struct qdio_irq *irq,
+ 				 struct qdio_initialize *data)
+ {
+@@ -1191,8 +1145,6 @@ int qdio_establish(struct ccw_device *cdev,
+ 
+ 	qdio_setup_ssqd_info(irq_ptr);
+ 
+-	qdio_detect_hsicq(irq_ptr);
+-
+ 	/* qebsm is now setup if available, initialize buffer states */
+ 	qdio_init_buf_states(irq_ptr);
+ 
+@@ -1297,9 +1249,11 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
+  * @callflags: flags
+  * @bufnr: first buffer to process
+  * @count: how many buffers are filled
++ * @aob: asynchronous operation block
+  */
+ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+-			   unsigned int bufnr, unsigned int count)
++			   unsigned int bufnr, unsigned int count,
++			   struct qaob *aob)
+ {
+ 	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
+ 	unsigned char state = 0;
+@@ -1320,11 +1274,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+ 		q->u.out.pci_out_enabled = 0;
+ 
+ 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
+-		unsigned long phys_aob = 0;
+-
+-		if (q->u.out.use_cq && count == 1)
+-			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
++		unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
+ 
++		WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
+ 		rc = qdio_kick_outbound_q(q, count, phys_aob);
+ 	} else if (need_siga_sync(q)) {
+ 		rc = qdio_siga_sync_q(q);
+@@ -1359,9 +1311,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+  * @q_nr: queue number
+  * @bufnr: buffer number
+  * @count: how many buffers to process
++ * @aob: asynchronous operation block (outbound only)
+  */
+ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
+-	    int q_nr, unsigned int bufnr, unsigned int count)
++	    int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob)
+ {
+ 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ 
+@@ -1383,7 +1336,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
+ 				      callflags, bufnr, count);
+ 	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
+ 		return handle_outbound(irq_ptr->output_qs[q_nr],
+-				       callflags, bufnr, count);
++				       callflags, bufnr, count, aob);
+ 	return -EINVAL;
+ }
+ EXPORT_SYMBOL_GPL(do_QDIO);
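With this rework do_QDIO() no longer fabricates a QAOB internally; the upper-layer driver allocates one (qdio_allocate_aob() is exported for that purpose below) and hands it down for a single-buffer IQD output request. A hedged sketch of the new calling convention; the function name, its parameters, and the error-path release are assumptions, since whether a QAOB is cached for reuse or freed on failure is the caller's policy (qeth caches it in the TX buffer, see further down):

	/* Sketch of a caller of the reworked do_QDIO(). */
	static int submit_one_async_buffer(struct ccw_device *cdev, int q_nr,
					   unsigned int bufnr)
	{
		struct qaob *aob = qdio_allocate_aob();
		int rc;

		if (!aob)
			return -ENOMEM;	/* or pass aob == NULL: sync only */

		rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, q_nr, bufnr, 1, aob);
		if (rc)
			qdio_release_aob(aob);	/* assumed policy on failure */
		return rc;
	}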
+diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
+index c8b9620bc6883..da67e49794023 100644
+--- a/drivers/s390/cio/qdio_setup.c
++++ b/drivers/s390/cio/qdio_setup.c
+@@ -30,6 +30,7 @@ struct qaob *qdio_allocate_aob(void)
+ {
+ 	return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
+ }
++EXPORT_SYMBOL_GPL(qdio_allocate_aob);
+ 
+ void qdio_release_aob(struct qaob *aob)
+ {
+@@ -247,8 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
+ 			 struct qdio_initialize *qdio_init)
+ {
+ 	struct qdio_q *q;
+-	struct qdio_outbuf_state *output_sbal_state_array =
+-				  qdio_init->output_sbal_state_array;
+ 	int i;
+ 
+ 	for_each_input_queue(irq_ptr, q, i) {
+@@ -265,9 +264,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
+ 		DBF_EVENT("outq:%1d", i);
+ 		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
+ 
+-		q->u.out.sbal_state = output_sbal_state_array;
+-		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+-
+ 		q->is_input_q = 0;
+ 		setup_storage_lists(q, irq_ptr,
+ 				    qdio_init->output_sbal_addr_array[i], i);
+@@ -372,30 +368,6 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
+ 	DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
+ }
+ 
+-void qdio_free_async_data(struct qdio_irq *irq_ptr)
+-{
+-	struct qdio_q *q;
+-	int i;
+-
+-	for (i = 0; i < irq_ptr->max_output_qs; i++) {
+-		q = irq_ptr->output_qs[i];
+-		if (q->u.out.use_cq) {
+-			unsigned int n;
+-
+-			for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) {
+-				struct qaob *aob = q->u.out.aobs[n];
+-
+-				if (aob) {
+-					qdio_release_aob(aob);
+-					q->u.out.aobs[n] = NULL;
+-				}
+-			}
+-
+-			qdio_disable_async_operation(&q->u.out);
+-		}
+-	}
+-}
+-
+ static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
+ {
+ 	desc->sliba = virt_to_phys(queue->slib);
+@@ -545,25 +517,6 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
+ 	printk(KERN_INFO "%s", s);
+ }
+ 
+-int qdio_enable_async_operation(struct qdio_output_q *outq)
+-{
+-	outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
+-			     GFP_KERNEL);
+-	if (!outq->aobs) {
+-		outq->use_cq = 0;
+-		return -ENOMEM;
+-	}
+-	outq->use_cq = 1;
+-	return 0;
+-}
+-
+-void qdio_disable_async_operation(struct qdio_output_q *q)
+-{
+-	kfree(q->aobs);
+-	q->aobs = NULL;
+-	q->use_cq = 0;
+-}
+-
+ int __init qdio_setup_init(void)
+ {
+ 	int rc;
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 1ffdd411201cd..6946a7e26eff7 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -294,6 +294,19 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
+ 	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
+ 				   struct ap_matrix_mdev, pqap_hook);
+ 
++	/*
++	 * If the KVM pointer is in the process of being set, wait until the
++	 * process has completed.
++	 */
++	wait_event_cmd(matrix_mdev->wait_for_kvm,
++		       !matrix_mdev->kvm_busy,
++		       mutex_unlock(&matrix_dev->lock),
++		       mutex_lock(&matrix_dev->lock));
++
++	/* If there is no guest using the mdev, there is nothing to do */
++	if (!matrix_mdev->kvm)
++		goto out_unlock;
++
+ 	q = vfio_ap_get_queue(matrix_mdev, apqn);
+ 	if (!q)
+ 		goto out_unlock;
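handle_pqap() can now sleep until the KVM pointer settles without holding matrix_dev->lock across the sleep: wait_event_cmd() runs its third argument before each sleep and its fourth after each wakeup. A minimal self-contained sketch of the idiom, with generic names:

	#include <linux/mutex.h>
	#include <linux/wait.h>

	static DEFINE_MUTEX(lock);
	static DECLARE_WAIT_QUEUE_HEAD(waitq);
	static bool busy;

	/* Called with `lock` held; returns with it held and busy == false. */
	static void wait_until_idle(void)
	{
		wait_event_cmd(waitq, !busy,
			       mutex_unlock(&lock),	/* before sleeping */
			       mutex_lock(&lock));	/* after waking */
	}

	/* The updater clears the flag and wakes all waiters. */
	static void finish_update(void)
	{
		mutex_lock(&lock);
		busy = false;
		wake_up_all(&waitq);
		mutex_unlock(&lock);
	}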
+@@ -337,6 +350,7 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
+ 
+ 	matrix_mdev->mdev = mdev;
+ 	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
++	init_waitqueue_head(&matrix_mdev->wait_for_kvm);
+ 	mdev_set_drvdata(mdev, matrix_mdev);
+ 	matrix_mdev->pqap_hook.hook = handle_pqap;
+ 	matrix_mdev->pqap_hook.owner = THIS_MODULE;
+@@ -351,17 +365,23 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
+ {
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+-	if (matrix_mdev->kvm)
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * removal of the mdev.
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		mutex_unlock(&matrix_dev->lock);
+ 		return -EBUSY;
++	}
+ 
+-	mutex_lock(&matrix_dev->lock);
+ 	vfio_ap_mdev_reset_queues(mdev);
+ 	list_del(&matrix_mdev->node);
+-	mutex_unlock(&matrix_dev->lock);
+-
+ 	kfree(matrix_mdev);
+ 	mdev_set_drvdata(mdev, NULL);
+ 	atomic_inc(&matrix_dev->available_instances);
++	mutex_unlock(&matrix_dev->lock);
+ 
+ 	return 0;
+ }
+@@ -606,24 +626,31 @@ static ssize_t assign_adapter_store(struct device *dev,
+ 	struct mdev_device *mdev = mdev_from_dev(dev);
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+-	/* If the guest is running, disallow assignment of adapter */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * assignment of adapter
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &apid);
+ 	if (ret)
+-		return ret;
++		goto done;
+ 
+-	if (apid > matrix_mdev->matrix.apm_max)
+-		return -ENODEV;
++	if (apid > matrix_mdev->matrix.apm_max) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+ 	/*
+ 	 * Set the bit in the AP mask (APM) corresponding to the AP adapter
+ 	 * number (APID). The bits in the mask, from most significant to least
+ 	 * significant bit, correspond to APIDs 0-255.
+ 	 */
+-	mutex_lock(&matrix_dev->lock);
+-
+ 	ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
+ 	if (ret)
+ 		goto done;
+@@ -672,22 +699,31 @@ static ssize_t unassign_adapter_store(struct device *dev,
+ 	struct mdev_device *mdev = mdev_from_dev(dev);
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+-	/* If the guest is running, disallow un-assignment of adapter */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * un-assignment of adapter
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &apid);
+ 	if (ret)
+-		return ret;
++		goto done;
+ 
+-	if (apid > matrix_mdev->matrix.apm_max)
+-		return -ENODEV;
++	if (apid > matrix_mdev->matrix.apm_max) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+-	mutex_lock(&matrix_dev->lock);
+ 	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
++	ret = count;
++done:
+ 	mutex_unlock(&matrix_dev->lock);
+-
+-	return count;
++	return ret;
+ }
+ static DEVICE_ATTR_WO(unassign_adapter);
+ 
+@@ -753,17 +789,24 @@ static ssize_t assign_domain_store(struct device *dev,
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 	unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
+ 
+-	/* If the guest is running, disallow assignment of domain */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * assignment of domain
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &apqi);
+ 	if (ret)
+-		return ret;
+-	if (apqi > max_apqi)
+-		return -ENODEV;
+-
+-	mutex_lock(&matrix_dev->lock);
++		goto done;
++	if (apqi > max_apqi) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+ 	ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
+ 	if (ret)
+@@ -814,22 +857,32 @@ static ssize_t unassign_domain_store(struct device *dev,
+ 	struct mdev_device *mdev = mdev_from_dev(dev);
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+-	/* If the guest is running, disallow un-assignment of domain */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * un-assignment of domain
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &apqi);
+ 	if (ret)
+-		return ret;
++		goto done;
+ 
+-	if (apqi > matrix_mdev->matrix.aqm_max)
+-		return -ENODEV;
++	if (apqi > matrix_mdev->matrix.aqm_max) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+-	mutex_lock(&matrix_dev->lock);
+ 	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
+-	mutex_unlock(&matrix_dev->lock);
++	ret = count;
+ 
+-	return count;
++done:
++	mutex_unlock(&matrix_dev->lock);
++	return ret;
+ }
+ static DEVICE_ATTR_WO(unassign_domain);
+ 
+@@ -858,27 +911,36 @@ static ssize_t assign_control_domain_store(struct device *dev,
+ 	struct mdev_device *mdev = mdev_from_dev(dev);
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+-	/* If the guest is running, disallow assignment of control domain */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * assignment of control domain.
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &id);
+ 	if (ret)
+-		return ret;
++		goto done;
+ 
+-	if (id > matrix_mdev->matrix.adm_max)
+-		return -ENODEV;
++	if (id > matrix_mdev->matrix.adm_max) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+ 	/* Set the bit in the ADM (bitmask) corresponding to the AP control
+ 	 * domain number (id). The bits in the mask, from most significant to
+ 	 * least significant, correspond to IDs 0 up to the one less than the
+ 	 * number of control domains that can be assigned.
+ 	 */
+-	mutex_lock(&matrix_dev->lock);
+ 	set_bit_inv(id, matrix_mdev->matrix.adm);
++	ret = count;
++done:
+ 	mutex_unlock(&matrix_dev->lock);
+-
+-	return count;
++	return ret;
+ }
+ static DEVICE_ATTR_WO(assign_control_domain);
+ 
+@@ -908,21 +970,30 @@ static ssize_t unassign_control_domain_store(struct device *dev,
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 	unsigned long max_domid =  matrix_mdev->matrix.adm_max;
+ 
+-	/* If the guest is running, disallow un-assignment of control domain */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * un-assignment of control domain.
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &domid);
+ 	if (ret)
+-		return ret;
+-	if (domid > max_domid)
+-		return -ENODEV;
++		goto done;
++	if (domid > max_domid) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+-	mutex_lock(&matrix_dev->lock);
+ 	clear_bit_inv(domid, matrix_mdev->matrix.adm);
++	ret = count;
++done:
+ 	mutex_unlock(&matrix_dev->lock);
+-
+-	return count;
++	return ret;
+ }
+ static DEVICE_ATTR_WO(unassign_control_domain);
+ 
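All five store handlers above converge on the same shape: take matrix_dev->lock up front, funnel every exit through a single done label, and return either a negative errno or count (the sysfs convention for bytes consumed). Condensed into a skeleton, with matrix_mdev obtained from drvdata as in the hunks above:

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		unsigned long id;
		ssize_t ret;

		mutex_lock(&matrix_dev->lock);

		if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
			ret = -EBUSY;	/* guest running or KVM in flux */
			goto done;
		}

		ret = kstrtoul(buf, 0, &id);
		if (ret)
			goto done;

		/* ... validate id and update the relevant bitmap ... */

		ret = count;
	done:
		mutex_unlock(&matrix_dev->lock);
		return ret;
	}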
+@@ -1027,8 +1098,15 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
+  * @matrix_mdev: a mediated matrix device
+  * @kvm: reference to KVM instance
+  *
+- * Verifies no other mediated matrix device has @kvm and sets a reference to
+- * it in @matrix_mdev->kvm.
++ * Sets all data for @matrix_mdev that are needed to manage AP resources
++ * for the guest whose state is represented by @kvm.
++ *
++ * Note: The matrix_dev->lock must be taken prior to calling
++ * this function; however, the lock will be temporarily released while the
++ * guest's AP configuration is set to avoid a potential lockdep splat.
++ * The kvm->lock is taken to set the guest's AP configuration which, under
++ * certain circumstances, will result in a circular lock dependency if this is
++ * done under the matrix_dev->lock.
+  *
+  * Return 0 if no other mediated matrix device has a reference to @kvm;
+  * otherwise, returns an -EPERM.
+@@ -1038,14 +1116,25 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
+ {
+ 	struct ap_matrix_mdev *m;
+ 
+-	list_for_each_entry(m, &matrix_dev->mdev_list, node) {
+-		if ((m != matrix_mdev) && (m->kvm == kvm))
+-			return -EPERM;
+-	}
++	if (kvm->arch.crypto.crycbd) {
++		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
++			if (m != matrix_mdev && m->kvm == kvm)
++				return -EPERM;
++		}
+ 
+-	matrix_mdev->kvm = kvm;
+-	kvm_get_kvm(kvm);
+-	kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
++		kvm_get_kvm(kvm);
++		matrix_mdev->kvm_busy = true;
++		mutex_unlock(&matrix_dev->lock);
++		kvm_arch_crypto_set_masks(kvm,
++					  matrix_mdev->matrix.apm,
++					  matrix_mdev->matrix.aqm,
++					  matrix_mdev->matrix.adm);
++		mutex_lock(&matrix_dev->lock);
++		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
++		matrix_mdev->kvm = kvm;
++		matrix_mdev->kvm_busy = false;
++		wake_up_all(&matrix_mdev->wait_for_kvm);
++	}
+ 
+ 	return 0;
+ }
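The heart of the lockdep fix: kvm_arch_crypto_set_masks() takes kvm->lock, while KVM teardown paths can call back into this driver already holding kvm->lock and then want matrix_dev->lock, so holding matrix_dev->lock across the mask update would complete an A->B vs B->A cycle. The kvm_busy flag keeps the mdev state stable while the lock is dropped. A generic sketch of the pattern (the struct and its initialization are illustrative only):

	struct state {
		struct mutex a, b;		/* a = outer, b = inner */
		wait_queue_head_t waitq;
		bool busy;
	};

	/* Called with s->a held; temporarily drops it to take s->b. */
	static void update_under_b(struct state *s)
	{
		s->busy = true;			/* others wait on s->waitq */
		mutex_unlock(&s->a);

		mutex_lock(&s->b);		/* safe: a is not held here */
		/* ... mutate the b-protected data ... */
		mutex_unlock(&s->b);

		mutex_lock(&s->a);
		s->busy = false;
		wake_up_all(&s->waitq);		/* caller releases s->a */
	}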
+@@ -1079,51 +1168,65 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
+ 	return NOTIFY_DONE;
+ }
+ 
++/**
++ * vfio_ap_mdev_unset_kvm
++ *
++ * @matrix_mdev: a matrix mediated device
++ *
++ * Performs clean-up of resources no longer needed by @matrix_mdev.
++ *
++ * Note: The matrix_dev->lock must be taken prior to calling
++ * this function; however, the lock will be temporarily released while the
++ * guest's AP configuration is cleared to avoid a potential lockdep splat.
++ * The kvm->lock is taken to clear the guest's AP configuration which, under
++ * certain circumstances, will result in a circular lock dependency if this is
++ * done under the matrix_dev->lock.
++ *
++ */
+ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+ {
+-	kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+-	matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+-	vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
+-	kvm_put_kvm(matrix_mdev->kvm);
+-	matrix_mdev->kvm = NULL;
++	/*
++	 * If the KVM pointer is in the process of being set, wait until the
++	 * process has completed.
++	 */
++	wait_event_cmd(matrix_mdev->wait_for_kvm,
++		       !matrix_mdev->kvm_busy,
++		       mutex_unlock(&matrix_dev->lock),
++		       mutex_lock(&matrix_dev->lock));
++
++	if (matrix_mdev->kvm) {
++		matrix_mdev->kvm_busy = true;
++		mutex_unlock(&matrix_dev->lock);
++		kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
++		mutex_lock(&matrix_dev->lock);
++		vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
++		matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
++		kvm_put_kvm(matrix_mdev->kvm);
++		matrix_mdev->kvm = NULL;
++		matrix_mdev->kvm_busy = false;
++		wake_up_all(&matrix_mdev->wait_for_kvm);
++	}
+ }
+ 
+ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
+ 				       unsigned long action, void *data)
+ {
+-	int ret, notify_rc = NOTIFY_OK;
++	int notify_rc = NOTIFY_OK;
+ 	struct ap_matrix_mdev *matrix_mdev;
+ 
+ 	if (action != VFIO_GROUP_NOTIFY_SET_KVM)
+ 		return NOTIFY_OK;
+ 
+-	matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+ 	mutex_lock(&matrix_dev->lock);
++	matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+ 
+-	if (!data) {
+-		if (matrix_mdev->kvm)
+-			vfio_ap_mdev_unset_kvm(matrix_mdev);
+-		goto notify_done;
+-	}
+-
+-	ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
+-	if (ret) {
+-		notify_rc = NOTIFY_DONE;
+-		goto notify_done;
+-	}
+-
+-	/* If there is no CRYCB pointer, then we can't copy the masks */
+-	if (!matrix_mdev->kvm->arch.crypto.crycbd) {
++	if (!data)
++		vfio_ap_mdev_unset_kvm(matrix_mdev);
++	else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
+ 		notify_rc = NOTIFY_DONE;
+-		goto notify_done;
+-	}
+-
+-	kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
+-				  matrix_mdev->matrix.aqm,
+-				  matrix_mdev->matrix.adm);
+ 
+-notify_done:
+ 	mutex_unlock(&matrix_dev->lock);
++
+ 	return notify_rc;
+ }
+ 
+@@ -1258,8 +1361,7 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+ 	mutex_lock(&matrix_dev->lock);
+-	if (matrix_mdev->kvm)
+-		vfio_ap_mdev_unset_kvm(matrix_mdev);
++	vfio_ap_mdev_unset_kvm(matrix_mdev);
+ 	mutex_unlock(&matrix_dev->lock);
+ 
+ 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+@@ -1293,6 +1395,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+ 				    unsigned int cmd, unsigned long arg)
+ {
+ 	int ret;
++	struct ap_matrix_mdev *matrix_mdev;
+ 
+ 	mutex_lock(&matrix_dev->lock);
+ 	switch (cmd) {
+@@ -1300,6 +1403,21 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+ 		ret = vfio_ap_mdev_get_device_info(arg);
+ 		break;
+ 	case VFIO_DEVICE_RESET:
++		matrix_mdev = mdev_get_drvdata(mdev);
++		if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) {
++			ret = -EINVAL;
++			break;
++		}
++
++		/*
++		 * If the KVM pointer is in the process of being set, wait until
++		 * the process has completed.
++		 */
++		wait_event_cmd(matrix_mdev->wait_for_kvm,
++			       !matrix_mdev->kvm_busy,
++			       mutex_unlock(&matrix_dev->lock),
++			       mutex_lock(&matrix_dev->lock));
++
+ 		ret = vfio_ap_mdev_reset_queues(mdev);
+ 		break;
+ 	default:
+diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
+index 28e9d99897682..f82a6396acae7 100644
+--- a/drivers/s390/crypto/vfio_ap_private.h
++++ b/drivers/s390/crypto/vfio_ap_private.h
+@@ -83,6 +83,8 @@ struct ap_matrix_mdev {
+ 	struct ap_matrix matrix;
+ 	struct notifier_block group_notifier;
+ 	struct notifier_block iommu_notifier;
++	bool kvm_busy;
++	wait_queue_head_t wait_for_kvm;
+ 	struct kvm *kvm;
+ 	struct kvm_s390_module_hook pqap_hook;
+ 	struct mdev_device *mdev;
+diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
+index 33b23884b133f..09fe6bb8880bc 100644
+--- a/drivers/s390/crypto/zcrypt_card.c
++++ b/drivers/s390/crypto/zcrypt_card.c
+@@ -192,5 +192,6 @@ void zcrypt_card_unregister(struct zcrypt_card *zc)
+ 	spin_unlock(&zcrypt_list_lock);
+ 	sysfs_remove_group(&zc->card->ap_dev.device.kobj,
+ 			   &zcrypt_card_attr_group);
++	zcrypt_card_put(zc);
+ }
+ EXPORT_SYMBOL(zcrypt_card_unregister);
+diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
+index 5062eae73d4aa..c3ffbd26b73ff 100644
+--- a/drivers/s390/crypto/zcrypt_queue.c
++++ b/drivers/s390/crypto/zcrypt_queue.c
+@@ -223,5 +223,6 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
+ 	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+ 			   &zcrypt_queue_attr_group);
+ 	zcrypt_card_put(zc);
++	zcrypt_queue_put(zq);
+ }
+ EXPORT_SYMBOL(zcrypt_queue_unregister);
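Both one-line fixes add the put that balances a reference taken at registration time; without it the card or queue object can never reach its release function. The general shape, sketched with a plain kref (the zcrypt_*_get/put helpers wrap a refcount, but their internals are not shown in this patch):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct obj {
		struct kref ref;	/* kref_init() done at allocation */
	};

	static void obj_release(struct kref *ref)
	{
		kfree(container_of(ref, struct obj, ref));
	}

	/* Registration takes a reference for the subsystem... */
	static void obj_register(struct obj *o)
	{
		kref_get(&o->ref);
		/* ... add to lists, create sysfs attributes ... */
	}

	/* ...so unregistration must drop it, or the object leaks. */
	static void obj_unregister(struct obj *o)
	{
		/* ... remove from lists, remove sysfs attributes ... */
		kref_put(&o->ref, obj_release);
	}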
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 91acff493612a..fd9b869d278ed 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -437,6 +437,7 @@ struct qeth_qdio_out_buffer {
+ 
+ 	struct qeth_qdio_out_q *q;
+ 	struct list_head list_entry;
++	struct qaob *aob;
+ };
+ 
+ struct qeth_card;
+@@ -499,7 +500,6 @@ struct qeth_out_q_stats {
+ struct qeth_qdio_out_q {
+ 	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+ 	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
+-	struct qdio_outbuf_state *bufstates; /* convenience pointer */
+ 	struct list_head pending_bufs;
+ 	struct qeth_out_q_stats stats;
+ 	spinlock_t lock;
+@@ -563,7 +563,6 @@ struct qeth_qdio_info {
+ 	/* output */
+ 	unsigned int no_out_queues;
+ 	struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
+-	struct qdio_outbuf_state *out_bufstates;
+ 
+ 	/* priority queueing */
+ 	int do_prio_queueing;
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index a814698387bc3..175b82b98f369 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -369,8 +369,7 @@ static int qeth_cq_init(struct qeth_card *card)
+ 				   QDIO_MAX_BUFFERS_PER_Q);
+ 		card->qdio.c_q->next_buf_to_init = 127;
+ 		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
+-			     card->qdio.no_in_queues - 1, 0,
+-			     127);
++			     card->qdio.no_in_queues - 1, 0, 127, NULL);
+ 		if (rc) {
+ 			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
+ 			goto out;
+@@ -383,48 +382,22 @@ out:
+ 
+ static int qeth_alloc_cq(struct qeth_card *card)
+ {
+-	int rc;
+-
+ 	if (card->options.cq == QETH_CQ_ENABLED) {
+-		int i;
+-		struct qdio_outbuf_state *outbuf_states;
+-
+ 		QETH_CARD_TEXT(card, 2, "cqon");
+ 		card->qdio.c_q = qeth_alloc_qdio_queue();
+ 		if (!card->qdio.c_q) {
+-			rc = -1;
+-			goto kmsg_out;
++			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
++			return -ENOMEM;
+ 		}
++
+ 		card->qdio.no_in_queues = 2;
+-		card->qdio.out_bufstates =
+-			kcalloc(card->qdio.no_out_queues *
+-					QDIO_MAX_BUFFERS_PER_Q,
+-				sizeof(struct qdio_outbuf_state),
+-				GFP_KERNEL);
+-		outbuf_states = card->qdio.out_bufstates;
+-		if (outbuf_states == NULL) {
+-			rc = -1;
+-			goto free_cq_out;
+-		}
+-		for (i = 0; i < card->qdio.no_out_queues; ++i) {
+-			card->qdio.out_qs[i]->bufstates = outbuf_states;
+-			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
+-		}
+ 	} else {
+ 		QETH_CARD_TEXT(card, 2, "nocq");
+ 		card->qdio.c_q = NULL;
+ 		card->qdio.no_in_queues = 1;
+ 	}
+ 	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
+-	rc = 0;
+-out:
+-	return rc;
+-free_cq_out:
+-	qeth_free_qdio_queue(card->qdio.c_q);
+-	card->qdio.c_q = NULL;
+-kmsg_out:
+-	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+-	goto out;
++	return 0;
+ }
+ 
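With the bufstates allocation gone, qeth_alloc_cq() has exactly one failure point, so the goto-based cleanup ladder collapses into direct returns and the function can report a real -ENOMEM instead of the bare -1. The resulting shape, reduced to a sketch using names from the hunk above:

	/* One allocation, one failure path: no cleanup labels needed. */
	static int alloc_single_resource(struct qeth_card *card)
	{
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev,
				"Failed to create completion queue\n");
			return -ENOMEM;
		}
		return 0;
	}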
+ static void qeth_free_cq(struct qeth_card *card)
+@@ -434,8 +407,6 @@ static void qeth_free_cq(struct qeth_card *card)
+ 		qeth_free_qdio_queue(card->qdio.c_q);
+ 		card->qdio.c_q = NULL;
+ 	}
+-	kfree(card->qdio.out_bufstates);
+-	card->qdio.out_bufstates = NULL;
+ }
+ 
+ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+@@ -487,12 +458,12 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 	switch (atomic_xchg(&buffer->state, new_state)) {
+ 	case QETH_QDIO_BUF_PRIMED:
+ 		/* Faster than TX completion code, let it handle the async
+-		 * completion for us.
++		 * completion for us. It will also recycle the QAOB.
+ 		 */
+ 		break;
+ 	case QETH_QDIO_BUF_PENDING:
+ 		/* TX completion code is active and will handle the async
+-		 * completion for us.
++		 * completion for us. It will also recycle the QAOB.
+ 		 */
+ 		break;
+ 	case QETH_QDIO_BUF_NEED_QAOB:
+@@ -501,7 +472,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 		qeth_notify_skbs(buffer->q, buffer, notification);
+ 
+ 		/* Free dangling allocations. The attached skbs are handled by
+-		 * qeth_tx_complete_pending_bufs().
++		 * qeth_tx_complete_pending_bufs(), and so is the QAOB.
+ 		 */
+ 		for (i = 0;
+ 		     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+@@ -520,8 +491,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 	default:
+ 		WARN_ON_ONCE(1);
+ 	}
+-
+-	qdio_release_aob(aob);
+ }
+ 
+ static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
+@@ -1451,6 +1420,13 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+ 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
+ }
+ 
++static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
++{
++	if (buf->aob)
++		qdio_release_aob(buf->aob);
++	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
++}
++
+ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
+ 					  struct qeth_qdio_out_q *queue,
+ 					  bool drain)
+@@ -1468,7 +1444,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
+ 			qeth_tx_complete_buf(buf, drain, 0);
+ 
+ 			list_del(&buf->list_entry);
+-			kmem_cache_free(qeth_qdio_outbuf_cache, buf);
++			qeth_free_out_buf(buf);
+ 		}
+ 	}
+ }
+@@ -1485,7 +1461,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
+ 
+ 		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
+ 		if (free) {
+-			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
++			qeth_free_out_buf(q->bufs[j]);
+ 			q->bufs[j] = NULL;
+ 		}
+ 	}
+@@ -2637,7 +2613,7 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
+ 
+ err_out_bufs:
+ 	while (i > 0)
+-		kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]);
++		qeth_free_out_buf(q->bufs[--i]);
+ 	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+ err_qdio_bufs:
+ 	kfree(q);
+@@ -3024,7 +3000,8 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
+ 	}
+ 
+ 	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
+-	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
++	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
++		     NULL);
+ 	if (rc) {
+ 		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
+ 		return rc;
+@@ -3516,7 +3493,7 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
+ 		}
+ 
+ 		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
+-			     queue->next_buf_to_init, count);
++			     queue->next_buf_to_init, count, NULL);
+ 		if (rc) {
+ 			QETH_CARD_TEXT(card, 2, "qinberr");
+ 		}
+@@ -3625,6 +3602,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
+ 	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
+ 	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
+ 	struct qeth_card *card = queue->card;
++	struct qaob *aob = NULL;
+ 	int rc;
+ 	int i;
+ 
+@@ -3637,16 +3615,24 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
+ 				SBAL_EFLAGS_LAST_ENTRY;
+ 		queue->coalesced_frames += buf->frames;
+ 
+-		if (queue->bufstates)
+-			queue->bufstates[bidx].user = buf;
+-
+ 		if (IS_IQD(card)) {
+ 			skb_queue_walk(&buf->skb_list, skb)
+ 				skb_tx_timestamp(skb);
+ 		}
+ 	}
+ 
+-	if (!IS_IQD(card)) {
++	if (IS_IQD(card)) {
++		if (card->options.cq == QETH_CQ_ENABLED &&
++		    !qeth_iqd_is_mcast_queue(card, queue) &&
++		    count == 1) {
++			if (!buf->aob)
++				buf->aob = qdio_allocate_aob();
++			if (buf->aob) {
++				aob = buf->aob;
++				aob->user1 = (u64) buf;
++			}
++		}
++	} else {
+ 		if (!queue->do_pack) {
+ 			if ((atomic_read(&queue->used_buffers) >=
+ 				(QETH_HIGH_WATERMARK_PACK -
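The TX path now allocates a QAOB lazily the first time a buffer needs asynchronous completion, caches it in the buffer, and re-arms it per doorbell; completion recycles it with a memset() (see qeth_iqd_tx_complete() below) and qeth_free_out_buf() releases it only when the buffer itself dies. The lifetime, condensed into one helper (the helper name and its caller's guarding predicate are assumptions):

	/* Lazily attach and arm the buffer's QAOB; NULL means no async
	 * completion info will be delivered for this transfer.
	 */
	static struct qaob *qeth_arm_aob(struct qeth_qdio_out_buffer *buf)
	{
		if (!buf->aob)
			buf->aob = qdio_allocate_aob();	/* kept for reuse */
		if (!buf->aob)
			return NULL;
		buf->aob->user1 = (u64) buf;	/* completion finds the buffer */
		return buf->aob;
	}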
+@@ -3677,8 +3663,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
+ 	}
+ 
+ 	QETH_TXQ_STAT_INC(queue, doorbell);
+-	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
+-		     queue->queue_no, index, count);
++	rc = do_QDIO(CARD_DDEV(card), qdio_flags, queue->queue_no, index, count,
++		     aob);
+ 
+ 	switch (rc) {
+ 	case 0:
+@@ -3814,8 +3800,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
+ 		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
+ 	}
+ 	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
+-		    card->qdio.c_q->next_buf_to_init,
+-		    count);
++		     cq->next_buf_to_init, count, NULL);
+ 	if (rc) {
+ 		dev_warn(&card->gdev->dev,
+ 			"QDIO reported an error, rc=%i\n", rc);
+@@ -5270,7 +5255,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
+ 	init_data.int_parm               = (unsigned long) card;
+ 	init_data.input_sbal_addr_array  = in_sbal_ptrs;
+ 	init_data.output_sbal_addr_array = out_sbal_ptrs;
+-	init_data.output_sbal_state_array = card->qdio.out_bufstates;
+ 	init_data.scan_threshold	 = IS_IQD(card) ? 0 : 32;
+ 
+ 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
+@@ -6069,7 +6053,15 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
+ 	bool error = !!qdio_error;
+ 
+ 	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
+-		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
++		struct qaob *aob = buffer->aob;
++
++		if (!aob) {
++			netdev_WARN_ONCE(card->dev,
++					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
++					 bidx, queue->queue_no);
++			qeth_schedule_recovery(card);
++			return;
++		}
+ 
+ 		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
+ 
+@@ -6125,6 +6117,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
+ 		default:
+ 			WARN_ON_ONCE(1);
+ 		}
++
++		memset(aob, 0, sizeof(*aob));
+ 	} else if (card->options.cq == QETH_CQ_ENABLED) {
+ 		qeth_notify_skbs(queue, buffer,
+ 				 qeth_compute_cq_notification(sflags, 0));
+diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
+index 23ab16d65f2a4..049596cbfb5de 100644
+--- a/drivers/s390/scsi/zfcp_qdio.c
++++ b/drivers/s390/scsi/zfcp_qdio.c
+@@ -128,7 +128,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
+ 	/*
+ 	 * put SBALs back to response queue
+ 	 */
+-	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
++	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL))
+ 		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
+ }
+ 
+@@ -298,7 +298,7 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+ 	atomic_sub(sbal_number, &qdio->req_q_free);
+ 
+ 	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
+-			 q_req->sbal_first, sbal_number);
++			 q_req->sbal_first, sbal_number, NULL);
+ 
+ 	if (unlikely(retval)) {
+ 		/* Failed to submit the IO, roll back our modifications. */
+@@ -463,7 +463,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
+ 		sbale->addr = 0;
+ 	}
+ 
+-	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
++	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q,
++		    NULL))
+ 		goto failed_qdio;
+ 
+ 	/* set index of first available SBALS / number of available SBALS */
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index ea436a14087f1..5eff3368143d3 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -573,10 +573,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+ 		 * even though it shouldn't according to T10.
+ 		 * The retry without rtpg_ext_hdr_req set
+ 		 * handles this.
++		 * Note:  some arrays return a sense key of ILLEGAL_REQUEST
++		 * with ASC 00h if they don't support the extended header.
+ 		 */
+ 		if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
+-		    sense_hdr.sense_key == ILLEGAL_REQUEST &&
+-		    sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
++		    sense_hdr.sense_key == ILLEGAL_REQUEST) {
+ 			pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
+ 			goto retry;
+ 		}
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index 22826544da7e7..9989669beec3c 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -1731,7 +1731,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ 
+ 	if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
+ 		FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
+-			     "lport->mfs:%hu\n", mfs, lport->mfs);
++			     "lport->mfs:%u\n", mfs, lport->mfs);
+ 		fc_lport_error(lport, fp);
+ 		goto out;
+ 	}
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index bdd9a29f4201c..0496a60735ef1 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -1687,8 +1687,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ 				"0071 Set trunk mode failed with status: %d",
+ 				rc);
+-	if (rc != MBX_TIMEOUT)
+-		mempool_free(mbox, phba->mbox_mem_pool);
++	mempool_free(mbox, phba->mbox_mem_pool);
+ 
+ 	return 0;
+ }
+@@ -6793,15 +6792,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
+ 	pmboxq->ctx_buf = NULL;
+ 	pmboxq->vport = vport;
+ 
+-	if (vport->fc_flag & FC_OFFLINE_MODE)
++	if (vport->fc_flag & FC_OFFLINE_MODE) {
+ 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+-	else
+-		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+-
+-	if (rc != MBX_SUCCESS) {
+-		if (rc != MBX_TIMEOUT)
++		if (rc != MBX_SUCCESS) {
+ 			mempool_free(pmboxq, phba->mbox_mem_pool);
+-		return NULL;
++			return NULL;
++		}
++	} else {
++		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
++		if (rc != MBX_SUCCESS) {
++			if (rc != MBX_TIMEOUT)
++				mempool_free(pmboxq, phba->mbox_mem_pool);
++			return NULL;
++		}
+ 	}
+ 
+ 	memset(hs, 0, sizeof (struct fc_host_statistics));
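The split above exists because the two issue modes have different ownership rules: after lpfc_sli_issue_mbox(..., MBX_POLL) the mailbox is always back in the driver's hands and can always be freed, whereas lpfc_sli_issue_mbox_wait() can return MBX_TIMEOUT with the command still owned by the completion handler, where freeing it would be a use-after-free. The pattern each of these hunks repeats, with `offline` standing in for the FC_OFFLINE_MODE/LPFC_SLI_ACTIVE checks:

	if (offline) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			/* polled: the mailbox is always ours to free */
			mempool_free(pmboxq, phba->mbox_mem_pool);
			return NULL;
		}
	} else {
		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq,
					      phba->fc_ratov * 2);
		if (rc != MBX_SUCCESS) {
			/* on timeout the completion handler still owns it */
			if (rc != MBX_TIMEOUT)
				mempool_free(pmboxq, phba->mbox_mem_pool);
			return NULL;
		}
	}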
+@@ -6825,15 +6828,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
+ 	pmboxq->ctx_buf = NULL;
+ 	pmboxq->vport = vport;
+ 
+-	if (vport->fc_flag & FC_OFFLINE_MODE)
++	if (vport->fc_flag & FC_OFFLINE_MODE) {
+ 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+-	else
+-		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+-
+-	if (rc != MBX_SUCCESS) {
+-		if (rc != MBX_TIMEOUT)
++		if (rc != MBX_SUCCESS) {
+ 			mempool_free(pmboxq, phba->mbox_mem_pool);
+-		return NULL;
++			return NULL;
++		}
++	} else {
++		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
++		if (rc != MBX_SUCCESS) {
++			if (rc != MBX_TIMEOUT)
++				mempool_free(pmboxq, phba->mbox_mem_pool);
++			return NULL;
++		}
+ 	}
+ 
+ 	hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+@@ -6906,15 +6913,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
+ 	pmboxq->vport = vport;
+ 
+ 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+-		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
++		(!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+-	else
+-		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+-
+-	if (rc != MBX_SUCCESS) {
+-		if (rc != MBX_TIMEOUT)
++		if (rc != MBX_SUCCESS) {
+ 			mempool_free(pmboxq, phba->mbox_mem_pool);
+-		return;
++			return;
++		}
++	} else {
++		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
++		if (rc != MBX_SUCCESS) {
++			if (rc != MBX_TIMEOUT)
++				mempool_free(pmboxq, phba->mbox_mem_pool);
++			return;
++		}
+ 	}
+ 
+ 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+@@ -6924,15 +6935,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
+ 	pmboxq->vport = vport;
+ 
+ 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+-	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
++	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+-	else
++		if (rc != MBX_SUCCESS) {
++			mempool_free(pmboxq, phba->mbox_mem_pool);
++			return;
++		}
++	} else {
+ 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+-
+-	if (rc != MBX_SUCCESS) {
+-		if (rc != MBX_TIMEOUT)
+-			mempool_free( pmboxq, phba->mbox_mem_pool);
+-		return;
++		if (rc != MBX_SUCCESS) {
++			if (rc != MBX_TIMEOUT)
++				mempool_free(pmboxq, phba->mbox_mem_pool);
++			return;
++		}
+ 	}
+ 
+ 	lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
+index a0aad4896a459..763b1eeb0ca80 100644
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -55,9 +55,6 @@ void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
+ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
+ void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
+-void lpfc_supported_pages(struct lpfcMboxq *);
+-void lpfc_pc_sli4_params(struct lpfcMboxq *);
+-int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+ int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+ 			   uint16_t, uint16_t, bool);
+ int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
+@@ -351,8 +348,8 @@ int lpfc_sli_hbq_size(void);
+ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
+ 			       struct lpfc_iocbq *, void *);
+ int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
+-int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
+-			uint64_t, lpfc_ctx_cmd);
++int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
++			lpfc_ctx_cmd abort_cmd);
+ int
+ lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
+ 			uint16_t, uint64_t, lpfc_ctx_cmd);
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index f0a758138ae88..fd18ac2acc135 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -1600,7 +1600,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ 	struct lpfc_nodelist *new_ndlp;
+ 	struct serv_parm *sp;
+ 	uint8_t  name[sizeof(struct lpfc_name)];
+-	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
++	uint32_t keepDID = 0, keep_nlp_flag = 0;
+ 	uint32_t keep_new_nlp_flag = 0;
+ 	uint16_t keep_nlp_state;
+ 	u32 keep_nlp_fc4_type = 0;
+@@ -1622,7 +1622,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ 	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
+ 
+ 	/* return immediately if the WWPN matches ndlp */
+-	if (new_ndlp == ndlp)
++	if (!new_ndlp || (new_ndlp == ndlp))
+ 		return ndlp;
+ 
+ 	if (phba->sli_rev == LPFC_SLI_REV4) {
+@@ -1641,30 +1641,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ 			 (new_ndlp ? new_ndlp->nlp_flag : 0),
+ 			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
+ 
+-	if (!new_ndlp) {
+-		rc = memcmp(&ndlp->nlp_portname, name,
+-			    sizeof(struct lpfc_name));
+-		if (!rc) {
+-			if (active_rrqs_xri_bitmap)
+-				mempool_free(active_rrqs_xri_bitmap,
+-					     phba->active_rrq_pool);
+-			return ndlp;
+-		}
+-		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
+-		if (!new_ndlp) {
+-			if (active_rrqs_xri_bitmap)
+-				mempool_free(active_rrqs_xri_bitmap,
+-					     phba->active_rrq_pool);
+-			return ndlp;
+-		}
+-	} else {
+-		keepDID = new_ndlp->nlp_DID;
+-		if (phba->sli_rev == LPFC_SLI_REV4 &&
+-		    active_rrqs_xri_bitmap)
+-			memcpy(active_rrqs_xri_bitmap,
+-			       new_ndlp->active_rrqs_xri_bitmap,
+-			       phba->cfg_rrq_xri_bitmap_sz);
+-	}
++	keepDID = new_ndlp->nlp_DID;
++
++	if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
++		memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
++		       phba->cfg_rrq_xri_bitmap_sz);
+ 
+ 	/* At this point in this routine, we know new_ndlp will be
+ 	 * returned. however, any previous GID_FTs that were done
+@@ -3829,7 +3810,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		did = irsp->un.elsreq64.remoteID;
+ 		ndlp = lpfc_findnode_did(vport, did);
+ 		if (!ndlp && (cmd != ELS_CMD_PLOGI))
+-			return 1;
++			return 0;
+ 	}
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+@@ -4473,10 +4454,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+  * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
+  * field in the command IOCB is not NULL, the referred mailbox command will
+  * be send out, and then invokes the lpfc_els_free_iocb() routine to release
+- * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
+- * link down event occurred during the discovery, the lpfc_nlp_not_used()
+- * routine shall be invoked trying to release the ndlp if no other threads
+- * are currently referring it.
++ * the IOCB.
+  **/
+ static void
+ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+@@ -4486,10 +4464,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
+ 	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
+ 	IOCB_t  *irsp;
+-	uint8_t *pcmd;
+ 	LPFC_MBOXQ_t *mbox = NULL;
+ 	struct lpfc_dmabuf *mp = NULL;
+-	uint32_t ls_rjt = 0;
+ 
+ 	irsp = &rspiocb->iocb;
+ 
+@@ -4501,18 +4477,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	if (cmdiocb->context_un.mbox)
+ 		mbox = cmdiocb->context_un.mbox;
+ 
+-	/* First determine if this is a LS_RJT cmpl. Note, this callback
+-	 * function can have cmdiocb->contest1 (ndlp) field set to NULL.
+-	 */
+-	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+-	if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+-		/* A LS_RJT associated with Default RPI cleanup has its own
+-		 * separate code path.
+-		 */
+-		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+-			ls_rjt = 1;
+-	}
+-
+ 	/* Check to see if link went down during discovery */
+ 	if (!ndlp || lpfc_els_chk_latt(vport)) {
+ 		if (mbox) {
+@@ -4523,15 +4487,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 			}
+ 			mempool_free(mbox, phba->mbox_mem_pool);
+ 		}
+-		if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+-			if (lpfc_nlp_not_used(ndlp)) {
+-				ndlp = NULL;
+-				/* Indicate the node has already released,
+-				 * should not reference to it from within
+-				 * the routine lpfc_els_free_iocb.
+-				 */
+-				cmdiocb->context1 = NULL;
+-			}
+ 		goto out;
+ 	}
+ 
+@@ -4609,29 +4564,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 				"Data: x%x x%x x%x\n",
+ 				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ 				ndlp->nlp_rpi);
+-
+-			if (lpfc_nlp_not_used(ndlp)) {
+-				ndlp = NULL;
+-				/* Indicate node has already been released,
+-				 * should not reference to it from within
+-				 * the routine lpfc_els_free_iocb.
+-				 */
+-				cmdiocb->context1 = NULL;
+-			}
+-		} else {
+-			/* Do not drop node for lpfc_els_abort'ed ELS cmds */
+-			if (!lpfc_error_lost_link(irsp) &&
+-			    ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+-				if (lpfc_nlp_not_used(ndlp)) {
+-					ndlp = NULL;
+-					/* Indicate node has already been
+-					 * released, should not reference
+-					 * to it from within the routine
+-					 * lpfc_els_free_iocb.
+-					 */
+-					cmdiocb->context1 = NULL;
+-				}
+-			}
+ 		}
+ 		mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ 		if (mp) {
+@@ -4647,19 +4579,6 @@ out:
+ 			ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+ 		ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
+ 		spin_unlock_irq(&ndlp->lock);
+-
+-		/* If the node is not being used by another discovery thread,
+-		 * and we are sending a reject, we are done with it.
+-		 * Release driver reference count here and free associated
+-		 * resources.
+-		 */
+-		if (ls_rjt)
+-			if (lpfc_nlp_not_used(ndlp))
+-				/* Indicate node has already been released,
+-				 * should not reference to it from within
+-				 * the routine lpfc_els_free_iocb.
+-				 */
+-				cmdiocb->context1 = NULL;
+ 	}
+ 
+ 	/* Release the originating I/O reference. */
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 48ca4a612f801..c5176f4063864 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -140,11 +140,8 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
+ 			      "rport terminate: sid:x%x did:x%x flg:x%x",
+ 			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+ 
+-	if (ndlp->nlp_sid != NLP_NO_SID) {
+-		lpfc_sli_abort_iocb(vport,
+-				    &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
+-				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+-	}
++	if (ndlp->nlp_sid != NLP_NO_SID)
++		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ }
+ 
+ /*
+@@ -299,8 +296,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+ 
+ 	if (ndlp->nlp_sid != NLP_NO_SID) {
+ 		warn_on = 1;
+-		lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
+-				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
++		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ 	}
+ 
+ 	if (warn_on) {
+diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
+index 541b9aef6bfec..f5bc2c32a8179 100644
+--- a/drivers/scsi/lpfc/lpfc_hw4.h
++++ b/drivers/scsi/lpfc/lpfc_hw4.h
+@@ -124,6 +124,7 @@ struct lpfc_sli_intf {
+ /* Define SLI4 Alignment requirements. */
+ #define LPFC_ALIGN_16_BYTE	16
+ #define LPFC_ALIGN_64_BYTE	64
++#define SLI4_PAGE_SIZE		4096
+ 
+ /* Define SLI4 specific definitions. */
+ #define LPFC_MQ_CQE_BYTE_OFFSET	256
+@@ -2976,62 +2977,6 @@ struct lpfc_mbx_request_features {
+ #define lpfc_mbx_rq_ftr_rsp_mrqp_WORD		word3
+ };
+ 
+-struct lpfc_mbx_supp_pages {
+-	uint32_t word1;
+-#define qs_SHIFT 				0
+-#define qs_MASK					0x00000001
+-#define qs_WORD					word1
+-#define wr_SHIFT				1
+-#define wr_MASK 				0x00000001
+-#define wr_WORD					word1
+-#define pf_SHIFT				8
+-#define pf_MASK					0x000000ff
+-#define pf_WORD					word1
+-#define cpn_SHIFT				16
+-#define cpn_MASK				0x000000ff
+-#define cpn_WORD				word1
+-	uint32_t word2;
+-#define list_offset_SHIFT 			0
+-#define list_offset_MASK			0x000000ff
+-#define list_offset_WORD			word2
+-#define next_offset_SHIFT			8
+-#define next_offset_MASK			0x000000ff
+-#define next_offset_WORD			word2
+-#define elem_cnt_SHIFT				16
+-#define elem_cnt_MASK				0x000000ff
+-#define elem_cnt_WORD				word2
+-	uint32_t word3;
+-#define pn_0_SHIFT				24
+-#define pn_0_MASK  				0x000000ff
+-#define pn_0_WORD				word3
+-#define pn_1_SHIFT				16
+-#define pn_1_MASK				0x000000ff
+-#define pn_1_WORD				word3
+-#define pn_2_SHIFT				8
+-#define pn_2_MASK				0x000000ff
+-#define pn_2_WORD				word3
+-#define pn_3_SHIFT				0
+-#define pn_3_MASK				0x000000ff
+-#define pn_3_WORD				word3
+-	uint32_t word4;
+-#define pn_4_SHIFT				24
+-#define pn_4_MASK				0x000000ff
+-#define pn_4_WORD				word4
+-#define pn_5_SHIFT				16
+-#define pn_5_MASK				0x000000ff
+-#define pn_5_WORD				word4
+-#define pn_6_SHIFT				8
+-#define pn_6_MASK				0x000000ff
+-#define pn_6_WORD				word4
+-#define pn_7_SHIFT				0
+-#define pn_7_MASK				0x000000ff
+-#define pn_7_WORD				word4
+-	uint32_t rsvd[27];
+-#define LPFC_SUPP_PAGES			0
+-#define LPFC_BLOCK_GUARD_PROFILES	1
+-#define LPFC_SLI4_PARAMETERS		2
+-};
+-
+ struct lpfc_mbx_memory_dump_type3 {
+ 	uint32_t word1;
+ #define lpfc_mbx_memory_dump_type3_type_SHIFT    0
+@@ -3248,121 +3193,6 @@ struct user_eeprom {
+ 	uint8_t reserved191[57];
+ };
+ 
+-struct lpfc_mbx_pc_sli4_params {
+-	uint32_t word1;
+-#define qs_SHIFT				0
+-#define qs_MASK					0x00000001
+-#define qs_WORD					word1
+-#define wr_SHIFT				1
+-#define wr_MASK					0x00000001
+-#define wr_WORD					word1
+-#define pf_SHIFT				8
+-#define pf_MASK					0x000000ff
+-#define pf_WORD					word1
+-#define cpn_SHIFT				16
+-#define cpn_MASK				0x000000ff
+-#define cpn_WORD				word1
+-	uint32_t word2;
+-#define if_type_SHIFT				0
+-#define if_type_MASK				0x00000007
+-#define if_type_WORD				word2
+-#define sli_rev_SHIFT				4
+-#define sli_rev_MASK				0x0000000f
+-#define sli_rev_WORD				word2
+-#define sli_family_SHIFT			8
+-#define sli_family_MASK				0x000000ff
+-#define sli_family_WORD				word2
+-#define featurelevel_1_SHIFT			16
+-#define featurelevel_1_MASK			0x000000ff
+-#define featurelevel_1_WORD			word2
+-#define featurelevel_2_SHIFT			24
+-#define featurelevel_2_MASK			0x0000001f
+-#define featurelevel_2_WORD			word2
+-	uint32_t word3;
+-#define fcoe_SHIFT 				0
+-#define fcoe_MASK				0x00000001
+-#define fcoe_WORD				word3
+-#define fc_SHIFT				1
+-#define fc_MASK					0x00000001
+-#define fc_WORD					word3
+-#define nic_SHIFT				2
+-#define nic_MASK				0x00000001
+-#define nic_WORD				word3
+-#define iscsi_SHIFT				3
+-#define iscsi_MASK				0x00000001
+-#define iscsi_WORD				word3
+-#define rdma_SHIFT				4
+-#define rdma_MASK				0x00000001
+-#define rdma_WORD				word3
+-	uint32_t sge_supp_len;
+-#define SLI4_PAGE_SIZE 4096
+-	uint32_t word5;
+-#define if_page_sz_SHIFT			0
+-#define if_page_sz_MASK				0x0000ffff
+-#define if_page_sz_WORD				word5
+-#define loopbk_scope_SHIFT			24
+-#define loopbk_scope_MASK			0x0000000f
+-#define loopbk_scope_WORD			word5
+-#define rq_db_window_SHIFT			28
+-#define rq_db_window_MASK			0x0000000f
+-#define rq_db_window_WORD			word5
+-	uint32_t word6;
+-#define eq_pages_SHIFT				0
+-#define eq_pages_MASK				0x0000000f
+-#define eq_pages_WORD				word6
+-#define eqe_size_SHIFT				8
+-#define eqe_size_MASK				0x000000ff
+-#define eqe_size_WORD				word6
+-	uint32_t word7;
+-#define cq_pages_SHIFT				0
+-#define cq_pages_MASK				0x0000000f
+-#define cq_pages_WORD				word7
+-#define cqe_size_SHIFT				8
+-#define cqe_size_MASK				0x000000ff
+-#define cqe_size_WORD				word7
+-	uint32_t word8;
+-#define mq_pages_SHIFT				0
+-#define mq_pages_MASK				0x0000000f
+-#define mq_pages_WORD				word8
+-#define mqe_size_SHIFT				8
+-#define mqe_size_MASK				0x000000ff
+-#define mqe_size_WORD				word8
+-#define mq_elem_cnt_SHIFT			16
+-#define mq_elem_cnt_MASK			0x000000ff
+-#define mq_elem_cnt_WORD			word8
+-	uint32_t word9;
+-#define wq_pages_SHIFT				0
+-#define wq_pages_MASK				0x0000ffff
+-#define wq_pages_WORD				word9
+-#define wqe_size_SHIFT				8
+-#define wqe_size_MASK				0x000000ff
+-#define wqe_size_WORD				word9
+-	uint32_t word10;
+-#define rq_pages_SHIFT				0
+-#define rq_pages_MASK				0x0000ffff
+-#define rq_pages_WORD				word10
+-#define rqe_size_SHIFT				8
+-#define rqe_size_MASK				0x000000ff
+-#define rqe_size_WORD				word10
+-	uint32_t word11;
+-#define hdr_pages_SHIFT				0
+-#define hdr_pages_MASK				0x0000000f
+-#define hdr_pages_WORD				word11
+-#define hdr_size_SHIFT				8
+-#define hdr_size_MASK				0x0000000f
+-#define hdr_size_WORD				word11
+-#define hdr_pp_align_SHIFT			16
+-#define hdr_pp_align_MASK			0x0000ffff
+-#define hdr_pp_align_WORD			word11
+-	uint32_t word12;
+-#define sgl_pages_SHIFT				0
+-#define sgl_pages_MASK				0x0000000f
+-#define sgl_pages_WORD				word12
+-#define sgl_pp_align_SHIFT			16
+-#define sgl_pp_align_MASK			0x0000ffff
+-#define sgl_pp_align_WORD			word12
+-	uint32_t rsvd_13_63[51];
+-};
+ #define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
+ 			       &(~((SLI4_PAGE_SIZE)-1)))
+ 
+@@ -3994,8 +3824,6 @@ struct lpfc_mqe {
+ 		struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
+ 		struct lpfc_mbx_query_fw_config query_fw_cfg;
+ 		struct lpfc_mbx_set_beacon_config beacon_config;
+-		struct lpfc_mbx_supp_pages supp_pages;
+-		struct lpfc_mbx_pc_sli4_params sli4_params;
+ 		struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+ 		struct lpfc_mbx_set_link_diag_state link_diag_state;
+ 		struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
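Since SLI4_PAGE_SIZE previously lived inside the now-deleted lpfc_mbx_pc_sli4_params, the define moves up into the common SLI4 alignment block so that SLI4_PAGE_ALIGN() keeps compiling. The macro is the usual round-up-to-4-KiB: (addr + 4095) & ~4095. A few compile-time spot checks (static_assert from <linux/build_bug.h>):

	static_assert(SLI4_PAGE_ALIGN(0)    == 0);
	static_assert(SLI4_PAGE_ALIGN(1)    == 4096);
	static_assert(SLI4_PAGE_ALIGN(4096) == 4096);
	static_assert(SLI4_PAGE_ALIGN(5000) == 8192);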
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 71f340dd4fbdc..a67051ba3f127 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -6573,8 +6573,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+ 	LPFC_MBOXQ_t *mboxq;
+ 	MAILBOX_t *mb;
+ 	int rc, i, max_buf_size;
+-	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
+-	struct lpfc_mqe *mqe;
+ 	int longs;
+ 	int extra;
+ 	uint64_t wwn;
+@@ -6808,32 +6806,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+ 
+ 	lpfc_nvme_mod_param_dep(phba);
+ 
+-	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
+-	lpfc_supported_pages(mboxq);
+-	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+-	if (!rc) {
+-		mqe = &mboxq->u.mqe;
+-		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+-		       LPFC_MAX_SUPPORTED_PAGES);
+-		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+-			switch (pn_page[i]) {
+-			case LPFC_SLI4_PARAMETERS:
+-				phba->sli4_hba.pc_sli4_params.supported = 1;
+-				break;
+-			default:
+-				break;
+-			}
+-		}
+-		/* Read the port's SLI4 Parameters capabilities if supported. */
+-		if (phba->sli4_hba.pc_sli4_params.supported)
+-			rc = lpfc_pc_sli4_params_get(phba, mboxq);
+-		if (rc) {
+-			mempool_free(mboxq, phba->mbox_mem_pool);
+-			rc = -EIO;
+-			goto out_free_bsmbx;
+-		}
+-	}
+-
+ 	/*
+ 	 * Get sli4 parameters that override parameters from Port capabilities.
+ 	 * If this call fails, it isn't critical unless the SLI4 parameters come
+@@ -9660,8 +9632,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
+ 				"3250 QUERY_FW_CFG mailbox failed with status "
+ 				"x%x add_status x%x, mbx status x%x\n",
+ 				shdr_status, shdr_add_status, rc);
+-		if (rc != MBX_TIMEOUT)
+-			mempool_free(mboxq, phba->mbox_mem_pool);
++		mempool_free(mboxq, phba->mbox_mem_pool);
+ 		rc = -ENXIO;
+ 		goto out_error;
+ 	}
+@@ -9677,8 +9648,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
+ 			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
+ 			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+ 
+-	if (rc != MBX_TIMEOUT)
+-		mempool_free(mboxq, phba->mbox_mem_pool);
++	mempool_free(mboxq, phba->mbox_mem_pool);
+ 
+ 	/*
+ 	 * Set up HBA Event Queues (EQs)
+@@ -10276,8 +10246,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
+ 		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ 		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ 					 &shdr->response);
+-		if (rc != MBX_TIMEOUT)
+-			mempool_free(mboxq, phba->mbox_mem_pool);
++		mempool_free(mboxq, phba->mbox_mem_pool);
+ 		if (shdr_status || shdr_add_status || rc) {
+ 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ 					"0495 SLI_FUNCTION_RESET mailbox "
+@@ -12075,78 +12044,6 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+ 		phba->pport->work_port_events = 0;
+ }
+ 
+- /**
+- * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
+- * @phba: Pointer to HBA context object.
+- * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+- *
+- * This function is called in the SLI4 code path to read the port's
+- * sli4 capabilities.
+- *
+- * This function may be be called from any context that can block-wait
+- * for the completion.  The expectation is that this routine is called
+- * typically from probe_one or from the online routine.
+- **/
+-int
+-lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+-{
+-	int rc;
+-	struct lpfc_mqe *mqe;
+-	struct lpfc_pc_sli4_params *sli4_params;
+-	uint32_t mbox_tmo;
+-
+-	rc = 0;
+-	mqe = &mboxq->u.mqe;
+-
+-	/* Read the port's SLI4 Parameters port capabilities */
+-	lpfc_pc_sli4_params(mboxq);
+-	if (!phba->sli4_hba.intr_enable)
+-		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+-	else {
+-		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+-		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+-	}
+-
+-	if (unlikely(rc))
+-		return 1;
+-
+-	sli4_params = &phba->sli4_hba.pc_sli4_params;
+-	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
+-	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
+-	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
+-	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
+-					     &mqe->un.sli4_params);
+-	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
+-					     &mqe->un.sli4_params);
+-	sli4_params->proto_types = mqe->un.sli4_params.word3;
+-	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
+-	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
+-	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
+-	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
+-	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
+-	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
+-	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
+-	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
+-	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
+-	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
+-	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
+-	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
+-	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
+-	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
+-	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
+-	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
+-	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
+-	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
+-	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
+-	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+-
+-	/* Make sure that sge_supp_len can be handled by the driver */
+-	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+-		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+-
+-	return rc;
+-}
+-
+ /**
+  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+  * @phba: Pointer to HBA context object.
+@@ -12205,7 +12102,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+ 	else
+ 		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
+ 	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
+-	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
++	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
++					   mbx_sli4_parameters);
+ 	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
+ 	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
+ 	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
+diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
+index c03a7f12dd65b..72dd22ad5dccf 100644
+--- a/drivers/scsi/lpfc/lpfc_mbox.c
++++ b/drivers/scsi/lpfc/lpfc_mbox.c
+@@ -2624,39 +2624,3 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
+ 	resume_rpi->event_tag = ndlp->phba->fc_eventTag;
+ }
+ 
+-/**
+- * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
+- *                        mailbox command.
+- * @mbox: pointer to lpfc mbox command to initialize.
+- *
+- * The PORT_CAPABILITIES supported pages mailbox command is issued to
+- * retrieve the particular feature pages supported by the port.
+- **/
+-void
+-lpfc_supported_pages(struct lpfcMboxq *mbox)
+-{
+-	struct lpfc_mbx_supp_pages *supp_pages;
+-
+-	memset(mbox, 0, sizeof(*mbox));
+-	supp_pages = &mbox->u.mqe.un.supp_pages;
+-	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+-	bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
+-}
+-
+-/**
+- * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
+- * @mbox: pointer to lpfc mbox command to initialize.
+- *
+- * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
+- * retrieve the particular SLI4 features supported by the port.
+- **/
+-void
+-lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
+-{
+-	struct lpfc_mbx_pc_sli4_params *sli4_params;
+-
+-	memset(mbox, 0, sizeof(*mbox));
+-	sli4_params = &mbox->u.mqe.un.sli4_params;
+-	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+-	bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
+-}
+diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
+index 135d8e8a42ba2..9f05f5e329c6b 100644
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -279,106 +279,43 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+ 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+ }
+ 
+-/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
++/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
+  * @phba: pointer to lpfc hba data structure.
+- * @link_mbox: pointer to CONFIG_LINK mailbox object
++ * @login_mbox: pointer to REG_RPI mailbox object
+  *
+- * This routine is only called if we are SLI3, direct connect pt2pt
+- * mode and the remote NPort issues the PLOGI after link up.
++ * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
+  */
+ static void
+-lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
++lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
+ {
+-	LPFC_MBOXQ_t *login_mbox;
+-	MAILBOX_t *mb = &link_mbox->u.mb;
+ 	struct lpfc_iocbq *save_iocb;
+ 	struct lpfc_nodelist *ndlp;
++	MAILBOX_t *mb = &login_mbox->u.mb;
++
+ 	int rc;
+ 
+-	ndlp = link_mbox->ctx_ndlp;
+-	login_mbox = link_mbox->context3;
++	ndlp = login_mbox->ctx_ndlp;
+ 	save_iocb = login_mbox->context3;
+-	link_mbox->context3 = NULL;
+-	login_mbox->context3 = NULL;
+-
+-	/* Check for CONFIG_LINK error */
+-	if (mb->mbxStatus) {
+-		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+-				"4575 CONFIG_LINK fails pt2pt discovery: %x\n",
+-				mb->mbxStatus);
+-		mempool_free(login_mbox, phba->mbox_mem_pool);
+-		mempool_free(link_mbox, phba->mbox_mem_pool);
+-		kfree(save_iocb);
+-		return;
+-	}
+ 
+-	/* Now that CONFIG_LINK completed, and our SID is configured,
+-	 * we can now proceed with sending the PLOGI ACC.
+-	 */
+-	rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
+-			      save_iocb, ndlp, login_mbox);
+-	if (rc) {
+-		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+-				"4576 PLOGI ACC fails pt2pt discovery: %x\n",
+-				rc);
+-		mempool_free(login_mbox, phba->mbox_mem_pool);
++	if (mb->mbxStatus == MBX_SUCCESS) {
++		/* Now that REG_RPI completed successfully,
++		 * we can now proceed with sending the PLOGI ACC.
++		 */
++		rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
++				      save_iocb, ndlp, NULL);
++		if (rc) {
++			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
++					"4576 PLOGI ACC fails pt2pt discovery: "
++					"DID %x Data: %x\n", ndlp->nlp_DID, rc);
++		}
+ 	}
+ 
+-	mempool_free(link_mbox, phba->mbox_mem_pool);
++	/* Now process the REG_RPI cmpl */
++	lpfc_mbx_cmpl_reg_login(phba, login_mbox);
++	ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+ 	kfree(save_iocb);
+ }
+ 
+-/**
+- * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
+- * @phba: Pointer to HBA context object.
+- * @pmb: Pointer to mailbox object.
+- *
+- * This function provides the unreg rpi mailbox completion handler for a tgt.
+- * The routine frees the memory resources associated with the completed
+- * mailbox command and transmits the ELS ACC.
+- *
+- * This routine is only called if we are SLI4, acting in target
+- * mode and the remote NPort issues the PLOGI after link up.
+- **/
+-static void
+-lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+-{
+-	struct lpfc_vport *vport = pmb->vport;
+-	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
+-	LPFC_MBOXQ_t *mbox = pmb->context3;
+-	struct lpfc_iocbq *piocb = NULL;
+-	int rc;
+-
+-	if (mbox) {
+-		pmb->context3 = NULL;
+-		piocb = mbox->context3;
+-		mbox->context3 = NULL;
+-	}
+-
+-	/*
+-	 * Complete the unreg rpi mbx request, and update flags.
+-	 * This will also restart any deferred events.
+-	 */
+-	lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
+-
+-	if (!piocb) {
+-		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+-				 "4578 PLOGI ACC fail\n");
+-		if (mbox)
+-			mempool_free(mbox, phba->mbox_mem_pool);
+-		return;
+-	}
+-
+-	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
+-	if (rc) {
+-		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+-				 "4579 PLOGI ACC fail %x\n", rc);
+-		if (mbox)
+-			mempool_free(mbox, phba->mbox_mem_pool);
+-	}
+-	kfree(piocb);
+-}
+-
+ static int
+ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	       struct lpfc_iocbq *cmdiocb)
+@@ -395,8 +332,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	struct lpfc_iocbq *save_iocb;
+ 	struct ls_rjt stat;
+ 	uint32_t vid, flag;
+-	u16 rpi;
+-	int rc, defer_acc;
++	int rc;
+ 
+ 	memset(&stat, 0, sizeof (struct ls_rjt));
+ 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+@@ -445,7 +381,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	else
+ 		ndlp->nlp_fcp_info |= CLASS3;
+ 
+-	defer_acc = 0;
+ 	ndlp->nlp_class_sup = 0;
+ 	if (sp->cls1.classValid)
+ 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
+@@ -539,27 +474,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+ 
+-		/* Issue config_link / reg_vfi to account for updated TOV's */
+-
++		/* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
++		 * to account for updated TOV's / parameters
++		 */
+ 		if (phba->sli_rev == LPFC_SLI_REV4)
+ 			lpfc_issue_reg_vfi(vport);
+ 		else {
+-			defer_acc = 1;
+ 			link_mbox = mempool_alloc(phba->mbox_mem_pool,
+ 						  GFP_KERNEL);
+ 			if (!link_mbox)
+ 				goto out;
+ 			lpfc_config_link(phba, link_mbox);
+-			link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
++			link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ 			link_mbox->vport = vport;
+ 			link_mbox->ctx_ndlp = ndlp;
+ 
+-			save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+-			if (!save_iocb)
++			rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
++			if (rc == MBX_NOT_FINISHED) {
++				mempool_free(link_mbox, phba->mbox_mem_pool);
+ 				goto out;
+-			/* Save info from cmd IOCB used in rsp */
+-			memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+-			       sizeof(struct lpfc_iocbq));
++			}
+ 		}
+ 
+ 		lpfc_can_disctmo(vport);
+@@ -578,59 +512,28 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	if (!login_mbox)
+ 		goto out;
+ 
+-	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
+-	if (phba->nvmet_support && !defer_acc) {
+-		link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+-		if (!link_mbox)
+-			goto out;
+-
+-		/* As unique identifiers such as iotag would be overwritten
+-		 * with those from the cmdiocb, allocate separate temporary
+-		 * storage for the copy.
+-		 */
+-		save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+-		if (!save_iocb)
+-			goto out;
+-
+-		/* Unreg RPI is required for SLI4. */
+-		rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+-		lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
+-		link_mbox->vport = vport;
+-		link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+-		if (!link_mbox->ctx_ndlp)
+-			goto out;
+-
+-		link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
+-
+-		if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+-		    (!(vport->fc_flag & FC_OFFLINE_MODE)))
+-			ndlp->nlp_flag |= NLP_UNREG_INP;
++	save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
++	if (!save_iocb)
++		goto out;
+ 
+-		/* Save info from cmd IOCB used in rsp */
+-		memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
++	/* Save info from cmd IOCB to be used in rsp after all mbox completes */
++	memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
++	       sizeof(struct lpfc_iocbq));
+ 
+-		/* Delay sending ACC till unreg RPI completes. */
+-		defer_acc = 1;
+-	} else if (phba->sli_rev == LPFC_SLI_REV4)
++	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
++	if (phba->sli_rev == LPFC_SLI_REV4)
+ 		lpfc_unreg_rpi(vport, ndlp);
+ 
++	/* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
++	 * always be deferring the ACC.
++	 */
+ 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
+ 			    (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
+ 	if (rc)
+ 		goto out;
+ 
+-	/* ACC PLOGI rsp command needs to execute first,
+-	 * queue this login_mbox command to be processed later.
+-	 */
+ 	login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+-	/*
+-	 * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
+-	 * command issued in lpfc_cmpl_els_acc().
+-	 */
+ 	login_mbox->vport = vport;
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+-	spin_unlock_irq(&ndlp->lock);
+ 
+ 	/*
+ 	 * If there is an outstanding PLOGI issued, abort it before
+@@ -660,7 +563,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		 * to register, then unregister the RPI.
+ 		 */
+ 		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
++		ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
++				   NLP_RCV_PLOGI);
+ 		spin_unlock_irq(&ndlp->lock);
+ 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
+ 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+@@ -670,42 +574,39 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			mempool_free(login_mbox, phba->mbox_mem_pool);
+ 		return 1;
+ 	}
+-	if (defer_acc) {
+-		/* So the order here should be:
+-		 * SLI3 pt2pt
+-		 *   Issue CONFIG_LINK mbox
+-		 *   CONFIG_LINK cmpl
+-		 * SLI4 tgt
+-		 *   Issue UNREG RPI mbx
+-		 *   UNREG RPI cmpl
+-		 * Issue PLOGI ACC
+-		 * PLOGI ACC cmpl
+-		 * Issue REG_LOGIN mbox
+-		 */
+ 
+-		/* Save the REG_LOGIN mbox for and rcv IOCB copy later */
+-		link_mbox->context3 = login_mbox;
+-		login_mbox->context3 = save_iocb;
++	/* So the order here should be:
++	 * SLI3 pt2pt
++	 *   Issue CONFIG_LINK mbox
++	 *   CONFIG_LINK cmpl
++	 * SLI4 pt2pt
++	 *   Issue REG_VFI mbox
++	 *   REG_VFI cmpl
++	 * SLI4
++	 *   Issue UNREG RPI mbx
++	 *   UNREG RPI cmpl
++	 * Issue REG_RPI mbox
++	 * REG RPI cmpl
++	 * Issue PLOGI ACC
++	 * PLOGI ACC cmpl
++	 */
++	login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
++	login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
++	login_mbox->context3 = save_iocb; /* For PLOGI ACC */
+ 
+-		/* Start the ball rolling by issuing CONFIG_LINK here */
+-		rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+-		if (rc == MBX_NOT_FINISHED)
+-			goto out;
+-		return 1;
+-	}
++	spin_lock_irq(&ndlp->lock);
++	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
++	spin_unlock_irq(&ndlp->lock);
++
++	/* Start the ball rolling by issuing REG_LOGIN here */
++	rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
++	if (rc == MBX_NOT_FINISHED)
++		goto out;
++	lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+ 
+-	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
+-	if (rc)
+-		mempool_free(login_mbox, phba->mbox_mem_pool);
+ 	return 1;
+ out:
+-	if (defer_acc)
+-		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+-				"4577 discovery failure: %p %p %p\n",
+-				save_iocb, link_mbox, login_mbox);
+ 	kfree(save_iocb);
+-	if (link_mbox)
+-		mempool_free(link_mbox, phba->mbox_mem_pool);
+ 	if (login_mbox)
+ 		mempool_free(login_mbox, phba->mbox_mem_pool);
+ 
+@@ -913,9 +814,14 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		}
+ 	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+ 		((ndlp->nlp_type & NLP_FCP_TARGET) ||
+-		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
++		(ndlp->nlp_type & NLP_NVME_TARGET) ||
++		(vport->fc_flag & FC_PT2PT))) ||
+ 		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+-		/* Only try to re-login if this is NOT a Fabric Node */
++		/* Only try to re-login if this is NOT a Fabric Node
++		 * AND the remote NPORT is a FCP/NVME Target or we
++		 * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
++		 * case for LOGO as a response to ADISC behavior.
++		 */
+ 		mod_timer(&ndlp->nlp_delayfunc,
+ 			  jiffies + msecs_to_jiffies(1000 * 1));
+ 		spin_lock_irq(&ndlp->lock);
+@@ -1985,8 +1891,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
+ 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ 
+ 		lpfc_issue_els_logo(vport, ndlp, 0);
+-		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ 		return ndlp->nlp_state;
+ 	}
+ 
+@@ -2633,12 +2537,10 @@ static uint32_t
+ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			  void *arg, uint32_t evt)
+ {
+-	struct lpfc_hba  *phba = vport->phba;
+ 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ 
+ 	/* flush the target */
+-	lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
+-			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
++	lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ 
+ 	/* Treat like rcv logo */
+ 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
+index bb2a4a0d12959..a3fd959f7431c 100644
+--- a/drivers/scsi/lpfc/lpfc_nvmet.c
++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
+@@ -3304,7 +3304,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
+ 	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
+ 
+ 	/* Word 10 */
+-	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
+ 	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
+ 	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
+ 	       LPFC_WQE_LENLOC_WORD12);
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index fa1a714a78f09..bd31feb3d5e18 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -5683,12 +5683,10 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
+ 			phba->sli4_hba.lnk_info.lnk_no,
+ 			phba->BIOSVersion);
+ out_free_mboxq:
+-	if (rc != MBX_TIMEOUT) {
+-		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+-			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+-		else
+-			mempool_free(mboxq, phba->mbox_mem_pool);
+-	}
++	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
++		lpfc_sli4_mbox_cmd_free(phba, mboxq);
++	else
++		mempool_free(mboxq, phba->mbox_mem_pool);
+ 	return rc;
+ }
+ 
+@@ -5789,12 +5787,10 @@ retrieve_ppname:
+ 	}
+ 
+ out_free_mboxq:
+-	if (rc != MBX_TIMEOUT) {
+-		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+-			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+-		else
+-			mempool_free(mboxq, phba->mbox_mem_pool);
+-	}
++	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
++		lpfc_sli4_mbox_cmd_free(phba, mboxq);
++	else
++		mempool_free(mboxq, phba->mbox_mem_pool);
+ 	return rc;
+ }
+ 
+@@ -11647,7 +11643,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 	icmd = &cmdiocb->iocb;
+ 	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ 	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+-	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
++	    cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ 		return IOCB_ABORTING;
+ 
+ 	if (!pring) {
+@@ -11945,7 +11941,6 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ /**
+  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
+  * @vport: Pointer to virtual port.
+- * @pring: Pointer to driver SLI ring object.
+  * @tgt_id: SCSI ID of the target.
+  * @lun_id: LUN ID of the scsi device.
+  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+@@ -11960,18 +11955,22 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
+  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
+  * FCP iocbs associated with virtual port.
++ * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
++ * lpfc_sli4_calc_ring is used.
+  * This function returns number of iocbs it failed to abort.
+  * This function is called with no locks held.
+  **/
+ int
+-lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+-		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
++lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
++		    lpfc_ctx_cmd abort_cmd)
+ {
+ 	struct lpfc_hba *phba = vport->phba;
++	struct lpfc_sli_ring *pring = NULL;
+ 	struct lpfc_iocbq *iocbq;
+ 	int errcnt = 0, ret_val = 0;
+ 	unsigned long iflags;
+ 	int i;
++	void *fcp_cmpl = NULL;
+ 
+ 	/* all I/Os are in process of being flushed */
+ 	if (phba->hba_flag & HBA_IOQ_FLUSH)
+@@ -11985,8 +11984,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+ 			continue;
+ 
+ 		spin_lock_irqsave(&phba->hbalock, iflags);
++		if (phba->sli_rev == LPFC_SLI_REV3) {
++			pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
++			fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
++		} else if (phba->sli_rev == LPFC_SLI_REV4) {
++			pring = lpfc_sli4_calc_ring(phba, iocbq);
++			fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
++		}
+ 		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
+-						     lpfc_sli_abort_fcp_cmpl);
++						     fcp_cmpl);
+ 		spin_unlock_irqrestore(&phba->hbalock, iflags);
+ 		if (ret_val != IOCB_SUCCESS)
+ 			errcnt++;
+@@ -17072,8 +17078,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ 				"2509 RQ_DESTROY mailbox failed with "
+ 				"status x%x add_status x%x, mbx status x%x\n",
+ 				shdr_status, shdr_add_status, rc);
+-		if (rc != MBX_TIMEOUT)
+-			mempool_free(mbox, hrq->phba->mbox_mem_pool);
++		mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ 		return -ENXIO;
+ 	}
+ 	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+@@ -17170,7 +17175,9 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
+ 	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
+ 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+-	if (rc != MBX_TIMEOUT)
++	if (!phba->sli4_hba.intr_enable)
++		mempool_free(mbox, phba->mbox_mem_pool);
++	else if (rc != MBX_TIMEOUT)
+ 		mempool_free(mbox, phba->mbox_mem_pool);
+ 	if (shdr_status || shdr_add_status || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+@@ -17367,7 +17374,9 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
+ 	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+-	if (rc != MBX_TIMEOUT)
++	if (!phba->sli4_hba.intr_enable)
++		lpfc_sli4_mbox_cmd_free(phba, mbox);
++	else if (rc != MBX_TIMEOUT)
+ 		lpfc_sli4_mbox_cmd_free(phba, mbox);
+ 	if (shdr_status || shdr_add_status || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+@@ -17480,7 +17489,9 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
+ 	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
+ 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+-	if (rc != MBX_TIMEOUT)
++	if (!phba->sli4_hba.intr_enable)
++		lpfc_sli4_mbox_cmd_free(phba, mbox);
++	else if (rc != MBX_TIMEOUT)
+ 		lpfc_sli4_mbox_cmd_free(phba, mbox);
+ 	if (shdr_status || shdr_add_status || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+@@ -18064,7 +18075,6 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
+ 	if (cmd_iocbq) {
+ 		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
+ 		lpfc_nlp_put(ndlp);
+-		lpfc_nlp_not_used(ndlp);
+ 		lpfc_sli_release_iocbq(phba, cmd_iocbq);
+ 	}
+ 
+@@ -18831,8 +18841,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
+ 	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
+ 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+-	if (rc != MBX_TIMEOUT)
+-		mempool_free(mboxq, phba->mbox_mem_pool);
++	mempool_free(mboxq, phba->mbox_mem_pool);
+ 	if (shdr_status || shdr_add_status || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ 				"2514 POST_RPI_HDR mailbox failed with "
+@@ -20076,7 +20085,9 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ 			break;
+ 		}
+ 	}
+-	if (rc != MBX_TIMEOUT)
++	if (!phba->sli4_hba.intr_enable)
++		mempool_free(mbox, phba->mbox_mem_pool);
++	else if (rc != MBX_TIMEOUT)
+ 		mempool_free(mbox, phba->mbox_mem_pool);
+ 	if (shdr_status || shdr_add_status || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index ac0eef975f179..b6beacfd0f628 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -7252,6 +7252,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
+ 
+ 	ioc_info(ioc, "sending diag reset !!\n");
+ 
++	pci_cfg_access_lock(ioc->pdev);
++
+ 	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
+ 
+ 	count = 0;
+@@ -7342,10 +7344,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
+ 		goto out;
+ 	}
+ 
++	pci_cfg_access_unlock(ioc->pdev);
+ 	ioc_info(ioc, "diag reset: SUCCESS\n");
+ 	return 0;
+ 
+  out:
++	pci_cfg_access_unlock(ioc->pdev);
+ 	ioc_err(ioc, "diag reset: FAILED\n");
+ 	return -EFAULT;
+ }
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+index 44f9a05db94e1..2ec11be62a827 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+@@ -2507,7 +2507,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+ 		    __func__, karg.unique_id);
+ 		return -EPERM;
+ 	}
+-	memset(&karg.buffer_rel_condition, 0, sizeof(struct htb_rel_query));
++	memset(&karg.rel_query, 0, sizeof(karg.rel_query));
+ 	if ((ioc->diag_buffer_status[buffer_type] &
+ 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ 		ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+@@ -2520,8 +2520,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+ 		    __func__, buffer_type);
+ 		return -EPERM;
+ 	}
+-	memcpy(&karg.buffer_rel_condition, &ioc->htb_rel,
+-	    sizeof(struct  htb_rel_query));
++	memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
+ out:
+ 	if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
+ 		ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+index d2ccdafb8df2a..8f6ffb40261c9 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+@@ -50,6 +50,8 @@
+ #include <linux/miscdevice.h>
+ #endif
+ 
++#include "mpt3sas_base.h"
++
+ #ifndef MPT2SAS_MINOR
+ #define MPT2SAS_MINOR		(MPT_MINOR + 1)
+ #endif
+@@ -436,19 +438,13 @@ struct mpt3_diag_read_buffer {
+  * struct mpt3_addnl_diag_query - diagnostic buffer release reason
+  * @hdr - generic header
+  * @unique_id - unique id associated with this buffer.
+- * @buffer_rel_condition - Release condition ioctl/sysfs/reset
+- * @reserved1
+- * @trigger_type - Master/Event/scsi/MPI
+- * @trigger_info_dwords - Data Correspondig to trigger type
++ * @rel_query - release query.
+  * @reserved2
+  */
+ struct mpt3_addnl_diag_query {
+ 	struct mpt3_ioctl_header hdr;
+ 	uint32_t unique_id;
+-	uint16_t buffer_rel_condition;
+-	uint16_t reserved1;
+-	uint32_t trigger_type;
+-	uint32_t trigger_info_dwords[2];
++	struct htb_rel_query rel_query;
+ 	uint32_t reserved2[2];
+ };
+ 
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 6aa6de7291876..ae1973878cc7d 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -6483,6 +6483,9 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
+ 		if (!vphy)
+ 			return NULL;
+ 
++		if (!port->vphys_mask)
++			INIT_LIST_HEAD(&port->vphys_list);
++
+ 		/*
+ 		 * Enable bit corresponding to HBA phy number on its
+ 		 * parent hba_port object's vphys_mask field.
+@@ -6490,7 +6493,6 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
+ 		port->vphys_mask |= (1 << phy_num);
+ 		vphy->phy_mask |= (1 << phy_num);
+ 
+-		INIT_LIST_HEAD(&port->vphys_list);
+ 		list_add_tail(&vphy->list, &port->vphys_list);
+ 
+ 		ioc_info(ioc,
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 63391c9be05dd..3aa9869f6fae2 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2864,6 +2864,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
+ 	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
+ 
+ 	if (IS_FWI2_CAPABLE(ha)) {
++		int rval;
++
+ 		stats = dma_alloc_coherent(&ha->pdev->dev,
+ 		    sizeof(*stats), &stats_dma, GFP_KERNEL);
+ 		if (!stats) {
+@@ -2873,7 +2875,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
+ 		}
+ 
+ 		/* reset firmware statistics */
+-		qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
++		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
++		if (rval != QLA_SUCCESS)
++			ql_log(ql_log_warn, vha, 0x70de,
++			       "Resetting ISP statistics failed: rval = %d\n",
++			       rval);
+ 
+ 		dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
+ 		    stats, stats_dma);
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index bee8cf9f8123c..d021e51344f57 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -25,10 +25,11 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
+ 	struct bsg_job *bsg_job = sp->u.bsg_job;
+ 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ 
++	sp->free(sp);
++
+ 	bsg_reply->result = res;
+ 	bsg_job_done(bsg_job, bsg_reply->result,
+ 		       bsg_reply->reply_payload_rcv_len);
+-	sp->free(sp);
+ }
+ 
+ void qla2x00_bsg_sp_free(srb_t *sp)
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 5e188375c8713..af4831c9edf9a 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -4005,11 +4005,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ 	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
+ 		/* user wants to control IRQ setting for target mode */
+ 		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
+-		    min((u16)ha->msix_count, (u16)num_online_cpus()),
++		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
+ 		    PCI_IRQ_MSIX);
+ 	} else
+ 		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
+-		    min((u16)ha->msix_count, (u16)num_online_cpus()),
++		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
+ 		    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ 		    &desc);
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 074392560f3d1..0e07b98dfae8e 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1013,8 +1013,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+ 	if (rval != QLA_SUCCESS) {
+ 		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+ 		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+-		if (rval == QLA_INTERFACE_ERROR)
+-			goto qc24_free_sp_fail_command;
+ 		goto qc24_host_busy_free_sp;
+ 	}
+ 
+@@ -1026,11 +1024,6 @@ qc24_host_busy_free_sp:
+ qc24_target_busy:
+ 	return SCSI_MLQUEUE_TARGET_BUSY;
+ 
+-qc24_free_sp_fail_command:
+-	sp->free(sp);
+-	CMD_SP(cmd) = NULL;
+-	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+-
+ qc24_fail_command:
+ 	cmd->scsi_done(cmd);
+ 
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index a1dacb6e993e7..c30f6047410fc 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -5488,6 +5488,8 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
+ 
+ 				list_del(&io_request->request_list_entry);
+ 				set_host_byte(scmd, DID_RESET);
++				pqi_free_io_request(io_request);
++				scsi_dma_unmap(scmd);
+ 				pqi_scsi_done(scmd);
+ 			}
+ 
+@@ -5524,6 +5526,8 @@ static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
+ 
+ 				list_del(&io_request->request_list_entry);
+ 				set_host_byte(scmd, DID_RESET);
++				pqi_free_io_request(io_request);
++				scsi_dma_unmap(scmd);
+ 				pqi_scsi_done(scmd);
+ 			}
+ 
+@@ -6598,6 +6602,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
+ 	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
+ 	shost->unique_id = shost->irq;
+ 	shost->nr_hw_queues = ctrl_info->num_queue_groups;
++	shost->host_tagset = 1;
+ 	shost->hostdata[0] = (unsigned long)ctrl_info;
+ 
+ 	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
+@@ -8216,6 +8221,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x152d, 0x8a37)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x193d, 0x8460)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x193d, 0x1104)
+@@ -8288,6 +8297,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1bd4, 0x004f)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0051)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0052)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0053)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0054)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x19e5, 0xd227)
+@@ -8448,6 +8473,122 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_ADAPTEC2, 0x1380)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1400)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1402)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1410)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1411)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1412)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1420)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1430)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1440)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1441)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1450)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1452)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1460)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1461)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1462)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1470)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1471)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1472)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1480)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1490)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1491)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_ADVANTECH, 0x8312)
+@@ -8512,6 +8653,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_HP, 0x1001)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_HP, 0x1002)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_HP, 0x1100)
+@@ -8520,6 +8665,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_HP, 0x1101)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1590, 0x0294)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1590, 0x02db)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1590, 0x02dc)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1590, 0x032e)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1d8d, 0x0800)
+diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
+index df9a5ca8c99c4..0118bd986f902 100644
+--- a/drivers/soc/tegra/pmc.c
++++ b/drivers/soc/tegra/pmc.c
+@@ -317,6 +317,8 @@ struct tegra_pmc_soc {
+ 				   bool invert);
+ 	int (*irq_set_wake)(struct irq_data *data, unsigned int on);
+ 	int (*irq_set_type)(struct irq_data *data, unsigned int type);
++	int (*powergate_set)(struct tegra_pmc *pmc, unsigned int id,
++			     bool new_state);
+ 
+ 	const char * const *reset_sources;
+ 	unsigned int num_reset_sources;
+@@ -517,6 +519,63 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
+ 	return -ENODEV;
+ }
+ 
++static int tegra20_powergate_set(struct tegra_pmc *pmc, unsigned int id,
++				 bool new_state)
++{
++	unsigned int retries = 100;
++	bool status;
++	int ret;
++
++	/*
++	 * As per TRM documentation, the toggle command will be dropped by PMC
++	 * if there is contention with a HW-initiated toggling (i.e. CPU core
++	 * power-gated), the command should be retried in that case.
++	 */
++	do {
++		tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
++
++		/* wait for PMC to execute the command */
++		ret = readx_poll_timeout(tegra_powergate_state, id, status,
++					 status == new_state, 1, 10);
++	} while (ret == -ETIMEDOUT && retries--);
++
++	return ret;
++}
++
++static inline bool tegra_powergate_toggle_ready(struct tegra_pmc *pmc)
++{
++	return !(tegra_pmc_readl(pmc, PWRGATE_TOGGLE) & PWRGATE_TOGGLE_START);
++}
++
++static int tegra114_powergate_set(struct tegra_pmc *pmc, unsigned int id,
++				  bool new_state)
++{
++	bool status;
++	int err;
++
++	/* wait while PMC power gating is contended */
++	err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
++				 status == true, 1, 100);
++	if (err)
++		return err;
++
++	tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
++
++	/* wait for PMC to accept the command */
++	err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
++				 status == true, 1, 100);
++	if (err)
++		return err;
++
++	/* wait for PMC to execute the command */
++	err = readx_poll_timeout(tegra_powergate_state, id, status,
++				 status == new_state, 10, 100000);
++	if (err)
++		return err;
++
++	return 0;
++}
++
+ /**
+  * tegra_powergate_set() - set the state of a partition
+  * @pmc: power management controller
+@@ -526,7 +585,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
+ static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ 			       bool new_state)
+ {
+-	bool status;
+ 	int err;
+ 
+ 	if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
+@@ -539,10 +597,7 @@ static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ 		return 0;
+ 	}
+ 
+-	tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+-
+-	err = readx_poll_timeout(tegra_powergate_state, id, status,
+-				 status == new_state, 10, 100000);
++	err = pmc->soc->powergate_set(pmc, id, new_state);
+ 
+ 	mutex_unlock(&pmc->powergates_lock);
+ 
+@@ -2699,6 +2754,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
+ 	.regs = &tegra20_pmc_regs,
+ 	.init = tegra20_pmc_init,
+ 	.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
++	.powergate_set = tegra20_powergate_set,
+ 	.reset_sources = NULL,
+ 	.num_reset_sources = 0,
+ 	.reset_levels = NULL,
+@@ -2757,6 +2813,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
+ 	.regs = &tegra20_pmc_regs,
+ 	.init = tegra20_pmc_init,
+ 	.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
++	.powergate_set = tegra20_powergate_set,
+ 	.reset_sources = tegra30_reset_sources,
+ 	.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
+ 	.reset_levels = NULL,
+@@ -2811,6 +2868,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
+ 	.regs = &tegra20_pmc_regs,
+ 	.init = tegra20_pmc_init,
+ 	.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
++	.powergate_set = tegra114_powergate_set,
+ 	.reset_sources = tegra30_reset_sources,
+ 	.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
+ 	.reset_levels = NULL,
+@@ -2925,6 +2983,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
+ 	.regs = &tegra20_pmc_regs,
+ 	.init = tegra20_pmc_init,
+ 	.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
++	.powergate_set = tegra114_powergate_set,
+ 	.reset_sources = tegra30_reset_sources,
+ 	.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
+ 	.reset_levels = NULL,
+@@ -3048,6 +3107,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
+ 	.regs = &tegra20_pmc_regs,
+ 	.init = tegra20_pmc_init,
+ 	.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
++	.powergate_set = tegra114_powergate_set,
+ 	.irq_set_wake = tegra210_pmc_irq_set_wake,
+ 	.irq_set_type = tegra210_pmc_irq_set_type,
+ 	.reset_sources = tegra210_reset_sources,
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index d05442e646a38..57c59a33ce616 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -1450,10 +1450,12 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
+ 	}
+ 
+ 	/* Prepare slaves for clock stop */
+-	ret = sdw_bus_prep_clk_stop(&cdns->bus);
+-	if (ret < 0) {
+-		dev_err(cdns->dev, "prepare clock stop failed %d", ret);
+-		return ret;
++	if (slave_present) {
++		ret = sdw_bus_prep_clk_stop(&cdns->bus);
++		if (ret < 0 && ret != -ENODATA) {
++			dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
++			return ret;
++		}
+ 	}
+ 
+ 	/*
+diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
+index eb9a243e95265..98ace748cd986 100644
+--- a/drivers/spi/spi-ath79.c
++++ b/drivers/spi/spi-ath79.c
+@@ -156,8 +156,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
+ 
+ 	master->use_gpio_descriptors = true;
+ 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+-	master->setup = spi_bitbang_setup;
+-	master->cleanup = spi_bitbang_cleanup;
++	master->flags = SPI_MASTER_GPIO_SS;
+ 	if (pdata) {
+ 		master->bus_num = pdata->bus_num;
+ 		master->num_chipselect = pdata->num_chipselect;
+diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
+index 75b33d7d14b04..9a4d942fafcf5 100644
+--- a/drivers/spi/spi-dln2.c
++++ b/drivers/spi/spi-dln2.c
+@@ -780,7 +780,7 @@ exit_free_master:
+ 
+ static int dln2_spi_remove(struct platform_device *pdev)
+ {
+-	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
++	struct spi_master *master = platform_get_drvdata(pdev);
+ 	struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
+index 36a4922a134a1..ccd817ee4917b 100644
+--- a/drivers/spi/spi-omap-100k.c
++++ b/drivers/spi/spi-omap-100k.c
+@@ -424,7 +424,7 @@ err:
+ 
+ static int omap1_spi100k_remove(struct platform_device *pdev)
+ {
+-	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
++	struct spi_master *master = platform_get_drvdata(pdev);
+ 	struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+@@ -438,7 +438,7 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
+ #ifdef CONFIG_PM
+ static int omap1_spi100k_runtime_suspend(struct device *dev)
+ {
+-	struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
++	struct spi_master *master = dev_get_drvdata(dev);
+ 	struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ 
+ 	clk_disable_unprepare(spi100k->ick);
+@@ -449,7 +449,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev)
+ 
+ static int omap1_spi100k_runtime_resume(struct device *dev)
+ {
+-	struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
++	struct spi_master *master = dev_get_drvdata(dev);
+ 	struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ 	int ret;
+ 
+diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
+index 8dcb2e70735c9..d39dec6d1c91e 100644
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -1263,7 +1263,7 @@ static int spi_qup_remove(struct platform_device *pdev)
+ 	struct spi_qup *controller = spi_master_get_devdata(master);
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
+index 947e6b9dc9f4d..2786470a52011 100644
+--- a/drivers/spi/spi-stm32-qspi.c
++++ b/drivers/spi/spi-stm32-qspi.c
+@@ -727,21 +727,31 @@ static int __maybe_unused stm32_qspi_suspend(struct device *dev)
+ {
+ 	pinctrl_pm_select_sleep_state(dev);
+ 
+-	return 0;
++	return pm_runtime_force_suspend(dev);
+ }
+ 
+ static int __maybe_unused stm32_qspi_resume(struct device *dev)
+ {
+ 	struct stm32_qspi *qspi = dev_get_drvdata(dev);
++	int ret;
++
++	ret = pm_runtime_force_resume(dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	pinctrl_pm_select_default_state(dev);
+-	clk_prepare_enable(qspi->clk);
++
++	ret = pm_runtime_get_sync(dev);
++	if (ret < 0) {
++		pm_runtime_put_noidle(dev);
++		return ret;
++	}
+ 
+ 	writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
+ 	writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
+ 
+-	pm_runtime_mark_last_busy(qspi->dev);
+-	pm_runtime_put_autosuspend(qspi->dev);
++	pm_runtime_mark_last_busy(dev);
++	pm_runtime_put_autosuspend(dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
+index 9417385c09217..e06aafe169e0c 100644
+--- a/drivers/spi/spi-ti-qspi.c
++++ b/drivers/spi/spi-ti-qspi.c
+@@ -733,6 +733,17 @@ static int ti_qspi_runtime_resume(struct device *dev)
+ 	return 0;
+ }
+ 
++static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
++{
++	if (qspi->rx_bb_addr)
++		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
++				  qspi->rx_bb_addr,
++				  qspi->rx_bb_dma_addr);
++
++	if (qspi->rx_chan)
++		dma_release_channel(qspi->rx_chan);
++}
++
+ static const struct of_device_id ti_qspi_match[] = {
+ 	{.compatible = "ti,dra7xxx-qspi" },
+ 	{.compatible = "ti,am4372-qspi" },
+@@ -886,6 +897,8 @@ no_dma:
+ 	if (!ret)
+ 		return 0;
+ 
++	ti_qspi_dma_cleanup(qspi);
++
+ 	pm_runtime_disable(&pdev->dev);
+ free_master:
+ 	spi_master_put(master);
+@@ -904,12 +917,7 @@ static int ti_qspi_remove(struct platform_device *pdev)
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 
+-	if (qspi->rx_bb_addr)
+-		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
+-				  qspi->rx_bb_addr,
+-				  qspi->rx_bb_dma_addr);
+-	if (qspi->rx_chan)
+-		dma_release_channel(qspi->rx_chan);
++	ti_qspi_dma_cleanup(qspi);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index b08efe88ccd6c..927c2a28011f6 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -795,7 +795,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
+ 
+ /*-------------------------------------------------------------------------*/
+ 
+-static void spi_set_cs(struct spi_device *spi, bool enable)
++static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
+ {
+ 	bool enable1 = enable;
+ 
+@@ -803,7 +803,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
+ 	 * Avoid calling into the driver (or doing delays) if the chip select
+ 	 * isn't actually changing from the last time this was called.
+ 	 */
+-	if ((spi->controller->last_cs_enable == enable) &&
++	if (!force && (spi->controller->last_cs_enable == enable) &&
+ 	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
+ 		return;
+ 
+@@ -1253,7 +1253,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
+ 	struct spi_statistics *statm = &ctlr->statistics;
+ 	struct spi_statistics *stats = &msg->spi->statistics;
+ 
+-	spi_set_cs(msg->spi, true);
++	spi_set_cs(msg->spi, true, false);
+ 
+ 	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
+ 	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
+@@ -1321,9 +1321,9 @@ fallback_pio:
+ 					 &msg->transfers)) {
+ 				keep_cs = true;
+ 			} else {
+-				spi_set_cs(msg->spi, false);
++				spi_set_cs(msg->spi, false, false);
+ 				_spi_transfer_cs_change_delay(msg, xfer);
+-				spi_set_cs(msg->spi, true);
++				spi_set_cs(msg->spi, true, false);
+ 			}
+ 		}
+ 
+@@ -1332,7 +1332,7 @@ fallback_pio:
+ 
+ out:
+ 	if (ret != 0 || !keep_cs)
+-		spi_set_cs(msg->spi, false);
++		spi_set_cs(msg->spi, false, false);
+ 
+ 	if (msg->status == -EINPROGRESS)
+ 		msg->status = ret;
+@@ -3423,11 +3423,11 @@ int spi_setup(struct spi_device *spi)
+ 		 */
+ 		status = 0;
+ 
+-		spi_set_cs(spi, false);
++		spi_set_cs(spi, false, true);
+ 		pm_runtime_mark_last_busy(spi->controller->dev.parent);
+ 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
+ 	} else {
+-		spi_set_cs(spi, false);
++		spi_set_cs(spi, false, true);
+ 	}
+ 
+ 	mutex_unlock(&spi->controller->io_mutex);
+diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
+index 453bb69135505..f1e6b25978534 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
++++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
+@@ -221,6 +221,9 @@ int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
+ 	unsigned long irqflags;
+ 	int err = 0;
+ 
++	if (WARN_ON(css_pipe_id >= IA_CSS_PIPE_ID_NUM))
++		return -EINVAL;
++
+ 	while (pipe->buffers_in_css < ATOMISP_CSS_Q_DEPTH) {
+ 		struct videobuf_buffer *vb;
+ 
+diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
+index e10ce103a5b4f..94a0467d673b8 100644
+--- a/drivers/staging/media/imx/imx-media-capture.c
++++ b/drivers/staging/media/imx/imx-media-capture.c
+@@ -557,7 +557,7 @@ static int capture_validate_fmt(struct capture_priv *priv)
+ 		priv->vdev.fmt.fmt.pix.height != f.fmt.pix.height ||
+ 		priv->vdev.cc->cs != cc->cs ||
+ 		priv->vdev.compose.width != compose.width ||
+-		priv->vdev.compose.height != compose.height) ? -EINVAL : 0;
++		priv->vdev.compose.height != compose.height) ? -EPIPE : 0;
+ }
+ 
+ static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
+diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
+index 60aa02eb7d2a9..6d9c49b395314 100644
+--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
++++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
+@@ -686,6 +686,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ 
+ 	dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
+ 
++	css_q = imgu_node_to_queue(node);
+ 	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ 		unsigned int inode = imgu_map_node(imgu, i);
+ 
+@@ -693,6 +694,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ 		if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
+ 			continue;
+ 
++		/* CSS expects some format on OUT queue */
++		if (i != IPU3_CSS_QUEUE_OUT &&
++		    !imgu_pipe->nodes[inode].enabled) {
++			fmts[i] = NULL;
++			continue;
++		}
++
++		if (i == css_q) {
++			fmts[i] = &f->fmt.pix_mp;
++			continue;
++		}
++
+ 		if (try) {
+ 			fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
+ 					  sizeof(struct v4l2_pix_format_mplane),
+@@ -705,10 +718,6 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ 			fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
+ 		}
+ 
+-		/* CSS expects some format on OUT queue */
+-		if (i != IPU3_CSS_QUEUE_OUT &&
+-		    !imgu_pipe->nodes[inode].enabled)
+-			fmts[i] = NULL;
+ 	}
+ 
+ 	if (!try) {
+@@ -725,16 +734,10 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ 		rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
+ 	}
+ 
+-	/*
+-	 * imgu doesn't set the node to the value given by user
+-	 * before we return success from this function, so set it here.
+-	 */
+-	css_q = imgu_node_to_queue(node);
+ 	if (!fmts[css_q]) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+-	*fmts[css_q] = f->fmt.pix_mp;
+ 
+ 	if (try)
+ 		ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
+@@ -745,15 +748,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	if (try)
+-		f->fmt.pix_mp = *fmts[css_q];
+-	else
+-		f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt;
++	/*
++	 * imgu doesn't set the node to the value given by user
++	 * before we return success from this function, so set it here.
++	 */
++	if (!try)
++		imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
+ 
+ out:
+ 	if (try) {
+ 		for (i = 0; i < IPU3_CSS_QUEUES; i++)
+-			kfree(fmts[i]);
++			if (i != css_q)
++				kfree(fmts[i]);
+ 	}
+ 
+ 	return ret;
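
The imgu_fmt() rework above also changes the ownership rules for the
fmts[] array: the slot for the queue being configured (css_q) now points
straight at the caller's buffer, so the try-path cleanup must skip that
slot when freeing the duplicated entries. A minimal user-space sketch of
the same borrowed-vs-owned pattern (names are illustrative, not from the
driver):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NQUEUES 4

    int main(void)
    {
        char user_fmt[] = "caller-owned";
        char *fmts[NQUEUES];
        int css_q = 2, i;

        for (i = 0; i < NQUEUES; i++) {
            if (i == css_q)
                fmts[i] = user_fmt;        /* borrowed: caller's buffer */
            else
                fmts[i] = strdup("owned"); /* duplicated: we must free it */
        }

        for (i = 0; i < NQUEUES; i++)
            if (i != css_q)                /* never free the borrowed slot */
                free(fmts[i]);

        printf("caller still owns: %s\n", user_fmt);
        return 0;
    }
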
+diff --git a/drivers/staging/wimax/i2400m/op-rfkill.c b/drivers/staging/wimax/i2400m/op-rfkill.c
+index fbddf2e18c142..44698a1aae87a 100644
+--- a/drivers/staging/wimax/i2400m/op-rfkill.c
++++ b/drivers/staging/wimax/i2400m/op-rfkill.c
+@@ -86,7 +86,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
+ 	if (cmd == NULL)
+ 		goto error_alloc;
+ 	cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
+-	cmd->hdr.length = sizeof(cmd->sw_rf);
++	cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
+ 	cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
+ 	cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
+ 	cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 9ee797b8cb7ea..508b49b0eaf51 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -620,8 +620,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
+ 			unsigned char *buf;
+ 
+ 			buf = transport_kmap_data_sg(cmd);
+-			if (!buf)
++			if (!buf) {
+ 				; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
++			}
+ 
+ 			if (cdb[0] == MODE_SENSE_10) {
+ 				if (!(buf[3] & 0x80))
+diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
+index 319a1e701163b..ddb8f9ecf3078 100644
+--- a/drivers/tee/optee/core.c
++++ b/drivers/tee/optee/core.c
+@@ -79,16 +79,6 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
+ 				return rc;
+ 			p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
+ 			p->u.memref.shm = shm;
+-
+-			/* Check that the memref is covered by the shm object */
+-			if (p->u.memref.size) {
+-				size_t o = p->u.memref.shm_offs +
+-					   p->u.memref.size - 1;
+-
+-				rc = tee_shm_get_pa(shm, o, NULL);
+-				if (rc)
+-					return rc;
+-			}
+ 			break;
+ 		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+ 		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
+index 10af3341e5eaa..6956581ed7a4f 100644
+--- a/drivers/thermal/cpufreq_cooling.c
++++ b/drivers/thermal/cpufreq_cooling.c
+@@ -125,7 +125,7 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
+ {
+ 	int i;
+ 
+-	for (i = cpufreq_cdev->max_level; i >= 0; i--) {
++	for (i = cpufreq_cdev->max_level; i > 0; i--) {
+ 		if (power >= cpufreq_cdev->em->table[i].power)
+ 			break;
+ 	}
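
The loop-bound change in cpu_power_to_freq() closes an off-by-one: with
"i >= 0" the downward search could fall out of the loop at i == -1, and
the subsequent table[i] access would read before the array. Stopping at
"i > 0" makes index 0 the floor. A small sketch of the corrected search,
with invented values:

    #include <stdio.h>

    int main(void)
    {
        int power_table[] = { 10, 20, 40, 80 };
        int levels = 4, want = 5, i;

        for (i = levels - 1; i > 0; i--)   /* never descends past 0 */
            if (want >= power_table[i])
                break;

        printf("chosen level %d (power %d)\n", i, power_table[i]);
        return 0;
    }
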
+diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
+index aaa07180ab482..645432ce63659 100644
+--- a/drivers/thermal/gov_fair_share.c
++++ b/drivers/thermal/gov_fair_share.c
+@@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
+ 	int total_instance = 0;
+ 	int cur_trip_level = get_trip_level(tz);
+ 
++	mutex_lock(&tz->lock);
++
+ 	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ 		if (instance->trip != trip)
+ 			continue;
+@@ -110,6 +112,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
+ 		mutex_unlock(&instance->cdev->lock);
+ 		thermal_cdev_update(cdev);
+ 	}
++
++	mutex_unlock(&tz->lock);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 51dafc06f5414..2406653d38b78 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2384,8 +2384,18 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+ 		/* Don't register device 0 - this is the control channel and not
+ 		   a usable tty interface */
+ 		base = mux_num_to_base(gsm); /* Base for this MUX */
+-		for (i = 1; i < NUM_DLCI; i++)
+-			tty_register_device(gsm_tty_driver, base + i, NULL);
++		for (i = 1; i < NUM_DLCI; i++) {
++			struct device *dev;
++
++			dev = tty_register_device(gsm_tty_driver,
++							base + i, NULL);
++			if (IS_ERR(dev)) {
++				for (i--; i >= 1; i--)
++					tty_unregister_device(gsm_tty_driver,
++								base + i);
++				return PTR_ERR(dev);
++			}
++		}
+ 	}
+ 	return ret;
+ }
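
The gsmld_attach_gsm() change replaces a fire-and-forget registration
loop with the usual kernel unwind idiom: if registering the i-th device
fails, everything registered so far is torn down in reverse order before
the error is propagated. A stand-alone sketch of the idiom, where
register_one()/unregister_one() are hypothetical stand-ins for
tty_register_device()/tty_unregister_device():

    #include <stdio.h>

    static int register_one(int i)
    {
        return (i == 3) ? -1 : 0;   /* pretend index 3 fails */
    }

    static void unregister_one(int i)
    {
        printf("rolled back %d\n", i);
    }

    static int register_all(int n)
    {
        int i;

        for (i = 1; i < n; i++) {
            if (register_one(i) < 0) {
                /* Unwind in reverse: undo only what succeeded. */
                for (i--; i >= 1; i--)
                    unregister_one(i);
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        return register_all(5) ? 1 : 0;
    }
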
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 284b07224c555..0cc360da5426a 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -1381,6 +1381,7 @@ struct vc_data *vc_deallocate(unsigned int currcons)
+ 		atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
+ 		vcs_remove_sysfs(currcons);
+ 		visual_deinit(vc);
++		con_free_unimap(vc);
+ 		put_pid(vc->vt_pid);
+ 		vc_uniscr_set(vc, NULL);
+ 		kfree(vc->vc_screenbuf);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 7f71218cc1e54..404507d1b76f1 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3556,7 +3556,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 	u16		portchange, portstatus;
+ 
+ 	if (!test_and_set_bit(port1, hub->child_usage_bits)) {
+-		status = pm_runtime_get_sync(&port_dev->dev);
++		status = pm_runtime_resume_and_get(&port_dev->dev);
+ 		if (status < 0) {
+ 			dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
+ 					status);
+diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
+index 55f1d14fc4148..800c8b6c55ff1 100644
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -307,6 +307,7 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
+ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
+ {
+ 	int ret;
++	u32 hprt0;
+ 
+ 	/* Clear interrupt */
+ 	dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
+@@ -327,6 +328,13 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
+ 		 * established
+ 		 */
+ 		dwc2_hsotg_disconnect(hsotg);
++	} else {
++		/* Turn on the port power bit. */
++		hprt0 = dwc2_read_hprt0(hsotg);
++		hprt0 |= HPRT0_PWR;
++		dwc2_writel(hsotg, hprt0, HPRT0);
++		/* Connect hcd after port power is set. */
++		dwc2_hcd_connect(hsotg);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index f2448d0a9d393..126f0e10b3ef4 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -114,6 +114,8 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+ 	dwc->current_dr_role = mode;
+ }
+ 
++static int dwc3_core_soft_reset(struct dwc3 *dwc);
++
+ static void __dwc3_set_mode(struct work_struct *work)
+ {
+ 	struct dwc3 *dwc = work_to_dwc(work);
+@@ -121,6 +123,8 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 	int ret;
+ 	u32 reg;
+ 
++	mutex_lock(&dwc->mutex);
++
+ 	pm_runtime_get_sync(dwc->dev);
+ 
+ 	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
+@@ -154,6 +158,25 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 		break;
+ 	}
+ 
++	/* For DRD host or device mode only */
++	if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) {
++		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
++		reg |= DWC3_GCTL_CORESOFTRESET;
++		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
++
++		/*
++		 * Wait for internal clocks to synchronize. DWC_usb31 and
++		 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
++		 * keep it consistent across different IPs, let's wait up to
++		 * 100ms before clearing GCTL.CORESOFTRESET.
++		 */
++		msleep(100);
++
++		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
++		reg &= ~DWC3_GCTL_CORESOFTRESET;
++		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
++	}
++
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 
+ 	dwc3_set_prtcap(dwc, dwc->desired_dr_role);
+@@ -178,6 +201,8 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 		}
+ 		break;
+ 	case DWC3_GCTL_PRTCAP_DEVICE:
++		dwc3_core_soft_reset(dwc);
++
+ 		dwc3_event_buffers_setup(dwc);
+ 
+ 		if (dwc->usb2_phy)
+@@ -200,6 +225,7 @@ static void __dwc3_set_mode(struct work_struct *work)
+ out:
+ 	pm_runtime_mark_last_busy(dwc->dev);
+ 	pm_runtime_put_autosuspend(dwc->dev);
++	mutex_unlock(&dwc->mutex);
+ }
+ 
+ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+@@ -1277,6 +1303,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 				"snps,usb3_lpm_capable");
+ 	dwc->usb2_lpm_disable = device_property_read_bool(dev,
+ 				"snps,usb2-lpm-disable");
++	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
++				"snps,usb2-gadget-lpm-disable");
+ 	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
+ 				&rx_thr_num_pkt_prd);
+ 	device_property_read_u8(dev, "snps,rx-max-burst-prd",
+@@ -1543,6 +1571,7 @@ static int dwc3_probe(struct platform_device *pdev)
+ 	dwc3_cache_hwparams(dwc);
+ 
+ 	spin_lock_init(&dwc->lock);
++	mutex_init(&dwc->mutex);
+ 
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_use_autosuspend(dev);
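
The DRD mode-switch path added above follows the standard
assert/settle/deassert soft-reset sequence: set GCTL.CORESOFTRESET, give
the internal clocks time to synchronize, then clear the bit again. A
minimal sketch of that sequence, using fake register accessors in place
of dwc3_readl()/dwc3_writel() and msleep():

    #include <stdio.h>
    #include <stdint.h>

    #define GCTL_CORESOFTRESET (1u << 11)

    static uint32_t fake_gctl;                   /* stand-in register */
    static uint32_t reg_read(void)    { return fake_gctl; }
    static void reg_write(uint32_t v) { fake_gctl = v; }
    static void sleep_ms(int ms)      { (void)ms; /* no-op in sketch */ }

    static void core_soft_reset(void)
    {
        uint32_t reg = reg_read();

        reg |= GCTL_CORESOFTRESET;       /* assert the reset bit */
        reg_write(reg);

        sleep_ms(100);                   /* upper bound for clock sync */

        reg = reg_read();
        reg &= ~GCTL_CORESOFTRESET;      /* deassert it */
        reg_write(reg);
    }

    int main(void)
    {
        core_soft_reset();
        printf("GCTL after sequence: 0x%08x\n", fake_gctl);
        return 0;
    }
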
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 052b20d526510..453cfebd4d044 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -13,6 +13,7 @@
+ 
+ #include <linux/device.h>
+ #include <linux/spinlock.h>
++#include <linux/mutex.h>
+ #include <linux/ioport.h>
+ #include <linux/list.h>
+ #include <linux/bitops.h>
+@@ -946,6 +947,7 @@ struct dwc3_scratchpad_array {
+  * @scratch_addr: dma address of scratchbuf
+  * @ep0_in_setup: one control transfer is completed and enter setup phase
+  * @lock: for synchronizing
++ * @mutex: for mode switching
+  * @dev: pointer to our struct device
+  * @sysdev: pointer to the DMA-capable device
+  * @xhci: pointer to our xHCI child
+@@ -1034,7 +1036,8 @@ struct dwc3_scratchpad_array {
+  * @dis_start_transfer_quirk: set if start_transfer failure SW workaround is
+  *			not needed for DWC_usb31 version 1.70a-ea06 and below
+ * @usb3_lpm_capable: set if hardware supports Link Power Management
+- * @usb2_lpm_disable: set to disable usb2 lpm
++ * @usb2_lpm_disable: set to disable usb2 lpm for host
++ * @usb2_gadget_lpm_disable: set to disable usb2 lpm for gadget
+  * @disable_scramble_quirk: set if we enable the disable scramble quirk
+  * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
+  * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
+@@ -1085,6 +1088,9 @@ struct dwc3 {
+ 	/* device lock */
+ 	spinlock_t		lock;
+ 
++	/* mode switching lock */
++	struct mutex		mutex;
++
+ 	struct device		*dev;
+ 	struct device		*sysdev;
+ 
+@@ -1238,6 +1244,7 @@ struct dwc3 {
+ 	unsigned		dis_start_transfer_quirk:1;
+ 	unsigned		usb3_lpm_capable:1;
+ 	unsigned		usb2_lpm_disable:1;
++	unsigned		usb2_gadget_lpm_disable:1;
+ 
+ 	unsigned		disable_scramble_quirk:1;
+ 	unsigned		u2exit_lfps_quirk:1;
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 4c5c6972124a4..95f954a1d7be9 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -41,6 +41,7 @@
+ #define PCI_DEVICE_ID_INTEL_TGPH		0x43ee
+ #define PCI_DEVICE_ID_INTEL_JSP			0x4dee
+ #define PCI_DEVICE_ID_INTEL_ADLP		0x51ee
++#define PCI_DEVICE_ID_INTEL_ADLM		0x54ee
+ #define PCI_DEVICE_ID_INTEL_ADLS		0x7ae1
+ #define PCI_DEVICE_ID_INTEL_TGL			0x9a15
+ 
+@@ -388,6 +389,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLP),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLM),
++	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
++
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index c7ef218e7a8cf..f85eda6bc988e 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -308,13 +308,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
+ 	}
+ 
+ 	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
+-		int		needs_wakeup;
++		int link_state;
+ 
+-		needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
+-				dwc->link_state == DWC3_LINK_STATE_U2 ||
+-				dwc->link_state == DWC3_LINK_STATE_U3);
+-
+-		if (unlikely(needs_wakeup)) {
++		link_state = dwc3_gadget_get_link_state(dwc);
++		if (link_state == DWC3_LINK_STATE_U1 ||
++		    link_state == DWC3_LINK_STATE_U2 ||
++		    link_state == DWC3_LINK_STATE_U3) {
+ 			ret = __dwc3_gadget_wakeup(dwc);
+ 			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
+ 					ret);
+@@ -608,12 +607,14 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
+ 		u8 bInterval_m1;
+ 
+ 		/*
+-		 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
+-		 * must be set to 0 when the controller operates in full-speed.
++		 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
++		 *
++		 * NOTE: The programming guide incorrectly stated bInterval_m1
++		 * must be set to 0 when operating in fullspeed. Internally the
++		 * controller does not have this limitation. See DWC_usb3x
++		 * programming guide section 3.2.2.1.
+ 		 */
+ 		bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
+-		if (dwc->gadget->speed == USB_SPEED_FULL)
+-			bInterval_m1 = 0;
+ 
+ 		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
+ 		    dwc->gadget->speed == USB_SPEED_FULL)
+@@ -1973,6 +1974,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
+ 	case DWC3_LINK_STATE_RESET:
+ 	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
+ 	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
++	case DWC3_LINK_STATE_U2:	/* in HS, means Sleep (L1) */
++	case DWC3_LINK_STATE_U1:
+ 	case DWC3_LINK_STATE_RESUME:
+ 		break;
+ 	default:
+@@ -3322,6 +3325,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+ {
+ 	u32			reg;
+ 
++	/*
++	 * Ideally, dwc3_reset_gadget() would trigger the function
++	 * drivers to stop any active transfers through ep disable.
++	 * However, for functions which defer ep disable, such as mass
++	 * storage, we will need to rely on the call to stop active
++	 * transfers here, and avoid allowing new requests to be queued.
++	 */
++	dwc->connected = false;
++
+ 	/*
+ 	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
+ 	 * would cause a missing Disconnect Event if there's a
+@@ -3460,6 +3472,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
+ 	/* Enable USB2 LPM Capability */
+ 
+ 	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
++	    !dwc->usb2_gadget_lpm_disable &&
+ 	    (speed != DWC3_DSTS_SUPERSPEED) &&
+ 	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
+ 		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+@@ -3486,6 +3499,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
+ 
+ 		dwc3_gadget_dctl_write_safe(dwc, reg);
+ 	} else {
++		if (dwc->usb2_gadget_lpm_disable) {
++			reg = dwc3_readl(dwc->regs, DWC3_DCFG);
++			reg &= ~DWC3_DCFG_LPM_CAP;
++			dwc3_writel(dwc->regs, DWC3_DCFG, reg);
++		}
++
+ 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ 		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
+ 		dwc3_gadget_dctl_write_safe(dwc, reg);
+@@ -3934,7 +3953,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
+ 	dwc->gadget->ssp_rate		= USB_SSP_GEN_UNKNOWN;
+ 	dwc->gadget->sg_supported	= true;
+ 	dwc->gadget->name		= "dwc3-gadget";
+-	dwc->gadget->lpm_capable	= true;
++	dwc->gadget->lpm_capable	= !dwc->usb2_gadget_lpm_disable;
+ 
+ 	/*
+ 	 * FIXME We might be setting max_speed to <SUPER, however versions
+diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
+index 2d115353424c2..8bb25773b61e9 100644
+--- a/drivers/usb/gadget/config.c
++++ b/drivers/usb/gadget/config.c
+@@ -194,9 +194,13 @@ EXPORT_SYMBOL_GPL(usb_assign_descriptors);
+ void usb_free_all_descriptors(struct usb_function *f)
+ {
+ 	usb_free_descriptors(f->fs_descriptors);
++	f->fs_descriptors = NULL;
+ 	usb_free_descriptors(f->hs_descriptors);
++	f->hs_descriptors = NULL;
+ 	usb_free_descriptors(f->ss_descriptors);
++	f->ss_descriptors = NULL;
+ 	usb_free_descriptors(f->ssp_descriptors);
++	f->ssp_descriptors = NULL;
+ }
+ EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
+ 
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 801a8b668a35a..10a5d9f0f2b90 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -2640,6 +2640,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
+ 
+ 	do { /* lang_count > 0 so we can use do-while */
+ 		unsigned needed = needed_count;
++		u32 str_per_lang = str_count;
+ 
+ 		if (len < 3)
+ 			goto error_free;
+@@ -2675,7 +2676,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
+ 
+ 			data += length + 1;
+ 			len -= length + 1;
+-		} while (--str_count);
++		} while (--str_per_lang);
+ 
+ 		s->id = 0;   /* terminator */
+ 		s->s = NULL;
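
The __ffs_data_got_strings() fix cures a loop-counter aliasing bug: the
inner do/while used to decrement the shared str_count, so later
languages saw an exhausted (or underflowed) count. Copying the counter
into str_per_lang per outer iteration restores independent inner loops,
as this stand-alone sketch shows:

    #include <stdio.h>

    int main(void)
    {
        int lang_count = 2, str_count = 3;
        int processed = 0;

        while (lang_count--) {
            int str_per_lang = str_count;   /* fresh copy per language */

            do {
                processed++;
            } while (--str_per_lang);
        }
        printf("processed %d strings (expect %d)\n", processed, 2 * 3);
        return 0;
    }
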
+diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
+index 560382e0a8f38..e65f474ad7b3b 100644
+--- a/drivers/usb/gadget/function/f_uac1.c
++++ b/drivers/usb/gadget/function/f_uac1.c
+@@ -19,6 +19,9 @@
+ #include "u_audio.h"
+ #include "u_uac1.h"
+ 
++/* UAC1 spec: 3.7.2.3 Audio Channel Cluster Format */
++#define UAC1_CHANNEL_MASK 0x0FFF
++
+ struct f_uac1 {
+ 	struct g_audio g_audio;
+ 	u8 ac_intf, as_in_intf, as_out_intf;
+@@ -30,6 +33,11 @@ static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
+ 	return container_of(f, struct f_uac1, g_audio.func);
+ }
+ 
++static inline struct f_uac1_opts *g_audio_to_uac1_opts(struct g_audio *audio)
++{
++	return container_of(audio->func.fi, struct f_uac1_opts, func_inst);
++}
++
+ /*
+  * DESCRIPTORS ... most are static, but strings and full
+  * configuration descriptors are built on demand.
+@@ -505,11 +513,42 @@ static void f_audio_disable(struct usb_function *f)
+ 
+ /*-------------------------------------------------------------------------*/
+ 
++static int f_audio_validate_opts(struct g_audio *audio, struct device *dev)
++{
++	struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
++
++	if (!opts->p_chmask && !opts->c_chmask) {
++		dev_err(dev, "Error: no playback and capture channels\n");
++		return -EINVAL;
++	} else if (opts->p_chmask & ~UAC1_CHANNEL_MASK) {
++		dev_err(dev, "Error: unsupported playback channels mask\n");
++		return -EINVAL;
++	} else if (opts->c_chmask & ~UAC1_CHANNEL_MASK) {
++		dev_err(dev, "Error: unsupported capture channels mask\n");
++		return -EINVAL;
++	} else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
++		dev_err(dev, "Error: incorrect playback sample size\n");
++		return -EINVAL;
++	} else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
++		dev_err(dev, "Error: incorrect capture sample size\n");
++		return -EINVAL;
++	} else if (!opts->p_srate) {
++		dev_err(dev, "Error: incorrect playback sampling rate\n");
++		return -EINVAL;
++	} else if (!opts->c_srate) {
++		dev_err(dev, "Error: incorrect capture sampling rate\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ /* audio function driver setup/binding */
+ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+ {
+ 	struct usb_composite_dev	*cdev = c->cdev;
+ 	struct usb_gadget		*gadget = cdev->gadget;
++	struct device			*dev = &gadget->dev;
+ 	struct f_uac1			*uac1 = func_to_uac1(f);
+ 	struct g_audio			*audio = func_to_g_audio(f);
+ 	struct f_uac1_opts		*audio_opts;
+@@ -519,6 +558,10 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+ 	int				rate;
+ 	int				status;
+ 
++	status = f_audio_validate_opts(audio, dev);
++	if (status)
++		return status;
++
+ 	audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
+ 
+ 	us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index 6f03e944e0e31..dd960cea642f3 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -14,6 +14,9 @@
+ #include "u_audio.h"
+ #include "u_uac2.h"
+ 
++/* UAC2 spec: 4.1 Audio Channel Cluster Descriptor */
++#define UAC2_CHANNEL_MASK 0x07FFFFFF
++
+ /*
+  * The driver implements a simple UAC_2 topology.
+  * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture
+@@ -604,6 +607,36 @@ static void setup_descriptor(struct f_uac2_opts *opts)
+ 	hs_audio_desc[i] = NULL;
+ }
+ 
++static int afunc_validate_opts(struct g_audio *agdev, struct device *dev)
++{
++	struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
++
++	if (!opts->p_chmask && !opts->c_chmask) {
++		dev_err(dev, "Error: no playback and capture channels\n");
++		return -EINVAL;
++	} else if (opts->p_chmask & ~UAC2_CHANNEL_MASK) {
++		dev_err(dev, "Error: unsupported playback channels mask\n");
++		return -EINVAL;
++	} else if (opts->c_chmask & ~UAC2_CHANNEL_MASK) {
++		dev_err(dev, "Error: unsupported capture channels mask\n");
++		return -EINVAL;
++	} else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
++		dev_err(dev, "Error: incorrect playback sample size\n");
++		return -EINVAL;
++	} else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
++		dev_err(dev, "Error: incorrect capture sample size\n");
++		return -EINVAL;
++	} else if (!opts->p_srate) {
++		dev_err(dev, "Error: incorrect playback sampling rate\n");
++		return -EINVAL;
++	} else if (!opts->c_srate) {
++		dev_err(dev, "Error: incorrect capture sampling rate\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ static int
+ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
+ {
+@@ -612,11 +645,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
+ 	struct usb_composite_dev *cdev = cfg->cdev;
+ 	struct usb_gadget *gadget = cdev->gadget;
+ 	struct device *dev = &gadget->dev;
+-	struct f_uac2_opts *uac2_opts;
++	struct f_uac2_opts *uac2_opts = g_audio_to_uac2_opts(agdev);
+ 	struct usb_string *us;
+ 	int ret;
+ 
+-	uac2_opts = container_of(fn->fi, struct f_uac2_opts, func_inst);
++	ret = afunc_validate_opts(agdev, dev);
++	if (ret)
++		return ret;
+ 
+ 	us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn));
+ 	if (IS_ERR(us))
+diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
+index 44b4352a26765..f48a00e497945 100644
+--- a/drivers/usb/gadget/function/f_uvc.c
++++ b/drivers/usb/gadget/function/f_uvc.c
+@@ -633,7 +633,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+ 
+ 	uvc_hs_streaming_ep.wMaxPacketSize =
+ 		cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11));
+-	uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
++
++	/* A high-bandwidth endpoint must specify a bInterval value of 1 */
++	if (max_packet_mult > 1)
++		uvc_hs_streaming_ep.bInterval = 1;
++	else
++		uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
+ 
+ 	uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size);
+ 	uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
+@@ -817,6 +822,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
+ 	pd->bmControls[0]		= 1;
+ 	pd->bmControls[1]		= 0;
+ 	pd->iProcessing			= 0;
++	pd->bmVideoStandards		= 0;
+ 
+ 	od = &opts->uvc_output_terminal;
+ 	od->bLength			= UVC_DT_OUTPUT_TERMINAL_SIZE;
+diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
+index a9f8eb8e1c767..2c9eab2b863d2 100644
+--- a/drivers/usb/gadget/legacy/webcam.c
++++ b/drivers/usb/gadget/legacy/webcam.c
+@@ -125,6 +125,7 @@ static const struct uvc_processing_unit_descriptor uvc_processing = {
+ 	.bmControls[0]		= 1,
+ 	.bmControls[1]		= 0,
+ 	.iProcessing		= 0,
++	.bmVideoStandards	= 0,
+ };
+ 
+ static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 57067763b1005..5f474ffe2be1e 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -903,6 +903,21 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
+ 	spin_lock_irqsave(&dum->lock, flags);
+ 	dum->pullup = (value != 0);
+ 	set_link_state(dum_hcd);
++	if (value == 0) {
++		/*
++		 * Emulate synchronize_irq(): wait for callbacks to finish.
++		 * This seems to be the best place to emulate the call to
++		 * synchronize_irq() that's in usb_gadget_remove_driver().
++		 * Doing it in dummy_udc_stop() would be too late since it
++		 * is called after the unbind callback and unbind shouldn't
++		 * be invoked until all the other callbacks are finished.
++		 */
++		while (dum->callback_usage > 0) {
++			spin_unlock_irqrestore(&dum->lock, flags);
++			usleep_range(1000, 2000);
++			spin_lock_irqsave(&dum->lock, flags);
++		}
++	}
+ 	spin_unlock_irqrestore(&dum->lock, flags);
+ 
+ 	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
+@@ -1004,14 +1019,6 @@ static int dummy_udc_stop(struct usb_gadget *g)
+ 	spin_lock_irq(&dum->lock);
+ 	dum->ints_enabled = 0;
+ 	stop_activity(dum);
+-
+-	/* emulate synchronize_irq(): wait for callbacks to finish */
+-	while (dum->callback_usage > 0) {
+-		spin_unlock_irq(&dum->lock);
+-		usleep_range(1000, 2000);
+-		spin_lock_irq(&dum->lock);
+-	}
+-
+ 	dum->driver = NULL;
+ 	spin_unlock_irq(&dum->lock);
+ 
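
The wait loop moved into dummy_pullup() shows the drop-lock-and-poll
pattern: a task must not sleep while holding a spinlock, so the lock is
released around each usleep_range() and the counter is re-read only
after retaking it. A sketch with trivial stand-ins for the lock and the
sleep:

    #include <stdio.h>

    static int callback_usage = 2;

    static void lock(void)        { }
    static void unlock(void)      { }
    static void sleep_a_bit(void) { callback_usage--; /* callbacks end */ }

    static void wait_for_callbacks(void)
    {
        lock();
        while (callback_usage > 0) {
            unlock();         /* never sleep under the (spin)lock */
            sleep_a_bit();
            lock();           /* retake before re-reading the count */
        }
        unlock();
    }

    int main(void)
    {
        wait_for_callbacks();
        puts("all callbacks finished");
        return 0;
    }
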
+diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
+index 580bef8eb4cbc..2319c9737c2bd 100644
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -3883,7 +3883,7 @@ static int tegra_xudc_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_get_sync(xudc->dev);
+ 
+-	cancel_delayed_work(&xudc->plc_reset_work);
++	cancel_delayed_work_sync(&xudc->plc_reset_work);
+ 	cancel_work_sync(&xudc->usb_role_sw_work);
+ 
+ 	usb_del_gadget_udc(&xudc->gadget);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index f2c4ee7c4786e..717c122f94490 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -2129,6 +2129,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 
+ 	if (major_revision == 0x03) {
+ 		rhub = &xhci->usb3_rhub;
++		/*
++		 * Some hosts incorrectly use sub-minor version for minor
++		 * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
++		 * for bcdUSB 0x310). Since there is no USB release with a
++		 * bcdUSB of 0x301 to 0x309, we can assume that they are
++		 * incorrect and fix it here.
++		 */
++		if (minor_revision > 0x00 && minor_revision < 0x10)
++			minor_revision <<= 4;
+ 	} else if (major_revision <= 0x02) {
+ 		rhub = &xhci->usb2_rhub;
+ 	} else {
+@@ -2240,6 +2249,9 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
+ 		return;
+ 	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
+ 			flags, dev_to_node(dev));
++	if (!rhub->ports)
++		return;
++
+ 	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
+ 		if (xhci->hw_ports[i].rhub != rhub ||
+ 		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
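
The minor-revision fix-up in xhci_add_in_port() is a single nibble
shift: hosts that report 0x01/0x02 (the sub-minor nibble) instead of
0x10/0x20 get normalized before the value is used. A tiny sketch with a
couple of worked values:

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t fix_minor_revision(uint8_t minor)
    {
        if (minor > 0x00 && minor < 0x10)
            minor <<= 4;    /* 0x01 -> 0x10, 0x02 -> 0x20, ... */
        return minor;
    }

    int main(void)
    {
        printf("0x01 -> 0x%02x\n", fix_minor_revision(0x01)); /* 0x10 */
        printf("0x20 -> 0x%02x\n", fix_minor_revision(0x20)); /* as-is */
        return 0;
    }
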
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index 2f27dc0d9c6bd..1c331577fca92 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -397,6 +397,8 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ 	if (mtk->lpm_support)
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
++	if (mtk->u2_lpm_disable)
++		xhci->quirks |= XHCI_HW_LPM_DISABLE;
+ 
+ 	/*
+ 	 * MTK xHCI 0.96: PSA is 1 by default even if doesn't support stream,
+@@ -469,6 +471,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
++	mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable");
+ 	/* optional property, ignore the error if it does not exist */
+ 	of_property_read_u32(node, "mediatek,u3p-dis-msk",
+ 			     &mtk->u3p_dis_msk);
+diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
+index cbb09dfea62e0..080109012b9ac 100644
+--- a/drivers/usb/host/xhci-mtk.h
++++ b/drivers/usb/host/xhci-mtk.h
+@@ -150,6 +150,7 @@ struct xhci_hcd_mtk {
+ 	struct phy **phys;
+ 	int num_phys;
+ 	bool lpm_support;
++	bool u2_lpm_disable;
+ 	/* usb remote wakeup */
+ 	bool uwk_en;
+ 	struct regmap *uwk;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index ce38076901e25..59d41d2c200df 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2129,16 +2129,13 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
+ 	return 0;
+ }
+ 
+-static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+-	struct xhci_transfer_event *event, struct xhci_virt_ep *ep)
++static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
++		     struct xhci_ring *ep_ring, struct xhci_td *td,
++		     u32 trb_comp_code)
+ {
+ 	struct xhci_ep_ctx *ep_ctx;
+-	struct xhci_ring *ep_ring;
+-	u32 trb_comp_code;
+ 
+-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ 	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
+-	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ 
+ 	switch (trb_comp_code) {
+ 	case COMP_STOPPED_LENGTH_INVALID:
+@@ -2234,9 +2231,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ /*
+  * Process control tds, update urb status and actual_length.
+  */
+-static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+-	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
+-	struct xhci_virt_ep *ep)
++static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
++		struct xhci_ring *ep_ring,  struct xhci_td *td,
++			   union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+ {
+ 	struct xhci_ep_ctx *ep_ctx;
+ 	u32 trb_comp_code;
+@@ -2324,15 +2321,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		td->urb->actual_length = requested;
+ 
+ finish_td:
+-	return finish_td(xhci, td, event, ep);
++	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ }
+ 
+ /*
+  * Process isochronous tds, update urb packet status and actual_length.
+  */
+-static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+-	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
+-	struct xhci_virt_ep *ep)
++static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
++		struct xhci_ring *ep_ring, struct xhci_td *td,
++		union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+ {
+ 	struct urb_priv *urb_priv;
+ 	int idx;
+@@ -2409,7 +2406,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 
+ 	td->urb->actual_length += frame->actual_length;
+ 
+-	return finish_td(xhci, td, event, ep);
++	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ }
+ 
+ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+@@ -2441,17 +2438,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ /*
+  * Process bulk and interrupt tds, update urb status and actual_length.
+  */
+-static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+-	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
+-	struct xhci_virt_ep *ep)
++static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
++		struct xhci_ring *ep_ring, struct xhci_td *td,
++		union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+ {
+ 	struct xhci_slot_ctx *slot_ctx;
+-	struct xhci_ring *ep_ring;
+ 	u32 trb_comp_code;
+ 	u32 remaining, requested, ep_trb_len;
+ 
+ 	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
+-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ 	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+@@ -2511,7 +2506,8 @@ finish_td:
+ 			  remaining);
+ 		td->urb->actual_length = 0;
+ 	}
+-	return finish_td(xhci, td, event, ep);
++
++	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ }
+ 
+ /*
+@@ -2854,11 +2850,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 
+ 		/* update the urb's actual_length and give back to the core */
+ 		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
+-			process_ctrl_td(xhci, td, ep_trb, event, ep);
++			process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
+ 		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
+-			process_isoc_td(xhci, td, ep_trb, event, ep);
++			process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
+ 		else
+-			process_bulk_intr_td(xhci, td, ep_trb, event, ep);
++			process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
+ cleanup:
+ 		handling_skipped_tds = ep->skip &&
+ 			trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 1975016f46bff..6672c2f403034 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -228,6 +228,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
+ 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ 	int err, i;
+ 	u64 val;
++	u32 intrs;
+ 
+ 	/*
+ 	 * Some Renesas controllers get into a weird state if they are
+@@ -266,7 +267,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
+ 	if (upper_32_bits(val))
+ 		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
+ 
+-	for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
++	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
++		      ARRAY_SIZE(xhci->run_regs->ir_set));
++
++	for (i = 0; i < intrs; i++) {
+ 		struct xhci_intr_reg __iomem *ir;
+ 
+ 		ir = &xhci->run_regs->ir_set[i];
+@@ -3269,6 +3273,14 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
+ 
+ 	/* config ep command clears toggle if add and drop ep flags are set */
+ 	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
++	if (!ctrl_ctx) {
++		spin_unlock_irqrestore(&xhci->lock, flags);
++		xhci_free_command(xhci, cfg_cmd);
++		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
++				__func__);
++		goto cleanup;
++	}
++
+ 	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
+ 					   ctrl_ctx, ep_flag, ep_flag);
+ 	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index fc0457db62e1a..8f09a387b7738 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2070,7 +2070,7 @@ static void musb_irq_work(struct work_struct *data)
+ 	struct musb *musb = container_of(data, struct musb, irq_work.work);
+ 	int error;
+ 
+-	error = pm_runtime_get_sync(musb->controller);
++	error = pm_runtime_resume_and_get(musb->controller);
+ 	if (error < 0) {
+ 		dev_err(musb->controller, "Could not enable: %i\n", error);
+ 
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index bfa4c6ef554e5..c79d2f2387aaa 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -993,6 +993,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
+ 	if (vma->vm_end - vma->vm_start != notify.size)
+ 		return -ENOTSUPP;
+ 
++	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ 	vma->vm_ops = &vhost_vdpa_vm_ops;
+ 	return 0;
+ }
+diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
+index 091f07e7c1451..e9fbe24838440 100644
+--- a/drivers/video/backlight/qcom-wled.c
++++ b/drivers/video/backlight/qcom-wled.c
+@@ -336,19 +336,19 @@ static int wled3_sync_toggle(struct wled *wled)
+ 	unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
+ 
+ 	rc = regmap_update_bits(wled->regmap,
+-				wled->ctrl_addr + WLED3_SINK_REG_SYNC,
++				wled->sink_addr + WLED3_SINK_REG_SYNC,
+ 				mask, mask);
+ 	if (rc < 0)
+ 		return rc;
+ 
+ 	rc = regmap_update_bits(wled->regmap,
+-				wled->ctrl_addr + WLED3_SINK_REG_SYNC,
++				wled->sink_addr + WLED3_SINK_REG_SYNC,
+ 				mask, WLED3_SINK_REG_SYNC_CLEAR);
+ 
+ 	return rc;
+ }
+ 
+-static int wled5_sync_toggle(struct wled *wled)
++static int wled5_mod_sync_toggle(struct wled *wled)
+ {
+ 	int rc;
+ 	u8 val;
+@@ -445,10 +445,23 @@ static int wled_update_status(struct backlight_device *bl)
+ 			goto unlock_mutex;
+ 		}
+ 
+-		rc = wled->wled_sync_toggle(wled);
+-		if (rc < 0) {
+-			dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
+-			goto unlock_mutex;
++		if (wled->version < 5) {
++			rc = wled->wled_sync_toggle(wled);
++			if (rc < 0) {
++				dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
++				goto unlock_mutex;
++			}
++		} else {
++			/*
++			 * For WLED5 toggling the MOD_SYNC_BIT updates the
++			 * brightness
++			 */
++			rc = wled5_mod_sync_toggle(wled);
++			if (rc < 0) {
++				dev_err(wled->dev, "wled mod sync failed rc:%d\n",
++					rc);
++				goto unlock_mutex;
++			}
+ 		}
+ 	}
+ 
+@@ -1459,7 +1472,7 @@ static int wled_configure(struct wled *wled)
+ 		size = ARRAY_SIZE(wled5_opts);
+ 		*cfg = wled5_config_defaults;
+ 		wled->wled_set_brightness = wled5_set_brightness;
+-		wled->wled_sync_toggle = wled5_sync_toggle;
++		wled->wled_sync_toggle = wled3_sync_toggle;
+ 		wled->wled_cabc_config = wled5_cabc_config;
+ 		wled->wled_ovp_delay = wled5_ovp_delay;
+ 		wled->wled_auto_detection_required =
+diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
+index 757d5c3f620b7..ff09e57f3c380 100644
+--- a/drivers/video/fbdev/core/fbcmap.c
++++ b/drivers/video/fbdev/core/fbcmap.c
+@@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
+ 		if (!len)
+ 			return 0;
+ 
+-		cmap->red = kmalloc(size, flags);
++		cmap->red = kzalloc(size, flags);
+ 		if (!cmap->red)
+ 			goto fail;
+-		cmap->green = kmalloc(size, flags);
++		cmap->green = kzalloc(size, flags);
+ 		if (!cmap->green)
+ 			goto fail;
+-		cmap->blue = kmalloc(size, flags);
++		cmap->blue = kzalloc(size, flags);
+ 		if (!cmap->blue)
+ 			goto fail;
+ 		if (transp) {
+-			cmap->transp = kmalloc(size, flags);
++			cmap->transp = kzalloc(size, flags);
+ 			if (!cmap->transp)
+ 				goto fail;
+ 		} else {
+diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
+index f1964ea4b8269..e21e1e86ad15f 100644
+--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
++++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
+@@ -1524,7 +1524,8 @@ static const struct file_operations ne_enclave_fops = {
+  *			  enclave file descriptor to be further used for enclave
+  *			  resources handling e.g. memory regions and CPUs.
+  * @ne_pci_dev :	Private data associated with the PCI device.
+- * @slot_uid:		Generated unique slot id associated with an enclave.
++ * @slot_uid:		User pointer where the generated unique slot id
++ *			associated with an enclave is stored.
+  *
+  * Context: Process context. This function is called with the ne_pci_dev enclave
+  *	    mutex held.
+@@ -1532,7 +1533,7 @@ static const struct file_operations ne_enclave_fops = {
+  * * Enclave fd on success.
+  * * Negative return value on failure.
+  */
+-static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
++static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
+ {
+ 	struct ne_pci_dev_cmd_reply cmd_reply = {};
+ 	int enclave_fd = -1;
+@@ -1634,7 +1635,18 @@ static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
+ 
+ 	list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
+ 
+-	*slot_uid = ne_enclave->slot_uid;
++	if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
++		/*
++		 * As we're holding the only reference to 'enclave_file', fput()
++		 * will call ne_enclave_release() which will do a proper cleanup
++		 * of all resources allocated so far, leaving only the unused fd
++		 * for us to free.
++		 */
++		fput(enclave_file);
++		put_unused_fd(enclave_fd);
++
++		return -EFAULT;
++	}
+ 
+ 	fd_install(enclave_fd, enclave_file);
+ 
+@@ -1671,34 +1683,13 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	switch (cmd) {
+ 	case NE_CREATE_VM: {
+ 		int enclave_fd = -1;
+-		struct file *enclave_file = NULL;
+ 		struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
+-		int rc = -EINVAL;
+-		u64 slot_uid = 0;
++		u64 __user *slot_uid = (void __user *)arg;
+ 
+ 		mutex_lock(&ne_pci_dev->enclaves_list_mutex);
+-
+-		enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid);
+-		if (enclave_fd < 0) {
+-			rc = enclave_fd;
+-
+-			mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
+-
+-			return rc;
+-		}
+-
++		enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
+ 		mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
+ 
+-		if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) {
+-			enclave_file = fget(enclave_fd);
+-			/* Decrement file refs to have release() called. */
+-			fput(enclave_file);
+-			fput(enclave_file);
+-			put_unused_fd(enclave_fd);
+-
+-			return -EFAULT;
+-		}
+-
+ 		return enclave_fd;
+ 	}
+ 
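
The ne_create_vm_ioctl() rework moves the copy_to_user() ahead of
fd_install(), so every fallible step happens while the fd is still
private to the kernel; on failure a single fput() plus put_unused_fd()
rolls everything back. A control-flow sketch of that publish-last
ordering, where every helper is a hypothetical stand-in for the kernel
primitive named in its comment:

    #include <stdio.h>

    static int  reserve_fd(void)          { return 3; }  /* get_unused_fd_flags() */
    static int  create_file(void)         { return 42; } /* anon file creation */
    static int  copy_result_to_user(void) { return -1; } /* simulate -EFAULT */
    static void drop_file(int f)          { printf("file %d released\n", f); }
    static void release_fd(int fd)        { printf("fd %d released\n", fd); }
    static void publish_fd(int fd, int f) { printf("fd %d -> file %d live\n", fd, f); }

    static int create_vm(void)
    {
        int fd = reserve_fd();        /* reserved but not visible yet */
        int file = create_file();

        if (copy_result_to_user()) {
            /* Nothing published: one drop + one release undoes it all. */
            drop_file(file);
            release_fd(fd);
            return -1;
        }
        publish_fd(fd, file);         /* point of no return */
        return fd;
    }

    int main(void)
    {
        return create_vm() < 0 ? 1 : 0;
    }
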
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 744b99ddc28ca..a7d9e147dee62 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -3269,6 +3269,7 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
+  */
+ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
+ {
++	struct btrfs_transaction *cur_trans = trans->transaction;
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct btrfs_space_info *info;
+ 	u64 left;
+@@ -3283,6 +3284,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
+ 	lockdep_assert_held(&fs_info->chunk_mutex);
+ 
+ 	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
++again:
+ 	spin_lock(&info->lock);
+ 	left = info->total_bytes - btrfs_space_info_used(info, true);
+ 	spin_unlock(&info->lock);
+@@ -3301,6 +3303,58 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
+ 
+ 	if (left < thresh) {
+ 		u64 flags = btrfs_system_alloc_profile(fs_info);
++		u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved);
++
++		/*
++		 * If there's no available space for the chunk tree (system
++		 * space) and there are other tasks that reserved space for
++		 * creating a new system block group, wait for them to complete
++		 * the creation of their system block group and release excess
++		 * reserved space. We do this because:
++		 *
++		 * *) We can end up allocating more system chunks than necessary
++		 *    when there are multiple tasks that are concurrently
++		 *    allocating block groups, which can lead to exhaustion of
++		 *    the system array in the superblock;
++		 *
++		 * *) If we allocate extra and unnecessary system block groups,
++		 *    despite being empty for a long time, and possibly forever,
++		 *    they end up not being added to the list of unused block groups
++		 *    because that typically happens only when deallocating the
++		 *    last extent from a block group - which never happens since
++		 *    we never allocate from them in the first place. The few
++		 *    exceptions are when mounting a filesystem or running scrub,
++		 *    which add unused block groups to the list of unused block
++		 *    groups, to be deleted by the cleaner kthread.
++		 *    And even when they are added to the list of unused block
++		 *    groups, it can take a long time until they get deleted,
++		 *    since the cleaner kthread might be sleeping or busy with
++		 *    other work (deleting subvolumes, running delayed iputs,
++		 *    defrag scheduling, etc);
++		 *
++		 * This is rare in practice, but can happen when too many tasks
++		 * are allocating block groups in parallel (via fallocate())
++		 * and before the one that reserved space for a new system block
++		 * group finishes the block group creation and releases the space
++		 * reserved in excess (at btrfs_create_pending_block_groups()),
++		 * other tasks end up here and see free system space temporarily
++		 * not enough for updating the chunk tree.
++		 *
++		 * We unlock the chunk mutex before waiting for such tasks and
++		 * lock it again after the wait, otherwise we would deadlock.
++		 * It is safe to do so because allocating a system chunk is the
++		 * first thing done while allocating a new block group.
++		 */
++		if (reserved > trans->chunk_bytes_reserved) {
++			const u64 min_needed = reserved - thresh;
++
++			mutex_unlock(&fs_info->chunk_mutex);
++			wait_event(cur_trans->chunk_reserve_wait,
++			   atomic64_read(&cur_trans->chunk_bytes_reserved) <=
++			   min_needed);
++			mutex_lock(&fs_info->chunk_mutex);
++			goto again;
++		}
+ 
+ 		/*
+ 		 * Ignore failure to create system chunk. We might end up not
+@@ -3315,8 +3369,10 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
+ 		ret = btrfs_block_rsv_add(fs_info->chunk_root,
+ 					  &fs_info->chunk_block_rsv,
+ 					  thresh, BTRFS_RESERVE_NO_FLUSH);
+-		if (!ret)
++		if (!ret) {
++			atomic64_add(thresh, &cur_trans->chunk_bytes_reserved);
+ 			trans->chunk_bytes_reserved += thresh;
++		}
+ 	}
+ }
+ 
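
The retry added to check_system_chunk() is an unlock/wait/relock loop:
when other transactions hold excess system-space reservations, the chunk
mutex is dropped, the task waits for those reservations to shrink, then
retakes the mutex and re-evaluates from the top. A control-flow sketch
of that shape, with stand-ins rather than btrfs APIs:

    #include <stdio.h>

    static long reserved = 3;   /* pretend another task reserved space */
    static long thresh   = 1;

    static long free_space(void)         { return 0; }
    static void lock_chunk_mutex(void)   { }
    static void unlock_chunk_mutex(void) { }
    static void wait_for_release(void)   { reserved = 0; /* others done */ }

    static void check_system_chunk(void)
    {
    again:
        if (free_space() < thresh && reserved > 0) {
            /* Unlock first, or the waited-on tasks would deadlock. */
            unlock_chunk_mutex();
            wait_for_release();
            lock_chunk_mutex();
            goto again;         /* conditions changed; re-check all */
        }
        puts("proceed with (possible) system chunk allocation");
    }

    int main(void)
    {
        lock_chunk_mutex();
        check_system_chunk();
        unlock_chunk_mutex();
        return 0;
    }
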
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index 28e202e89660f..4189036049362 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -299,6 +299,21 @@ static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
+ 						  mod);
+ }
+ 
++/*
++ * Called every time after doing a buffered, direct IO or memory mapped write.
++ *
++ * This is to ensure that if we write to a file that was previously fsynced in
++ * the current transaction, then try to fsync it again in the same transaction,
++ * we will know that there were changes in the file and that it needs to be
++ * logged.
++ */
++static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
++{
++	spin_lock(&inode->lock);
++	inode->last_sub_trans = inode->root->log_transid;
++	spin_unlock(&inode->lock);
++}
++
+ static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
+ {
+ 	int ret = 0;
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index 3f4c832abfed6..81387cdf334da 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -80,10 +80,15 @@ static int compression_compress_pages(int type, struct list_head *ws,
+ 	case BTRFS_COMPRESS_NONE:
+ 	default:
+ 		/*
+-		 * This can't happen, the type is validated several times
+-		 * before we get here. As a sane fallback, return what the
+-		 * callers will understand as 'no compression happened'.
++		 * This can happen when compression races with remount setting
++		 * it to 'no compress', while the caller doesn't call
++		 * inode_need_compress() to check if we really need to
++		 * compress.
++		 *
++		 * Not a big deal, we just need to inform the caller that we
++		 * haven't allocated any pages yet.
+ 		 */
++		*out_pages = 0;
+ 		return -E2BIG;
+ 	}
+ }
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 34b929bd5c1af..f43ce82a6aedc 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1365,10 +1365,30 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+ 				   "failed to read tree block %llu from get_old_root",
+ 				   logical);
+ 		} else {
++			struct tree_mod_elem *tm2;
++
+ 			btrfs_tree_read_lock(old);
+ 			eb = btrfs_clone_extent_buffer(old);
++			/*
++			 * After the lookup for the most recent tree mod operation
++			 * above and before we locked and cloned the extent buffer
++			 * 'old', a new tree mod log operation may have been added.
++			 * So lookup for a more recent one to make sure the number
++			 * So look up a more recent one to make sure the number
++			 * number of items we have in the cloned extent buffer,
++			 * otherwise we can hit a BUG_ON when rewinding the extent
++			 * buffer.
++			 */
++			tm2 = tree_mod_log_search(fs_info, logical, time_seq);
+ 			btrfs_tree_read_unlock(old);
+ 			free_extent_buffer(old);
++			ASSERT(tm2);
++			ASSERT(tm2 == tm || tm2->seq > tm->seq);
++			if (!tm2 || tm2->seq < tm->seq) {
++				free_extent_buffer(eb);
++				return NULL;
++			}
++			tm = tm2;
+ 		}
+ 	} else if (old_root) {
+ 		eb_root_owner = btrfs_header_owner(eb_root);
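
The get_old_root() fix is an instance of lookup/lock/re-validate: a tree
mod log entry found before the extent buffer was locked can be stale by
the time the clone is taken, so a second lookup under the lock either
confirms the first result or forces a bail-out. A minimal sketch of the
pattern with a simulated racing writer:

    #include <stdio.h>

    static int log_seq = 10;

    static int  seq_lookup(void) { return log_seq; }
    static void take_lock(void)  { log_seq = 11; /* writer sneaks in */ }
    static void drop_lock(void)  { }

    int main(void)
    {
        int tm = seq_lookup();    /* unlocked lookup, may go stale */
        int tm2;

        take_lock();
        tm2 = seq_lookup();       /* authoritative, under the lock */
        drop_lock();

        if (tm2 != tm) {
            printf("stale snapshot (%d vs %d), bail out\n", tm, tm2);
            return 1;
        }
        return 0;
    }
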
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index bf25401c97681..c1d2b67861292 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1589,8 +1589,8 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
+ 	 * We can only do one readdir with delayed items at a time because of
+ 	 * item->readdir_list.
+ 	 */
+-	inode_unlock_shared(inode);
+-	inode_lock(inode);
++	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
++	btrfs_inode_lock(inode, 0);
+ 
+ 	mutex_lock(&delayed_node->mutex);
+ 	item = __btrfs_first_delayed_insertion_item(delayed_node);
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 0e155f0138391..6eb72c9b15a7d 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2014,14 +2014,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+ 	else
+ 		num_written = btrfs_buffered_write(iocb, from);
+ 
+-	/*
+-	 * We also have to set last_sub_trans to the current log transid,
+-	 * otherwise subsequent syncs to a file that's been synced in this
+-	 * transaction will appear to have already occurred.
+-	 */
+-	spin_lock(&inode->lock);
+-	inode->last_sub_trans = inode->root->log_transid;
+-	spin_unlock(&inode->lock);
++	btrfs_set_inode_last_sub_trans(inode);
++
+ 	if (num_written > 0)
+ 		num_written = generic_write_sync(iocb, num_written);
+ 
+@@ -2122,7 +2116,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	if (ret)
+ 		goto out;
+ 
+-	inode_lock(inode);
++	btrfs_inode_lock(inode, 0);
+ 
+ 	atomic_inc(&root->log_batch);
+ 
+@@ -2154,7 +2148,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	 */
+ 	ret = start_ordered_ops(inode, start, end);
+ 	if (ret) {
+-		inode_unlock(inode);
++		btrfs_inode_unlock(inode, 0);
+ 		goto out;
+ 	}
+ 
+@@ -2255,7 +2249,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	 * file again, but that will end up using the synchronization
+ 	 * inside btrfs_sync_log to keep things safe.
+ 	 */
+-	inode_unlock(inode);
++	btrfs_inode_unlock(inode, 0);
+ 
+ 	if (ret != BTRFS_NO_LOG_SYNC) {
+ 		if (!ret) {
+@@ -2285,7 +2279,7 @@ out:
+ 
+ out_release_extents:
+ 	btrfs_release_log_ctx_extents(&ctx);
+-	inode_unlock(inode);
++	btrfs_inode_unlock(inode, 0);
+ 	goto out;
+ }
+ 
+@@ -2735,8 +2729,6 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
+ 			extent_info->file_offset += replace_len;
+ 		}
+ 
+-		cur_offset = drop_args.drop_end;
+-
+ 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ 		if (ret)
+ 			break;
+@@ -2756,7 +2748,9 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
+ 		BUG_ON(ret);	/* shouldn't happen */
+ 		trans->block_rsv = rsv;
+ 
+-		if (!extent_info) {
++		cur_offset = drop_args.drop_end;
++		len = end - cur_offset;
++		if (!extent_info && len) {
+ 			ret = find_first_non_hole(BTRFS_I(inode), &cur_offset,
+ 						  &len);
+ 			if (unlikely(ret < 0))
+@@ -2868,7 +2862,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+ 	if (ret)
+ 		return ret;
+ 
+-	inode_lock(inode);
++	btrfs_inode_lock(inode, 0);
+ 	ino_size = round_up(inode->i_size, fs_info->sectorsize);
+ 	ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
+ 	if (ret < 0)
+@@ -2908,7 +2902,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+ 		truncated_block = true;
+ 		ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
+ 		if (ret) {
+-			inode_unlock(inode);
++			btrfs_inode_unlock(inode, 0);
+ 			return ret;
+ 		}
+ 	}
+@@ -3009,7 +3003,7 @@ out_only_mutex:
+ 				ret = ret2;
+ 		}
+ 	}
+-	inode_unlock(inode);
++	btrfs_inode_unlock(inode, 0);
+ 	return ret;
+ }
+ 
+@@ -3377,7 +3371,7 @@ static long btrfs_fallocate(struct file *file, int mode,
+ 
+ 	if (mode & FALLOC_FL_ZERO_RANGE) {
+ 		ret = btrfs_zero_range(inode, offset, len, mode);
+-		inode_unlock(inode);
++		btrfs_inode_unlock(inode, 0);
+ 		return ret;
+ 	}
+ 
+@@ -3487,7 +3481,7 @@ out_unlock:
+ 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+ 			     &cached_state);
+ out:
+-	inode_unlock(inode);
++	btrfs_inode_unlock(inode, 0);
+ 	/* Let go of our reservation. */
+ 	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
+ 		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a520775949a07..a922c3bcb65e1 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8619,9 +8619,7 @@ again:
+ 	set_page_dirty(page);
+ 	SetPageUptodate(page);
+ 
+-	BTRFS_I(inode)->last_trans = fs_info->generation;
+-	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
+-	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
++	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
+ 
+ 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index e8d53fea4c61a..5efd6c7fe6206 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -226,7 +226,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
+ 	if (ret)
+ 		return ret;
+ 
+-	inode_lock(inode);
++	btrfs_inode_lock(inode, 0);
+ 	fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
+ 	old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
+ 
+@@ -353,7 +353,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
+  out_end_trans:
+ 	btrfs_end_transaction(trans);
+  out_unlock:
+-	inode_unlock(inode);
++	btrfs_inode_unlock(inode, 0);
+ 	mnt_drop_write_file(file);
+ 	return ret;
+ }
+@@ -449,7 +449,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
+ 	if (ret)
+ 		return ret;
+ 
+-	inode_lock(inode);
++	btrfs_inode_lock(inode, 0);
+ 
+ 	old_flags = binode->flags;
+ 	old_i_flags = inode->i_flags;
+@@ -501,7 +501,7 @@ out_unlock:
+ 		inode->i_flags = old_i_flags;
+ 	}
+ 
+-	inode_unlock(inode);
++	btrfs_inode_unlock(inode, 0);
+ 	mnt_drop_write_file(file);
+ 
+ 	return ret;
+@@ -697,8 +697,6 @@ static noinline int create_subvol(struct inode *dir,
+ 	btrfs_set_root_otransid(root_item, trans->transid);
+ 
+ 	btrfs_tree_unlock(leaf);
+-	free_extent_buffer(leaf);
+-	leaf = NULL;
+ 
+ 	btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);
+ 
+@@ -707,8 +705,22 @@ static noinline int create_subvol(struct inode *dir,
+ 	key.type = BTRFS_ROOT_ITEM_KEY;
+ 	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
+ 				root_item);
+-	if (ret)
++	if (ret) {
++		/*
++		 * Since we don't abort the transaction in this case, free the
++		 * tree block so that we don't leak space and leave the
++		 * filesystem in an inconsistent state (an extent item in the
++		 * extent tree without backreferences). Also no need to have
++		 * the tree block locked since it is not in any tree at this
++		 * point, so no other task can find it and use it.
++		 */
++		btrfs_free_tree_block(trans, root, leaf, 0, 1);
++		free_extent_buffer(leaf);
+ 		goto fail;
++	}
++
++	free_extent_buffer(leaf);
++	leaf = NULL;
+ 
+ 	key.offset = (u64)-1;
+ 	new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
+@@ -1014,7 +1026,7 @@ out_up_read:
+ out_dput:
+ 	dput(dentry);
+ out_unlock:
+-	inode_unlock(dir);
++	btrfs_inode_unlock(dir, 0);
+ 	return error;
+ }
+ 
+@@ -1612,7 +1624,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
+ 			ra_index += cluster;
+ 		}
+ 
+-		inode_lock(inode);
++		btrfs_inode_lock(inode, 0);
+ 		if (IS_SWAPFILE(inode)) {
+ 			ret = -ETXTBSY;
+ 		} else {
+@@ -1621,13 +1633,13 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
+ 			ret = cluster_pages_for_defrag(inode, pages, i, cluster);
+ 		}
+ 		if (ret < 0) {
+-			inode_unlock(inode);
++			btrfs_inode_unlock(inode, 0);
+ 			goto out_ra;
+ 		}
+ 
+ 		defrag_count += ret;
+ 		balance_dirty_pages_ratelimited(inode->i_mapping);
+-		inode_unlock(inode);
++		btrfs_inode_unlock(inode, 0);
+ 
+ 		if (newer_than) {
+ 			if (newer_off == (u64)-1)
+@@ -1675,9 +1687,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
+ 
+ out_ra:
+ 	if (do_compress) {
+-		inode_lock(inode);
++		btrfs_inode_lock(inode, 0);
+ 		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
+-		inode_unlock(inode);
++		btrfs_inode_unlock(inode, 0);
+ 	}
+ 	if (!file)
+ 		kfree(ra);
+@@ -3112,9 +3124,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ 		goto out_dput;
+ 	}
+ 
+-	inode_lock(inode);
++	btrfs_inode_lock(inode, 0);
+ 	err = btrfs_delete_subvolume(dir, dentry);
+-	inode_unlock(inode);
++	btrfs_inode_unlock(inode, 0);
+ 	if (!err) {
+ 		fsnotify_rmdir(dir, dentry);
+ 		d_delete(dentry);
+@@ -3123,7 +3135,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ out_dput:
+ 	dput(dentry);
+ out_unlock_dir:
+-	inode_unlock(dir);
++	btrfs_inode_unlock(dir, 0);
+ free_subvol_name:
+ 	kfree(subvol_name_ptr);
+ free_parent:
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
+index 762881b777b3b..0abbf050580d1 100644
+--- a/fs/btrfs/reflink.c
++++ b/fs/btrfs/reflink.c
+@@ -833,7 +833,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
+ 		return -EINVAL;
+ 
+ 	if (same_inode)
+-		inode_lock(src_inode);
++		btrfs_inode_lock(src_inode, 0);
+ 	else
+ 		lock_two_nondirectories(src_inode, dst_inode);
+ 
+@@ -849,7 +849,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
+ 
+ out_unlock:
+ 	if (same_inode)
+-		inode_unlock(src_inode);
++		btrfs_inode_unlock(src_inode, 0);
+ 	else
+ 		unlock_two_nondirectories(src_inode, dst_inode);
+ 
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 232d5da7b7bef..829dc8dcc1514 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -733,10 +733,12 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
+ 	struct extent_buffer *eb;
+ 	struct btrfs_root_item *root_item;
+ 	struct btrfs_key root_key;
+-	int ret;
++	int ret = 0;
++	bool must_abort = false;
+ 
+ 	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
+-	BUG_ON(!root_item);
++	if (!root_item)
++		return ERR_PTR(-ENOMEM);
+ 
+ 	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
+ 	root_key.type = BTRFS_ROOT_ITEM_KEY;
+@@ -748,7 +750,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
+ 		/* called by btrfs_init_reloc_root */
+ 		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
+ 				      BTRFS_TREE_RELOC_OBJECTID);
+-		BUG_ON(ret);
++		if (ret)
++			goto fail;
++
+ 		/*
+ 		 * Set the last_snapshot field to the generation of the commit
+ 		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
+@@ -769,9 +773,16 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
+ 		 */
+ 		ret = btrfs_copy_root(trans, root, root->node, &eb,
+ 				      BTRFS_TREE_RELOC_OBJECTID);
+-		BUG_ON(ret);
++		if (ret)
++			goto fail;
+ 	}
+ 
++	/*
++	 * We have changed references at this point, so we must abort the
++	 * transaction if anything fails.
++	 */
++	must_abort = true;
++
+ 	memcpy(root_item, &root->root_item, sizeof(*root_item));
+ 	btrfs_set_root_bytenr(root_item, eb->start);
+ 	btrfs_set_root_level(root_item, btrfs_header_level(eb));
+@@ -789,14 +800,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
+ 
+ 	ret = btrfs_insert_root(trans, fs_info->tree_root,
+ 				&root_key, root_item);
+-	BUG_ON(ret);
++	if (ret)
++		goto fail;
++
+ 	kfree(root_item);
+ 
+ 	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
+-	BUG_ON(IS_ERR(reloc_root));
++	if (IS_ERR(reloc_root)) {
++		ret = PTR_ERR(reloc_root);
++		goto abort;
++	}
+ 	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
+ 	reloc_root->last_trans = trans->transid;
+ 	return reloc_root;
++fail:
++	kfree(root_item);
++abort:
++	if (must_abort)
++		btrfs_abort_transaction(trans, ret);
++	return ERR_PTR(ret);
+ }
+ 
+ /*
+@@ -875,7 +897,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
+ 	int ret;
+ 
+ 	if (!have_reloc_root(root))
+-		goto out;
++		return 0;
+ 
+ 	reloc_root = root->reloc_root;
+ 	root_item = &reloc_root->root_item;
+@@ -908,10 +930,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
+ 
+ 	ret = btrfs_update_root(trans, fs_info->tree_root,
+ 				&reloc_root->root_key, root_item);
+-	BUG_ON(ret);
+ 	btrfs_put_root(reloc_root);
+-out:
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+@@ -1185,8 +1205,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
+ 	int ret;
+ 	int slot;
+ 
+-	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+-	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
++	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
++	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+ 
+ 	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
+ again:
+@@ -1217,7 +1237,7 @@ again:
+ 	parent = eb;
+ 	while (1) {
+ 		level = btrfs_header_level(parent);
+-		BUG_ON(level < lowest_level);
++		ASSERT(level >= lowest_level);
+ 
+ 		ret = btrfs_bin_search(parent, &key, &slot);
+ 		if (ret < 0)
+@@ -2578,7 +2598,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
+ 		return btrfs_end_transaction(trans);
+ 	}
+ 
+-	inode_lock(&inode->vfs_inode);
++	btrfs_inode_lock(&inode->vfs_inode, 0);
+ 	for (nr = 0; nr < cluster->nr; nr++) {
+ 		start = cluster->boundary[nr] - offset;
+ 		if (nr + 1 < cluster->nr)
+@@ -2596,7 +2616,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
+ 		if (ret)
+ 			break;
+ 	}
+-	inode_unlock(&inode->vfs_inode);
++	btrfs_inode_unlock(&inode->vfs_inode, 0);
+ 
+ 	if (cur_offset < prealloc_end)
+ 		btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
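
The create_reloc_root() rework above replaces BUG_ON() with staged unwinding: errors before any reference is modified just free and return, while errors after that point must also abort the transaction, which is what the must_abort flag tracks. A compact standalone sketch of the same staging; copy_root, insert_root, and abort_transaction are stand-in stubs, not kernel functions:

    #include <stdbool.h>
    #include <stdlib.h>

    static int copy_root(void)   { return 0; }  /* stand-in step, succeeds */
    static int insert_root(void) { return 0; }  /* stand-in step, succeeds */
    static void abort_transaction(int err) { (void)err; }

    static int create_root_like(void)
    {
            bool must_abort = false;
            char *item = malloc(64);
            int ret;

            if (!item)
                    return -1;

            ret = copy_root();      /* nothing shared touched yet */
            if (ret)
                    goto fail;

            /* References changed: any later failure must abort. */
            must_abort = true;

            ret = insert_root();
            if (ret)
                    goto fail;

            free(item);
            return 0;
    fail:
            free(item);
            if (must_abort)
                    abort_transaction(ret);
            return ret;
    }
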
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 3d9088eab2fca..b9202a1f1af10 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -3682,8 +3682,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
+ 			spin_lock(&cache->lock);
+ 			if (!cache->to_copy) {
+ 				spin_unlock(&cache->lock);
+-				ro_set = 0;
+-				goto done;
++				btrfs_put_block_group(cache);
++				goto skip;
+ 			}
+ 			spin_unlock(&cache->lock);
+ 		}
+@@ -3841,7 +3841,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
+ 						      cache, found_key.offset))
+ 			ro_set = 0;
+ 
+-done:
+ 		down_write(&dev_replace->rwsem);
+ 		dev_replace->cursor_left = dev_replace->cursor_right;
+ 		dev_replace->item_needs_writeback = 1;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index acff6bb49a97b..d56d3e7ca3240 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -260,6 +260,7 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
+ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
++	struct btrfs_transaction *cur_trans = trans->transaction;
+ 
+ 	if (!trans->chunk_bytes_reserved)
+ 		return;
+@@ -268,6 +269,8 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
+ 
+ 	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
+ 				trans->chunk_bytes_reserved, NULL);
++	atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved);
++	cond_wake_up(&cur_trans->chunk_reserve_wait);
+ 	trans->chunk_bytes_reserved = 0;
+ }
+ 
+@@ -383,6 +386,8 @@ loop:
+ 	spin_lock_init(&cur_trans->dropped_roots_lock);
+ 	INIT_LIST_HEAD(&cur_trans->releasing_ebs);
+ 	spin_lock_init(&cur_trans->releasing_ebs_lock);
++	atomic64_set(&cur_trans->chunk_bytes_reserved, 0);
++	init_waitqueue_head(&cur_trans->chunk_reserve_wait);
+ 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
+ 	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
+ 			IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
+@@ -1961,7 +1966,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
+ 	 */
+ 	BUG_ON(list_empty(&cur_trans->list));
+ 
+-	list_del_init(&cur_trans->list);
+ 	if (cur_trans == fs_info->running_transaction) {
+ 		cur_trans->state = TRANS_STATE_COMMIT_DOING;
+ 		spin_unlock(&fs_info->trans_lock);
+@@ -1970,6 +1974,17 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
+ 
+ 		spin_lock(&fs_info->trans_lock);
+ 	}
++
++	/*
++	 * Now that we know no one else is still using the transaction we can
++	 * remove the transaction from the list of transactions. This prevents
++	 * the transaction kthread from cleaning up the transaction while some
++	 * other task is still using it, which could result in a use-after-free
++	 * on things like log trees, as it forces the transaction kthread to
++	 * wait for this transaction to be cleaned up by us.
++	 */
++	list_del_init(&cur_trans->list);
++
+ 	spin_unlock(&fs_info->trans_lock);
+ 
+ 	btrfs_cleanup_one_transaction(trans->transaction, fs_info);
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 6335716e513ff..364cfbb4c5c59 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -96,6 +96,13 @@ struct btrfs_transaction {
+ 
+ 	spinlock_t releasing_ebs_lock;
+ 	struct list_head releasing_ebs;
++
++	/*
++	 * The number of bytes currently reserved, by all transaction handles
++	 * attached to this transaction, for metadata extents of the chunk tree.
++	 */
++	atomic64_t chunk_bytes_reserved;
++	wait_queue_head_t chunk_reserve_wait;
+ };
+ 
+ #define __TRANS_FREEZABLE	(1U << 0)
+@@ -175,7 +182,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
+ 	spin_lock(&inode->lock);
+ 	inode->last_trans = trans->transaction->transid;
+ 	inode->last_sub_trans = inode->root->log_transid;
+-	inode->last_log_commit = inode->root->last_log_commit;
++	inode->last_log_commit = inode->last_sub_trans - 1;
+ 	spin_unlock(&inode->lock);
+ }
+ 
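
The new chunk_bytes_reserved counter and chunk_reserve_wait queue let a task wait until every transaction handle has released its chunk-tree reservation: release paths subtract from the atomic and wake waiters, as in btrfs_trans_release_chunk_metadata() above. A userspace analogue of the counter-plus-waitqueue shape, using a mutex and condition variable (illustrative names only):

    #include <pthread.h>
    #include <stdint.h>

    struct reserve_ctl {
            pthread_mutex_t lock;
            pthread_cond_t  released;       /* plays chunk_reserve_wait */
            int64_t         bytes_reserved; /* plays chunk_bytes_reserved */
    };

    static void release_bytes(struct reserve_ctl *rc, int64_t bytes)
    {
            pthread_mutex_lock(&rc->lock);
            rc->bytes_reserved -= bytes;
            if (rc->bytes_reserved == 0)    /* cond_wake_up() analogue */
                    pthread_cond_broadcast(&rc->released);
            pthread_mutex_unlock(&rc->lock);
    }

    /* Block until all outstanding reservations are gone. */
    static void wait_for_release(struct reserve_ctl *rc)
    {
            pthread_mutex_lock(&rc->lock);
            while (rc->bytes_reserved != 0)
                    pthread_cond_wait(&rc->released, &rc->lock);
            pthread_mutex_unlock(&rc->lock);
    }
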
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index eeb3ebe11d7ae..70b23a0d03b10 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -342,6 +342,13 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
+ 	if (!IS_ALIGNED(nr_sectors, zone_sectors))
+ 		zone_info->nr_zones++;
+ 
++	if (bdev_is_zoned(bdev) && zone_info->max_zone_append_size == 0) {
++		btrfs_err(fs_info, "zoned: device %pg does not support zone append",
++			  bdev);
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
+ 	if (!zone_info->seq_zones) {
+ 		ret = -ENOMEM;
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 5ddd20b62484d..fa896a1c8b075 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -834,7 +834,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
+ 		goto out;
+ 	}
+ 
+-	rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC);
++	rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
+ 	if (rc) {
+ 		root = ERR_PTR(rc);
+ 		goto out;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 24668eb006c63..3d62d52d730b5 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -488,6 +488,7 @@ server_unresponsive(struct TCP_Server_Info *server)
+ 	 */
+ 	if ((server->tcpStatus == CifsGood ||
+ 	    server->tcpStatus == CifsNeedNegotiate) &&
++	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
+ 	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
+ 		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
+ 			 (3 * server->echo_interval) / HZ);
+@@ -3175,17 +3176,29 @@ out:
+ int
+ cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
+ {
+-	int rc = 0;
++	int rc;
+ 
+-	smb3_parse_devname(devname, ctx);
++	if (devname) {
++		cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
++		rc = smb3_parse_devname(devname, ctx);
++		if (rc) {
++			cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
++			return rc;
++		}
++	}
+ 
+ 	if (mntopts) {
+ 		char *ip;
+ 
+-		cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts);
+ 		rc = smb3_parse_opt(mntopts, "ip", &ip);
+-		if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip,
+-						 strlen(ip))) {
++		if (rc) {
++			cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
++			return rc;
++		}
++
++		rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
++		kfree(ip);
++		if (!rc) {
+ 			cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
+ 			return -EINVAL;
+ 		}
+@@ -3205,7 +3218,7 @@ cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const c
+ 		return -EINVAL;
+ 	}
+ 
+-	return rc;
++	return 0;
+ }
+ 
+ static int
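
The reworked cifs_setup_volume_info() checks each parse step and frees the "ip" string it received from smb3_parse_opt() once the address is converted, where the old single-expression form leaked it. The ownership rule, sketched as self-contained C with stand-in parse and convert stubs:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in parser: hands back a heap string the caller now owns. */
    static int parse_opt_ip(const char *opts, char **out)
    {
            *out = strdup(opts);
            return *out ? 0 : -1;
    }

    /* Stand-in for cifs_convert_address(): nonzero on success. */
    static int convert_address(const char *ip)
    {
            return ip[0] != '\0';
    }

    static int setup_volume_info(const char *opts)
    {
            char *ip;
            int rc = parse_opt_ip(opts, &ip);

            if (rc) {
                    fprintf(stderr, "failed to parse ip options: %d\n", rc);
                    return rc;
            }

            rc = convert_address(ip);
            free(ip);               /* always released, converted or not */
            if (!rc) {
                    fprintf(stderr, "failed to convert ip address\n");
                    return -1;
            }
            return 0;
    }
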
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+index 78889024a7ed0..a7253eb2e9559 100644
+--- a/fs/cifs/fs_context.c
++++ b/fs/cifs/fs_context.c
+@@ -475,6 +475,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
+ 
+ 	/* move "pos" up to delimiter or NULL */
+ 	pos += len;
++	kfree(ctx->UNC);
+ 	ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
+ 	if (!ctx->UNC)
+ 		return -ENOMEM;
+@@ -485,6 +486,9 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
+ 	if (*pos == '/' || *pos == '\\')
+ 		pos++;
+ 
++	kfree(ctx->prepath);
++	ctx->prepath = NULL;
++
+ 	/* If pos is NULL then no prepath */
+ 	if (!*pos)
+ 		return 0;
+@@ -995,6 +999,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 			goto cifs_parse_mount_err;
+ 		}
+ 		ctx->max_channels = result.uint_32;
++		/* If more than one channel requested ... they want multichan */
++		if (result.uint_32 > 1)
++			ctx->multichannel = true;
+ 		break;
+ 	case Opt_handletimeout:
+ 		ctx->handle_timeout = result.uint_32;
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 63d517b9f2ffe..a92a1fb7cb526 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -97,6 +97,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 		return 0;
+ 	}
+ 
++	if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
++		cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
++		ses->chan_max = 1;
++		return 0;
++	}
++
+ 	/*
+ 	 * Make a copy of the iface list at the time and use that
+ 	 * instead so as to not hold the iface spinlock for opening
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index f703204fb185d..5df6daacc230b 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1763,18 +1763,14 @@ smb2_ioctl_query_info(const unsigned int xid,
+ 	}
+ 
+  iqinf_exit:
+-	kfree(vars);
+-	kfree(buffer);
+-	SMB2_open_free(&rqst[0]);
+-	if (qi.flags & PASSTHRU_FSCTL)
+-		SMB2_ioctl_free(&rqst[1]);
+-	else
+-		SMB2_query_info_free(&rqst[1]);
+-
+-	SMB2_close_free(&rqst[2]);
++	cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
++	cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
++	cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
+ 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ 	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
++	kfree(vars);
++	kfree(buffer);
+ 	return rc;
+ 
+ e_fault:
+@@ -2232,7 +2228,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
+ 
+ 	cifs_sb = CIFS_SB(inode->i_sb);
+ 
+-	utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ 	if (utf16_path == NULL) {
+ 		rc = -ENOMEM;
+ 		goto notify_exit;
+@@ -4178,7 +4174,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
+ 	}
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+-	return 1;
++	return -EAGAIN;
+ }
+ /*
+  * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 2199a9bfae8f7..29272d99102c5 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -841,6 +841,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 		req->SecurityMode = 0;
+ 
+ 	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
++	if (ses->chan_max > 1)
++		req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
+ 
+ 	/* ClientGUID must be zero for SMB2.02 dialect */
+ 	if (server->vals->protocol_id == SMB20_PROT_ID)
+@@ -1032,6 +1034,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 
+ 	pneg_inbuf->Capabilities =
+ 			cpu_to_le32(server->vals->req_capabilities);
++	if (tcon->ses->chan_max > 1)
++		pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
++
+ 	memcpy(pneg_inbuf->Guid, server->client_guid,
+ 					SMB2_CLIENT_GUID_SIZE);
+ 
+diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
+index cdf40a54a35d8..cf772c72ab2b8 100644
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -492,6 +492,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ 		goto out;
+ 	}
+ 
++	if (!dev_name) {
++		rc = -EINVAL;
++		err = "Device name cannot be null";
++		goto out;
++	}
++
+ 	rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
+ 	if (rc) {
+ 		err = "Error parsing options";
+diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
+index 9ad1615f44743..e8d04d808fa62 100644
+--- a/fs/erofs/erofs_fs.h
++++ b/fs/erofs/erofs_fs.h
+@@ -75,6 +75,9 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
+ #define EROFS_I_VERSION_BIT             0
+ #define EROFS_I_DATALAYOUT_BIT          1
+ 
++#define EROFS_I_ALL	\
++	((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1)
++
+ /* 32-byte reduced form of an ondisk inode */
+ struct erofs_inode_compact {
+ 	__le16 i_format;	/* inode format hints */
+diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
+index 119fdce1b5207..7ed2d73916928 100644
+--- a/fs/erofs/inode.c
++++ b/fs/erofs/inode.c
+@@ -44,6 +44,13 @@ static struct page *erofs_read_inode(struct inode *inode,
+ 	dic = page_address(page) + *ofs;
+ 	ifmt = le16_to_cpu(dic->i_format);
+ 
++	if (ifmt & ~EROFS_I_ALL) {
++		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
++			  ifmt, vi->nid);
++		err = -EOPNOTSUPP;
++		goto err_out;
++	}
++
+ 	vi->datalayout = erofs_inode_datalayout(ifmt);
+ 	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
+ 		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
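
EROFS_I_ALL collects every i_format bit this kernel understands, so erofs_read_inode() can refuse an image that sets unknown bits instead of silently misinterpreting it. The same known-bits guard in standalone form (the FMT_* names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define FMT_DATALAYOUT_BIT   1
    #define FMT_DATALAYOUT_BITS  3
    /* Every bit this reader understands; anything above is unsupported. */
    #define FMT_ALL ((1u << (FMT_DATALAYOUT_BIT + FMT_DATALAYOUT_BITS)) - 1)

    static int check_format(uint16_t ifmt)
    {
            if (ifmt & ~FMT_ALL) {
                    fprintf(stderr, "unsupported i_format %u\n", ifmt);
                    return -1;      /* -EOPNOTSUPP in the kernel */
            }
            return 0;
    }
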
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 3196474cbe24c..e42477fcbfa05 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -657,6 +657,12 @@ static void ep_done_scan(struct eventpoll *ep,
+ 	 */
+ 	list_splice(txlist, &ep->rdllist);
+ 	__pm_relax(ep->ws);
++
++	if (!list_empty(&ep->rdllist)) {
++		if (waitqueue_active(&ep->wq))
++			wake_up(&ep->wq);
++	}
++
+ 	write_unlock_irq(&ep->lock);
+ }
+ 
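
ep_done_scan() now wakes epoll waiters after splicing leftover events back onto the ready list, closing a window where a waiter could keep sleeping on a list that just became non-empty. The check-then-wake shape under one lock, sketched with a condition variable (illustrative names, not the eventpoll API):

    #include <pthread.h>

    struct ready_list {
            pthread_mutex_t lock;
            pthread_cond_t  wq;         /* waiters parked like on ep->wq */
            int             nr_ready;
    };

    /* Return leftover events to the ready list and wake sleepers. */
    static void done_scan(struct ready_list *rl, int leftover)
    {
            pthread_mutex_lock(&rl->lock);
            rl->nr_ready += leftover;   /* list_splice() analogue */
            if (rl->nr_ready > 0)
                    pthread_cond_broadcast(&rl->wq);
            pthread_mutex_unlock(&rl->lock);
    }
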
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+index 761c79c3a4ba2..411fb0a8da10d 100644
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -141,10 +141,6 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
+ 	kfree(sbi->vol_amap);
+ }
+ 
+-/*
+- * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
+- * the cluster heap.
+- */
+ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
+ {
+ 	int i, b;
+@@ -162,10 +158,6 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
+ 	return 0;
+ }
+ 
+-/*
+- * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
+- * the cluster heap.
+- */
+ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
+ {
+ 	int i, b;
+@@ -186,8 +178,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
+ 		int ret_discard;
+ 
+ 		ret_discard = sb_issue_discard(sb,
+-			exfat_cluster_to_sector(sbi, clu +
+-						EXFAT_RESERVED_CLUSTERS),
++			exfat_cluster_to_sector(sbi, clu),
+ 			(1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
+ 
+ 		if (ret_discard == -EOPNOTSUPP) {
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 7541d0b5d7069..312273ed8a9fe 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1088,8 +1088,10 @@ static int ext4_fc_perform_commit(journal_t *journal)
+ 		head.fc_tid = cpu_to_le32(
+ 			sbi->s_journal->j_running_transaction->t_tid);
+ 		if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
+-			(u8 *)&head, &crc))
++			(u8 *)&head, &crc)) {
++			ret = -ENOSPC;
+ 			goto out;
++		}
+ 	}
+ 
+ 	spin_lock(&sbi->s_fc_lock);
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 194f5d00fa326..7924634ab0bfe 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -371,15 +371,32 @@ truncate:
+ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ 				 int error, unsigned int flags)
+ {
+-	loff_t offset = iocb->ki_pos;
++	loff_t pos = iocb->ki_pos;
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+ 
+ 	if (error)
+ 		return error;
+ 
+-	if (size && flags & IOMAP_DIO_UNWRITTEN)
+-		return ext4_convert_unwritten_extents(NULL, inode,
+-						      offset, size);
++	if (size && flags & IOMAP_DIO_UNWRITTEN) {
++		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
++		if (error < 0)
++			return error;
++	}
++	/*
++	 * If we are extending the file, we have to update i_size here before
++	 * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
++	 * buffered reads could zero out too much from page cache pages. Update
++	 * of on-disk size will happen later in ext4_dio_write_iter() where
++	 * we have enough information to also perform orphan list handling etc.
++	 * Note that we perform all extending writes synchronously under
++	 * i_rwsem held exclusively so i_size update is safe here in that case.
++	 * If the write was not extending, we cannot see pos > i_size here
++	 * because operations reducing i_size like truncate wait for all
++	 * outstanding DIO before updating i_size.
++	 */
++	pos += size;
++	if (pos > i_size_read(inode))
++		i_size_write(inode, pos);
+ 
+ 	return 0;
+ }
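
The end_io change publishes the new i_size as soon as the DIO write's unwritten extents are converted, so racing buffered reads cannot observe a stale, shorter size while iomap_dio_rw() invalidates the page cache. Stripped of the locking the real code depends on, the extend-if-grown rule is just:

    #include <stdint.h>

    struct file_state {
            int64_t i_size;
    };

    /* Called at DIO completion with the write's start and byte count. */
    static void dio_end_io(struct file_state *f, int64_t pos, int64_t size)
    {
            pos += size;
            if (pos > f->i_size)    /* extending write: publish new size */
                    f->i_size = pos;
    }
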
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 633ae7becd61f..71d321b3b9844 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1292,7 +1292,8 @@ got:
+ 
+ 	ei->i_extra_isize = sbi->s_want_extra_isize;
+ 	ei->i_inline_off = 0;
+-	if (ext4_has_feature_inline_data(sb))
++	if (ext4_has_feature_inline_data(sb) &&
++	    (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
+ 		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ 	ret = inode;
+ 	err = dquot_alloc_inode(inode);
+@@ -1513,6 +1514,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
+ 	handle_t *handle;
+ 	ext4_fsblk_t blk;
+ 	int num, ret = 0, used_blks = 0;
++	unsigned long used_inos = 0;
+ 
+ 	/* This should not happen, but just to be sure check this */
+ 	if (sb_rdonly(sb)) {
+@@ -1543,22 +1545,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
+ 	 * used inodes so we need to skip blocks with used inodes in
+ 	 * inode table.
+ 	 */
+-	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
+-		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
+-			    ext4_itable_unused_count(sb, gdp)),
+-			    sbi->s_inodes_per_block);
+-
+-	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+-	    ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+-			       ext4_itable_unused_count(sb, gdp)) <
+-			      EXT4_FIRST_INO(sb)))) {
+-		ext4_error(sb, "Something is wrong with group %u: "
+-			   "used itable blocks: %d; "
+-			   "itable unused count: %u",
+-			   group, used_blks,
+-			   ext4_itable_unused_count(sb, gdp));
+-		ret = 1;
+-		goto err_out;
++	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
++		used_inos = EXT4_INODES_PER_GROUP(sb) -
++			    ext4_itable_unused_count(sb, gdp);
++		used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
++
++		/* Bogus inode unused count? */
++		if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
++			ext4_error(sb, "Something is wrong with group %u: "
++				   "used itable blocks: %d; "
++				   "itable unused count: %u",
++				   group, used_blks,
++				   ext4_itable_unused_count(sb, gdp));
++			ret = 1;
++			goto err_out;
++		}
++
++		used_inos += group * EXT4_INODES_PER_GROUP(sb);
++		/*
++		 * Are there some uninitialized inodes in the inode table
++		 * before the first normal inode?
++		 */
++		if ((used_blks != sbi->s_itb_per_group) &&
++		     (used_inos < EXT4_FIRST_INO(sb))) {
++			ext4_error(sb, "Something is wrong with group %u: "
++				   "itable unused count: %u; "
++				   "itables initialized count: %ld",
++				   group, ext4_itable_unused_count(sb, gdp),
++				   used_inos);
++			ret = 1;
++			goto err_out;
++		}
+ 	}
+ 
+ 	blk = ext4_inode_table(sb, gdp) + used_blks;
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index a2cf35066f46b..0796bfa72829c 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -315,6 +315,12 @@ static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
+ static bool dax_compatible(struct inode *inode, unsigned int oldflags,
+ 			   unsigned int flags)
+ {
++	/* Allow the DAX flag to be changed on inline directories */
++	if (S_ISDIR(inode->i_mode)) {
++		flags &= ~EXT4_INLINE_DATA_FL;
++		oldflags &= ~EXT4_INLINE_DATA_FL;
++	}
++
+ 	if (flags & EXT4_DAX_FL) {
+ 		if ((oldflags & EXT4_DAX_MUT_EXCL) ||
+ 		     ext4_test_inode_state(inode,
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 795c3ff2907c2..68fbeedd627bc 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -56,7 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
+ 	wait_on_buffer(bh);
+ 	sb_end_write(sb);
+ 	if (unlikely(!buffer_uptodate(bh)))
+-		return 1;
++		return -EIO;
+ 
+ 	return 0;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index b9693680463aa..77c1cb2582623 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -667,9 +667,6 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+ 			ext4_commit_super(sb);
+ 	}
+ 
+-	if (sb_rdonly(sb) || continue_fs)
+-		return;
+-
+ 	/*
+ 	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
+ 	 * could panic during 'reboot -f' as the underlying device got already
+@@ -679,6 +676,10 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+ 		panic("EXT4-fs (device %s): panic forced after error\n",
+ 			sb->s_id);
+ 	}
++
++	if (sb_rdonly(sb) || continue_fs)
++		return;
++
+ 	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
+ 	/*
+ 	 * Make sure updated value of ->s_mount_flags will be visible before
+@@ -3023,9 +3024,6 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ 		sb->s_flags &= ~SB_RDONLY;
+ 	}
+ #ifdef CONFIG_QUOTA
+-	/* Needed for iput() to work correctly and not trash data */
+-	sb->s_flags |= SB_ACTIVE;
+-
+ 	/*
+ 	 * Turn on quotas which were not enabled for read-only mounts if
+ 	 * filesystem has quota feature, so that they are updated correctly.
+@@ -5561,8 +5559,10 @@ static int ext4_commit_super(struct super_block *sb)
+ 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
+ 	int error = 0;
+ 
+-	if (!sbh || block_device_ejected(sb))
+-		return error;
++	if (!sbh)
++		return -EINVAL;
++	if (block_device_ejected(sb))
++		return -ENODEV;
+ 
+ 	ext4_update_super(sb);
+ 
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 4b0e2e3c2c886..45c8cf1afe666 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -2785,6 +2785,9 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
+ 		struct f2fs_nat_entry raw_ne;
+ 		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
+ 
++		if (f2fs_check_nid_range(sbi, nid))
++			continue;
++
+ 		raw_ne = nat_in_journal(journal, i);
+ 
+ 		ne = __lookup_nat_cache(nm_i, nid);
+diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
+index 054ec852b5ea4..15ba36926fad7 100644
+--- a/fs/f2fs/verity.c
++++ b/fs/f2fs/verity.c
+@@ -152,40 +152,73 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
+ 				  size_t desc_size, u64 merkle_tree_size)
+ {
+ 	struct inode *inode = file_inode(filp);
++	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
+ 	struct fsverity_descriptor_location dloc = {
+ 		.version = cpu_to_le32(F2FS_VERIFY_VER),
+ 		.size = cpu_to_le32(desc_size),
+ 		.pos = cpu_to_le64(desc_pos),
+ 	};
+-	int err = 0;
++	int err = 0, err2 = 0;
+ 
+-	if (desc != NULL) {
+-		/* Succeeded; write the verity descriptor. */
+-		err = pagecache_write(inode, desc, desc_size, desc_pos);
++	/*
++	 * If an error already occurred (which fs/verity/ signals by passing
++	 * desc == NULL), then only clean-up is needed.
++	 */
++	if (desc == NULL)
++		goto cleanup;
+ 
+-		/* Write all pages before clearing FI_VERITY_IN_PROGRESS. */
+-		if (!err)
+-			err = filemap_write_and_wait(inode->i_mapping);
+-	}
++	/* Append the verity descriptor. */
++	err = pagecache_write(inode, desc, desc_size, desc_pos);
++	if (err)
++		goto cleanup;
++
++	/*
++	 * Write all pages (both data and verity metadata).  Note that this must
++	 * happen before clearing FI_VERITY_IN_PROGRESS; otherwise pages beyond
++	 * i_size won't be written properly.  For crash consistency, this also
++	 * must happen before the verity inode flag gets persisted.
++	 */
++	err = filemap_write_and_wait(inode->i_mapping);
++	if (err)
++		goto cleanup;
++
++	/* Set the verity xattr. */
++	err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
++			    F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
++			    NULL, XATTR_CREATE);
++	if (err)
++		goto cleanup;
+ 
+-	/* If we failed, truncate anything we wrote past i_size. */
+-	if (desc == NULL || err)
+-		f2fs_truncate(inode);
++	/* Finally, set the verity inode flag. */
++	file_set_verity(inode);
++	f2fs_set_inode_flags(inode);
++	f2fs_mark_inode_dirty_sync(inode, true);
+ 
+ 	clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
++	return 0;
+ 
+-	if (desc != NULL && !err) {
+-		err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
+-				    F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
+-				    NULL, XATTR_CREATE);
+-		if (!err) {
+-			file_set_verity(inode);
+-			f2fs_set_inode_flags(inode);
+-			f2fs_mark_inode_dirty_sync(inode, true);
+-		}
++cleanup:
++	/*
++	 * Verity failed to be enabled, so clean up by truncating any verity
++	 * metadata that was written beyond i_size (both from cache and from
++	 * disk) and clearing FI_VERITY_IN_PROGRESS.
++	 *
++	 * Taking i_gc_rwsem[WRITE] is needed to stop f2fs garbage collection
++	 * from re-instantiating cached pages we are truncating (since unlike
++	 * normal file accesses, garbage collection isn't limited by i_size).
++	 */
++	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++	truncate_inode_pages(inode->i_mapping, inode->i_size);
++	err2 = f2fs_truncate(inode);
++	if (err2) {
++		f2fs_err(sbi, "Truncating verity metadata failed (errno=%d)",
++			 err2);
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
+ 	}
+-	return err;
++	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++	clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
++	return err ?: err2;
+ }
+ 
+ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
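
The rewritten f2fs_end_enable_verity() drains every failure through a single cleanup label and reports the primary error while still surfacing a cleanup failure: the GNU `err ?: err2` form returns err when nonzero, otherwise err2. The pattern in a standalone sketch (do_step is a stub; builds with gcc or clang, which support `?:` as the kernel does):

    /* Uses the GNU "x ?: y" extension, as in the kernel. */
    static int do_step(int fail) { return fail ? -5 : 0; }

    static int enable_feature(int fail_main, int fail_cleanup)
    {
            int err, err2 = 0;

            err = do_step(fail_main);
            if (err)
                    goto cleanup;
            return 0;

    cleanup:
            err2 = do_step(fail_cleanup);   /* best-effort undo */
            return err ?: err2;             /* first error wins */
    }
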
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 8cccecb55fb80..eff4abaa87da0 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1099,6 +1099,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
+ 	struct fuse_file *ff = file->private_data;
+ 	struct fuse_mount *fm = ff->fm;
+ 	unsigned int offset, i;
++	bool short_write;
+ 	int err;
+ 
+ 	for (i = 0; i < ap->num_pages; i++)
+@@ -1113,32 +1114,38 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
+ 	if (!err && ia->write.out.size > count)
+ 		err = -EIO;
+ 
++	short_write = ia->write.out.size < count;
+ 	offset = ap->descs[0].offset;
+ 	count = ia->write.out.size;
+ 	for (i = 0; i < ap->num_pages; i++) {
+ 		struct page *page = ap->pages[i];
+ 
+-		if (!err && !offset && count >= PAGE_SIZE)
+-			SetPageUptodate(page);
+-
+-		if (count > PAGE_SIZE - offset)
+-			count -= PAGE_SIZE - offset;
+-		else
+-			count = 0;
+-		offset = 0;
+-
+-		unlock_page(page);
++		if (err) {
++			ClearPageUptodate(page);
++		} else {
++			if (count >= PAGE_SIZE - offset)
++				count -= PAGE_SIZE - offset;
++			else {
++				if (short_write)
++					ClearPageUptodate(page);
++				count = 0;
++			}
++			offset = 0;
++		}
++		if (ia->write.page_locked && (i == ap->num_pages - 1))
++			unlock_page(page);
+ 		put_page(page);
+ 	}
+ 
+ 	return err;
+ }
+ 
+-static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
++static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
+ 				     struct address_space *mapping,
+ 				     struct iov_iter *ii, loff_t pos,
+ 				     unsigned int max_pages)
+ {
++	struct fuse_args_pages *ap = &ia->ap;
+ 	struct fuse_conn *fc = get_fuse_conn(mapping->host);
+ 	unsigned offset = pos & (PAGE_SIZE - 1);
+ 	size_t count = 0;
+@@ -1191,6 +1198,16 @@ static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
+ 		if (offset == PAGE_SIZE)
+ 			offset = 0;
+ 
+ 		/* If we copied a full page, mark it uptodate */
++		if (tmp == PAGE_SIZE)
++			SetPageUptodate(page);
++
++		if (PageUptodate(page)) {
++			unlock_page(page);
++		} else {
++			ia->write.page_locked = true;
++			break;
++		}
+ 		if (!fc->big_writes)
+ 			break;
+ 	} while (iov_iter_count(ii) && count < fc->max_write &&
+@@ -1234,7 +1251,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
+ 			break;
+ 		}
+ 
+-		count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
++		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
+ 		if (count <= 0) {
+ 			err = count;
+ 		} else {
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 63d97a15ffde9..74d888c78fa4e 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -912,6 +912,7 @@ struct fuse_io_args {
+ 		struct {
+ 			struct fuse_write_in in;
+ 			struct fuse_write_out out;
++			bool page_locked;
+ 		} write;
+ 	};
+ 	struct fuse_args_pages ap;
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index 4ee6f734ba838..1e5affed158e9 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -896,6 +896,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
+ out_vqs:
+ 	vdev->config->reset(vdev);
+ 	virtio_fs_cleanup_vqs(vdev, fs);
++	kfree(fs->vqs);
+ 
+ out:
+ 	vdev->priv = NULL;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index dff34975d86bc..0b5fbbd969cbd 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1008,7 +1008,7 @@ static void io_uring_del_task_file(unsigned long index);
+ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ 					 struct task_struct *task,
+ 					 struct files_struct *files);
+-static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
++static void io_uring_cancel_sqpoll(struct io_sq_data *sqd);
+ static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
+ static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
+ 			struct io_ring_ctx *ctx);
+@@ -6710,6 +6710,10 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
+ 		if (!list_empty(&ctx->iopoll_list))
+ 			io_do_iopoll(ctx, &nr_events, 0);
+ 
++		/*
++		 * Don't submit if refs are dying, good for io_uring_register(),
++		 * but it is also relied upon by io_ring_exit_work()
++		 */
+ 		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
+ 		    !(ctx->flags & IORING_SETUP_R_DISABLED))
+ 			ret = io_submit_sqes(ctx, to_submit);
+@@ -6832,15 +6836,14 @@ static int io_sq_thread(void *data)
+ 		timeout = jiffies + sqd->sq_thread_idle;
+ 	}
+ 
+-	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+-		io_uring_cancel_sqpoll(ctx);
++	io_uring_cancel_sqpoll(sqd);
+ 	sqd->thread = NULL;
+ 	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ 		io_ring_set_wakeup_flag(ctx);
+-	mutex_unlock(&sqd->lock);
+-
+ 	io_run_task_work();
+ 	io_run_task_work_head(&sqd->park_task_work);
++	mutex_unlock(&sqd->lock);
++
+ 	complete(&sqd->exited);
+ 	do_exit(0);
+ }
+@@ -7200,8 +7203,6 @@ static void io_sq_thread_finish(struct io_ring_ctx *ctx)
+ 
+ 		io_put_sq_data(sqd);
+ 		ctx->sq_data = NULL;
+-		if (ctx->sq_creds)
+-			put_cred(ctx->sq_creds);
+ 	}
+ }
+ 
+@@ -8469,6 +8470,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+ 	mutex_unlock(&ctx->uring_lock);
+ 	io_eventfd_unregister(ctx);
+ 	io_destroy_buffers(ctx);
++	if (ctx->sq_creds)
++		put_cred(ctx->sq_creds);
+ 
+ #if defined(CONFIG_UNIX)
+ 	if (ctx->ring_sock) {
+@@ -8568,6 +8571,13 @@ static void io_tctx_exit_cb(struct callback_head *cb)
+ 	complete(&work->completion);
+ }
+ 
++static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
++{
++	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
++
++	return req->ctx == data;
++}
++
+ static void io_ring_exit_work(struct work_struct *work)
+ {
+ 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
+@@ -8576,14 +8586,6 @@ static void io_ring_exit_work(struct work_struct *work)
+ 	struct io_tctx_node *node;
+ 	int ret;
+ 
+-	/* prevent SQPOLL from submitting new requests */
+-	if (ctx->sq_data) {
+-		io_sq_thread_park(ctx->sq_data);
+-		list_del_init(&ctx->sqd_list);
+-		io_sqd_update_thread_idle(ctx->sq_data);
+-		io_sq_thread_unpark(ctx->sq_data);
+-	}
+-
+ 	/*
+ 	 * If we're doing polled IO and end up having requests being
+ 	 * submitted async (out-of-line), then completions can come in while
+@@ -8592,6 +8594,17 @@ static void io_ring_exit_work(struct work_struct *work)
+ 	 */
+ 	do {
+ 		io_uring_try_cancel_requests(ctx, NULL, NULL);
++		if (ctx->sq_data) {
++			struct io_sq_data *sqd = ctx->sq_data;
++			struct task_struct *tsk;
++
++			io_sq_thread_park(sqd);
++			tsk = sqd->thread;
++			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
++				io_wq_cancel_cb(tsk->io_uring->io_wq,
++						io_cancel_ctx_cb, ctx, true);
++			io_sq_thread_unpark(sqd);
++		}
+ 
+ 		WARN_ON_ONCE(time_after(jiffies, timeout));
+ 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
+@@ -8736,13 +8749,6 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
+ 	return true;
+ }
+ 
+-static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
+-{
+-	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+-
+-	return req->ctx == data;
+-}
+-
+ static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
+ {
+ 	struct io_tctx_node *node;
+@@ -8935,11 +8941,11 @@ static s64 tctx_inflight(struct io_uring_task *tctx)
+ static void io_sqpoll_cancel_cb(struct callback_head *cb)
+ {
+ 	struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
+-	struct io_ring_ctx *ctx = work->ctx;
+-	struct io_sq_data *sqd = ctx->sq_data;
++	struct io_sq_data *sqd = work->ctx->sq_data;
+ 
+ 	if (sqd->thread)
+-		io_uring_cancel_sqpoll(ctx);
++		io_uring_cancel_sqpoll(sqd);
++	list_del_init(&work->ctx->sqd_list);
+ 	complete(&work->completion);
+ }
+ 
+@@ -8950,7 +8956,6 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
+ 	struct task_struct *task;
+ 
+ 	io_sq_thread_park(sqd);
+-	list_del_init(&ctx->sqd_list);
+ 	io_sqd_update_thread_idle(sqd);
+ 	task = sqd->thread;
+ 	if (task) {
+@@ -8958,6 +8963,8 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
+ 		init_task_work(&work.task_work, io_sqpoll_cancel_cb);
+ 		io_task_work_add_head(&sqd->park_task_work, &work.task_work);
+ 		wake_up_process(task);
++	} else {
++		list_del_init(&ctx->sqd_list);
+ 	}
+ 	io_sq_thread_unpark(sqd);
+ 
+@@ -8991,14 +8998,16 @@ void __io_uring_files_cancel(struct files_struct *files)
+ }
+ 
+ /* should only be called by SQPOLL task */
+-static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
++static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
+ {
+-	struct io_sq_data *sqd = ctx->sq_data;
+ 	struct io_uring_task *tctx = current->io_uring;
++	struct io_ring_ctx *ctx;
+ 	s64 inflight;
+ 	DEFINE_WAIT(wait);
+ 
+-	WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
++	if (!current->io_uring)
++		return;
++	WARN_ON_ONCE(!sqd || sqd->thread != current);
+ 
+ 	atomic_inc(&tctx->in_idle);
+ 	do {
+@@ -9006,7 +9015,8 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
+ 		inflight = tctx_inflight(tctx);
+ 		if (!inflight)
+ 			break;
+-		io_uring_try_cancel_requests(ctx, current, NULL);
++		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
++			io_uring_try_cancel_requests(ctx, current, NULL);
+ 
+ 		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+ 		/*
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 9396666b73145..e8fc45fd751fb 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -349,7 +349,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
+ 	}
+ 
+ alloc_transaction:
+-	if (!journal->j_running_transaction) {
++	/*
++	 * This check is racy but it is just an optimization: allocate a new
++	 * transaction early if there is a high chance we'll need it. If we
++	 * guess wrong, we'll retry or free the unused transaction.
++	 */
++	if (!data_race(journal->j_running_transaction)) {
+ 		/*
+ 		 * If __GFP_FS is not present, then we may be being called from
+ 		 * inside the fs writeback layer, so we MUST NOT fail.
+@@ -1474,8 +1479,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ 	 * crucial to catch bugs so let's do a reliable check until the
+ 	 * lockless handling is fully proven.
+ 	 */
+-	if (jh->b_transaction != transaction &&
+-	    jh->b_next_transaction != transaction) {
++	if (data_race(jh->b_transaction != transaction &&
++	    jh->b_next_transaction != transaction)) {
+ 		spin_lock(&jh->b_state_lock);
+ 		J_ASSERT_JH(jh, jh->b_transaction == transaction ||
+ 				jh->b_next_transaction == transaction);
+@@ -1483,8 +1488,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ 	}
+ 	if (jh->b_modified == 1) {
+ 		/* If it's in our transaction it must be in BJ_Metadata list. */
+-		if (jh->b_transaction == transaction &&
+-		    jh->b_jlist != BJ_Metadata) {
++		if (data_race(jh->b_transaction == transaction &&
++		    jh->b_jlist != BJ_Metadata)) {
+ 			spin_lock(&jh->b_state_lock);
+ 			if (jh->b_transaction == transaction &&
+ 			    jh->b_jlist != BJ_Metadata)
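
The data_race() annotations mark the unlocked fast-path reads as intentionally racy for KCSAN; when the cheap check fails, the code takes b_state_lock and re-checks, so correctness never rests on the racy load. The same double-checked shape in portable C11 atomics (a sketch, not the jbd2 API):

    #include <assert.h>
    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic int owner;   /* which transaction owns this buffer */

    static void check_owner(int tid)
    {
            /* Racy fast path: relaxed load, no lock -- data_race()'s role. */
            if (atomic_load_explicit(&owner, memory_order_relaxed) != tid) {
                    /* Slow path: lock and recheck before trusting it. */
                    pthread_mutex_lock(&state_lock);
                    assert(atomic_load(&owner) == tid);
                    pthread_mutex_unlock(&state_lock);
            }
    }
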
+diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
+index f8fb89b10227c..4fc8cd698d1a4 100644
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -57,6 +57,7 @@ const struct file_operations jffs2_file_operations =
+ 	.mmap =		generic_file_readonly_mmap,
+ 	.fsync =	jffs2_fsync,
+ 	.splice_read =	generic_file_splice_read,
++	.splice_write = iter_file_splice_write,
+ };
+ 
+ /* jffs2_file_inode_operations */
+diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
+index db72a9d2d0afb..b676056826beb 100644
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -1079,7 +1079,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
+ 	memcpy(&fd->name, rd->name, checkedlen);
+ 	fd->name[checkedlen] = 0;
+ 
+-	crc = crc32(0, fd->name, rd->nsize);
++	crc = crc32(0, fd->name, checkedlen);
+ 	if (crc != je32_to_cpu(rd->name_crc)) {
+ 		pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ 			  __func__, ofs, je32_to_cpu(rd->name_crc), crc);
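
The one-line jffs2 fix computes the name CRC over checkedlen, the length that was actually validated and copied, rather than the raw on-media nsize, so a corrupt length field can no longer make crc32() read past the copied name. The bound-the-length rule, sketched with zlib's crc32() (link with -lz):

    #include <string.h>
    #include <zlib.h>

    /* Verify a name field: never trust raw_len beyond the buffer. */
    static int name_crc_ok(const unsigned char *name, size_t raw_len,
                           size_t buf_len, unsigned long expected)
    {
            size_t checkedlen = raw_len < buf_len ? raw_len : buf_len;

            /* CRC exactly the bytes we validated, not the claimed length. */
            return crc32(0L, name, (uInt)checkedlen) == expected;
    }
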
+diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
+index 971a9251c1d97..902db1262d2bf 100644
+--- a/fs/nfs/fs_context.c
++++ b/fs/nfs/fs_context.c
+@@ -973,6 +973,15 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
+ 			memset(mntfh->data + mntfh->size, 0,
+ 			       sizeof(mntfh->data) - mntfh->size);
+ 
++		/*
++		 * For proto == XPRT_TRANSPORT_UDP, which is what uses
++		 * to_exponential (implying a shift): limit the shift value
++		 * to BITS_PER_LONG (majortimeo is an unsigned long).
++		 */
++		if (!(data->flags & NFS_MOUNT_TCP)) /* this will be UDP */
++			if (data->retrans >= 64) /* shift value is too large */
++				goto out_invalid_data;
++
+ 		/*
+ 		 * Translate to nfs_fs_context, which nfs_fill_super
+ 		 * can deal with.
+@@ -1073,6 +1082,9 @@ out_no_address:
+ 
+ out_invalid_fh:
+ 	return nfs_invalf(fc, "NFS: invalid root filehandle");
++
++out_invalid_data:
++	return nfs_invalf(fc, "NFS: invalid binary mount data");
+ }
+ 
+ #if IS_ENABLED(CONFIG_NFS_V4)
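
The retrans check above guards a later left shift by a user-controlled count: shifting an unsigned long by BITS_PER_LONG or more is undefined behavior, so bogus binary mount data is rejected before the shift happens. A standalone version of the guard:

    #include <limits.h>

    /* Reject shift counts that would overflow an unsigned long timeout. */
    static int timeout_for_retrans(unsigned int retrans, unsigned long *out)
    {
            if (retrans >= sizeof(unsigned long) * CHAR_BIT)
                    return -1;              /* invalid mount data */
            *out = 1UL << retrans;          /* safe: count < BITS_PER_LONG */
            return 0;
    }
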
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 102b66e0bdeff..f726f8b12b7e8 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1344,7 +1344,7 @@ _pnfs_return_layout(struct inode *ino)
+ 	}
+ 	valid_layout = pnfs_layout_is_valid(lo);
+ 	pnfs_clear_layoutcommit(ino, &tmp_list);
+-	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
++	pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
+ 
+ 	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
+ 		struct pnfs_layout_range range = {
+@@ -2468,6 +2468,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
+ 
+ 	assert_spin_locked(&lo->plh_inode->i_lock);
+ 
++	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
++		tmp_list = &lo->plh_return_segs;
++
+ 	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+ 		if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
+ 			dprintk("%s: marking lseg %p iomode %d "
+@@ -2475,6 +2478,8 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
+ 				lseg, lseg->pls_range.iomode,
+ 				lseg->pls_range.offset,
+ 				lseg->pls_range.length);
++			if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
++				tmp_list = &lo->plh_return_segs;
+ 			if (mark_lseg_invalid(lseg, tmp_list))
+ 				continue;
+ 			remaining++;
+diff --git a/fs/stat.c b/fs/stat.c
+index fbc171d038aa5..1fa38bdec1a68 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -86,12 +86,20 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
+ 	/* SB_NOATIME means filesystem supplies dummy atime value */
+ 	if (inode->i_sb->s_flags & SB_NOATIME)
+ 		stat->result_mask &= ~STATX_ATIME;
++
++	/*
++	 * Note: If you add another clause to set an attribute flag, please
++	 * update attributes_mask below.
++	 */
+ 	if (IS_AUTOMOUNT(inode))
+ 		stat->attributes |= STATX_ATTR_AUTOMOUNT;
+ 
+ 	if (IS_DAX(inode))
+ 		stat->attributes |= STATX_ATTR_DAX;
+ 
++	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
++				  STATX_ATTR_DAX);
++
+ 	mnt_userns = mnt_user_ns(path->mnt);
+ 	if (inode->i_op->getattr)
+ 		return inode->i_op->getattr(mnt_userns, path, stat,
+diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
+index 0f8a6a16421b4..1929ec63a0cb6 100644
+--- a/fs/ubifs/replay.c
++++ b/fs/ubifs/replay.c
+@@ -223,7 +223,8 @@ static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
+ 	 */
+ 	list_for_each_entry_reverse(r, &c->replay_list, list) {
+ 		ubifs_assert(c, r->sqnum >= rino->sqnum);
+-		if (key_inum(c, &r->key) == key_inum(c, &rino->key))
++		if (key_inum(c, &r->key) == key_inum(c, &rino->key) &&
++		    key_type(c, &r->key) == UBIFS_INO_KEY)
+ 			return r->deletion == 0;
+ 
+ 	}
+diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
+index fcde59c65a81b..cb3d6b1c655de 100644
+--- a/include/crypto/acompress.h
++++ b/include/crypto/acompress.h
+@@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
+  * crypto_free_acomp() -- free ACOMPRESS tfm handle
+  *
+  * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_acomp(struct crypto_acomp *tfm)
+ {
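
The kerneldoc additions in these crypto headers all document the same kfree()-style contract: crypto_free_*() silently ignores NULL and error pointers, so error paths can free unconditionally. That convention in self-contained C, with IS_ERR_LIKE mirroring the kernel's IS_ERR() range check (a sketch, not the crypto API):

    #include <stdlib.h>

    struct tfm { int dummy; };

    /* Mirrors the kernel's IS_ERR(): the top 4095 values are error codes. */
    #define IS_ERR_LIKE(p) ((unsigned long)(p) >= (unsigned long)-4095)

    /* Free a handle; NULL and error pointers are silently ignored,
     * so callers can free unconditionally on their error paths. */
    static void free_tfm(struct tfm *t)
    {
            if (!t || IS_ERR_LIKE(t))
                    return;
            free(t);
    }
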
+diff --git a/include/crypto/aead.h b/include/crypto/aead.h
+index fcc12c593ef8b..e728469c4cccb 100644
+--- a/include/crypto/aead.h
++++ b/include/crypto/aead.h
+@@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
+ /**
+  * crypto_free_aead() - zeroize and free aead handle
+  * @tfm: cipher handle to be freed
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_aead(struct crypto_aead *tfm)
+ {
+diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
+index 1d3aa252cabaf..5764b46bd1ec1 100644
+--- a/include/crypto/akcipher.h
++++ b/include/crypto/akcipher.h
+@@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
+  * crypto_free_akcipher() - free AKCIPHER tfm handle
+  *
+  * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
+ {
+diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
+index 3a1c72fdb7cf5..dabaee6987186 100644
+--- a/include/crypto/chacha.h
++++ b/include/crypto/chacha.h
+@@ -47,13 +47,18 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
+ 		hchacha_block_generic(state, out, nrounds);
+ }
+ 
+-void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
+-static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
++static inline void chacha_init_consts(u32 *state)
+ {
+ 	state[0]  = 0x61707865; /* "expa" */
+ 	state[1]  = 0x3320646e; /* "nd 3" */
+ 	state[2]  = 0x79622d32; /* "2-by" */
+ 	state[3]  = 0x6b206574; /* "te k" */
++}
++
++void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
++static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
++{
++	chacha_init_consts(state);
+ 	state[4]  = key[0];
+ 	state[5]  = key[1];
+ 	state[6]  = key[2];
+diff --git a/include/crypto/hash.h b/include/crypto/hash.h
+index 13f8a6a54ca87..b2bc1e46e86a7 100644
+--- a/include/crypto/hash.h
++++ b/include/crypto/hash.h
+@@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
+ /**
+  * crypto_free_ahash() - zeroize and free the ahash handle
+  * @tfm: cipher handle to be freed
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_ahash(struct crypto_ahash *tfm)
+ {
+@@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
+ /**
+  * crypto_free_shash() - zeroize and free the message digest handle
+  * @tfm: cipher handle to be freed
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_shash(struct crypto_shash *tfm)
+ {
+diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
+index 88b591215d5c8..cccceadc164b9 100644
+--- a/include/crypto/kpp.h
++++ b/include/crypto/kpp.h
+@@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
+  * crypto_free_kpp() - free KPP tfm handle
+  *
+  * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_kpp(struct crypto_kpp *tfm)
+ {
+diff --git a/include/crypto/rng.h b/include/crypto/rng.h
+index 8b4b844b4eef8..17bb3673d3c17 100644
+--- a/include/crypto/rng.h
++++ b/include/crypto/rng.h
+@@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
+ /**
+  * crypto_free_rng() - zeroize and free RNG handle
+  * @tfm: cipher handle to be freed
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_rng(struct crypto_rng *tfm)
+ {
+diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
+index 6a733b171a5d0..ef0fc9ed4342e 100644
+--- a/include/crypto/skcipher.h
++++ b/include/crypto/skcipher.h
+@@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
+ /**
+  * crypto_free_skcipher() - zeroize and free cipher handle
+  * @tfm: cipher handle to be freed
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
+ {
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 55de385c839cf..647744d8514e0 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -331,7 +331,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq)
+ {
+ 	res->start = irq;
+ 	res->end = irq;
+-	res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
++	res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
+ }
+ 
+ extern struct address_space *iomem_get_mapping(void);
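
Changing irqresource_disabled() from `=` to `|=` keeps whatever flags the caller already set on the resource and merely adds the IRQ/disabled/unset markers, where plain assignment clobbered them. A tiny self-contained demonstration of the difference:

    #include <assert.h>

    #define R_IO        0x01
    #define R_IRQ       0x02
    #define R_DISABLED  0x10
    #define R_UNSET     0x20

    struct res { unsigned int flags; };

    static void mark_irq_disabled(struct res *r)
    {
            /* OR keeps whatever the caller set; '=' would have erased it. */
            r->flags |= R_IRQ | R_DISABLED | R_UNSET;
    }

    int main(void)
    {
            struct res r = { .flags = R_IO };

            mark_irq_disabled(&r);
            assert(r.flags == (R_IO | R_IRQ | R_DISABLED | R_UNSET));
            return 0;
    }
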
+diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
+index 1dbabf1b3cb81..6e0f66a2e7279 100644
+--- a/include/linux/mfd/da9063/registers.h
++++ b/include/linux/mfd/da9063/registers.h
+@@ -1037,6 +1037,9 @@
+ #define		DA9063_NONKEY_PIN_AUTODOWN	0x02
+ #define		DA9063_NONKEY_PIN_AUTOFLPRT	0x03
+ 
++/* DA9063_REG_CONFIG_J (addr=0x10F) */
++#define DA9063_TWOWIRE_TO			0x40
++
+ /* DA9063_REG_MON_REG_5 (addr=0x116) */
+ #define DA9063_MON_A8_IDX_MASK			0x07
+ #define		DA9063_MON_A8_IDX_NONE		0x00
+diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
+index 74d4e193966a9..9b54ca13eac3c 100644
+--- a/include/linux/mfd/intel-m10-bmc.h
++++ b/include/linux/mfd/intel-m10-bmc.h
+@@ -11,7 +11,7 @@
+ 
+ #define M10BMC_LEGACY_SYS_BASE		0x300400
+ #define M10BMC_SYS_BASE			0x300800
+-#define M10BMC_MEM_END			0x200000fc
++#define M10BMC_MEM_END			0x1fffffff
+ 
+ /* Register offset of system registers */
+ #define NIOS2_FW_VERSION		0x0
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 26a3c7bc29ae5..a3a4e374f8028 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -302,9 +302,6 @@ struct mmc_host {
+ 	u32			ocr_avail_sdio;	/* SDIO-specific OCR */
+ 	u32			ocr_avail_sd;	/* SD-specific OCR */
+ 	u32			ocr_avail_mmc;	/* MMC-specific OCR */
+-#ifdef CONFIG_PM_SLEEP
+-	struct notifier_block	pm_notify;
+-#endif
+ 	struct wakeup_source	*ws;		/* Enable consume of uevents */
+ 	u32			max_current_330;
+ 	u32			max_current_300;
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 3f7f89ea5e512..3d478abf411cd 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -607,6 +607,7 @@ struct swevent_hlist {
+ #define PERF_ATTACH_TASK_DATA	0x08
+ #define PERF_ATTACH_ITRACE	0x10
+ #define PERF_ATTACH_SCHED_CB	0x20
++#define PERF_ATTACH_CHILD	0x40
+ 
+ struct perf_cgroup;
+ struct perf_buffer;
+diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
+index 111a40d0d3d50..8d5f4f40fb418 100644
+--- a/include/linux/power/bq27xxx_battery.h
++++ b/include/linux/power/bq27xxx_battery.h
+@@ -53,7 +53,6 @@ struct bq27xxx_reg_cache {
+ 	int capacity;
+ 	int energy;
+ 	int flags;
+-	int power_avg;
+ 	int health;
+ };
+ 
+diff --git a/include/linux/reset.h b/include/linux/reset.h
+index b9109efa2a5cd..9700124affa34 100644
+--- a/include/linux/reset.h
++++ b/include/linux/reset.h
+@@ -47,6 +47,11 @@ static inline int reset_control_reset(struct reset_control *rstc)
+ 	return 0;
+ }
+ 
++static inline int reset_control_rearm(struct reset_control *rstc)
++{
++	return 0;
++}
++
+ static inline int reset_control_assert(struct reset_control *rstc)
+ {
+ 	return 0;
+diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
+index 167ca8c8424f6..2fe4019b749f6 100644
+--- a/include/media/v4l2-ctrls.h
++++ b/include/media/v4l2-ctrls.h
+@@ -301,12 +301,14 @@ struct v4l2_ctrl {
+  *		the control has been applied. This prevents applying controls
+  *		from a cluster with multiple controls twice (when the first
+  *		control of a cluster is applied, they all are).
+- * @req:	If set, this refers to another request that sets this control.
++ * @valid_p_req: If set, then p_req contains the control value for the request.
+  * @p_req:	If the control handler containing this control reference
+  *		is bound to a media request, then this points to the
+- *		value of the control that should be applied when the request
++ *		value of the control that must be applied when the request
+  *		is executed, or to the value of the control at the time
+- *		that the request was completed.
++ *		that the request was completed. If @valid_p_req is false,
++ *		then this control was never set for this request and the
++ *		control will not be updated when this request is applied.
+  *
+  * Each control handler has a list of these refs. The list_head is used to
+  * keep a sorted-by-control-ID list of all controls, while the next pointer
+@@ -319,7 +321,7 @@ struct v4l2_ctrl_ref {
+ 	struct v4l2_ctrl_helper *helper;
+ 	bool from_other_dev;
+ 	bool req_done;
+-	struct v4l2_ctrl_ref *req;
++	bool valid_p_req;
+ 	union v4l2_ctrl_ptr p_req;
+ };
+ 
+@@ -346,7 +348,7 @@ struct v4l2_ctrl_ref {
+  * @error:	The error code of the first failed control addition.
+  * @request_is_queued: True if the request was queued.
+  * @requests:	List to keep track of open control handler request objects.
+- *		For the parent control handler (@req_obj.req == NULL) this
++ *		For the parent control handler (@req_obj.ops == NULL) this
+  *		is the list header. When the parent control handler is
+  *		removed, it has to unbind and put all these requests since
+  *		they refer to the parent.
+diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
+index 2568cb0627ec0..fac8e89aed81d 100644
+--- a/include/scsi/libfcoe.h
++++ b/include/scsi/libfcoe.h
+@@ -249,7 +249,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
+ 			 struct fc_frame *);
+ 
+ /* libfcoe funcs */
+-u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
++u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int);
+ int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
+ 		      const struct libfc_function_template *, int init_fcp);
+ u32 fcoe_fc_crc(struct fc_frame *fp);
+diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
+index d854cb19c42c3..bfdae12cdacf8 100644
+--- a/include/uapi/linux/usb/video.h
++++ b/include/uapi/linux/usb/video.h
+@@ -302,9 +302,10 @@ struct uvc_processing_unit_descriptor {
+ 	__u8   bControlSize;
+ 	__u8   bmControls[2];
+ 	__u8   iProcessing;
++	__u8   bmVideoStandards;
+ } __attribute__((__packed__));
+ 
+-#define UVC_DT_PROCESSING_UNIT_SIZE(n)			(9+(n))
++#define UVC_DT_PROCESSING_UNIT_SIZE(n)			(10+(n))
+ 
+ /* 3.7.2.6. Extension Unit Descriptor */
+ struct uvc_extension_unit_descriptor {
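The uvc_processing_unit_descriptor change is easy to sanity-check: adding bmVideoStandards makes the UVC 1.1 descriptor 10 + bControlSize bytes, which is exactly why the macro moves from (9+(n)) to (10+(n)). A standalone check mirroring the UAPI layout for the common bControlSize == 2 case:

#include <assert.h>
#include <stdio.h>

struct uvc_processing_unit_descriptor {
	unsigned char bLength;
	unsigned char bDescriptorType;
	unsigned char bDescriptorSubType;
	unsigned char bUnitID;
	unsigned char bSourceID;
	unsigned short wMaxMultiplier;
	unsigned char bControlSize;
	unsigned char bmControls[2];
	unsigned char iProcessing;
	unsigned char bmVideoStandards;
} __attribute__((__packed__));

#define UVC_DT_PROCESSING_UNIT_SIZE(n)	(10 + (n))

int main(void)
{
	/* The packed struct and the macro must agree for n == 2. */
	static_assert(sizeof(struct uvc_processing_unit_descriptor) ==
		      UVC_DT_PROCESSING_UNIT_SIZE(2), "descriptor size mismatch");
	printf("descriptor size: %zu\n",
	       sizeof(struct uvc_processing_unit_descriptor));
	return 0;
}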
+diff --git a/kernel/.gitignore b/kernel/.gitignore
+index 78701ea37c973..5518835ac35c7 100644
+--- a/kernel/.gitignore
++++ b/kernel/.gitignore
+@@ -1,4 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
++/config_data
+ kheaders.md5
+ timeconst.h
+ hz.bc
+diff --git a/kernel/Makefile b/kernel/Makefile
+index 320f1f3941b79..605ec3e70cb78 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -138,10 +138,15 @@ obj-$(CONFIG_SCF_TORTURE_TEST) += scftorture.o
+ 
+ $(obj)/configs.o: $(obj)/config_data.gz
+ 
+-targets += config_data.gz
+-$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
++targets += config_data config_data.gz
++$(obj)/config_data.gz: $(obj)/config_data FORCE
+ 	$(call if_changed,gzip)
+ 
++filechk_cat = cat $<
++
++$(obj)/config_data: $(KCONFIG_CONFIG) FORCE
++	$(call filechk,cat)
++
+ $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
+ 
+ quiet_cmd_genikh = CHK     $(obj)/kheaders_data.tar.xz
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 43ceb8dae2644..c24ea952e7ae4 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2204,6 +2204,26 @@ out:
+ 	perf_event__header_size(leader);
+ }
+ 
++static void sync_child_event(struct perf_event *child_event);
++
++static void perf_child_detach(struct perf_event *event)
++{
++	struct perf_event *parent_event = event->parent;
++
++	if (!(event->attach_state & PERF_ATTACH_CHILD))
++		return;
++
++	event->attach_state &= ~PERF_ATTACH_CHILD;
++
++	if (WARN_ON_ONCE(!parent_event))
++		return;
++
++	lockdep_assert_held(&parent_event->child_mutex);
++
++	sync_child_event(event);
++	list_del_init(&event->child_list);
++}
++
+ static bool is_orphaned_event(struct perf_event *event)
+ {
+ 	return event->state == PERF_EVENT_STATE_DEAD;
+@@ -2311,6 +2331,7 @@ group_sched_out(struct perf_event *group_event,
+ }
+ 
+ #define DETACH_GROUP	0x01UL
++#define DETACH_CHILD	0x02UL
+ 
+ /*
+  * Cross CPU call to remove a performance event
+@@ -2334,6 +2355,8 @@ __perf_remove_from_context(struct perf_event *event,
+ 	event_sched_out(event, cpuctx, ctx);
+ 	if (flags & DETACH_GROUP)
+ 		perf_group_detach(event);
++	if (flags & DETACH_CHILD)
++		perf_child_detach(event);
+ 	list_del_event(event, ctx);
+ 
+ 	if (!ctx->nr_events && ctx->is_active) {
+@@ -2362,25 +2385,21 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
+ 
+ 	lockdep_assert_held(&ctx->mutex);
+ 
+-	event_function_call(event, __perf_remove_from_context, (void *)flags);
+-
+ 	/*
+-	 * The above event_function_call() can NO-OP when it hits
+-	 * TASK_TOMBSTONE. In that case we must already have been detached
+-	 * from the context (by perf_event_exit_event()) but the grouping
+-	 * might still be in-tact.
++	 * Because of perf_event_exit_task(), perf_remove_from_context() ought
++	 * to work in the face of TASK_TOMBSTONE, unlike every other
++	 * event_function_call() user.
+ 	 */
+-	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+-	if ((flags & DETACH_GROUP) &&
+-	    (event->attach_state & PERF_ATTACH_GROUP)) {
+-		/*
+-		 * Since in that case we cannot possibly be scheduled, simply
+-		 * detach now.
+-		 */
+-		raw_spin_lock_irq(&ctx->lock);
+-		perf_group_detach(event);
++	raw_spin_lock_irq(&ctx->lock);
++	if (!ctx->is_active) {
++		__perf_remove_from_context(event, __get_cpu_context(ctx),
++					   ctx, (void *)flags);
+ 		raw_spin_unlock_irq(&ctx->lock);
++		return;
+ 	}
++	raw_spin_unlock_irq(&ctx->lock);
++
++	event_function_call(event, __perf_remove_from_context, (void *)flags);
+ }
+ 
+ /*
+@@ -12373,14 +12392,17 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+ }
+ EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
+ 
+-static void sync_child_event(struct perf_event *child_event,
+-			       struct task_struct *child)
++static void sync_child_event(struct perf_event *child_event)
+ {
+ 	struct perf_event *parent_event = child_event->parent;
+ 	u64 child_val;
+ 
+-	if (child_event->attr.inherit_stat)
+-		perf_event_read_event(child_event, child);
++	if (child_event->attr.inherit_stat) {
++		struct task_struct *task = child_event->ctx->task;
++
++		if (task && task != TASK_TOMBSTONE)
++			perf_event_read_event(child_event, task);
++	}
+ 
+ 	child_val = perf_event_count(child_event);
+ 
+@@ -12395,60 +12417,53 @@ static void sync_child_event(struct perf_event *child_event,
+ }
+ 
+ static void
+-perf_event_exit_event(struct perf_event *child_event,
+-		      struct perf_event_context *child_ctx,
+-		      struct task_struct *child)
++perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
+ {
+-	struct perf_event *parent_event = child_event->parent;
++	struct perf_event *parent_event = event->parent;
++	unsigned long detach_flags = 0;
+ 
+-	/*
+-	 * Do not destroy the 'original' grouping; because of the context
+-	 * switch optimization the original events could've ended up in a
+-	 * random child task.
+-	 *
+-	 * If we were to destroy the original group, all group related
+-	 * operations would cease to function properly after this random
+-	 * child dies.
+-	 *
+-	 * Do destroy all inherited groups, we don't care about those
+-	 * and being thorough is better.
+-	 */
+-	raw_spin_lock_irq(&child_ctx->lock);
+-	WARN_ON_ONCE(child_ctx->is_active);
++	if (parent_event) {
++		/*
++		 * Do not destroy the 'original' grouping; because of the
++		 * context switch optimization the original events could've
++		 * ended up in a random child task.
++		 *
++		 * If we were to destroy the original group, all group related
++		 * operations would cease to function properly after this
++		 * random child dies.
++		 *
++		 * Do destroy all inherited groups, we don't care about those
++		 * and being thorough is better.
++		 */
++		detach_flags = DETACH_GROUP | DETACH_CHILD;
++		mutex_lock(&parent_event->child_mutex);
++	}
+ 
+-	if (parent_event)
+-		perf_group_detach(child_event);
+-	list_del_event(child_event, child_ctx);
+-	perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
+-	raw_spin_unlock_irq(&child_ctx->lock);
++	perf_remove_from_context(event, detach_flags);
++
++	raw_spin_lock_irq(&ctx->lock);
++	if (event->state > PERF_EVENT_STATE_EXIT)
++		perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
++	raw_spin_unlock_irq(&ctx->lock);
+ 
+ 	/*
+-	 * Parent events are governed by their filedesc, retain them.
++	 * Child events can be freed.
+ 	 */
+-	if (!parent_event) {
+-		perf_event_wakeup(child_event);
++	if (parent_event) {
++		mutex_unlock(&parent_event->child_mutex);
++		/*
++		 * Kick perf_poll() for is_event_hup();
++		 */
++		perf_event_wakeup(parent_event);
++		free_event(event);
++		put_event(parent_event);
+ 		return;
+ 	}
+-	/*
+-	 * Child events can be cleaned up.
+-	 */
+-
+-	sync_child_event(child_event, child);
+ 
+ 	/*
+-	 * Remove this event from the parent's list
+-	 */
+-	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+-	mutex_lock(&parent_event->child_mutex);
+-	list_del_init(&child_event->child_list);
+-	mutex_unlock(&parent_event->child_mutex);
+-
+-	/*
+-	 * Kick perf_poll() for is_event_hup().
++	 * Parent events are governed by their filedesc, retain them.
+ 	 */
+-	perf_event_wakeup(parent_event);
+-	free_event(child_event);
+-	put_event(parent_event);
++	perf_event_wakeup(event);
+ }
+ 
+ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+@@ -12505,7 +12520,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+ 	perf_event_task(child, child_ctx, 0);
+ 
+ 	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
+-		perf_event_exit_event(child_event, child_ctx, child);
++		perf_event_exit_event(child_event, child_ctx);
+ 
+ 	mutex_unlock(&child_ctx->mutex);
+ 
+@@ -12765,6 +12780,7 @@ inherit_event(struct perf_event *parent_event,
+ 	 */
+ 	raw_spin_lock_irqsave(&child_ctx->lock, flags);
+ 	add_event_to_ctx(child_event, child_ctx);
++	child_event->attach_state |= PERF_ATTACH_CHILD;
+ 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+ 
+ 	/*
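The heart of the perf refactor above is making child detach idempotent: a PERF_ATTACH_CHILD bit decides which of the racing exit paths actually unlinks the event, and the loser becomes a no-op. A loose userspace model of that protocol, with plain flags and a pthread mutex standing in for attach_state and child_mutex (this is not the kernel's locking scheme verbatim):

#include <pthread.h>
#include <stdio.h>

#define ATTACH_CHILD	0x01

struct event {
	unsigned int attach_state;
	int on_list;
};

static pthread_mutex_t child_mutex = PTHREAD_MUTEX_INITIALIZER;

static void child_detach(struct event *ev)
{
	pthread_mutex_lock(&child_mutex);
	if (ev->attach_state & ATTACH_CHILD) {
		ev->attach_state &= ~ATTACH_CHILD;
		ev->on_list = 0;	/* list_del_init() stand-in */
		printf("detached\n");
	} else {
		printf("already detached, no-op\n");
	}
	pthread_mutex_unlock(&child_mutex);
}

int main(void)
{
	struct event ev = { .attach_state = ATTACH_CHILD, .on_list = 1 };

	child_detach(&ev);	/* first caller wins */
	child_detach(&ev);	/* second caller is harmless */
	return 0;
}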
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 00febd6dea9cc..a8629b695d38e 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3711,8 +3711,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ 
+ 	if (op & FUTEX_CLOCK_REALTIME) {
+ 		flags |= FLAGS_CLOCKRT;
+-		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
+-		    cmd != FUTEX_WAIT_REQUEUE_PI)
++		if (cmd != FUTEX_WAIT_BITSET &&	cmd != FUTEX_WAIT_REQUEUE_PI)
+ 			return -ENOSYS;
+ 	}
+ 
+@@ -3782,7 +3781,7 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+ 		t = timespec64_to_ktime(ts);
+ 		if (cmd == FUTEX_WAIT)
+ 			t = ktime_add_safe(ktime_get(), t);
+-		else if (!(op & FUTEX_CLOCK_REALTIME))
++		else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
+ 			t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
+ 		tp = &t;
+ 	}
+@@ -3976,7 +3975,7 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
+ 		t = timespec64_to_ktime(ts);
+ 		if (cmd == FUTEX_WAIT)
+ 			t = ktime_add_safe(ktime_get(), t);
+-		else if (!(op & FUTEX_CLOCK_REALTIME))
++		else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
+ 			t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
+ 		tp = &t;
+ 	}
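After the futex fix above, FUTEX_CLOCK_REALTIME is honored for FUTEX_WAIT_BITSET and FUTEX_WAIT_REQUEUE_PI only, and FUTEX_LOCK_PI timeouts are no longer mis-converted through the time namespace. A small userspace demonstration of the accepted combination, waiting with an absolute CLOCK_REALTIME deadline; with nothing waking the futex it is expected to end in ETIMEDOUT:

#include <errno.h>
#include <linux/futex.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static int futex_word = 0;

int main(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;	/* absolute deadline, 1s from now */

	long ret = syscall(SYS_futex, &futex_word,
			   FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
			   0, &deadline, NULL, FUTEX_BITSET_MATCH_ANY);
	if (ret == -1)
		printf("futex: %s\n", strerror(errno));
	return 0;
}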
+diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
+index 651a4ad6d711f..8e586858bcf41 100644
+--- a/kernel/irq/matrix.c
++++ b/kernel/irq/matrix.c
+@@ -423,7 +423,9 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
+ 	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
+ 		return;
+ 
+-	clear_bit(bit, cm->alloc_map);
++	if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
++		return;
++
+ 	cm->allocated--;
+ 	if(managed)
+ 		cm->managed_allocated--;
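The irq_matrix_free() change turns a silent accounting underflow into a caught error by atomically testing and clearing the allocation bit in one step. The same guard in portable C, with a __atomic builtin standing in for the kernel's test_and_clear_bit():

#include <stdio.h>

static unsigned long alloc_map;
static int allocated;

static int test_and_clear_bit(int bit, unsigned long *map)
{
	unsigned long mask = 1UL << bit;
	unsigned long old = __atomic_fetch_and(map, ~mask, __ATOMIC_SEQ_CST);

	return (old & mask) != 0;
}

static void matrix_free(int bit)
{
	if (!test_and_clear_bit(bit, &alloc_map)) {
		fprintf(stderr, "double free of bit %d ignored\n", bit);
		return;
	}
	allocated--;
}

int main(void)
{
	alloc_map = 1UL << 3;
	allocated = 1;

	matrix_free(3);	/* legitimate free */
	matrix_free(3);	/* caught: would have underflowed 'allocated' */
	printf("allocated = %d\n", allocated);
	return 0;
}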
+diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
+index 3bf98db9c702d..23e7acb5c6679 100644
+--- a/kernel/kcsan/core.c
++++ b/kernel/kcsan/core.c
+@@ -639,8 +639,6 @@ void __init kcsan_init(void)
+ 
+ 	BUG_ON(!in_task());
+ 
+-	kcsan_debugfs_init();
+-
+ 	for_each_possible_cpu(cpu)
+ 		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
+ 
+diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
+index 3c8093a371b1c..209ad8dcfcecf 100644
+--- a/kernel/kcsan/debugfs.c
++++ b/kernel/kcsan/debugfs.c
+@@ -261,7 +261,9 @@ static const struct file_operations debugfs_ops =
+ 	.release = single_release
+ };
+ 
+-void __init kcsan_debugfs_init(void)
++static void __init kcsan_debugfs_init(void)
+ {
+ 	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
+ }
++
++late_initcall(kcsan_debugfs_init);
+diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
+index 8d4bf3431b3cc..87ccdb3b051fd 100644
+--- a/kernel/kcsan/kcsan.h
++++ b/kernel/kcsan/kcsan.h
+@@ -30,11 +30,6 @@ extern bool kcsan_enabled;
+ void kcsan_save_irqtrace(struct task_struct *task);
+ void kcsan_restore_irqtrace(struct task_struct *task);
+ 
+-/*
+- * Initialize debugfs file.
+- */
+-void kcsan_debugfs_init(void);
+-
+ /*
+  * Statistics counters displayed via debugfs; should only be modified in
+  * slow-paths.
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index da6f5213fb74c..2a739c5fcca5a 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -3464,7 +3464,7 @@ static void fill_page_cache_func(struct work_struct *work)
+ 
+ 	for (i = 0; i < rcu_min_cached_objs; i++) {
+ 		bnode = (struct kvfree_rcu_bulk_data *)
+-			__get_free_page(GFP_KERNEL | __GFP_NOWARN);
++			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ 
+ 		if (bnode) {
+ 			raw_spin_lock_irqsave(&krcp->lock, flags);
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 2d603771c7dce..0796a75b6e0e8 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -1646,7 +1646,11 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
+ 		rcu_nocb_unlock_irqrestore(rdp, flags);
+ 		return false;
+ 	}
+-	del_timer(&rdp->nocb_timer);
++
++	if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
++		WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
++		del_timer(&rdp->nocb_timer);
++	}
+ 	rcu_nocb_unlock_irqrestore(rdp, flags);
+ 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+ 	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
+@@ -2265,7 +2269,6 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
+ 		return false;
+ 	}
+ 	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
+-	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+ 	ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+ 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 794c2cb945f8b..0eeeeeb66f33a 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -682,7 +682,13 @@ static u64 __sched_period(unsigned long nr_running)
+  */
+ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+-	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
++	unsigned int nr_running = cfs_rq->nr_running;
++	u64 slice;
++
++	if (sched_feat(ALT_PERIOD))
++		nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
++
++	slice = __sched_period(nr_running + !se->on_rq);
+ 
+ 	for_each_sched_entity(se) {
+ 		struct load_weight *load;
+@@ -699,6 +705,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ 		}
+ 		slice = __calc_delta(slice, se->load.weight, load);
+ 	}
++
++	if (sched_feat(BASE_SLICE))
++		slice = max(slice, (u64)sysctl_sched_min_granularity);
++
+ 	return slice;
+ }
+ 
+@@ -3941,6 +3951,8 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
+ 	trace_sched_util_est_cfs_tp(cfs_rq);
+ }
+ 
++#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
++
+ /*
+  * Check if a (signed) value is within a specified (unsigned) margin,
+  * based on the observation that:
+@@ -3958,7 +3970,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
+ 				   struct task_struct *p,
+ 				   bool task_sleep)
+ {
+-	long last_ewma_diff;
++	long last_ewma_diff, last_enqueued_diff;
+ 	struct util_est ue;
+ 
+ 	if (!sched_feat(UTIL_EST))
+@@ -3979,6 +3991,8 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
+ 	if (ue.enqueued & UTIL_AVG_UNCHANGED)
+ 		return;
+ 
++	last_enqueued_diff = ue.enqueued;
++
+ 	/*
+ 	 * Reset EWMA on utilization increases, the moving average is used only
+ 	 * to smooth utilization decreases.
+@@ -3992,12 +4006,17 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
+ 	}
+ 
+ 	/*
+-	 * Skip update of task's estimated utilization when its EWMA is
++	 * Skip update of task's estimated utilization when its members are
+ 	 * already ~1% close to its last activation value.
+ 	 */
+ 	last_ewma_diff = ue.enqueued - ue.ewma;
+-	if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
++	last_enqueued_diff -= ue.enqueued;
++	if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
++		if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
++			goto done;
++
+ 		return;
++	}
+ 
+ 	/*
+ 	 * To avoid overestimation of actual task utilization, skip updates if
+@@ -6098,6 +6117,24 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
+ 	return -1;
+ }
+ 
++/*
++ * Scan the local SMT mask for idle CPUs.
++ */
++static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
++{
++	int cpu;
++
++	for_each_cpu(cpu, cpu_smt_mask(target)) {
++		if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
++		    !cpumask_test_cpu(cpu, sched_domain_span(sd)))
++			continue;
++		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
++			return cpu;
++	}
++
++	return -1;
++}
++
+ #else /* CONFIG_SCHED_SMT */
+ 
+ static inline void set_idle_cores(int cpu, int val)
+@@ -6114,6 +6151,11 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
+ 	return __select_idle_cpu(core);
+ }
+ 
++static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
++{
++	return -1;
++}
++
+ #endif /* CONFIG_SCHED_SMT */
+ 
+ /*
+@@ -6121,11 +6163,10 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
+  * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
+  * average idle time for this rq (as found in rq->avg_idle).
+  */
+-static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
++static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
+ {
+ 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+ 	int i, cpu, idle_cpu = -1, nr = INT_MAX;
+-	bool smt = test_idle_cores(target, false);
+ 	int this = smp_processor_id();
+ 	struct sched_domain *this_sd;
+ 	u64 time;
+@@ -6136,7 +6177,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
+ 
+ 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
+ 
+-	if (sched_feat(SIS_PROP) && !smt) {
++	if (sched_feat(SIS_PROP) && !has_idle_core) {
+ 		u64 avg_cost, avg_idle, span_avg;
+ 
+ 		/*
+@@ -6156,7 +6197,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
+ 	}
+ 
+ 	for_each_cpu_wrap(cpu, cpus, target) {
+-		if (smt) {
++		if (has_idle_core) {
+ 			i = select_idle_core(p, cpu, cpus, &idle_cpu);
+ 			if ((unsigned int)i < nr_cpumask_bits)
+ 				return i;
+@@ -6170,10 +6211,10 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
+ 		}
+ 	}
+ 
+-	if (smt)
++	if (has_idle_core)
+ 		set_idle_cores(this, false);
+ 
+-	if (sched_feat(SIS_PROP) && !smt) {
++	if (sched_feat(SIS_PROP) && !has_idle_core) {
+ 		time = cpu_clock(this) - time;
+ 		update_avg(&this_sd->avg_scan_cost, time);
+ 	}
+@@ -6228,6 +6269,7 @@ static inline bool asym_fits_capacity(int task_util, int cpu)
+  */
+ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ {
++	bool has_idle_core = false;
+ 	struct sched_domain *sd;
+ 	unsigned long task_util;
+ 	int i, recent_used_cpu;
+@@ -6307,7 +6349,17 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ 	if (!sd)
+ 		return target;
+ 
+-	i = select_idle_cpu(p, sd, target);
++	if (sched_smt_active()) {
++		has_idle_core = test_idle_cores(target, false);
++
++		if (!has_idle_core && cpus_share_cache(prev, target)) {
++			i = select_idle_smt(p, sd, prev);
++			if ((unsigned int)i < nr_cpumask_bits)
++				return i;
++		}
++	}
++
++	i = select_idle_cpu(p, sd, has_idle_core, target);
+ 	if ((unsigned)i < nr_cpumask_bits)
+ 		return i;
+ 
+@@ -6518,8 +6570,24 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
+ 	 * its pd list and will not be accounted by compute_energy().
+ 	 */
+ 	for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
+-		unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
+-		struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
++		unsigned long util_freq = cpu_util_next(cpu, p, dst_cpu);
++		unsigned long cpu_util, util_running = util_freq;
++		struct task_struct *tsk = NULL;
++
++		/*
++		 * When @p is placed on @cpu:
++		 *
++		 * util_running = max(cpu_util, cpu_util_est) +
++		 *		  max(task_util, _task_util_est)
++		 *
++		 * while cpu_util_next is: max(cpu_util + task_util,
++		 *			       cpu_util_est + _task_util_est)
++		 */
++		if (cpu == dst_cpu) {
++			tsk = p;
++			util_running =
++				cpu_util_next(cpu, p, -1) + task_util_est(p);
++		}
+ 
+ 		/*
+ 		 * Busy time computation: utilization clamping is not
+@@ -6527,7 +6595,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
+ 		 * is already enough to scale the EM reported power
+ 		 * consumption at the (eventually clamped) cpu_capacity.
+ 		 */
+-		sum_util += effective_cpu_util(cpu, util_cfs, cpu_cap,
++		sum_util += effective_cpu_util(cpu, util_running, cpu_cap,
+ 					       ENERGY_UTIL, NULL);
+ 
+ 		/*
+@@ -6537,7 +6605,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
+ 		 * NOTE: in case RT tasks are running, by default the
+ 		 * FREQUENCY_UTIL's utilization can be max OPP.
+ 		 */
+-		cpu_util = effective_cpu_util(cpu, util_cfs, cpu_cap,
++		cpu_util = effective_cpu_util(cpu, util_freq, cpu_cap,
+ 					      FREQUENCY_UTIL, tsk);
+ 		max_util = max(max_util, cpu_util);
+ 	}
+@@ -7539,6 +7607,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+ 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
+ 		return 0;
+ 
++	/* Disregard pcpu kthreads; they are where they need to be. */
++	if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
++		return 0;
++
+ 	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
+ 		int cpu;
+ 
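Several of the fair.c hunks above lean on the util_est margin trick: within_margin() checks abs(x) < y with the single unsigned comparison (unsigned)(x + y - 1) < (2*y - 1), and the fix applies it to the enqueued delta as well as the EWMA delta before skipping an update. A self-contained rendering with illustrative values; UTIL_EST_MARGIN works out to 1024/100 = 10:

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define UTIL_EST_MARGIN		(SCHED_CAPACITY_SCALE / 100)

/* abs(value) < margin, done as one unsigned comparison. */
static bool within_margin(long value, unsigned long margin)
{
	return (unsigned long)(value + margin - 1) < (2 * margin - 1);
}

int main(void)
{
	long last_ewma_diff = 5;	/* |5| < 10: inside the margin */
	long last_enqueued_diff = -37;	/* |-37| >= 10: outside */

	if (within_margin(last_ewma_diff, UTIL_EST_MARGIN) &&
	    within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
		printf("skip util_est update\n");
	else
		printf("update util_est\n");
	return 0;
}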
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index 1bc2b158fc515..e911111df83a7 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -90,3 +90,6 @@ SCHED_FEAT(WA_BIAS, true)
+  */
+ SCHED_FEAT(UTIL_EST, true)
+ SCHED_FEAT(UTIL_EST_FASTUP, true)
++
++SCHED_FEAT(ALT_PERIOD, true)
++SCHED_FEAT(BASE_SLICE, true)
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 967732c0766c5..651218ded9817 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -711,14 +711,15 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ 	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
+ 		if (!(m & (1 << t)))
+ 			continue;
+-		if (groupc->tasks[t] == 0 && !psi_bug) {
++		if (groupc->tasks[t]) {
++			groupc->tasks[t]--;
++		} else if (!psi_bug) {
+ 			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
+ 					cpu, t, groupc->tasks[0],
+ 					groupc->tasks[1], groupc->tasks[2],
+ 					groupc->tasks[3], clear, set);
+ 			psi_bug = 1;
+ 		}
+-		groupc->tasks[t]--;
+ 	}
+ 
+ 	for (t = 0; set; set &= ~(1 << t), t++)
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 09d35044bd889..12f80587e127c 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -723,35 +723,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
+ 	for (tmp = sd; tmp; tmp = tmp->parent)
+ 		numa_distance += !!(tmp->flags & SD_NUMA);
+ 
+-	/*
+-	 * FIXME: Diameter >=3 is misrepresented.
+-	 *
+-	 * Smallest diameter=3 topology is:
+-	 *
+-	 *   node   0   1   2   3
+-	 *     0:  10  20  30  40
+-	 *     1:  20  10  20  30
+-	 *     2:  30  20  10  20
+-	 *     3:  40  30  20  10
+-	 *
+-	 *   0 --- 1 --- 2 --- 3
+-	 *
+-	 * NUMA-3	0-3		N/A		N/A		0-3
+-	 *  groups:	{0-2},{1-3}					{1-3},{0-2}
+-	 *
+-	 * NUMA-2	0-2		0-3		0-3		1-3
+-	 *  groups:	{0-1},{1-3}	{0-2},{2-3}	{1-3},{0-1}	{2-3},{0-2}
+-	 *
+-	 * NUMA-1	0-1		0-2		1-3		2-3
+-	 *  groups:	{0},{1}		{1},{2},{0}	{2},{3},{1}	{3},{2}
+-	 *
+-	 * NUMA-0	0		1		2		3
+-	 *
+-	 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
+-	 * group span isn't a subset of the domain span.
+-	 */
+-	WARN_ONCE(numa_distance > 2, "Shortest NUMA path spans too many nodes\n");
+-
+ 	sched_domain_debug(sd, cpu);
+ 
+ 	rq_attach_root(rq, rd);
+@@ -982,6 +953,31 @@ static void init_overlap_sched_group(struct sched_domain *sd,
+ 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
+ }
+ 
++static struct sched_domain *
++find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
++{
++	/*
++	 * The proper descendant would be the one whose child won't span out
++	 * of sd
++	 */
++	while (sibling->child &&
++	       !cpumask_subset(sched_domain_span(sibling->child),
++			       sched_domain_span(sd)))
++		sibling = sibling->child;
++
++	/*
++	 * As we are referencing sgc across different topology level, we need
++	 * to go down to skip those sched_domains which don't contribute to
++	 * scheduling because they will be degenerated in cpu_attach_domain
++	 */
++	while (sibling->child &&
++	       cpumask_equal(sched_domain_span(sibling->child),
++			     sched_domain_span(sibling)))
++		sibling = sibling->child;
++
++	return sibling;
++}
++
+ static int
+ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+ {
+@@ -1015,6 +1011,41 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+ 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+ 			continue;
+ 
++		/*
++		 * Usually we build sched_group by sibling's child sched_domain
++		 * But for machines whose NUMA diameter are 3 or above, we move
++		 * to build sched_group by sibling's proper descendant's child
++		 * domain because sibling's child sched_domain will span out of
++		 * the sched_domain being built as below.
++		 *
++		 * Smallest diameter=3 topology is:
++		 *
++		 *   node   0   1   2   3
++		 *     0:  10  20  30  40
++		 *     1:  20  10  20  30
++		 *     2:  30  20  10  20
++		 *     3:  40  30  20  10
++		 *
++		 *   0 --- 1 --- 2 --- 3
++		 *
++		 * NUMA-3       0-3             N/A             N/A             0-3
++		 *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
++		 *
++		 * NUMA-2       0-2             0-3             0-3             1-3
++		 *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
++		 *
++		 * NUMA-1       0-1             0-2             1-3             2-3
++		 *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
++		 *
++		 * NUMA-0       0               1               2               3
++		 *
++		 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
++		 * group span isn't a subset of the domain span.
++		 */
++		if (sibling->child &&
++		    !cpumask_subset(sched_domain_span(sibling->child), span))
++			sibling = find_descended_sibling(sd, sibling);
++
+ 		sg = build_group_from_child_sched_domain(sibling, cpu);
+ 		if (!sg)
+ 			goto fail;
+@@ -1022,7 +1053,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+ 		sg_span = sched_group_span(sg);
+ 		cpumask_or(covered, covered, sg_span);
+ 
+-		init_overlap_sched_group(sd, sg);
++		init_overlap_sched_group(sibling, sg);
+ 
+ 		if (!first)
+ 			first = sg;
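The new find_descended_sibling() walk is driven by cpumask_subset(), which on a small bitmap is just (child & ~span) == 0. A toy model with eight CPUs in one machine word, purely to show the descent stopping condition (the domain contents are invented):

#include <stdbool.h>
#include <stdio.h>

struct domain {
	unsigned int span;	/* bit i set => CPU i in this domain */
	struct domain *child;
};

static bool mask_subset(unsigned int sub, unsigned int super)
{
	return (sub & ~super) == 0;
}

static struct domain *find_descended_sibling(struct domain *sd,
					     struct domain *sibling)
{
	/* Step down until the child no longer spans out of sd. */
	while (sibling->child &&
	       !mask_subset(sibling->child->span, sd->span))
		sibling = sibling->child;
	return sibling;
}

int main(void)
{
	struct domain numa1 = { .span = 0x07, .child = NULL };   /* CPUs 0-2 */
	struct domain numa2 = { .span = 0x0f, .child = &numa1 }; /* CPUs 0-3 */
	struct domain sd    = { .span = 0x0e, .child = NULL };   /* CPUs 1-3 */

	struct domain *s = find_descended_sibling(&sd, &numa2);

	printf("descended to span 0x%x\n", s->span);
	return 0;
}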
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index bf540f5a4115a..dd5697d7347b1 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -1191,8 +1191,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
+ 
+ 	err = do_clock_adjtime(which_clock, &ktx);
+ 
+-	if (err >= 0)
+-		err = put_old_timex32(utp, &ktx);
++	if (err >= 0 && put_old_timex32(utp, &ktx))
++		return -EFAULT;
+ 
+ 	return err;
+ }
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 3ba52d4e13142..826b88b727a62 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5631,7 +5631,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
+ 
+ 	parser = &iter->parser;
+ 	if (trace_parser_loaded(parser)) {
+-		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
++		int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
++
++		ftrace_process_regex(iter, parser->buffer,
++				     parser->idx, enable);
+ 	}
+ 
+ 	trace_parser_put(parser);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index c0c9aa5cd8e2c..67c01dc5cdeb7 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2390,14 +2390,13 @@ static void tracing_stop_tr(struct trace_array *tr)
+ 
+ static int trace_save_cmdline(struct task_struct *tsk)
+ {
+-	unsigned pid, idx;
++	unsigned tpid, idx;
+ 
+ 	/* treat recording of idle task as a success */
+ 	if (!tsk->pid)
+ 		return 1;
+ 
+-	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
+-		return 0;
++	tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
+ 
+ 	/*
+ 	 * It's not the end of the world if we don't get
+@@ -2408,26 +2407,15 @@ static int trace_save_cmdline(struct task_struct *tsk)
+ 	if (!arch_spin_trylock(&trace_cmdline_lock))
+ 		return 0;
+ 
+-	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
++	idx = savedcmd->map_pid_to_cmdline[tpid];
+ 	if (idx == NO_CMDLINE_MAP) {
+ 		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
+ 
+-		/*
+-		 * Check whether the cmdline buffer at idx has a pid
+-		 * mapped. We are going to overwrite that entry so we
+-		 * need to clear the map_pid_to_cmdline. Otherwise we
+-		 * would read the new comm for the old pid.
+-		 */
+-		pid = savedcmd->map_cmdline_to_pid[idx];
+-		if (pid != NO_CMDLINE_MAP)
+-			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
+-
+-		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
+-		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
+-
++		savedcmd->map_pid_to_cmdline[tpid] = idx;
+ 		savedcmd->cmdline_idx = idx;
+ 	}
+ 
++	savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
+ 	set_cmdline(idx, tsk->comm);
+ 
+ 	arch_spin_unlock(&trace_cmdline_lock);
+@@ -2438,6 +2426,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
+ static void __trace_find_cmdline(int pid, char comm[])
+ {
+ 	unsigned map;
++	int tpid;
+ 
+ 	if (!pid) {
+ 		strcpy(comm, "<idle>");
+@@ -2449,16 +2438,16 @@ static void __trace_find_cmdline(int pid, char comm[])
+ 		return;
+ 	}
+ 
+-	if (pid > PID_MAX_DEFAULT) {
+-		strcpy(comm, "<...>");
+-		return;
++	tpid = pid & (PID_MAX_DEFAULT - 1);
++	map = savedcmd->map_pid_to_cmdline[tpid];
++	if (map != NO_CMDLINE_MAP) {
++		tpid = savedcmd->map_cmdline_to_pid[map];
++		if (tpid == pid) {
++			strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
++			return;
++		}
+ 	}
+-
+-	map = savedcmd->map_pid_to_cmdline[pid];
+-	if (map != NO_CMDLINE_MAP)
+-		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+-	else
+-		strcpy(comm, "<...>");
++	strcpy(comm, "<...>");
+ }
+ 
+ void trace_find_cmdline(int pid, char comm[])
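The saved-cmdline rework above replaces the PID cutoff with a power-of-two hash, pid & (PID_MAX_DEFAULT - 1), and validates lookups through the reverse map so a collision reads back as "<...>" instead of a stale comm. A compact userspace model of the two maps; the tiny table size and the use of 0 for NO_CMDLINE_MAP are demo simplifications:

#include <stdio.h>
#include <string.h>

#define PID_MAX_DEFAULT	32768	/* power of two, as in the kernel */
#define SAVED		8	/* tiny cmdline cache for the demo */

static int map_pid_to_cmdline[PID_MAX_DEFAULT];
static int map_cmdline_to_pid[SAVED];
static char saved_cmdlines[SAVED][16];
static int cmdline_idx;

static void save_cmdline(int pid, const char *comm)
{
	int tpid = pid & (PID_MAX_DEFAULT - 1);
	int idx = map_pid_to_cmdline[tpid];

	if (idx == 0) {	/* 0 stands in for NO_CMDLINE_MAP here */
		idx = (cmdline_idx++ % (SAVED - 1)) + 1;
		map_pid_to_cmdline[tpid] = idx;
	}
	map_cmdline_to_pid[idx] = pid;	/* remember the real pid */
	strncpy(saved_cmdlines[idx], comm, sizeof(saved_cmdlines[idx]) - 1);
}

static const char *find_cmdline(int pid)
{
	int tpid = pid & (PID_MAX_DEFAULT - 1);
	int idx = map_pid_to_cmdline[tpid];

	/* Only trust the slot if the reverse map confirms the pid. */
	if (idx && map_cmdline_to_pid[idx] == pid)
		return saved_cmdlines[idx];
	return "<...>";
}

int main(void)
{
	save_cmdline(1234, "bash");
	save_cmdline(1234 + PID_MAX_DEFAULT, "sshd");	/* hash collision */

	printf("%d -> %s\n", 1234, find_cmdline(1234));
	printf("%d -> %s\n", 1234 + PID_MAX_DEFAULT,
	       find_cmdline(1234 + PID_MAX_DEFAULT));
	return 0;
}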
+diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
+index aaf6793ededaa..c1637f90c8a38 100644
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -95,33 +95,49 @@ u64 notrace trace_clock_global(void)
+ {
+ 	unsigned long flags;
+ 	int this_cpu;
+-	u64 now;
++	u64 now, prev_time;
+ 
+ 	raw_local_irq_save(flags);
+ 
+ 	this_cpu = raw_smp_processor_id();
+-	now = sched_clock_cpu(this_cpu);
++
+ 	/*
+-	 * If in an NMI context then dont risk lockups and return the
+-	 * cpu_clock() time:
++	 * The global clock "guarantees" that the events are ordered
++	 * between CPUs. But if two events on two different CPUS call
++	 * trace_clock_global at roughly the same time, it really does
++	 * not matter which one gets the earlier time. Just make sure
++	 * that the same CPU will always show a monotonic clock.
++	 *
++	 * Use a read memory barrier to get the latest written
++	 * time that was recorded.
+ 	 */
+-	if (unlikely(in_nmi()))
+-		goto out;
++	smp_rmb();
++	prev_time = READ_ONCE(trace_clock_struct.prev_time);
++	now = sched_clock_cpu(this_cpu);
+ 
+-	arch_spin_lock(&trace_clock_struct.lock);
++	/* Make sure that now is always greater than prev_time */
++	if ((s64)(now - prev_time) < 0)
++		now = prev_time + 1;
+ 
+ 	/*
+-	 * TODO: if this happens often then maybe we should reset
+-	 * my_scd->clock to prev_time+1, to make sure
+-	 * we start ticking with the local clock from now on?
++	 * If in an NMI context then dont risk lockups and simply return
++	 * the current time.
+ 	 */
+-	if ((s64)(now - trace_clock_struct.prev_time) < 0)
+-		now = trace_clock_struct.prev_time + 1;
++	if (unlikely(in_nmi()))
++		goto out;
+ 
+-	trace_clock_struct.prev_time = now;
++	/* Tracing can cause strange recursion, always use a try lock */
++	if (arch_spin_trylock(&trace_clock_struct.lock)) {
++		/* Reread prev_time in case it was already updated */
++		prev_time = READ_ONCE(trace_clock_struct.prev_time);
++		if ((s64)(now - prev_time) < 0)
++			now = prev_time + 1;
+ 
+-	arch_spin_unlock(&trace_clock_struct.lock);
++		trace_clock_struct.prev_time = now;
+ 
++		/* The unlock acts as the wmb for the above rmb */
++		arch_spin_unlock(&trace_clock_struct.lock);
++	}
+  out:
+ 	raw_local_irq_restore(flags);
+ 
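The trace_clock_global() rework boils down to: read prev_time first, clamp the local clock so time never runs backwards, and publish a new prev_time only under a try-lock so recursive or NMI-level callers cannot deadlock. A userspace sketch of that shape, built with -pthread; __atomic loads and a pthread spinlock approximate READ_ONCE() and arch_spin_trylock():

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static uint64_t prev_time;

/* A deliberately coarse per-call clock for the demo. */
static uint64_t local_clock(void) { static uint64_t t; return t += 3; }

static uint64_t clock_global(void)
{
	uint64_t prev = __atomic_load_n(&prev_time, __ATOMIC_ACQUIRE);
	uint64_t now = local_clock();

	if ((int64_t)(now - prev) < 0)	/* never go backwards */
		now = prev + 1;

	if (pthread_spin_trylock(&lock) == 0) {
		/* Reread prev_time in case it was already updated. */
		prev = __atomic_load_n(&prev_time, __ATOMIC_RELAXED);
		if ((int64_t)(now - prev) < 0)
			now = prev + 1;
		__atomic_store_n(&prev_time, now, __ATOMIC_RELEASE);
		pthread_spin_unlock(&lock);
	}
	return now;
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < 4; i++)
		printf("t = %llu\n", (unsigned long long)clock_global());
	pthread_spin_destroy(&lock);
	return 0;
}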
+diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
+index c70d6347afa2b..921d0a654243c 100644
+--- a/lib/dynamic_debug.c
++++ b/lib/dynamic_debug.c
+@@ -396,7 +396,7 @@ static int ddebug_parse_query(char *words[], int nwords,
+ 			/* tail :$info is function or line-range */
+ 			fline = strchr(query->filename, ':');
+ 			if (!fline)
+-				break;
++				continue;
+ 			*fline++ = '\0';
+ 			if (isalpha(*fline) || *fline == '*' || *fline == '?') {
+ 				/* take as function name */
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 41ddc353ebb82..39ef2e314da5e 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -3135,8 +3135,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+ 			switch (*fmt) {
+ 			case 'S':
+ 			case 's':
+-			case 'F':
+-			case 'f':
+ 			case 'x':
+ 			case 'K':
+ 			case 'e':
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index cfc72873961d9..4bb3cdfc47f87 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -764,32 +764,36 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
+  */
+ void init_mem_debugging_and_hardening(void)
+ {
++	bool page_poisoning_requested = false;
++
++#ifdef CONFIG_PAGE_POISONING
++	/*
++	 * Page poisoning is debug page alloc for some arches. If
++	 * either of those options are enabled, enable poisoning.
++	 */
++	if (page_poisoning_enabled() ||
++	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
++	      debug_pagealloc_enabled())) {
++		static_branch_enable(&_page_poisoning_enabled);
++		page_poisoning_requested = true;
++	}
++#endif
++
+ 	if (_init_on_alloc_enabled_early) {
+-		if (page_poisoning_enabled())
++		if (page_poisoning_requested)
+ 			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+ 				"will take precedence over init_on_alloc\n");
+ 		else
+ 			static_branch_enable(&init_on_alloc);
+ 	}
+ 	if (_init_on_free_enabled_early) {
+-		if (page_poisoning_enabled())
++		if (page_poisoning_requested)
+ 			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+ 				"will take precedence over init_on_free\n");
+ 		else
+ 			static_branch_enable(&init_on_free);
+ 	}
+ 
+-#ifdef CONFIG_PAGE_POISONING
+-	/*
+-	 * Page poisoning is debug page alloc for some arches. If
+-	 * either of those options are enabled, enable poisoning.
+-	 */
+-	if (page_poisoning_enabled() ||
+-	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+-	      debug_pagealloc_enabled()))
+-		static_branch_enable(&_page_poisoning_enabled);
+-#endif
+-
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+ 	if (!debug_pagealloc_enabled())
+ 		return;
+diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h
+index a6f8d03d4aaf6..830723971cf83 100644
+--- a/net/bluetooth/ecdh_helper.h
++++ b/net/bluetooth/ecdh_helper.h
+@@ -25,6 +25,6 @@
+ 
+ int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 pair_public_key[64],
+ 			u8 secret[32]);
+-int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 *private_key);
++int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]);
+ int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]);
+ int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]);
+diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
+index ca44c327baced..79641c4afee93 100644
+--- a/net/ceph/auth_x.c
++++ b/net/ceph/auth_x.c
+@@ -526,7 +526,7 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		auth->struct_v = 2;  /* nautilus+ */
++		auth->struct_v = 3;  /* nautilus+ */
+ 		auth->key = 0;
+ 		for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
+ 			auth->key ^= *(__le64 *)u;
+diff --git a/net/ceph/decode.c b/net/ceph/decode.c
+index b44f7651be04b..bc109a1a4616f 100644
+--- a/net/ceph/decode.c
++++ b/net/ceph/decode.c
+@@ -4,6 +4,7 @@
+ #include <linux/inet.h>
+ 
+ #include <linux/ceph/decode.h>
++#include <linux/ceph/messenger.h>  /* for ceph_pr_addr() */
+ 
+ static int
+ ceph_decode_entity_addr_versioned(void **p, void *end,
+@@ -110,6 +111,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
+ 	}
+ 
+ 	ceph_decode_32_safe(p, end, addr_cnt, e_inval);
++	dout("%s addr_cnt %d\n", __func__, addr_cnt);
+ 
+ 	found = false;
+ 	for (i = 0; i < addr_cnt; i++) {
+@@ -117,6 +119,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
+ 		if (ret)
+ 			return ret;
+ 
++		dout("%s i %d addr %s\n", __func__, i, ceph_pr_addr(&tmp_addr));
+ 		if (tmp_addr.type == my_type) {
+ 			if (found) {
+ 				pr_err("another match of type %d in addrvec\n",
+@@ -128,13 +131,18 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
+ 			found = true;
+ 		}
+ 	}
+-	if (!found && addr_cnt != 0) {
+-		pr_err("no match of type %d in addrvec\n",
+-		       le32_to_cpu(my_type));
+-		return -ENOENT;
+-	}
+ 
+-	return 0;
++	if (found)
++		return 0;
++
++	if (!addr_cnt)
++		return 0;  /* normal -- e.g. unused OSD id/slot */
++
++	if (addr_cnt == 1 && !memchr_inv(&tmp_addr, 0, sizeof(tmp_addr)))
++		return 0;  /* weird but effectively the same as !addr_cnt */
++
++	pr_err("no match of type %d in addrvec\n", le32_to_cpu(my_type));
++	return -ENOENT;
+ 
+ e_inval:
+ 	return -EINVAL;
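The new addrvec case accepts a single all-zero entry as equivalent to an empty vector, using memchr_inv(&tmp_addr, 0, sizeof(tmp_addr)) == NULL as the all-zero test. A portable stand-in for that predicate; the struct layout is invented for the demo:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Returns true when all 'len' bytes of 'buf' are zero. */
static bool all_zero(const void *buf, size_t len)
{
	const unsigned char *p = buf;

	for (size_t i = 0; i < len; i++)
		if (p[i])
			return false;
	return true;
}

struct entity_addr { int type; unsigned char addr[16]; };

int main(void)
{
	struct entity_addr blank = { 0 };

	/* One blank entry is treated like an empty addrvec. */
	if (all_zero(&blank, sizeof(blank)))
		printf("blank addrvec entry: treat as no match, return 0\n");
	return 0;
}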
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 92a0b67b27282..77d924ab8cdb9 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -827,17 +827,17 @@ static void ovs_fragment(struct net *net, struct vport *vport,
+ 	}
+ 
+ 	if (key->eth.type == htons(ETH_P_IP)) {
+-		struct dst_entry ovs_dst;
++		struct rtable ovs_rt = { 0 };
+ 		unsigned long orig_dst;
+ 
+ 		prepare_frag(vport, skb, orig_network_offset,
+ 			     ovs_key_mac_proto(key));
+-		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
++		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
+ 			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+-		ovs_dst.dev = vport->dev;
++		ovs_rt.dst.dev = vport->dev;
+ 
+ 		orig_dst = skb->_skb_refdst;
+-		skb_dst_set_noref(skb, &ovs_dst);
++		skb_dst_set_noref(skb, &ovs_rt.dst);
+ 		IPCB(skb)->frag_max_size = mru;
+ 
+ 		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
+diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
+index e1e77d3fb6c02..8c06381391d6f 100644
+--- a/net/sched/sch_frag.c
++++ b/net/sched/sch_frag.c
+@@ -90,16 +90,16 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
+ 	}
+ 
+ 	if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
+-		struct dst_entry sch_frag_dst;
++		struct rtable sch_frag_rt = { 0 };
+ 		unsigned long orig_dst;
+ 
+ 		sch_frag_prepare_frag(skb, xmit);
+-		dst_init(&sch_frag_dst, &sch_frag_dst_ops, NULL, 1,
++		dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
+ 			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+-		sch_frag_dst.dev = skb->dev;
++		sch_frag_rt.dst.dev = skb->dev;
+ 
+ 		orig_dst = skb->_skb_refdst;
+-		skb_dst_set_noref(skb, &sch_frag_dst);
++		skb_dst_set_noref(skb, &sch_frag_rt.dst);
+ 		IPCB(skb)->frag_max_size = mru;
+ 
+ 		ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
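Both fragmentation fixes above embed the dst_entry inside a struct rtable for the same reason: the IPv4 fragmentation path down-casts the skb's dst to its containing rtable via container_of(), so a bare on-stack dst_entry makes that cast read out of bounds. The mechanics in isolation, with struct fields simplified rather than taken from the real net headers:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dst_entry { int dev; };

struct rtable {
	struct dst_entry dst;	/* the callee expects this embedding */
	int rt_mtu;
};

static int fragment_path(struct dst_entry *dst)
{
	/* Down-cast assuming every dst lives inside an rtable. */
	struct rtable *rt = container_of(dst, struct rtable, dst);

	return rt->rt_mtu;
}

int main(void)
{
	struct rtable rt = { .dst = { .dev = 1 }, .rt_mtu = 1500 };

	/* Safe: the dst really is embedded in an rtable. */
	printf("mtu = %d\n", fragment_path(&rt.dst));
	return 0;
}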
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 1c519c8752176..5cdeb73ca8fa9 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -400,7 +400,7 @@ int cap_inode_getsecurity(struct user_namespace *mnt_userns,
+ 				      &tmpbuf, size, GFP_NOFS);
+ 	dput(dentry);
+ 
+-	if (ret < 0)
++	if (ret < 0 || !tmpbuf)
+ 		return ret;
+ 
+ 	fs_ns = inode->i_sb->s_user_ns;
+diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
+index 0aa545ac6e60c..1c90421a88dcd 100644
+--- a/sound/isa/sb/emu8000.c
++++ b/sound/isa/sb/emu8000.c
+@@ -1029,8 +1029,10 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
+ 
+ 	memset(emu->controls, 0, sizeof(emu->controls));
+ 	for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
+-		if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0)
++		if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) {
++			emu->controls[i] = NULL;
+ 			goto __error;
++		}
+ 	}
+ 	return 0;
+ 
+diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
+index 8635a2b6b36b2..4789345a8fdde 100644
+--- a/sound/isa/sb/sb16_csp.c
++++ b/sound/isa/sb/sb16_csp.c
+@@ -1045,10 +1045,14 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p)
+ 
+ 	spin_lock_init(&p->q_lock);
+ 
+-	if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0)
++	if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) {
++		p->qsound_switch = NULL;
+ 		goto __error;
+-	if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0)
++	}
++	if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) {
++		p->qsound_space = NULL;
+ 		goto __error;
++	}
+ 
+ 	return 0;
+ 
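The two ISA sound fixes above enforce one rule: snd_ctl_add() frees the kcontrol on failure, so a cached pointer must be cleared or the driver's error path frees it a second time. The ownership pattern in miniature, with ctl_add() as a stand-in that consumes its argument on error:

#include <stdio.h>
#include <stdlib.h>

struct ctl { int id; };

/* Consumes 'c' on failure, mirroring snd_ctl_add() semantics. */
static int ctl_add(struct ctl *c, int fail)
{
	if (fail) {
		free(c);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ctl *cached = malloc(sizeof(*cached));
	int err = ctl_add(cached, 1);

	if (err < 0)
		cached = NULL;	/* the fix: drop the dangling reference */

	free(cached);	/* free(NULL) is a no-op; no double free */
	printf("err = %d\n", err);
	return 0;
}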
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index dfef9c17e1404..d111258c6f458 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -930,18 +930,18 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+-	SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+-	SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
+-	SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
+-	SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
+-	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ 	SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ 	SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+-	SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
+-	SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
++	SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
++	SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a7544b77d3f7c..d05d16ddbdf2c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2552,8 +2552,10 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+@@ -4438,6 +4440,25 @@ static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
+ 	alc236_fixup_hp_coef_micmute_led(codec, fix, action);
+ }
+ 
++static void alc236_fixup_hp_micmute_led_vref(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->cap_mute_led_nid = 0x1a;
++		snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
++		codec->power_filter = led_power_filter;
++	}
++}
++
++static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	alc236_fixup_hp_mute_led_coefbit(codec, fix, action);
++	alc236_fixup_hp_micmute_led_vref(codec, fix, action);
++}
++
+ #if IS_REACHABLE(CONFIG_INPUT)
+ static void gpio2_mic_hotkey_event(struct hda_codec *codec,
+ 				   struct hda_jack_callback *event)
+@@ -6400,6 +6421,7 @@ enum {
+ 	ALC285_FIXUP_HP_MUTE_LED,
+ 	ALC236_FIXUP_HP_GPIO_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED,
++	ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+ 	ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+ 	ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
+ 	ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
+@@ -6415,6 +6437,8 @@ enum {
+ 	ALC269_FIXUP_LEMOTE_A1802,
+ 	ALC269_FIXUP_LEMOTE_A190X,
+ 	ALC256_FIXUP_INTEL_NUC8_RUGGED,
++	ALC233_FIXUP_INTEL_NUC8_DMIC,
++	ALC233_FIXUP_INTEL_NUC8_BOOST,
+ 	ALC256_FIXUP_INTEL_NUC10,
+ 	ALC255_FIXUP_XIAOMI_HEADSET_MIC,
+ 	ALC274_FIXUP_HP_MIC,
+@@ -7136,6 +7160,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc233_fixup_lenovo_line2_mic_hotkey,
+ 	},
++	[ALC233_FIXUP_INTEL_NUC8_DMIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_inv_dmic,
++		.chained = true,
++		.chain_id = ALC233_FIXUP_INTEL_NUC8_BOOST,
++	},
++	[ALC233_FIXUP_INTEL_NUC8_BOOST] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc269_fixup_limit_int_mic_boost
++	},
+ 	[ALC255_FIXUP_DELL_SPK_NOISE] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_disable_aamix,
+@@ -7646,6 +7680,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc236_fixup_hp_mute_led,
+ 	},
++	[ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc236_fixup_hp_mute_led_micmute_vref,
++	},
+ 	[ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -8051,6 +8089,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
+@@ -8063,6 +8103,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+@@ -8113,6 +8154,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
++	SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+@@ -8279,6 +8321,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ 	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
++	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+ 
+@@ -8733,12 +8776,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x12, 0x90a60130},
+ 		{0x19, 0x03a11020},
+ 		{0x21, 0x0321101f}),
+-	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+-		{0x14, 0x90170110},
+-		{0x19, 0x04a11040},
+-		{0x21, 0x04211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
+-		{0x12, 0x90a60130},
+ 		{0x14, 0x90170110},
+ 		{0x19, 0x04a11040},
+ 		{0x21, 0x04211020}),
+@@ -8909,6 +8947,10 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ 		{0x19, 0x40000000},
+ 		{0x1a, 0x40000000}),
++	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
++		{0x14, 0x90170110},
++		{0x19, 0x04a11040},
++		{0x21, 0x04211020}),
+ 	{}
+ };
+ 
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index a746802d0ac3b..17bbde73d4d15 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -296,7 +296,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
+ 
+ 	selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id);
+ 	if (selector) {
+-		int ret, i, cur;
++		int ret, i, cur, err;
+ 
+ 		if (selector->bNrInPins == 1) {
+ 			ret = 1;
+@@ -324,13 +324,17 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
+ 		ret = __uac_clock_find_source(chip, fmt,
+ 					      selector->baCSourceID[ret - 1],
+ 					      visited, validate);
++		if (ret > 0) {
++			err = uac_clock_selector_set_val(chip, entity_id, cur);
++			if (err < 0)
++				return err;
++		}
++
+ 		if (!validate || ret > 0 || !chip->autoclock)
+ 			return ret;
+ 
+ 		/* The current clock source is invalid, try others. */
+ 		for (i = 1; i <= selector->bNrInPins; i++) {
+-			int err;
+-
+ 			if (i == cur)
+ 				continue;
+ 
+@@ -396,7 +400,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
+ 
+ 	selector = snd_usb_find_clock_selector_v3(chip->ctrl_intf, entity_id);
+ 	if (selector) {
+-		int ret, i, cur;
++		int ret, i, cur, err;
+ 
+ 		/* the entity ID we are looking for is a selector.
+ 		 * find out what it currently selects */
+@@ -418,6 +422,12 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
+ 		ret = __uac3_clock_find_source(chip, fmt,
+ 					       selector->baCSourceID[ret - 1],
+ 					       visited, validate);
++		if (ret > 0) {
++			err = uac_clock_selector_set_val(chip, entity_id, cur);
++			if (err < 0)
++				return err;
++		}
++
+ 		if (!validate || ret > 0 || !chip->autoclock)
+ 			return ret;
+ 
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 646deb6244b15..c5794e83fd800 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -337,6 +337,13 @@ static const struct usbmix_name_map bose_companion5_map[] = {
+ 	{ 0 }	/* terminator */
+ };
+ 
++/* Sennheiser Communications Headset [PC 8], the dB value is reported as -6 negative maximum  */
++static const struct usbmix_dB_map sennheiser_pc8_dB = {-9500, 0};
++static const struct usbmix_name_map sennheiser_pc8_map[] = {
++	{ 9, NULL, .dB = &sennheiser_pc8_dB },
++	{ 0 }   /* terminator */
++};
++
+ /*
+  * Dell usb dock with ALC4020 codec had a firmware problem where it got
+  * screwed up when zero volume is passed; just skip it as a workaround
+@@ -593,6 +600,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x17aa, 0x1046),
+ 		.map = lenovo_p620_rear_map,
+ 	},
++	{
++		/* Sennheiser Communications Headset [PC 8] */
++		.id = USB_ID(0x1395, 0x0025),
++		.map = sennheiser_pc8_map,
++	},
+ 	{ 0 } /* terminator */
+ };
+ 
+diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
+index 8e54ce47648e2..3bf1820c0da11 100644
+--- a/tools/power/x86/intel-speed-select/isst-display.c
++++ b/tools/power/x86/intel-speed-select/isst-display.c
+@@ -25,10 +25,14 @@ static void printcpulist(int str_len, char *str, int mask_size,
+ 			index = snprintf(&str[curr_index],
+ 					 str_len - curr_index, ",");
+ 			curr_index += index;
++			if (curr_index >= str_len)
++				break;
+ 		}
+ 		index = snprintf(&str[curr_index], str_len - curr_index, "%d",
+ 				 i);
+ 		curr_index += index;
++		if (curr_index >= str_len)
++			break;
+ 		first = 0;
+ 	}
+ }
+@@ -64,10 +68,14 @@ static void printcpumask(int str_len, char *str, int mask_size,
+ 		index = snprintf(&str[curr_index], str_len - curr_index, "%08x",
+ 				 mask[i]);
+ 		curr_index += index;
++		if (curr_index >= str_len)
++			break;
+ 		if (i) {
+ 			strncat(&str[curr_index], ",", str_len - curr_index);
+ 			curr_index++;
+ 		}
++		if (curr_index >= str_len)
++			break;
+ 	}
+ 
+ 	free(mask);
+@@ -185,7 +193,7 @@ static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
+ 					  int disp_level)
+ {
+ 	char header[256];
+-	char value[256];
++	char value[512];
+ 
+ 	snprintf(header, sizeof(header), "speed-select-base-freq-properties");
+ 	format_and_print(outf, disp_level, header, NULL);
+@@ -349,7 +357,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
+ 				   struct isst_pkg_ctdp *pkg_dev)
+ {
+ 	char header[256];
+-	char value[256];
++	char value[512];
+ 	static int level;
+ 	int i;
+ 
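The intel-speed-select guards above exist because snprintf() returns the length it wanted to write, not what fit, so unconditionally advancing the index can march past the buffer on the next call. A minimal reproduction of the fixed loop; an 8-byte buffer keeps the truncation visible:

#include <stdio.h>

static void print_list(int str_len, char *str, int count)
{
	int curr_index = 0;

	for (int i = 0; i < count; i++) {
		int index = snprintf(&str[curr_index],
				     str_len - curr_index, "%s%d",
				     i ? "," : "", i);
		curr_index += index;
		if (curr_index >= str_len)	/* the added guard */
			break;
	}
}

int main(void)
{
	char buf[8];

	print_list(sizeof(buf), buf, 100);
	printf("%s\n", buf);	/* truncated but in-bounds: "0,1,2,3" */
	return 0;
}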
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index a7c4f0772e534..490c9a496fe28 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -291,13 +291,16 @@ struct msr_sum_array {
+ /* The percpu MSR sum array.*/
+ struct msr_sum_array *per_cpu_msr_sum;
+ 
+-int idx_to_offset(int idx)
++off_t idx_to_offset(int idx)
+ {
+-	int offset;
++	off_t offset;
+ 
+ 	switch (idx) {
+ 	case IDX_PKG_ENERGY:
+-		offset = MSR_PKG_ENERGY_STATUS;
++		if (do_rapl & RAPL_AMD_F17H)
++			offset = MSR_PKG_ENERGY_STAT;
++		else
++			offset = MSR_PKG_ENERGY_STATUS;
+ 		break;
+ 	case IDX_DRAM_ENERGY:
+ 		offset = MSR_DRAM_ENERGY_STATUS;
+@@ -320,12 +323,13 @@ int idx_to_offset(int idx)
+ 	return offset;
+ }
+ 
+-int offset_to_idx(int offset)
++int offset_to_idx(off_t offset)
+ {
+ 	int idx;
+ 
+ 	switch (offset) {
+ 	case MSR_PKG_ENERGY_STATUS:
++	case MSR_PKG_ENERGY_STAT:
+ 		idx = IDX_PKG_ENERGY;
+ 		break;
+ 	case MSR_DRAM_ENERGY_STATUS:
+@@ -353,7 +357,7 @@ int idx_valid(int idx)
+ {
+ 	switch (idx) {
+ 	case IDX_PKG_ENERGY:
+-		return do_rapl & RAPL_PKG;
++		return do_rapl & (RAPL_PKG | RAPL_AMD_F17H);
+ 	case IDX_DRAM_ENERGY:
+ 		return do_rapl & RAPL_DRAM;
+ 	case IDX_PP0_ENERGY:
+@@ -3272,7 +3276,7 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
+ 
+ 	for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) {
+ 		unsigned long long msr_cur, msr_last;
+-		int offset;
++		off_t offset;
+ 
+ 		if (!idx_valid(i))
+ 			continue;
+@@ -3281,7 +3285,8 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
+ 			continue;
+ 		ret = get_msr(cpu, offset, &msr_cur);
+ 		if (ret) {
+-			fprintf(outf, "Can not update msr(0x%x)\n", offset);
++			fprintf(outf, "Can not update msr(0x%llx)\n",
++				(unsigned long long)offset);
+ 			continue;
+ 		}
+ 
+diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile
+index 0b3af552632a6..df15d44aeb8d4 100644
+--- a/tools/testing/selftests/arm64/mte/Makefile
++++ b/tools/testing/selftests/arm64/mte/Makefile
+@@ -6,9 +6,7 @@ SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
+ PROGS := $(patsubst %.c,%,$(SRCS))
+ 
+ #Add mte compiler option
+-ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
+ CFLAGS += -march=armv8.5-a+memtag
+-endif
+ 
+ #check if the compiler works well
+ mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
+diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
+index 39f8908988eab..70665ba88cbb1 100644
+--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
++++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
+@@ -278,22 +278,13 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
+ 	return 0;
+ }
+ 
+-#define ID_AA64PFR1_MTE_SHIFT		8
+-#define ID_AA64PFR1_MTE			2
+-
+ int mte_default_setup(void)
+ {
+-	unsigned long hwcaps = getauxval(AT_HWCAP);
++	unsigned long hwcaps2 = getauxval(AT_HWCAP2);
+ 	unsigned long en = 0;
+ 	int ret;
+ 
+-	if (!(hwcaps & HWCAP_CPUID)) {
+-		ksft_print_msg("FAIL: CPUID registers unavailable\n");
+-		return KSFT_FAIL;
+-	}
+-	/* Read ID_AA64PFR1_EL1 register */
+-	asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
+-	if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
++	if (!(hwcaps2 & HWCAP2_MTE)) {
+ 		ksft_print_msg("FAIL: MTE features unavailable\n");
+ 		return KSFT_SKIP;
+ 	}
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index d585cc1948cc7..6bcee2ec91a9c 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -1,5 +1,5 @@
+ CC = $(CROSS_COMPILE)gcc
+-CFLAGS = -g -Wall
++CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
+ SRCS=$(wildcard *.c)
+ OBJS=$(SRCS:.c=.o)
+ 
+diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
+index 38dbf4962e333..5922cc1b03867 100644
+--- a/tools/testing/selftests/resctrl/cache.c
++++ b/tools/testing/selftests/resctrl/cache.c
+@@ -182,7 +182,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
+ 	/*
+ 	 * Measure cache miss from perf.
+ 	 */
+-	if (!strcmp(param->resctrl_val, "cat")) {
++	if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ 		ret = get_llc_perf(&llc_perf_miss);
+ 		if (ret < 0)
+ 			return ret;
+@@ -192,7 +192,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
+ 	/*
+ 	 * Measure llc occupancy from resctrl.
+ 	 */
+-	if (!strcmp(param->resctrl_val, "cqm")) {
++	if (!strncmp(param->resctrl_val, CQM_STR, sizeof(CQM_STR))) {
+ 		ret = get_llc_occu_resctrl(&llc_occu_resc);
+ 		if (ret < 0)
+ 			return ret;
+@@ -234,7 +234,7 @@ int cat_val(struct resctrl_val_param *param)
+ 	if (ret)
+ 		return ret;
+ 
+-	if ((strcmp(resctrl_val, "cat") == 0)) {
++	if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ 		ret = initialize_llc_perf();
+ 		if (ret)
+ 			return ret;
+@@ -242,7 +242,7 @@ int cat_val(struct resctrl_val_param *param)
+ 
+ 	/* Test runs until the callback setup() tells the test to stop. */
+ 	while (1) {
+-		if (strcmp(resctrl_val, "cat") == 0) {
++		if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ 			ret = param->setup(1, param);
+ 			if (ret) {
+ 				ret = 0;
+diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
+index 5da43767b9731..20823725daca5 100644
+--- a/tools/testing/selftests/resctrl/cat_test.c
++++ b/tools/testing/selftests/resctrl/cat_test.c
+@@ -17,10 +17,10 @@
+ #define MAX_DIFF_PERCENT	4
+ #define MAX_DIFF		1000000
+ 
+-int count_of_bits;
+-char cbm_mask[256];
+-unsigned long long_mask;
+-unsigned long cache_size;
++static int count_of_bits;
++static char cbm_mask[256];
++static unsigned long long_mask;
++static unsigned long cache_size;
+ 
+ /*
+  * Change schemata. Write schemata to specified
+@@ -136,7 +136,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ 		return -1;
+ 
+ 	/* Get default cbm mask for L3/L2 cache */
+-	ret = get_cbm_mask(cache_type);
++	ret = get_cbm_mask(cache_type, cbm_mask);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -164,7 +164,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ 		return -1;
+ 
+ 	struct resctrl_val_param param = {
+-		.resctrl_val	= "cat",
++		.resctrl_val	= CAT_STR,
+ 		.cpu_no		= cpu_no,
+ 		.mum_resctrlfs	= 0,
+ 		.setup		= cat_setup,
+diff --git a/tools/testing/selftests/resctrl/cqm_test.c b/tools/testing/selftests/resctrl/cqm_test.c
+index c8756152bd615..271752e9ef5be 100644
+--- a/tools/testing/selftests/resctrl/cqm_test.c
++++ b/tools/testing/selftests/resctrl/cqm_test.c
+@@ -16,10 +16,10 @@
+ #define MAX_DIFF		2000000
+ #define MAX_DIFF_PERCENT	15
+ 
+-int count_of_bits;
+-char cbm_mask[256];
+-unsigned long long_mask;
+-unsigned long cache_size;
++static int count_of_bits;
++static char cbm_mask[256];
++static unsigned long long_mask;
++static unsigned long cache_size;
+ 
+ static int cqm_setup(int num, ...)
+ {
+@@ -86,7 +86,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
+ 		return errno;
+ 	}
+ 
+-	while (fgets(temp, 1024, fp)) {
++	while (fgets(temp, sizeof(temp), fp)) {
+ 		char *token = strtok(temp, ":\t");
+ 		int fields = 0;
+ 
+@@ -125,7 +125,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+ 	if (!validate_resctrl_feature_request("cqm"))
+ 		return -1;
+ 
+-	ret = get_cbm_mask("L3");
++	ret = get_cbm_mask("L3", cbm_mask);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -145,7 +145,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+ 	}
+ 
+ 	struct resctrl_val_param param = {
+-		.resctrl_val	= "cqm",
++		.resctrl_val	= CQM_STR,
+ 		.ctrlgrp	= "c1",
+ 		.mongrp		= "m1",
+ 		.cpu_no		= cpu_no,
+diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
+index 79c611c99a3dd..51e5cf22632f7 100644
+--- a/tools/testing/selftests/resctrl/fill_buf.c
++++ b/tools/testing/selftests/resctrl/fill_buf.c
+@@ -115,7 +115,7 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
+ 
+ 	while (1) {
+ 		ret = fill_one_span_read(start_ptr, end_ptr);
+-		if (!strcmp(resctrl_val, "cat"))
++		if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
+ 			break;
+ 	}
+ 
+@@ -134,7 +134,7 @@ static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
+ {
+ 	while (1) {
+ 		fill_one_span_write(start_ptr, end_ptr);
+-		if (!strcmp(resctrl_val, "cat"))
++		if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
+ 			break;
+ 	}
+ 
+diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
+index 7bf8eaa6204bf..6449fbd96096a 100644
+--- a/tools/testing/selftests/resctrl/mba_test.c
++++ b/tools/testing/selftests/resctrl/mba_test.c
+@@ -141,7 +141,7 @@ void mba_test_cleanup(void)
+ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
+ {
+ 	struct resctrl_val_param param = {
+-		.resctrl_val	= "mba",
++		.resctrl_val	= MBA_STR,
+ 		.ctrlgrp	= "c1",
+ 		.mongrp		= "m1",
+ 		.cpu_no		= cpu_no,
+diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
+index 4700f7453f811..ec6cfe01c9c26 100644
+--- a/tools/testing/selftests/resctrl/mbm_test.c
++++ b/tools/testing/selftests/resctrl/mbm_test.c
+@@ -114,7 +114,7 @@ void mbm_test_cleanup(void)
+ int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
+ {
+ 	struct resctrl_val_param param = {
+-		.resctrl_val	= "mbm",
++		.resctrl_val	= MBM_STR,
+ 		.ctrlgrp	= "c1",
+ 		.mongrp		= "m1",
+ 		.span		= span,
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index 39bf59c6b9c56..9dcc96e1ad3d7 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -28,6 +28,10 @@
+ #define RESCTRL_PATH		"/sys/fs/resctrl"
+ #define PHYS_ID_PATH		"/sys/devices/system/cpu/cpu"
+ #define CBM_MASK_PATH		"/sys/fs/resctrl/info"
++#define L3_PATH			"/sys/fs/resctrl/info/L3"
++#define MB_PATH			"/sys/fs/resctrl/info/MB"
++#define L3_MON_PATH		"/sys/fs/resctrl/info/L3_MON"
++#define L3_MON_FEATURES_PATH	"/sys/fs/resctrl/info/L3_MON/mon_features"
+ 
+ #define PARENT_EXIT(err_msg)			\
+ 	do {					\
+@@ -62,11 +66,16 @@ struct resctrl_val_param {
+ 	int		(*setup)(int num, ...);
+ };
+ 
+-pid_t bm_pid, ppid;
+-int tests_run;
++#define MBM_STR			"mbm"
++#define MBA_STR			"mba"
++#define CQM_STR			"cqm"
++#define CAT_STR			"cat"
+ 
+-char llc_occup_path[1024];
+-bool is_amd;
++extern pid_t bm_pid, ppid;
++extern int tests_run;
++
++extern char llc_occup_path[1024];
++extern bool is_amd;
+ 
+ bool check_resctrlfs_support(void);
+ int filter_dmesg(void);
+@@ -74,7 +83,7 @@ int remount_resctrlfs(bool mum_resctrlfs);
+ int get_resource_id(int cpu_no, int *resource_id);
+ int umount_resctrlfs(void);
+ int validate_bw_report_request(char *bw_report);
+-bool validate_resctrl_feature_request(char *resctrl_val);
++bool validate_resctrl_feature_request(const char *resctrl_val);
+ char *fgrep(FILE *inf, const char *str);
+ int taskset_benchmark(pid_t bm_pid, int cpu_no);
+ void run_benchmark(int signum, siginfo_t *info, void *ucontext);
+@@ -92,7 +101,7 @@ void tests_cleanup(void);
+ void mbm_test_cleanup(void);
+ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
+ void mba_test_cleanup(void);
+-int get_cbm_mask(char *cache_type);
++int get_cbm_mask(char *cache_type, char *cbm_mask);
+ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
+ void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
+ int cat_val(struct resctrl_val_param *param);
+diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
+index 425cc85ac8836..ac2269610aa9d 100644
+--- a/tools/testing/selftests/resctrl/resctrl_tests.c
++++ b/tools/testing/selftests/resctrl/resctrl_tests.c
+@@ -73,7 +73,7 @@ int main(int argc, char **argv)
+ 		}
+ 	}
+ 
+-	while ((c = getopt(argc_new, argv, "ht:b:")) != -1) {
++	while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
+ 		char *token;
+ 
+ 		switch (c) {
+@@ -85,13 +85,13 @@ int main(int argc, char **argv)
+ 			cqm_test = false;
+ 			cat_test = false;
+ 			while (token) {
+-				if (!strcmp(token, "mbm")) {
++				if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
+ 					mbm_test = true;
+-				} else if (!strcmp(token, "mba")) {
++				} else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
+ 					mba_test = true;
+-				} else if (!strcmp(token, "cqm")) {
++				} else if (!strncmp(token, CQM_STR, sizeof(CQM_STR))) {
+ 					cqm_test = true;
+-				} else if (!strcmp(token, "cat")) {
++				} else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
+ 					cat_test = true;
+ 				} else {
+ 					printf("invalid argument\n");
+@@ -161,7 +161,7 @@ int main(int argc, char **argv)
+ 	if (!is_amd && mbm_test) {
+ 		printf("# Starting MBM BW change ...\n");
+ 		if (!has_ben)
+-			sprintf(benchmark_cmd[5], "%s", "mba");
++			sprintf(benchmark_cmd[5], "%s", MBA_STR);
+ 		res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
+ 		printf("%sok MBM: bw change\n", res ? "not " : "");
+ 		mbm_test_cleanup();
+@@ -181,7 +181,7 @@ int main(int argc, char **argv)
+ 	if (cqm_test) {
+ 		printf("# Starting CQM test ...\n");
+ 		if (!has_ben)
+-			sprintf(benchmark_cmd[5], "%s", "cqm");
++			sprintf(benchmark_cmd[5], "%s", CQM_STR);
+ 		res = cqm_resctrl_val(cpu_no, no_of_bits, benchmark_cmd);
+ 		printf("%sok CQM: test\n", res ? "not " : "");
+ 		cqm_test_cleanup();
+diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
+index 520fea3606d17..8df557894059a 100644
+--- a/tools/testing/selftests/resctrl/resctrl_val.c
++++ b/tools/testing/selftests/resctrl/resctrl_val.c
+@@ -221,8 +221,8 @@ static int read_from_imc_dir(char *imc_dir, int count)
+  */
+ static int num_of_imcs(void)
+ {
++	char imc_dir[512], *temp;
+ 	unsigned int count = 0;
+-	char imc_dir[512];
+ 	struct dirent *ep;
+ 	int ret;
+ 	DIR *dp;
+@@ -230,7 +230,25 @@ static int num_of_imcs(void)
+ 	dp = opendir(DYN_PMU_PATH);
+ 	if (dp) {
+ 		while ((ep = readdir(dp))) {
+-			if (strstr(ep->d_name, UNCORE_IMC)) {
++			temp = strstr(ep->d_name, UNCORE_IMC);
++			if (!temp)
++				continue;
++
++			/*
++			 * imc counters are named "uncore_imc_<n>", hence
++			 * advance the pointer to point at <n>. Note that
++			 * sizeof(UNCORE_IMC) counts the null character as
++			 * well, so the trailing underscore in
++			 * "uncore_imc_" need not be counted separately.
++			 */
++			temp = temp + sizeof(UNCORE_IMC);
++
++			/*
++			 * Some directories under "DYN_PMU_PATH" could have
++			 * names like "uncore_imc_free_running", hence, check if
++			 * first character is a numerical digit or not.
++			 */
++			if (temp[0] >= '0' && temp[0] <= '9') {
+ 				sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
+ 					ep->d_name);
+ 				ret = read_from_imc_dir(imc_dir, count);
+@@ -282,9 +300,9 @@ static int initialize_mem_bw_imc(void)
+  * Memory B/W utilized by a process on a socket can be calculated using
+  * iMC counters. Perf events are used to read these counters.
+  *
+- * Return: >= 0 on success. < 0 on failure.
++ * Return: = 0 on success. < 0 on failure.
+  */
+-static float get_mem_bw_imc(int cpu_no, char *bw_report)
++static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
+ {
+ 	float reads, writes, of_mul_read, of_mul_write;
+ 	int imc, j, ret;
+@@ -355,13 +373,18 @@ static float get_mem_bw_imc(int cpu_no, char *bw_report)
+ 		close(imc_counters_config[imc][WRITE].fd);
+ 	}
+ 
+-	if (strcmp(bw_report, "reads") == 0)
+-		return reads;
++	if (strcmp(bw_report, "reads") == 0) {
++		*bw_imc = reads;
++		return 0;
++	}
+ 
+-	if (strcmp(bw_report, "writes") == 0)
+-		return writes;
++	if (strcmp(bw_report, "writes") == 0) {
++		*bw_imc = writes;
++		return 0;
++	}
+ 
+-	return (reads + writes);
++	*bw_imc = reads + writes;
++	return 0;
+ }
+ 
+ void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
+@@ -397,10 +420,10 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
+ 		return;
+ 	}
+ 
+-	if (strcmp(resctrl_val, "mbm") == 0)
++	if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
+ 		set_mbm_path(ctrlgrp, mongrp, resource_id);
+ 
+-	if ((strcmp(resctrl_val, "mba") == 0)) {
++	if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ 		if (ctrlgrp)
+ 			sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
+ 				RESCTRL_PATH, ctrlgrp, resource_id);
+@@ -420,9 +443,8 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
+  * 1. If con_mon grp is given, then read from it
+  * 2. If con_mon grp is not given, then read from root con_mon grp
+  */
+-static unsigned long get_mem_bw_resctrl(void)
++static int get_mem_bw_resctrl(unsigned long *mbm_total)
+ {
+-	unsigned long mbm_total = 0;
+ 	FILE *fp;
+ 
+ 	fp = fopen(mbm_total_path, "r");
+@@ -431,7 +453,7 @@ static unsigned long get_mem_bw_resctrl(void)
+ 
+ 		return -1;
+ 	}
+-	if (fscanf(fp, "%lu", &mbm_total) <= 0) {
++	if (fscanf(fp, "%lu", mbm_total) <= 0) {
+ 		perror("Could not get mbm local bytes");
+ 		fclose(fp);
+ 
+@@ -439,7 +461,7 @@ static unsigned long get_mem_bw_resctrl(void)
+ 	}
+ 	fclose(fp);
+ 
+-	return mbm_total;
++	return 0;
+ }
+ 
+ pid_t bm_pid, ppid;
+@@ -524,14 +546,15 @@ static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
+ 		return;
+ 	}
+ 
+-	if (strcmp(resctrl_val, "cqm") == 0)
++	if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
+ 		set_cqm_path(ctrlgrp, mongrp, resource_id);
+ }
+ 
+ static int
+ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
+ {
+-	unsigned long bw_imc, bw_resc, bw_resc_end;
++	unsigned long bw_resc, bw_resc_end;
++	float bw_imc;
+ 	int ret;
+ 
+ 	/*
+@@ -541,13 +564,13 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
+ 	 * Compare the two values to validate resctrl value.
+ 	 * It takes 1sec to measure the data.
+ 	 */
+-	bw_imc = get_mem_bw_imc(param->cpu_no, param->bw_report);
+-	if (bw_imc <= 0)
+-		return bw_imc;
++	ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
++	if (ret < 0)
++		return ret;
+ 
+-	bw_resc_end = get_mem_bw_resctrl();
+-	if (bw_resc_end <= 0)
+-		return bw_resc_end;
++	ret = get_mem_bw_resctrl(&bw_resc_end);
++	if (ret < 0)
++		return ret;
+ 
+ 	bw_resc = (bw_resc_end - *bw_resc_start) / MB;
+ 	ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
+@@ -579,8 +602,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ 	if (strcmp(param->filename, "") == 0)
+ 		sprintf(param->filename, "stdio");
+ 
+-	if ((strcmp(resctrl_val, "mba")) == 0 ||
+-	    (strcmp(resctrl_val, "mbm")) == 0) {
++	if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
++	    !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+ 		ret = validate_bw_report_request(param->bw_report);
+ 		if (ret)
+ 			return ret;
+@@ -674,15 +697,15 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ 	if (ret)
+ 		goto out;
+ 
+-	if ((strcmp(resctrl_val, "mbm") == 0) ||
+-	    (strcmp(resctrl_val, "mba") == 0)) {
++	if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
++	    !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ 		ret = initialize_mem_bw_imc();
+ 		if (ret)
+ 			goto out;
+ 
+ 		initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
+ 					  param->cpu_no, resctrl_val);
+-	} else if (strcmp(resctrl_val, "cqm") == 0)
++	} else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
+ 		initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
+ 					    param->cpu_no, resctrl_val);
+ 
+@@ -710,8 +733,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ 
+ 	/* Test runs until the callback setup() tells the test to stop. */
+ 	while (1) {
+-		if ((strcmp(resctrl_val, "mbm") == 0) ||
+-		    (strcmp(resctrl_val, "mba") == 0)) {
++		if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
++		    !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ 			ret = param->setup(1, param);
+ 			if (ret) {
+ 				ret = 0;
+@@ -721,7 +744,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ 			ret = measure_vals(param, &bw_resc_start);
+ 			if (ret)
+ 				break;
+-		} else if (strcmp(resctrl_val, "cqm") == 0) {
++		} else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR))) {
+ 			ret = param->setup(1, param);
+ 			if (ret) {
+ 				ret = 0;
+diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
+index 19c0ec4045a40..b57170f53861d 100644
+--- a/tools/testing/selftests/resctrl/resctrlfs.c
++++ b/tools/testing/selftests/resctrl/resctrlfs.c
+@@ -49,8 +49,6 @@ static int find_resctrl_mount(char *buffer)
+ 	return -ENOENT;
+ }
+ 
+-char cbm_mask[256];
+-
+ /*
+  * remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
+  * @mum_resctrlfs:	Should the resctrl FS be remounted?
+@@ -205,16 +203,18 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
+ /*
+  * get_cbm_mask - Get cbm mask for given cache
+  * @cache_type:	Cache level L2/L3
+- *
+- * Mask is stored in cbm_mask which is global variable.
++ * @cbm_mask:	cbm_mask returned as a string
+  *
+  * Return: = 0 on success, < 0 on failure.
+  */
+-int get_cbm_mask(char *cache_type)
++int get_cbm_mask(char *cache_type, char *cbm_mask)
+ {
+ 	char cbm_mask_path[1024];
+ 	FILE *fp;
+ 
++	if (!cbm_mask)
++		return -1;
++
+ 	sprintf(cbm_mask_path, "%s/%s/cbm_mask", CBM_MASK_PATH, cache_type);
+ 
+ 	fp = fopen(cbm_mask_path, "r");
+@@ -334,7 +334,7 @@ void run_benchmark(int signum, siginfo_t *info, void *ucontext)
+ 		operation = atoi(benchmark_cmd[4]);
+ 		sprintf(resctrl_val, "%s", benchmark_cmd[5]);
+ 
+-		if (strcmp(resctrl_val, "cqm") != 0)
++		if (strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
+ 			buffer_span = span * MB;
+ 		else
+ 			buffer_span = span;
+@@ -459,8 +459,8 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
+ 		goto out;
+ 
+ 	/* Create mon grp and write pid into it for "mbm" and "cqm" test */
+-	if ((strcmp(resctrl_val, "cqm") == 0) ||
+-	    (strcmp(resctrl_val, "mbm") == 0)) {
++	if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)) ||
++	    !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+ 		if (strlen(mongrp)) {
+ 			sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
+ 			sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
+@@ -505,9 +505,9 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
+ 	int resource_id, ret = 0;
+ 	FILE *fp;
+ 
+-	if ((strcmp(resctrl_val, "mba") != 0) &&
+-	    (strcmp(resctrl_val, "cat") != 0) &&
+-	    (strcmp(resctrl_val, "cqm") != 0))
++	if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
++	    strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
++	    strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
+ 		return -ENOENT;
+ 
+ 	if (!schemata) {
+@@ -528,9 +528,10 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
+ 	else
+ 		sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
+ 
+-	if (!strcmp(resctrl_val, "cat") || !strcmp(resctrl_val, "cqm"))
++	if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
++	    !strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
+ 		sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
+-	if (strcmp(resctrl_val, "mba") == 0)
++	if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)))
+ 		sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
+ 
+ 	fp = fopen(controlgroup, "w");
+@@ -615,26 +616,56 @@ char *fgrep(FILE *inf, const char *str)
+  * validate_resctrl_feature_request - Check if requested feature is valid.
+  * @resctrl_val:	Requested feature
+  *
+- * Return: 0 on success, non-zero on failure
++ * Return: True if the feature is supported, else false
+  */
+-bool validate_resctrl_feature_request(char *resctrl_val)
++bool validate_resctrl_feature_request(const char *resctrl_val)
+ {
+-	FILE *inf = fopen("/proc/cpuinfo", "r");
++	struct stat statbuf;
+ 	bool found = false;
+ 	char *res;
++	FILE *inf;
+ 
+-	if (!inf)
++	if (!resctrl_val)
+ 		return false;
+ 
+-	res = fgrep(inf, "flags");
+-
+-	if (res) {
+-		char *s = strchr(res, ':');
++	if (remount_resctrlfs(false))
++		return false;
+ 
+-		found = s && !strstr(s, resctrl_val);
+-		free(res);
++	if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
++		if (!stat(L3_PATH, &statbuf))
++			return true;
++	} else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
++		if (!stat(MB_PATH, &statbuf))
++			return true;
++	} else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
++		   !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
++		if (!stat(L3_MON_PATH, &statbuf)) {
++			inf = fopen(L3_MON_FEATURES_PATH, "r");
++			if (!inf)
++				return false;
++
++			if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
++				res = fgrep(inf, "llc_occupancy");
++				if (res) {
++					found = true;
++					free(res);
++				}
++			}
++
++			if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
++				res = fgrep(inf, "mbm_total_bytes");
++				if (res) {
++					free(res);
++					res = fgrep(inf, "mbm_local_bytes");
++					if (res) {
++						found = true;
++						free(res);
++					}
++				}
++			}
++			fclose(inf);
++		}
+ 	}
+-	fclose(inf);
+ 
+ 	return found;
+ }
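
The rewritten validate_resctrl_feature_request() above replaces grepping
/proc/cpuinfo for CPU flags with direct checks against the mounted resctrl
filesystem. A minimal standalone sketch of that probing pattern, assuming
resctrl is mounted at /sys/fs/resctrl (the helper names here are illustrative
and not part of the selftest sources):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

static bool path_exists(const char *path)
{
	struct stat statbuf;

	return stat(path, &statbuf) == 0;
}

/* Return true if @feature is listed in L3_MON/mon_features. */
static bool probe_l3_mon_feature(const char *feature)
{
	char line[256];
	FILE *fp;

	fp = fopen("/sys/fs/resctrl/info/L3_MON/mon_features", "r");
	if (!fp)
		return false;

	while (fgets(line, sizeof(line), fp)) {
		if (strstr(line, feature)) {
			fclose(fp);
			return true;
		}
	}
	fclose(fp);
	return false;
}

int main(void)
{
	printf("CAT (L3): %d\n", path_exists("/sys/fs/resctrl/info/L3"));
	printf("MBA (MB): %d\n", path_exists("/sys/fs/resctrl/info/MB"));
	printf("llc_occupancy: %d\n", probe_l3_mon_feature("llc_occupancy"));
	return 0;
}

On a kernel without resctrl (or with it unmounted) every probe simply reports
0, mirroring the patch returning false rather than erroring out. Note also
that the companion strncmp(val, CAT_STR, sizeof(CAT_STR)) pattern used
throughout this patch compares the terminating NUL as well, so the bound comes
from the macro itself and longer strings sharing the prefix no longer match.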


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-05-07 13:15 Alice Ferrazzi
  0 siblings, 0 replies; 33+ messages in thread
From: Alice Ferrazzi @ 2021-05-07 13:15 UTC (permalink / raw
  To: gentoo-commits

commit:     233a290d2b20c960c3b618131e019e0dbec7ec6a
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri May  7 13:14:44 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri May  7 13:14:53 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=233a290d

Linux patch 5.12.2

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |   4 +
 1001_linux-5.12.2.patch | 505 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 509 insertions(+)

diff --git a/0000_README b/0000_README
index 113ce8a..d1c77ca 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-5.12.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.1
 
+Patch:  1001_linux-5.12.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-5.12.2.patch b/1001_linux-5.12.2.patch
new file mode 100644
index 0000000..70dc5ca
--- /dev/null
+++ b/1001_linux-5.12.2.patch
@@ -0,0 +1,505 @@
+diff --git a/Makefile b/Makefile
+index 78b0941f0de40..5077411a1874b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
+index 2203e2d0ae2ad..44a45f3fa4b01 100644
+--- a/arch/mips/include/asm/vdso/gettimeofday.h
++++ b/arch/mips/include/asm/vdso/gettimeofday.h
+@@ -20,6 +20,12 @@
+ 
+ #define VDSO_HAS_CLOCK_GETRES		1
+ 
++#if MIPS_ISA_REV < 6
++#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
++#else
++#define VDSO_SYSCALL_CLOBBERS
++#endif
++
+ static __always_inline long gettimeofday_fallback(
+ 				struct __kernel_old_timeval *_tv,
+ 				struct timezone *_tz)
+@@ -35,7 +41,9 @@ static __always_inline long gettimeofday_fallback(
+ 	: "=r" (ret), "=r" (error)
+ 	: "r" (tv), "r" (tz), "r" (nr)
+ 	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+-	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
++	  "$14", "$15", "$24", "$25",
++	  VDSO_SYSCALL_CLOBBERS
++	  "memory");
+ 
+ 	return error ? -ret : ret;
+ }
+@@ -59,7 +67,9 @@ static __always_inline long clock_gettime_fallback(
+ 	: "=r" (ret), "=r" (error)
+ 	: "r" (clkid), "r" (ts), "r" (nr)
+ 	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+-	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
++	  "$14", "$15", "$24", "$25",
++	  VDSO_SYSCALL_CLOBBERS
++	  "memory");
+ 
+ 	return error ? -ret : ret;
+ }
+@@ -83,7 +93,9 @@ static __always_inline int clock_getres_fallback(
+ 	: "=r" (ret), "=r" (error)
+ 	: "r" (clkid), "r" (ts), "r" (nr)
+ 	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+-	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
++	  "$14", "$15", "$24", "$25",
++	  VDSO_SYSCALL_CLOBBERS
++	  "memory");
+ 
+ 	return error ? -ret : ret;
+ }
+@@ -105,7 +117,9 @@ static __always_inline long clock_gettime32_fallback(
+ 	: "=r" (ret), "=r" (error)
+ 	: "r" (clkid), "r" (ts), "r" (nr)
+ 	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+-	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
++	  "$14", "$15", "$24", "$25",
++	  VDSO_SYSCALL_CLOBBERS
++	  "memory");
+ 
+ 	return error ? -ret : ret;
+ }
+@@ -125,7 +139,9 @@ static __always_inline int clock_getres32_fallback(
+ 	: "=r" (ret), "=r" (error)
+ 	: "r" (clkid), "r" (ts), "r" (nr)
+ 	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+-	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
++	  "$14", "$15", "$24", "$25",
++	  VDSO_SYSCALL_CLOBBERS
++	  "memory");
+ 
+ 	return error ? -ret : ret;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 8e9cb44e66e53..4ecb813c9bc7d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1049,6 +1049,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
+ void i915_driver_shutdown(struct drm_i915_private *i915)
+ {
+ 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
++	intel_runtime_pm_disable(&i915->runtime_pm);
++	intel_power_domains_disable(i915);
+ 
+ 	i915_gem_suspend(i915);
+ 
+@@ -1064,7 +1066,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
+ 	intel_suspend_encoders(i915);
+ 	intel_shutdown_encoders(i915);
+ 
++	/*
++	 * The only requirement is to reboot with display DC states disabled,
++	 * for now leaving all display power wells in the INIT power domain
++	 * enabled matching the driver reload sequence.
++	 */
++	intel_power_domains_driver_remove(i915);
+ 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
++
++	intel_runtime_pm_driver_release(&i915->runtime_pm);
+ }
+ 
+ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index d650b39b6e5dd..c1316718304d0 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -296,12 +296,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ 	int ret;
+ 
+ 	if (2 == size) {
+-		u16 buf;
++		u16 buf = 0;
+ 		ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+ 		le16_to_cpus(&buf);
+ 		*((u16 *)data) = buf;
+ 	} else if (4 == size) {
+-		u32 buf;
++		u32 buf = 0;
+ 		ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+ 		le32_to_cpus(&buf);
+ 		*((u32 *)data) = buf;
+@@ -1296,6 +1296,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
+ {
+ 	u8 mac[ETH_ALEN];
+ 
++	memset(mac, 0, sizeof(mac));
++
+ 	/* Maybe the boot loader passed the MAC address via device tree */
+ 	if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
+ 		netif_dbg(dev, ifup, dev->net,
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 0d9e2ddbf904d..61f1c91c62de2 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -6260,6 +6260,7 @@ enum thermal_access_mode {
+ enum { /* TPACPI_THERMAL_TPEC_* */
+ 	TP_EC_THERMAL_TMP0 = 0x78,	/* ACPI EC regs TMP 0..7 */
+ 	TP_EC_THERMAL_TMP8 = 0xC0,	/* ACPI EC regs TMP 8..15 */
++	TP_EC_FUNCREV      = 0xEF,      /* ACPI EC Functional revision */
+ 	TP_EC_THERMAL_TMP_NA = -128,	/* ACPI EC sensor not available */
+ 
+ 	TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
+@@ -6458,7 +6459,7 @@ static const struct attribute_group thermal_temp_input8_group = {
+ 
+ static int __init thermal_init(struct ibm_init_struct *iibm)
+ {
+-	u8 t, ta1, ta2;
++	u8 t, ta1, ta2, ver = 0;
+ 	int i;
+ 	int acpi_tmp7;
+ 	int res;
+@@ -6473,7 +6474,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
+ 		 * 0x78-0x7F, 0xC0-0xC7.  Registers return 0x00 for
+ 		 * non-implemented, thermal sensors return 0x80 when
+ 		 * not available
++		 * The above rule is unfortunately flawed. This has been seen with
++		 * 0xC2 (power supply ID) causing thermal control problems.
++		 * The EC version can be determined by offset 0xEF and at least for
++		 * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7
++		 * are not thermal registers.
+ 		 */
++		if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
++			pr_warn("Thinkpad ACPI EC unable to access EC version\n");
+ 
+ 		ta1 = ta2 = 0;
+ 		for (i = 0; i < 8; i++) {
+@@ -6483,11 +6491,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
+ 				ta1 = 0;
+ 				break;
+ 			}
+-			if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
+-				ta2 |= t;
+-			} else {
+-				ta1 = 0;
+-				break;
++			if (ver < 3) {
++				if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
++					ta2 |= t;
++				} else {
++					ta1 = 0;
++					break;
++				}
+ 			}
+ 		}
+ 		if (ta1 == 0) {
+@@ -6500,9 +6510,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
+ 				thermal_read_mode = TPACPI_THERMAL_NONE;
+ 			}
+ 		} else {
+-			thermal_read_mode =
+-			    (ta2 != 0) ?
+-			    TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
++			if (ver >= 3)
++				thermal_read_mode = TPACPI_THERMAL_TPEC_8;
++			else
++				thermal_read_mode =
++					(ta2 != 0) ?
++					TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+ 		}
+ 	} else if (acpi_tmp7) {
+ 		if (tpacpi_is_ibm() &&
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 76ac5d6555ae4..21e7522655ac9 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -406,6 +406,7 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 
+ 	/* Realtek hub in Dell WD19 (Type-C) */
+ 	{ USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
++	{ USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+ 	/* Generic RTL8153 based ethernet adapters */
+ 	{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
+@@ -438,6 +439,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x17ef, 0xa012), .driver_info =
+ 			USB_QUIRK_DISCONNECT_SUSPEND },
+ 
++	/* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */
++	{ USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* BUILDWIN Photo Frame */
+ 	{ USB_DEVICE(0x1908, 0x1315), .driver_info =
+ 			USB_QUIRK_HONOR_BNUMINTERFACES },
+diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
+index 3fe05fb5d1459..71e264e2f16b6 100644
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -919,6 +919,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ 			continue;
+ 
+ 		if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) {
++			dput(this);
+ 			err = -EPERM;
+ 			pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry);
+ 			goto out_put;
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index fdd72f1a9c5e0..8cf3433350296 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1826,7 +1826,8 @@ out_err:
+  * - upper/work dir of any overlayfs instance
+  */
+ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
+-			   struct dentry *dentry, const char *name)
++			   struct dentry *dentry, const char *name,
++			   bool is_lower)
+ {
+ 	struct dentry *next = dentry, *parent;
+ 	int err = 0;
+@@ -1838,7 +1839,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
+ 
+ 	/* Walk back ancestors to root (inclusive) looking for traps */
+ 	while (!err && parent != next) {
+-		if (ovl_lookup_trap_inode(sb, parent)) {
++		if (is_lower && ovl_lookup_trap_inode(sb, parent)) {
+ 			err = -ELOOP;
+ 			pr_err("overlapping %s path\n", name);
+ 		} else if (ovl_is_inuse(parent)) {
+@@ -1864,7 +1865,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
+ 
+ 	if (ovl_upper_mnt(ofs)) {
+ 		err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
+-				      "upperdir");
++				      "upperdir", false);
+ 		if (err)
+ 			return err;
+ 
+@@ -1875,7 +1876,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
+ 		 * workbasedir.  In that case, we already have their traps in
+ 		 * inode cache and we will catch that case on lookup.
+ 		 */
+-		err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
++		err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir",
++				      false);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -1883,7 +1885,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
+ 	for (i = 1; i < ofs->numlayer; i++) {
+ 		err = ovl_check_layer(sb, ofs,
+ 				      ofs->layers[i].mnt->mnt_root,
+-				      "lowerdir");
++				      "lowerdir", true);
+ 		if (err)
+ 			return err;
+ 	}
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 971b33aca13d0..99bc82342ca02 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -299,10 +299,11 @@ struct bpf_verifier_state_list {
+ };
+ 
+ /* Possible states for alu_state member. */
+-#define BPF_ALU_SANITIZE_SRC		1U
+-#define BPF_ALU_SANITIZE_DST		2U
++#define BPF_ALU_SANITIZE_SRC		(1U << 0)
++#define BPF_ALU_SANITIZE_DST		(1U << 1)
+ #define BPF_ALU_NEG_VALUE		(1U << 2)
+ #define BPF_ALU_NON_POINTER		(1U << 3)
++#define BPF_ALU_IMMEDIATE		(1U << 4)
+ #define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
+ 					 BPF_ALU_SANITIZE_DST)
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 0399ac092b363..a2ed7a7e27e2b 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5952,6 +5952,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ {
+ 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+ 	struct bpf_verifier_state *vstate = env->cur_state;
++	bool off_is_imm = tnum_is_const(off_reg->var_off);
+ 	bool off_is_neg = off_reg->smin_value < 0;
+ 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
+ 	u8 opcode = BPF_OP(insn->code);
+@@ -5982,6 +5983,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 		alu_limit = abs(tmp_aux->alu_limit - alu_limit);
+ 	} else {
+ 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
++		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
+ 		alu_state |= ptr_is_dst_reg ?
+ 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+ 	}
+@@ -11740,7 +11742,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
+ 			struct bpf_insn insn_buf[16];
+ 			struct bpf_insn *patch = &insn_buf[0];
+-			bool issrc, isneg;
++			bool issrc, isneg, isimm;
+ 			u32 off_reg;
+ 
+ 			aux = &env->insn_aux_data[i + delta];
+@@ -11751,28 +11753,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
+ 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
+ 				BPF_ALU_SANITIZE_SRC;
++			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
+ 
+ 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
+-			if (isneg)
+-				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+-			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+-			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+-			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+-			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+-			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+-			if (issrc) {
+-				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
+-							 off_reg);
+-				insn->src_reg = BPF_REG_AX;
++			if (isimm) {
++				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+ 			} else {
+-				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
+-							 BPF_REG_AX);
++				if (isneg)
++					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
++				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
++				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
++				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
++				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
++				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
++				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
+ 			}
++			if (!issrc)
++				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
++			insn->src_reg = BPF_REG_AX;
+ 			if (isneg)
+ 				insn->code = insn->code == code_add ?
+ 					     code_sub : code_add;
+ 			*patch++ = *insn;
+-			if (issrc && isneg)
++			if (issrc && isneg && !isimm)
+ 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+ 			cnt = patch - insn_buf;
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 03db40f6cba90..43ceb8dae2644 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -11829,12 +11829,12 @@ SYSCALL_DEFINE5(perf_event_open,
+ 			return err;
+ 	}
+ 
+-	err = security_locked_down(LOCKDOWN_PERF);
+-	if (err && (attr.sample_type & PERF_SAMPLE_REGS_INTR))
+-		/* REGS_INTR can leak data, lockdown must prevent this */
+-		return err;
+-
+-	err = 0;
++	/* REGS_INTR can leak data, lockdown must prevent this */
++	if (attr.sample_type & PERF_SAMPLE_REGS_INTR) {
++		err = security_locked_down(LOCKDOWN_PERF);
++		if (err)
++			return err;
++	}
+ 
+ 	/*
+ 	 * In cgroup mode, the pid argument is used to pass the fd
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index c6c0cb4656645..313d1c8ff066a 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -1060,16 +1060,10 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
+ 	nf_conntrack_standalone_init_dccp_sysctl(net, table);
+ 	nf_conntrack_standalone_init_gre_sysctl(net, table);
+ 
+-	/* Don't allow unprivileged users to alter certain sysctls */
+-	if (net->user_ns != &init_user_ns) {
++	/* Don't allow non-init_net ns to alter global sysctls */
++	if (!net_eq(&init_net, net)) {
+ 		table[NF_SYSCTL_CT_MAX].mode = 0444;
+ 		table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
+-		table[NF_SYSCTL_CT_HELPER].mode = 0444;
+-#ifdef CONFIG_NF_CONNTRACK_EVENTS
+-		table[NF_SYSCTL_CT_EVENTS].mode = 0444;
+-#endif
+-		table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
+-	} else if (!net_eq(&init_net, net)) {
+ 		table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
+ 	}
+ 
+diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
+index 2bf2b1943e61b..fa611678af052 100644
+--- a/net/qrtr/mhi.c
++++ b/net/qrtr/mhi.c
+@@ -50,6 +50,9 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+ 	struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
+ 	int rc;
+ 
++	if (skb->sk)
++		sock_hold(skb->sk);
++
+ 	rc = skb_linearize(skb);
+ 	if (rc)
+ 		goto free_skb;
+@@ -59,12 +62,11 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+ 	if (rc)
+ 		goto free_skb;
+ 
+-	if (skb->sk)
+-		sock_hold(skb->sk);
+-
+ 	return rc;
+ 
+ free_skb:
++	if (skb->sk)
++		sock_put(skb->sk);
+ 	kfree_skb(skb);
+ 
+ 	return rc;
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 102d53515a76f..933586a895e7a 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1442,11 +1442,11 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
+ 	if (snd_BUG_ON(!atomic_read(&ep->running)))
+ 		return;
+ 
+-	if (ep->sync_source)
+-		WRITE_ONCE(ep->sync_source->sync_sink, NULL);
+-
+-	if (!atomic_dec_return(&ep->running))
++	if (!atomic_dec_return(&ep->running)) {
++		if (ep->sync_source)
++			WRITE_ONCE(ep->sync_source->sync_sink, NULL);
+ 		stop_urbs(ep, false);
++	}
+ }
+ 
+ /**
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 1165a5ac60f22..48facd2626585 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2376,6 +2376,16 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ 
++{
++	USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		.vendor_name = "KORG, Inc.",
++		/* .product_name = "ToneLab EX", */
++		.ifnum = 3,
++		.type = QUIRK_MIDI_STANDARD_INTERFACE,
++	}
++},
++
+ /* AKAI devices */
+ {
+ 	USB_DEVICE(0x09e8, 0x0062),
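
A detail worth noting in the ax88179_178a.c hunks above: the stack buffers
handed to a USB control read are now zero-initialized, so a failed or short
transfer can no longer propagate uninitialized stack bytes to the caller. A
standalone sketch of the idea, with read_cmd() as a purely illustrative
stand-in for __ax88179_read_cmd():

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a transfer that can fail before filling buf. */
static int read_cmd(void *buf, unsigned int size)
{
	(void)buf;
	(void)size;
	return -1;	/* simulate a failed USB control transfer */
}

static int read_u16(uint16_t *out)
{
	uint16_t buf = 0;	/* zero-init, as in the ax88179 fix */
	int ret = read_cmd(&buf, sizeof(buf));

	*out = buf;	/* on failure the caller sees 0, not stack garbage */
	return ret;
}

int main(void)
{
	uint16_t val;

	if (read_u16(&val) < 0)
		printf("read failed, val=%u (defined, not garbage)\n",
		       (unsigned int)val);
	return 0;
}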


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-05-02 16:05 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-05-02 16:05 UTC (permalink / raw
  To: gentoo-commits

commit:     ac6dac81eca1361cb1a545ae72eedd8bc4dde1b0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May  2 16:05:10 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May  2 16:05:10 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ac6dac81

Linux patch 5.12.1

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1000_linux-5.12.1.patch | 209 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 213 insertions(+)

diff --git a/0000_README b/0000_README
index b249357..113ce8a 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-5.12.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-5.12.1.patch b/1000_linux-5.12.1.patch
new file mode 100644
index 0000000..b4a5f96
--- /dev/null
+++ b/1000_linux-5.12.1.patch
@@ -0,0 +1,209 @@
+diff --git a/Makefile b/Makefile
+index 3a10a8e08b6d5..78b0941f0de40 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 14be76d4c2e61..cb34925e10f15 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -105,6 +105,7 @@
+ 
+ #define MEI_DEV_ID_ADP_S      0x7AE8  /* Alder Lake Point S */
+ #define MEI_DEV_ID_ADP_LP     0x7A60  /* Alder Lake Point LP */
++#define MEI_DEV_ID_ADP_P      0x51E0  /* Alder Lake Point P */
+ 
+ /*
+  * MEI HW Section
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index a7e179626b635..c3393b383e598 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -111,6 +111,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
++	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
+ 
+ 	/* required last entry */
+ 	{0, }
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 9bc58e64b5b7a..3ef4b2841402c 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
+ 			cancel_work_sync(&serial_table[i]->async_put_intf);
+ 			cancel_work_sync(&serial_table[i]->async_get_intf);
+ 			hso_serial_tty_unregister(serial);
+-			kref_put(&serial_table[i]->ref, hso_serial_ref_free);
++			kref_put(&serial->parent->ref, hso_serial_ref_free);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+index 4456abb9a0742..34bde8c873244 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+@@ -40,6 +40,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+ 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+ 	struct iwl_tfh_tfd *tfd;
++	unsigned long flags;
+ 
+ 	copy_size = sizeof(struct iwl_cmd_header_wide);
+ 	cmd_size = sizeof(struct iwl_cmd_header_wide);
+@@ -108,14 +109,14 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ 		goto free_dup_buf;
+ 	}
+ 
+-	spin_lock_bh(&txq->lock);
++	spin_lock_irqsave(&txq->lock, flags);
+ 
+ 	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ 	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
+ 	memset(tfd, 0, sizeof(*tfd));
+ 
+ 	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+-		spin_unlock_bh(&txq->lock);
++		spin_unlock_irqrestore(&txq->lock, flags);
+ 
+ 		IWL_ERR(trans, "No space in command queue\n");
+ 		iwl_op_mode_cmd_queue_full(trans->op_mode);
+@@ -250,7 +251,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ 	spin_unlock(&trans_pcie->reg_lock);
+ 
+ out:
+-	spin_unlock_bh(&txq->lock);
++	spin_unlock_irqrestore(&txq->lock, flags);
+ free_dup_buf:
+ 	if (idx < 0)
+ 		kfree(dup_buf);
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 3fda1ec961d7d..f5886c512fec1 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1634,12 +1634,13 @@ static int acm_resume(struct usb_interface *intf)
+ 	struct urb *urb;
+ 	int rv = 0;
+ 
+-	acm_unpoison_urbs(acm);
+ 	spin_lock_irq(&acm->write_lock);
+ 
+ 	if (--acm->susp_count)
+ 		goto out;
+ 
++	acm_unpoison_urbs(acm);
++
+ 	if (tty_port_initialized(&acm->port)) {
+ 		rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
+ 
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index a2785379df6e5..589ee5a69a2e5 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -332,14 +332,29 @@ static void cfg80211_event_work(struct work_struct *work)
+ void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
+ {
+ 	struct wireless_dev *wdev, *tmp;
++	bool found = false;
+ 
+ 	ASSERT_RTNL();
+-	lockdep_assert_wiphy(&rdev->wiphy);
+ 
++	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
++		if (wdev->nl_owner_dead) {
++			if (wdev->netdev)
++				dev_close(wdev->netdev);
++			found = true;
++		}
++	}
++
++	if (!found)
++		return;
++
++	wiphy_lock(&rdev->wiphy);
+ 	list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
+-		if (wdev->nl_owner_dead)
++		if (wdev->nl_owner_dead) {
++			cfg80211_leave(rdev, wdev);
+ 			rdev_del_virtual_intf(rdev, wdev);
++		}
+ 	}
++	wiphy_unlock(&rdev->wiphy);
+ }
+ 
+ static void cfg80211_destroy_iface_wk(struct work_struct *work)
+@@ -350,9 +365,7 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work)
+ 			    destroy_work);
+ 
+ 	rtnl_lock();
+-	wiphy_lock(&rdev->wiphy);
+ 	cfg80211_destroy_ifaces(rdev);
+-	wiphy_unlock(&rdev->wiphy);
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index b1df42e4f1eb9..a5224da638328 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3929,7 +3929,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
+ 	return err;
+ }
+ 
+-static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
++static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
+ {
+ 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ 	struct vif_params params;
+@@ -3938,9 +3938,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
+ 	int err;
+ 	enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
+ 
+-	/* to avoid failing a new interface creation due to pending removal */
+-	cfg80211_destroy_ifaces(rdev);
+-
+ 	memset(&params, 0, sizeof(params));
+ 
+ 	if (!info->attrs[NL80211_ATTR_IFNAME])
+@@ -4028,6 +4025,21 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
+ 	return genlmsg_reply(msg, info);
+ }
+ 
++static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
++{
++	struct cfg80211_registered_device *rdev = info->user_ptr[0];
++	int ret;
++
++	/* to avoid failing a new interface creation due to pending removal */
++	cfg80211_destroy_ifaces(rdev);
++
++	wiphy_lock(&rdev->wiphy);
++	ret = _nl80211_new_interface(skb, info);
++	wiphy_unlock(&rdev->wiphy);
++
++	return ret;
++}
++
+ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
+ {
+ 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+@@ -15040,7 +15052,9 @@ static const struct genl_small_ops nl80211_small_ops[] = {
+ 		.doit = nl80211_new_interface,
+ 		.flags = GENL_UNS_ADMIN_PERM,
+ 		.internal_flags = NL80211_FLAG_NEED_WIPHY |
+-				  NL80211_FLAG_NEED_RTNL,
++				  NL80211_FLAG_NEED_RTNL |
++				  /* we take the wiphy mutex later ourselves */
++				  NL80211_FLAG_NO_WIPHY_MTX,
+ 	},
+ 	{
+ 		.cmd = NL80211_CMD_DEL_INTERFACE,
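
The nl80211 change above is a lock-wrapper refactor: the handler body moves
into _nl80211_new_interface() and the entry point takes wiphy_lock() around
it, so interface creation runs entirely under the wiphy mutex, while the new
NL80211_FLAG_NO_WIPHY_MTX flag tells the generic netlink glue not to take that
mutex a second time. The same shape as a standalone userspace sketch, with a
pthread mutex standing in for the wiphy mutex (names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wiphy_mutex = PTHREAD_MUTEX_INITIALIZER;
static int ifaces_created;

/* Body of the operation; assumes the caller already holds wiphy_mutex. */
static int _new_interface(const char *name)
{
	ifaces_created++;
	printf("created %s (%d total)\n", name, ifaces_created);
	return 0;
}

/* Entry point: wraps the whole body in the lock, as the patch does. */
static int new_interface(const char *name)
{
	int ret;

	pthread_mutex_lock(&wiphy_mutex);
	ret = _new_interface(name);
	pthread_mutex_unlock(&wiphy_mutex);
	return ret;
}

int main(void)
{
	return new_interface("wlan0");
}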


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-04-30 18:53 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-04-30 18:53 UTC (permalink / raw
  To: gentoo-commits

commit:     41c96f4a008ca0c9fcf5c12c877a77b7108ee903
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 30 18:53:01 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 30 18:53:01 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=41c96f4a

Rename cpu opt patch to standardize on naming format

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                                           | 4 ++--
 ...s-for-gcc10.patch => 5010_enable-cpu-optimizations-universal.patch | 0
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/0000_README b/0000_README
index bbb058a..b249357 100644
--- a/0000_README
+++ b/0000_README
@@ -63,9 +63,9 @@ Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
 
-Patch:  5013_enable-cpu-optimizations-for-gcc10.patch
+Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
-Desc:   Kernel patch enables gcc = v10.1+ optimizations for additional CPUs.
+Desc:   Kernel >= 5.8 patch enables gcc = v9+ optimizations for additional CPUs.
 
 Patch:  5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
 From:   https://gitlab.com/alfredchen/linux-prjc

diff --git a/5013_enable-cpu-optimizations-for-gcc10.patch b/5010_enable-cpu-optimizations-universal.patch
similarity index 100%
rename from 5013_enable-cpu-optimizations-for-gcc10.patch
rename to 5010_enable-cpu-optimizations-universal.patch


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-04-27 11:53 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-04-27 11:53 UTC (permalink / raw
  To: gentoo-commits

commit:     0e42ec88e771cb237508891316a7db9d70cf0ea5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Apr 27 11:52:32 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Apr 27 11:52:32 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0e42ec88

Add BMQ Patch 5.12-r0, default ALT SCHED=n

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                     |    8 +
 5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch    | 9805 +++++++++++++++++++++++
 5021_BMQ-and-PDS-gentoo-defaults-v5.12-r0.patch |   13 +
 3 files changed, 9826 insertions(+)

diff --git a/0000_README b/0000_README
index e856f62..bbb058a 100644
--- a/0000_README
+++ b/0000_README
@@ -66,3 +66,11 @@ Desc:   Add Gentoo Linux support config settings and defaults.
 Patch:  5013_enable-cpu-optimizations-for-gcc10.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc = v10.1+ optimizations for additional CPUs.
+
+Patch:  5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
+From:   https://gitlab.com/alfredchen/linux-prjc
+Desc:   BMQ (BitMap Queue) Scheduler. A new CPU scheduler developed from PDS (included). Inspired by the scheduler in Zircon.
+
+Patch:  5021_BMQ-and-PDS-gentoo-defaults-v5.12-r0.patch
+From:   https://gitweb.gentoo.org/proj/linux-patches.git/
+Desc:   Set defaults for BMQ. Add archs as people test, default to N

diff --git a/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
new file mode 100644
index 0000000..81f4c55
--- /dev/null
+++ b/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
@@ -0,0 +1,9805 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 04545725f187..d560166b0cf1 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4725,6 +4725,12 @@
+ 
+ 	sbni=		[NET] Granch SBNI12 leased line adapter
+ 
++	sched_timeslice=
++			[KNL] Time slice in us for BMQ/PDS scheduler.
++			Format: <int> (must be >= 1000)
++			Default: 4000
++			See Documentation/scheduler/sched-BMQ.txt
++
+ 	sched_debug	[KNL] Enables verbose scheduler debug messages.
+ 
+ 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 1d56a6b73a4e..e08ffb857277 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1515,3 +1515,13 @@ is 10 seconds.
+ 
+ The softlockup threshold is (``2 * watchdog_thresh``). Setting this
+ tunable to zero will disable lockup detection altogether.
++
++yield_type:
++===========
++
++BMQ/PDS CPU scheduler only. This determines what type of yield a call
++to sched_yield() will perform.
++
++  0 - No yield.
++  1 - Deboost and requeue task. (default)
++  2 - Set run queue skip task.
+diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+new file mode 100644
+index 000000000000..05c84eec0f31
+--- /dev/null
++++ b/Documentation/scheduler/sched-BMQ.txt
+@@ -0,0 +1,110 @@
++                         BitMap queue CPU Scheduler
++                         --------------------------
++
++CONTENT
++========
++
++ Background
++ Design
++   Overview
++   Task policy
++   Priority management
++   BitMap Queue
++   CPU Assignment and Migration
++
++
++Background
++==========
++
++BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
++of the previous Priority and Deadline based Skiplist multiple queue scheduler
++(PDS), and is inspired by the Zircon scheduler. Its goal is to keep the
++scheduler code simple, while remaining efficient and scalable for interactive
++tasks such as desktop use, movie playback and gaming.
++
++Design
++======
++
++Overview
++--------
++
++BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
++queue and is responsible for scheduling the tasks that are put into that
++run queue.
++
++The run queue is a set of priority queues. Note that, as data structures,
++these queues are FIFO queues for non-rt tasks and priority queues for rt
++tasks; see BitMap Queue below for details. BMQ is optimized for non-rt tasks,
++since most applications are non-rt tasks. Whether the queue is FIFO or
++priority based, each queue holds an ordered list of runnable tasks awaiting
++execution, and the data structures are the same. When it is time for a new
++task to run, the scheduler simply looks at the lowest numbered queue that
++contains a task and runs the first task from the head of that queue. The per
++CPU idle task is also in the run queue, so the scheduler can always find a
++task to run from its run queue.
++
++Each task is assigned the same time slice (default 4ms) when it is picked to
++start running. A task is reinserted at the end of the appropriate priority
++queue when it has used up its whole time slice. When the scheduler selects a
++new task from the priority queue, it sets the CPU's preemption timer for the
++remainder of the previous time slice. When that timer fires, the scheduler
++stops execution of that task, selects another task and starts over again.
++
++If a task blocks waiting for a shared resource, it is taken out of its
++priority queue and placed in a wait queue for the shared resource. When it
++is unblocked, it is reinserted into the appropriate priority queue of an
++eligible CPU.
++
++Task policy
++-----------
++
++BMQ supports DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies like
++the mainline CFS scheduler, but it is heavily optimized for non-rt tasks,
++that is, NORMAL/BATCH/IDLE policy tasks. Below are the implementation
++details of each policy.
++
++DEADLINE
++	It is squashed into a priority 0 FIFO task.
++
++FIFO/RR
++	All RT tasks share one single priority queue in the BMQ run queue
++design. The complexity of the insert operation is O(n). BMQ is not designed
++for systems that mostly run rt policy tasks.
++
++NORMAL/BATCH/IDLE
++	BATCH and IDLE tasks are treated as the same policy. They compete for
++CPU with NORMAL policy tasks, but they never receive a priority boost. To
++control the priority of NORMAL/BATCH/IDLE tasks, simply use nice levels.
++
++ISO
++	ISO policy is not supported in BMQ. Please use a nice level -20
++NORMAL policy task instead, as sketched below.
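A hypothetical userland sketch of that replacement follows; the helper name
and error handling are illustrative only, not part of the patch:

	#include <sched.h>
	#include <sys/resource.h>
	#include <sys/types.h>

	/* Approximate ISO under BMQ: a nice -20 SCHED_NORMAL (SCHED_OTHER) task. */
	static int make_pseudo_iso(pid_t pid)
	{
		struct sched_param sp = { .sched_priority = 0 };

		if (sched_setscheduler(pid, SCHED_OTHER, &sp) < 0)
			return -1;
		return setpriority(PRIO_PROCESS, pid, -20);
	}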
++
++Priority management
++-------------------
++
++RT tasks have priorities from 0 to 99. For non-rt tasks, two different
++factors are used to determine the effective priority of a task; the
++effective priority determines which queue the task will be in.
++
++The first factor is simply the task's static priority, which is assigned
++from the task's nice level: [-20, 19] from userland's point of view and
++[0, 39] internally.
++
++The second factor is the priority boost. This is a value bounded between
++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority; it
++is modified in the following cases:
++
++* When a thread has used up its entire time slice, it is always deboosted by
++increasing its boost value by one.
++* When a thread gives up CPU control (voluntarily or involuntarily) to
++reschedule, and its switch-in time (the time it has run since it was last
++switched in) is below the threshold based on its priority boost, it is
++boosted by decreasing its boost value by one, capped at 0 (it won't go
++negative).
++
++The intent of this system is to ensure that interactive threads are serviced
++quickly. These are usually the threads that interact directly with the user
++and cause user-perceivable latency. These threads usually do little work and
++spend most of their time blocked awaiting another user event, so they get the
++priority boost from unblocking, while background threads that do most of the
++processing receive the priority penalty for using their entire time slice.
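To make the two boost rules concrete, here is a minimal sketch of the
mechanism described above; the helper names and the threshold parameter are
illustrative assumptions, not the patch's actual code:

	/* Illustrative sketch only: a smaller effective priority runs sooner. */
	#define MAX_PRIORITY_ADJ	7

	struct bmq_prio {
		int static_prio;	/* [0, 39], from the nice level */
		int boost_prio;		/* [-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] */
	};

	/* Rule 1: the whole time slice was used up -> deboost by one. */
	static void deboost_on_slice_expiry(struct bmq_prio *p)
	{
		if (p->boost_prio < MAX_PRIORITY_ADJ)
			p->boost_prio++;
	}

	/* Rule 2: the CPU was given up after running for less than the
	 * threshold -> boost by one, capped at 0 as described above. */
	static void boost_on_early_switch(struct bmq_prio *p,
					  unsigned long long ran_ns,
					  unsigned long long threshold_ns)
	{
		if (ran_ns < threshold_ns && p->boost_prio > 0)
			p->boost_prio--;
	}

	/* The queue a task lands in is its static priority offset by the boost. */
	static int bmq_effective_prio(const struct bmq_prio *p)
	{
		return p->static_prio + p->boost_prio;
	}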
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 3851bfcdba56..732636ac3fd3 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -476,7 +476,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ 		seq_puts(m, "0 0 0\n");
+ 	else
+ 		seq_printf(m, "%llu %llu %lu\n",
+-		   (unsigned long long)task->se.sum_exec_runtime,
++		   (unsigned long long)tsk_seruntime(task),
+ 		   (unsigned long long)task->sched_info.run_delay,
+ 		   task->sched_info.pcount);
+ 
+diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
+index 8874f681b056..59eb72bf7d5f 100644
+--- a/include/asm-generic/resource.h
++++ b/include/asm-generic/resource.h
+@@ -23,7 +23,7 @@
+ 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
+ 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
+-	[RLIMIT_NICE]		= { 0, 0 },				\
++	[RLIMIT_NICE]		= { 30, 30 },				\
+ 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+ 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index ef00bb22164c..2290806d8af2 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -35,6 +35,7 @@
+ #include <linux/rseq.h>
+ #include <linux/seqlock.h>
+ #include <linux/kcsan.h>
++#include <linux/skip_list.h>
+ #include <asm/kmap_size.h>
+ 
+ /* task_struct member predeclarations (sorted alphabetically): */
+@@ -670,12 +671,18 @@ struct task_struct {
+ 	unsigned int			ptrace;
+ 
+ #ifdef CONFIG_SMP
+-	int				on_cpu;
+ 	struct __call_single_node	wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
++	int				on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ 	/* Current CPU: */
+ 	unsigned int			cpu;
+ #endif
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int			wakee_flips;
+ 	unsigned long			wakee_flip_decay_ts;
+ 	struct task_struct		*last_wakee;
+@@ -689,6 +696,7 @@ struct task_struct {
+ 	 */
+ 	int				recent_used_cpu;
+ 	int				wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ 	int				on_rq;
+ 
+@@ -697,13 +705,33 @@ struct task_struct {
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
+ 
++#ifdef CONFIG_SCHED_ALT
++	u64				last_ran;
++	s64				time_slice;
++#ifdef CONFIG_SCHED_BMQ
++	int				boost_prio;
++	int				bmq_idx;
++	struct list_head		bmq_node;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++	u64				deadline;
++	u64				priodl;
++	/* skip list level */
++	int				sl_level;
++	/* skip list node */
++	struct skiplist_node		sl_node;
++#endif /* CONFIG_SCHED_PDS */
++	/* sched_clock time spent running */
++	u64				sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ 	const struct sched_class	*sched_class;
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
++	struct sched_dl_entity		dl;
++#endif
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+ #endif
+-	struct sched_dl_entity		dl;
+ 
+ #ifdef CONFIG_UCLAMP_TASK
+ 	/*
+@@ -1388,6 +1416,15 @@ struct task_struct {
+ 	 */
+ };
+ 
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t)		((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t)		(0UL)
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ 	return task->thread_pid;
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index 1aff00b65f3c..179d77c8360e 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -1,5 +1,24 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ 
++#ifdef CONFIG_SCHED_ALT
++
++static inline int dl_task(struct task_struct *p)
++{
++	return 0;
++}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p)	(0UL)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define __tsk_deadline(p)	((p)->priodl)
++#endif
++
++#else
++
++#define __tsk_deadline(p)	((p)->dl.deadline)
++
+ /*
+  * SCHED_DEADLINE tasks has negative priorities, reflecting
+  * the fact that any of them has higher prio than RT and
+@@ -19,6 +38,7 @@ static inline int dl_task(struct task_struct *p)
+ {
+ 	return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index ab83d85e1183..4d4f92bffeea 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -18,6 +18,14 @@
+ #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
+ 
++/* +/- priority levels from the base priority */
++#ifdef CONFIG_SCHED_BMQ
++#define MAX_PRIORITY_ADJ	7
++#endif
++#ifdef CONFIG_SCHED_PDS
++#define MAX_PRIORITY_ADJ	0
++#endif
++
+ /*
+  * Convert user-nice values [ -20 ... 0 ... 19 ]
+  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index e5af028c08b4..0a7565d0d3cf 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+ 
+ 	if (policy == SCHED_FIFO || policy == SCHED_RR)
+ 		return true;
++#ifndef CONFIG_SCHED_ALT
+ 	if (policy == SCHED_DEADLINE)
+ 		return true;
++#endif
+ 	return false;
+ }
+ 
+diff --git a/include/linux/skip_list.h b/include/linux/skip_list.h
+new file mode 100644
+index 000000000000..637c83ecbd6b
+--- /dev/null
++++ b/include/linux/skip_list.h
+@@ -0,0 +1,175 @@
++/*
++ * Copyright (C) 2016 Alfred Chen.
++ *
++ * Code based on Con Kolivas's skip list implementation for BFS, and
++ * which is based on example originally by William Pugh.
++ *
++ * Skip Lists are a probabilistic alternative to balanced trees, as
++ * described in the June 1990 issue of CACM and were invented by
++ * William Pugh in 1987.
++ *
++ * A couple of comments about this implementation:
++ *
++ * This file only provides the infrastructure of a skip list.
++ *
++ * skiplist_node is embedded into the container data structure, to get rid
++ * of the dependency on kmalloc/kfree operations in scheduler code.
++ *
++ * A customized search function should be defined using the
++ * DEFINE_SKIPLIST_INSERT_FUNC macro and be used for the skip list insert
++ * operation.
++ *
++ * The random level is likewise not defined in this file; instead, it should
++ * be implemented by the caller, set in node->level and then passed to the
++ * customized skiplist insert function.
++ *
++ * Levels start at zero and go up to (NUM_SKIPLIST_LEVEL - 1).
++ *
++ * NUM_SKIPLIST_LEVEL in this implementation is 8 instead of the original 16,
++ * considering that there will be 256 entries to enable the top level when
++ * using random level p=0.5, and that number is more than enough for run
++ * queue usage in a scheduler. It also helps to reduce the memory usage of
++ * the embedded skip list node in task_struct by about 50%.
++ *
++ * The insertion routine has been implemented so as to use the
++ * dirty hack described in the CACM paper: if a random level is
++ * generated that is more than the current maximum level, the
++ * current maximum level plus one is used instead.
++ *
++ * BFS Notes: In this implementation of skiplists, there are bidirectional
++ * next/prev pointers, and the insert function returns a pointer to the
++ * actual node where the value is stored. The key here is chosen by the
++ * scheduler so as to sort tasks according to the priority list requirements
++ * and is no longer used by the scheduler after insertion. The scheduler
++ * lookup, however, occurs in
++ * O(1) time because it is always the first item in the level 0 linked list.
++ * Since the task struct stores a copy of the node pointer upon skiplist_insert,
++ * it can also remove it much faster than the original implementation with the
++ * aid of prev<->next pointer manipulation and no searching.
++ */
++#ifndef _LINUX_SKIP_LIST_H
++#define _LINUX_SKIP_LIST_H
++
++#include <linux/kernel.h>
++
++#define NUM_SKIPLIST_LEVEL (4)
++
++struct skiplist_node {
++	int level;	/* Levels in this node */
++	struct skiplist_node *next[NUM_SKIPLIST_LEVEL];
++	struct skiplist_node *prev[NUM_SKIPLIST_LEVEL];
++};
++
++#define SKIPLIST_NODE_INIT(name) { 0,\
++				   {&name, &name, &name, &name},\
++				   {&name, &name, &name, &name},\
++				 }
++
++/**
++ * INIT_SKIPLIST_NODE -- initialize a skiplist_node, especially for a header
++ * @node: the skip list node to be initialized.
++ */
++static inline void INIT_SKIPLIST_NODE(struct skiplist_node *node)
++{
++	int i;
++
++	node->level = 0;
++	for (i = 0; i < NUM_SKIPLIST_LEVEL; i++) {
++		WRITE_ONCE(node->next[i], node);
++		node->prev[i] = node;
++	}
++}
++
++/**
++ * skiplist_entry - get the struct for this entry
++ * @ptr: the &struct skiplist_node pointer.
++ * @type:       the type of the struct this is embedded in.
++ * @member:     the name of the skiplist_node within the struct.
++ */
++#define skiplist_entry(ptr, type, member) \
++	container_of(ptr, type, member)
++
++/**
++ * DEFINE_SKIPLIST_INSERT_FUNC -- macro to define a customized skip list insert
++ * function, which takes two parameters, first one is the header node of the
++ * skip list, second one is the skip list node to be inserted
++ * @func_name: the customized skip list insert function name
++ * @search_func: the search function to be used, which takes two parameters,
++ * the 1st one is the iterator of skiplist_node in the list, the 2nd is the
++ * skip list node to be inserted; the function should return true if the
++ * search should be continued, otherwise false.
++ * Returns 1 if @node is inserted as the first item of the skip list at
++ * level zero, otherwise 0.
++ */
++#define DEFINE_SKIPLIST_INSERT_FUNC(func_name, search_func)\
++static inline int func_name(struct skiplist_node *head, struct skiplist_node *node)\
++{\
++	struct skiplist_node *p, *q;\
++	unsigned int k = head->level;\
++	unsigned int l = node->level;\
++\
++	p = head;\
++	if (l > k) {\
++		l = node->level = ++head->level;\
++\
++		node->next[l] = head;\
++		node->prev[l] = head;\
++		head->next[l] = node;\
++		head->prev[l] = node;\
++\
++		do {\
++			while (q = p->next[k], q != head && search_func(q, node))\
++				p = q;\
++\
++			node->prev[k] = p;\
++			node->next[k] = q;\
++			q->prev[k] = node;\
++			p->next[k] = node;\
++		} while (k--);\
++\
++		return (p == head);\
++	}\
++\
++	while (k > l) {\
++		while (q = p->next[k], q != head && search_func(q, node))\
++			p = q;\
++		k--;\
++	}\
++\
++	do {\
++		while (q = p->next[k], q != head && search_func(q, node))\
++			p = q;\
++\
++		node->prev[k] = p;\
++		node->next[k] = q;\
++		q->prev[k] = node;\
++		p->next[k] = node;\
++	} while (k--);\
++\
++	return (p == head);\
++}
++
++/**
++ * skiplist_del_init -- delete a skip list node from a skip list and reset
++ * its init state
++ * @head: the header node of the skip list to be deleted from.
++ * @node: the skip list node to be deleted; the caller needs to ensure @node
++ * is in the skip list which @head represents.
++ * Returns 1 if @node is the first item of the skip list at level zero,
++ * otherwise 0.
++ */
++static inline int
++skiplist_del_init(struct skiplist_node *head, struct skiplist_node *node)
++{
++	unsigned int i, level = node->level;
++
++	for (i = 0; i <= level; i++) {
++		node->prev[i]->next[i] = node->next[i];
++		node->next[i]->prev[i] = node->prev[i];
++	}
++	if (level == head->level && level) {
++		while (head->next[level] == head && level)
++			level--;
++		head->level = level;
++	}
++
++	return (node->prev[0] == head);
++}
++#endif /* _LINUX_SKIP_LIST_H */
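As a usage illustration (hypothetical container and key, not part of the
patch): a caller embeds a skiplist_node, supplies a search function, and lets
the macro generate the insert function:

	#include <linux/random.h>
	#include <linux/skip_list.h>

	struct my_item {
		u64 key;
		struct skiplist_node sl_node;
	};

	/* Keep searching while the iterator's key has not passed the new key. */
	static inline bool
	my_item_search(struct skiplist_node *it, struct skiplist_node *node)
	{
		return skiplist_entry(it, struct my_item, sl_node)->key <=
			skiplist_entry(node, struct my_item, sl_node)->key;
	}

	DEFINE_SKIPLIST_INSERT_FUNC(my_skiplist_insert, my_item_search)

	/* Level generator for illustration: coin flips give p = 0.5. */
	static int my_random_level(void)
	{
		int level = 0;

		while (level < NUM_SKIPLIST_LEVEL - 1 && (get_random_u32() & 1))
			level++;
		return level;
	}

	/* Init the node, pick a level, then call the generated insert. */
	static void my_add(struct skiplist_node *head, struct my_item *item)
	{
		INIT_SKIPLIST_NODE(&item->sl_node);
		item->sl_node.level = my_random_level();
		my_skiplist_insert(head, &item->sl_node);
	}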
+diff --git a/init/Kconfig b/init/Kconfig
+index 5f5c776ef192..2529408ce0b5 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -779,9 +779,39 @@ config GENERIC_SCHED_CLOCK
+ 
+ menu "Scheduler features"
+ 
++menuconfig SCHED_ALT
++	bool "Alternative CPU Schedulers"
++	default y
++	help
++	  This feature enables the alternative CPU schedulers.
++
++if SCHED_ALT
++
++choice
++	prompt "Alternative CPU Scheduler"
++	default SCHED_BMQ
++
++config SCHED_BMQ
++	bool "BMQ CPU scheduler"
++	help
++	  The BitMap Queue CPU scheduler for excellent interactivity and
++	  responsiveness on the desktop and solid scalability on normal
++	  hardware and commodity servers.
++
++config SCHED_PDS
++	bool "PDS CPU scheduler"
++	help
++	  The Priority and Deadline based Skip list multiple queue CPU
++	  Scheduler.
++
++endchoice
++
++endif
++
+ config UCLAMP_TASK
+ 	bool "Enable utilization clamping for RT/FAIR tasks"
+ 	depends on CPU_FREQ_GOV_SCHEDUTIL
++	depends on !SCHED_ALT
+ 	help
+ 	  This feature enables the scheduler to track the clamped utilization
+ 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
+@@ -867,6 +897,7 @@ config NUMA_BALANCING
+ 	depends on ARCH_SUPPORTS_NUMA_BALANCING
+ 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ 	depends on SMP && NUMA && MIGRATION
++	depends on !SCHED_ALT
+ 	help
+ 	  This option adds support for automatic NUMA aware memory/task placement.
+ 	  The mechanism is quite primitive and is based on migrating memory when
+@@ -959,6 +990,7 @@ config FAIR_GROUP_SCHED
+ 	depends on CGROUP_SCHED
+ 	default CGROUP_SCHED
+ 
++if !SCHED_ALT
+ config CFS_BANDWIDTH
+ 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
+ 	depends on FAIR_GROUP_SCHED
+@@ -981,6 +1013,7 @@ config RT_GROUP_SCHED
+ 	  realtime bandwidth for them.
+ 	  See Documentation/scheduler/sched-rt-group.rst for more information.
+ 
++endif #!SCHED_ALT
+ endif #CGROUP_SCHED
+ 
+ config UCLAMP_TASK_GROUP
+@@ -1210,6 +1243,7 @@ config CHECKPOINT_RESTORE
+ 
+ config SCHED_AUTOGROUP
+ 	bool "Automatic process group scheduling"
++	depends on !SCHED_ALT
+ 	select CGROUPS
+ 	select CGROUP_SCHED
+ 	select FAIR_GROUP_SCHED
+diff --git a/init/init_task.c b/init/init_task.c
+index 3711cdaafed2..47f57d2cc488 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -75,9 +75,20 @@ struct task_struct init_task
+ 	.stack		= init_stack,
+ 	.usage		= REFCOUNT_INIT(2),
+ 	.flags		= PF_KTHREAD,
++#ifdef CONFIG_SCHED_BMQ
++	.prio		= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.prio		= MAX_RT_PRIO,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= MAX_RT_PRIO,
++#else
+ 	.prio		= MAX_PRIO - 20,
+ 	.static_prio	= MAX_PRIO - 20,
+ 	.normal_prio	= MAX_PRIO - 20,
++#endif
+ 	.policy		= SCHED_NORMAL,
+ 	.cpus_ptr	= &init_task.cpus_mask,
+ 	.cpus_mask	= CPU_MASK_ALL,
+@@ -87,6 +98,19 @@ struct task_struct init_task
+ 	.restart_block	= {
+ 		.fn = do_no_restart_syscall,
+ 	},
++#ifdef CONFIG_SCHED_ALT
++#ifdef CONFIG_SCHED_BMQ
++	.boost_prio	= 0,
++	.bmq_idx	= 15,
++	.bmq_node	= LIST_HEAD_INIT(init_task.bmq_node),
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.deadline	= 0,
++	.sl_level	= 0,
++	.sl_node	= SKIPLIST_NODE_INIT(init_task.sl_node),
++#endif
++	.time_slice	= HZ,
++#else
+ 	.se		= {
+ 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
+ 	},
+@@ -94,6 +118,7 @@ struct task_struct init_task
+ 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
+ 		.time_slice	= RR_TIMESLICE,
+ 	},
++#endif
+ 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 5258b68153e0..3eb670b1bb76 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -636,7 +636,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * Helper routine for generate_sched_domains().
+  * Do cpusets a, b have overlapping effective cpus_allowed masks?
+@@ -1032,7 +1032,7 @@ static void rebuild_sched_domains_locked(void)
+ 	/* Have scheduler rebuild the domains */
+ 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
+ }
+-#else /* !CONFIG_SMP */
++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
+ static void rebuild_sched_domains_locked(void)
+ {
+ }
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index 27725754ac99..769d773c7182 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -106,7 +106,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ 	 */
+ 	t1 = tsk->sched_info.pcount;
+ 	t2 = tsk->sched_info.run_delay;
+-	t3 = tsk->se.sum_exec_runtime;
++	t3 = tsk_seruntime(tsk);
+ 
+ 	d->cpu_count += t1;
+ 
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 04029e35e69a..5ee0dc0b9175 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -122,7 +122,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 			sig->curr_target = next_thread(tsk);
+ 	}
+ 
+-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++	add_device_randomness((const void*) &tsk_seruntime(tsk),
+ 			      sizeof(unsigned long long));
+ 
+ 	/*
+@@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 	sig->inblock += task_io_get_inblock(tsk);
+ 	sig->oublock += task_io_get_oublock(tsk);
+ 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
+-	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++	sig->sum_sched_runtime += tsk_seruntime(tsk);
+ 	sig->nr_threads--;
+ 	__unhash_process(tsk, group_dead);
+ 	write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
+index f6310f848f34..4176ad070bc9 100644
+--- a/kernel/livepatch/transition.c
++++ b/kernel/livepatch/transition.c
+@@ -306,7 +306,11 @@ static bool klp_try_switch_task(struct task_struct *task)
+ 	 */
+ 	rq = task_rq_lock(task, &flags);
+ 
++#ifdef	CONFIG_SCHED_ALT
++	if (task_running(task) && task != current) {
++#else
+ 	if (task_running(rq, task) && task != current) {
++#endif
+ 		snprintf(err_buf, STACK_ERR_BUF_SIZE,
+ 			 "%s: %s:%d is running\n", __func__, task->comm,
+ 			 task->pid);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 48fff6437901..40506d5b5a2e 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -227,15 +227,19 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+  * Only use with rt_mutex_waiter_{less,equal}()
+  */
+ #define task_to_waiter(p)	\
+-	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
++	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = __tsk_deadline(p) }
+ 
+ static inline int
+ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 		     struct rt_mutex_waiter *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline < right->deadline);
++#else
+ 	if (left->prio < right->prio)
+ 		return 1;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -244,17 +248,23 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return dl_time_before(left->deadline, right->deadline);
++#endif
+ 
+ 	return 0;
++#endif
+ }
+ 
+ static inline int
+ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ 		      struct rt_mutex_waiter *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline == right->deadline);
++#else
+ 	if (left->prio != right->prio)
+ 		return 0;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -263,8 +273,10 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return left->deadline == right->deadline;
++#endif
+ 
+ 	return 1;
++#endif
+ }
+ 
+ #define __node_2_waiter(node) \
+@@ -660,7 +672,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 	 * the values of the node being removed.
+ 	 */
+ 	waiter->prio = task->prio;
+-	waiter->deadline = task->dl.deadline;
++	waiter->deadline = __tsk_deadline(task);
+ 
+ 	rt_mutex_enqueue(lock, waiter);
+ 
+@@ -933,7 +945,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ 	waiter->task = task;
+ 	waiter->lock = lock;
+ 	waiter->prio = task->prio;
+-	waiter->deadline = task->dl.deadline;
++	waiter->deadline = __tsk_deadline(task);
+ 
+ 	/* Get the top priority waiter on the lock */
+ 	if (rt_mutex_has_waiters(lock))
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 5fc9c9b70862..eb6d7d87779f 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -22,14 +22,20 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
+ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
+ endif
+ 
+-obj-y += core.o loadavg.o clock.o cputime.o
+-obj-y += idle.o fair.o rt.o deadline.o
+-obj-y += wait.o wait_bit.o swait.o completion.o
+-
+-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
++ifdef CONFIG_SCHED_ALT
++obj-y += alt_core.o alt_debug.o
++else
++obj-y += core.o
++obj-y += fair.o rt.o deadline.o
++obj-$(CONFIG_SMP) += cpudeadline.o stop_task.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
+-obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_SCHED_DEBUG) += debug.o
++endif
++obj-y += loadavg.o clock.o cputime.o
++obj-y += idle.o
++obj-y += wait.o wait_bit.o swait.o completion.o
++obj-$(CONFIG_SMP) += cpupri.o pelt.o topology.o
++obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+new file mode 100644
+index 000000000000..f69ed4d89395
+--- /dev/null
++++ b/kernel/sched/alt_core.c
+@@ -0,0 +1,7149 @@
++/*
++ *  kernel/sched/alt_core.c
++ *
++ *  Core alternative kernel scheduler code and related syscalls
++ *
++ *  Copyright (C) 1991-2002  Linus Torvalds
++ *
++ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
++ *		a whole lot of those previous things.
++ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
++ *		scheduler by Alfred Chen.
++ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
++ */
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++#undef CREATE_TRACE_POINTS
++
++#include "sched.h"
++
++#include <linux/sched/rt.h>
++
++#include <linux/context_tracking.h>
++#include <linux/compat.h>
++#include <linux/blkdev.h>
++#include <linux/delayacct.h>
++#include <linux/freezer.h>
++#include <linux/init_task.h>
++#include <linux/kprobes.h>
++#include <linux/mmu_context.h>
++#include <linux/nmi.h>
++#include <linux/profile.h>
++#include <linux/rcupdate_wait.h>
++#include <linux/security.h>
++#include <linux/syscalls.h>
++#include <linux/wait_bit.h>
++
++#include <linux/kcov.h>
++#include <linux/scs.h>
++
++#include <asm/switch_to.h>
++
++#include "../workqueue_internal.h"
++#include "../../fs/io-wq.h"
++#include "../smpboot.h"
++
++#include "pelt.h"
++#include "smp.h"
++
++/*
++ * Export tracepoints that act as a bare tracehook (ie: have no trace event
++ * associated with them) to allow external modules to probe them.
++ */
++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
++
++#define ALT_SCHED_VERSION "v5.11-r3"
++
++/* rt_prio(prio) defined in include/linux/sched/rt.h */
++#define rt_task(p)		rt_prio((p)->prio)
++#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
++#define task_has_rt_policy(p)	(rt_policy((p)->policy))
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/* Default time slice is 4ms; it can be set via the kernel parameter "sched_timeslice" */
++u64 sched_timeslice_ns __read_mostly = (4 * 1000 * 1000);
++
++static int __init sched_timeslice(char *str)
++{
++	int timeslice_us;
++
++	get_option(&str, &timeslice_us);
++	if (timeslice_us >= 1000)
++		sched_timeslice_ns = timeslice_us * 1000;
++
++	return 0;
++}
++early_param("sched_timeslice", sched_timeslice);
++
++/* Reschedule if less than this many ns (i.e. 100 us) are left */
++#define RESCHED_NS		(100 * 1000)
++
++/**
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Deboost and requeue task. (default)
++ * 2: Set rq skip task.
++ */
++int sched_yield_type __read_mostly = 1;
++
++#ifdef CONFIG_SMP
++static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_affinity_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_affinity_end_mask);
++
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++#endif
++
++/*
++ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of
++ * the domain), this allows us to quickly tell if two cpus are in the same cache
++ * domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++#endif /* CONFIG_SMP */
++
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++#define IDLE_WM	(IDLE_TASK_SCHED_PRIO)
++
++#ifdef CONFIG_SCHED_SMT
++static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
++#endif
++static cpumask_t sched_rq_watermark[SCHED_BITS] ____cacheline_aligned_in_smp;
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq_imp.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds_imp.h"
++#endif
++
++static inline void update_sched_rq_watermark(struct rq *rq)
++{
++	unsigned long watermark = sched_queue_watermark(rq);
++	unsigned long last_wm = rq->watermark;
++	unsigned long i;
++	int cpu;
++
++	/*printk(KERN_INFO "sched: watermark(%d) %d, last %d\n",
++	       cpu_of(rq), watermark, last_wm);*/
++	if (watermark == last_wm)
++		return;
++
++	rq->watermark = watermark;
++	cpu = cpu_of(rq);
++	if (watermark < last_wm) {
++		for (i = watermark + 1; i <= last_wm; i++)
++			cpumask_andnot(&sched_rq_watermark[i],
++				       &sched_rq_watermark[i], cpumask_of(cpu));
++#ifdef CONFIG_SCHED_SMT
++		if (!static_branch_likely(&sched_smt_present))
++			return;
++		if (IDLE_WM == last_wm)
++			cpumask_andnot(&sched_sg_idle_mask,
++				       &sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++		return;
++	}
++	/* last_wm < watermark */
++	for (i = last_wm + 1; i <= watermark; i++)
++		cpumask_set_cpu(cpu, &sched_rq_watermark[i]);
++#ifdef CONFIG_SCHED_SMT
++	if (!static_branch_likely(&sched_smt_present))
++		return;
++	if (IDLE_WM == watermark) {
++		cpumask_t tmp;
++		cpumask_and(&tmp, cpu_smt_mask(cpu), &sched_rq_watermark[IDLE_WM]);
++		if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++			cpumask_or(&sched_sg_idle_mask, cpu_smt_mask(cpu),
++				   &sched_sg_idle_mask);
++	}
++#endif
++}
++
++static inline struct task_struct *rq_runnable_task(struct rq *rq)
++{
++	struct task_struct *next = sched_rq_first_task(rq);
++
++	if (unlikely(next == rq->skip))
++		next = sched_rq_next_task(next, rq);
++
++	return next;
++}
++
++/*
++ * Serialization rules:
++ *
++ * Lock order:
++ *
++ *   p->pi_lock
++ *     rq->lock
++ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
++ *
++ *  rq1->lock
++ *    rq2->lock  where: rq1 < rq2
++ *
++ * Regular state:
++ *
++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
++ * local CPU's rq->lock, it optionally removes the task from the runqueue and
++ * always looks at the local rq data structures to find the most eligible task
++ * to run next.
++ *
++ * Task enqueue is also under rq->lock, possibly taken from another CPU.
++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
++ * the local CPU to avoid bouncing the runqueue state around [ see
++ * ttwu_queue_wakelist() ]
++ *
++ * Task wakeup, specifically wakeups that involve migration, are horribly
++ * complicated to avoid having to take two rq->locks.
++ *
++ * Special state:
++ *
++ * System-calls and anything external will use task_rq_lock() which acquires
++ * both p->pi_lock and rq->lock. As a consequence the state they change is
++ * stable while holding either lock:
++ *
++ *  - sched_setaffinity()/
++ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
++ *  - set_user_nice():		p->se.load, p->*prio
++ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
++ *				p->se.load, p->rt_priority,
++ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
++ *  - sched_setnuma():		p->numa_preferred_nid
++ *  - sched_move_task()/
++ *    cpu_cgroup_fork():	p->sched_task_group
++ *  - uclamp_update_active()	p->uclamp*
++ *
++ * p->state <- TASK_*:
++ *
++ *   is changed locklessly using set_current_state(), __set_current_state() or
++ *   set_special_state(), see their respective comments, or by
++ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
++ *   concurrent self.
++ *
++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
++ *
++ *   is set by activate_task() and cleared by deactivate_task(), under
++ *   rq->lock. Non-zero indicates the task is runnable, the special
++ *   ON_RQ_MIGRATING state is used for migration without holding both
++ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
++ *
++ * p->on_cpu <- { 0, 1 }:
++ *
++ *   is set by prepare_task() and cleared by finish_task() such that it will be
++ *   set before p is scheduled-in and cleared after p is scheduled-out, both
++ *   under rq->lock. Non-zero indicates the task is running on its CPU.
++ *
++ *   [ The astute reader will observe that it is possible for two tasks on one
++ *     CPU to have ->on_cpu = 1 at the same time. ]
++ *
++ * task_cpu(p): is changed by set_task_cpu(), the rules are:
++ *
++ *  - Don't call set_task_cpu() on a blocked task:
++ *
++ *    We don't care what CPU we're not running on, this simplifies hotplug,
++ *    the CPU assignment of blocked tasks isn't required to be valid.
++ *
++ *  - for try_to_wake_up(), called under p->pi_lock:
++ *
++ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
++ *
++ *  - for migration called under rq->lock:
++ *    [ see task_on_rq_migrating() in task_rq_lock() ]
++ *
++ *    o move_queued_task()
++ *    o detach_task()
++ *
++ *  - for migration called under double_rq_lock():
++ *
++ *    o __migrate_swap_task()
++ *    o push_rt_task() / pull_rt_task()
++ *    o push_dl_task() / pull_dl_task()
++ *    o dl_task_offline_migration()
++ *
++ */
++
++/*
++ * Context: p->pi_lock
++ */
++static inline struct rq
++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock(&rq->lock);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock(&rq->lock);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			*plock = NULL;
++			return rq;
++		}
++	}
++}
++
++static inline void
++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++{
++	if (NULL != lock)
++		raw_spin_unlock(lock);
++}
++
++static inline struct rq
++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
++			  unsigned long *flags)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock_irqsave(&rq->lock, *flags);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&rq->lock, *flags);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			raw_spin_lock_irqsave(&p->pi_lock, *flags);
++			if (likely(!p->on_cpu && !p->on_rq &&
++				   rq == task_rq(p))) {
++				*plock = &p->pi_lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++		}
++	}
++}
++
++static inline void
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
++			      unsigned long *flags)
++{
++	raw_spin_unlock_irqrestore(lock, *flags);
++}
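A hypothetical caller sketch for the access-lock pair above (the function and
the field being read are illustrative):

	/* Read a per-task counter while the task's state is pinned stable. */
	static u64 read_task_sched_time(struct task_struct *p)
	{
		unsigned long flags;
		raw_spinlock_t *lock;
		u64 t;

		task_access_lock_irqsave(p, &lock, &flags);
		t = p->sched_time;
		task_access_unlock_irqrestore(p, lock, &flags);

		return t;
	}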
++
++/*
++ * __task_rq_lock - lock the rq @p resides on.
++ */
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	lockdep_assert_held(&p->pi_lock);
++
++	for (;;) {
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
++			return rq;
++		raw_spin_unlock(&rq->lock);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++/*
++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
++ */
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	for (;;) {
++		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		/*
++		 *	move_queued_task()		task_rq_lock()
++		 *
++		 *	ACQUIRE (rq->lock)
++		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
++		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
++		 *	[S] ->cpu = new_cpu		[L] task_rq()
++		 *					[L] ->on_rq
++		 *	RELEASE (rq->lock)
++		 *
++		 * If we observe the old CPU in task_rq_lock(), the acquire of
++		 * the old rq->lock will fully serialize against the stores.
++		 *
++		 * If we observe the new CPU in task_rq_lock(), the address
++		 * dependency headed by '[L] rq = task_rq()' and the acquire
++		 * will pair with the WMB to ensure we then also see migrating.
++		 */
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
++			return rq;
++		}
++		raw_spin_unlock(&rq->lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++static inline void
++rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irqsave(&rq->lock, rf->flags);
++}
++
++static inline void
++rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
++}
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++	/*
++	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
++	 * this case when a previous update_rq_clock() happened inside a
++	 * {soft,}irq region.
++	 *
++	 * When this happens, we stop ->clock_task and only update the
++	 * prev_irq_time stamp to account for the part that fit, so that a next
++	 * update will consume the rest. This ensures ->clock_task is
++	 * monotonic.
++	 *
++	 * It does however cause some slight miss-attribution of {soft,}irq
++	 * time, a more accurate solution would be to update the irq_time using
++	 * the current rq->clock timestamp, except that would require using
++	 * atomic ops.
++	 */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if ((irq_delta + steal))
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta <= 0))
++		return;
++	rq->clock += delta;
++	update_rq_clock_task(rq, delta);
++}
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * Tick may be needed by tasks in the runqueue depending on their policy and
++ * requirements. If tick is needed, lets send the target an IPI to kick it out
++ * of nohz mode if necessary.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++	int cpu = cpu_of(rq);
++
++	if (!tick_nohz_full_cpu(cpu))
++		return;
++
++	if (rq->nr_running < 2)
++		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++	else
++		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++/*
++ * Add/Remove/Requeue task to/from the runqueue routines
++ * Context: rq->lock
++ */
++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task residing on cpu%d from cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_DEQUEUE_TASK(p, rq, flags, update_sched_rq_watermark(rq));
++	--rq->nr_running;
++#ifdef CONFIG_SMP
++	if (1 == rq->nr_running)
++		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task residing on cpu%d to cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_ENQUEUE_TASK(p, rq, flags);
++	update_sched_rq_watermark(rq);
++	++rq->nr_running;
++#ifdef CONFIG_SMP
++	if (2 == rq->nr_running)
++		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq)
++{
++	lockdep_assert_held(&rq->lock);
++	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task residing on cpu%d\n",
++		  cpu_of(rq), task_cpu(p));
++
++	__SCHED_REQUEUE_TASK(p, rq, update_sched_rq_watermark(rq));
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask)						\
++	({								\
++		typeof(ptr) _ptr = (ptr);				\
++		typeof(mask) _mask = (mask);				\
++		typeof(*_ptr) _old, _val = *_ptr;			\
++									\
++		for (;;) {						\
++			_old = cmpxchg(_ptr, _val, _val | _mask);	\
++			if (_old == _val)				\
++				break;					\
++			_val = _old;					\
++		}							\
++	_old;								\
++})
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	typeof(ti->flags) old, val = READ_ONCE(ti->flags);
++
++	for (;;) {
++		if (!(val & _TIF_POLLING_NRFLAG))
++			return false;
++		if (val & _TIF_NEED_RESCHED)
++			return true;
++		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
++		if (old == val)
++			break;
++		val = old;
++	}
++	return true;
++}
++
++#else
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	return true;
++}
++
++#ifdef CONFIG_SMP
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	return false;
++}
++#endif
++#endif
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	struct wake_q_node *node = &task->wake_q;
++
++	/*
++	 * Atomically grab the task, if ->wake_q is !nil already it means
++	 * it's already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++	 *
++	 * In order to ensure that a pending wakeup will observe our pending
++	 * state, even in the failed case, an explicit smp_mb() must be used.
++	 */
++	smp_mb__before_atomic();
++	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++		return false;
++
++	/*
++	 * The head is context local, there can be no concurrency.
++	 */
++	*head->lastp = node;
++	head->lastp = &node->next;
++	return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	if (__wake_q_add(head, task))
++		get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold reference to @task can call the 'safe' version and trust
++ * wake_q to do the right thing depending whether or not the @task is already
++ * queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++	if (!__wake_q_add(head, task))
++		put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++	struct wake_q_node *node = head->first;
++
++	while (node != WAKE_Q_TAIL) {
++		struct task_struct *task;
++
++		task = container_of(node, struct task_struct, wake_q);
++		BUG_ON(!task);
++		/* task can safely be re-inserted now: */
++		node = node->next;
++		task->wake_q.next = NULL;
++
++		/*
++		 * wake_up_process() executes a full barrier, which pairs with
++		 * the queueing in wake_q_add() so as not to miss wakeups.
++		 */
++		wake_up_process(task);
++		put_task_struct(task);
++	}
++}
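The wake_q API above is typically used in a collect-then-wake pattern; a
minimal sketch with a hypothetical waiter list and lock:

	struct my_waiter {
		struct list_head node;
		struct task_struct *task;
	};

	/* Collect wakeups under the lock, issue them after dropping it, so
	 * wake_up_process() is never called with the spinlock held. */
	static void wake_all_waiters(spinlock_t *lock, struct list_head *waiters)
	{
		DEFINE_WAKE_Q(wq);
		struct my_waiter *w;

		spin_lock(lock);
		list_for_each_entry(w, waiters, node)
			wake_q_add(&wq, w->task);	/* takes a task reference */
		spin_unlock(lock);

		wake_up_q(&wq);	/* wakes each task and drops the references */
	}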
++
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_curr(struct rq *rq)
++{
++	struct task_struct *curr = rq->curr;
++	int cpu;
++
++	lockdep_assert_held(&rq->lock);
++
++	if (test_tsk_need_resched(curr))
++		return;
++
++	cpu = cpu_of(rq);
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(curr);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(curr))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++void resched_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (cpu_online(cpu) || cpu == smp_processor_id())
++		resched_curr(cpu_rq(cpu));
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu) {}
++
++void select_nohz_load_balancer(int stop_tick) {}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU.  This is good for power-savings.
++ *
++ * We don't do similar optimization for completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be uptodate wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int i, cpu = smp_processor_id(), default_cpu = -1;
++	struct cpumask *mask;
++
++	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
++		if (!idle_cpu(cpu))
++			return cpu;
++		default_cpu = cpu;
++	}
++
++	for (mask = per_cpu(sched_cpu_affinity_masks, cpu) + 1;
++	     mask < per_cpu(sched_cpu_affinity_end_mask, cpu); mask++)
++		for_each_cpu_and(i, mask, housekeeping_cpumask(HK_FLAG_TIMER))
++			if (!idle_cpu(i))
++				return i;
++
++	if (default_cpu == -1)
++		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
++	cpu = default_cpu;
++
++	return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++static inline void wake_up_idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (cpu == smp_processor_id())
++		return;
++
++	if (set_nr_and_not_polling(rq->idle))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static inline bool wake_up_full_nohz_cpu(int cpu)
++{
++	/*
++	 * We just need the target to call irq_exit() and re-evaluate
++	 * the next tick. The nohz full kick at least implies that.
++	 * If needed we can still optimize that later with an
++	 * empty IRQ.
++	 */
++	if (cpu_is_offline(cpu))
++		return true;  /* Don't try to wake offline CPUs. */
++	if (tick_nohz_full_cpu(cpu)) {
++		if (cpu != smp_processor_id() ||
++		    tick_nohz_tick_stopped())
++			tick_nohz_full_kick_cpu(cpu);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	if (!wake_up_full_nohz_cpu(cpu))
++		wake_up_idle_cpu(cpu);
++}
++
++static void nohz_csd_func(void *info)
++{
++	struct rq *rq = info;
++	int cpu = cpu_of(rq);
++	unsigned int flags;
++
++	/*
++	 * Release the rq::nohz_csd.
++	 */
++	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
++	WARN_ON(!(flags & NOHZ_KICK_MASK));
++
++	rq->idle_balance = idle_cpu(cpu);
++	if (rq->idle_balance && !need_resched()) {
++		rq->nohz_idle_balance = flags;
++		raise_softirq_irqoff(SCHED_SOFTIRQ);
++	}
++}
++
++#endif /* CONFIG_NO_HZ_COMMON */
++#endif /* CONFIG_SMP */
++
++static inline void check_preempt_curr(struct rq *rq)
++{
++	if (sched_rq_first_task(rq) != rq->curr)
++		resched_curr(rq);
++}
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++	if (hrtimer_active(&rq->hrtick_timer))
++		hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++	struct task_struct *p;
++
++	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++	raw_spin_lock(&rq->lock);
++	p = rq->curr;
++	p->time_slice = 0;
++	resched_curr(rq);
++	raw_spin_unlock(&rq->lock);
++
++	return HRTIMER_NORESTART;
++}
++
++/*
++ * Use hrtick when:
++ *  - enabled by features
++ *  - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	/**
++	 * Alt schedule FW doesn't support sched_feat yet
++	if (!sched_feat(HRTICK))
++		return 0;
++	*/
++	if (!cpu_active(cpu_of(rq)))
++		return 0;
++	return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++#ifdef CONFIG_SMP
++
++static void __hrtick_restart(struct rq *rq)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	ktime_t time = rq->hrtick_time;
++
++	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++	struct rq *rq = arg;
++
++	raw_spin_lock(&rq->lock);
++	__hrtick_restart(rq);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	s64 delta;
++
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense and can cause timer DoS.
++	 */
++	delta = max_t(s64, delay, 10000LL);
++
++	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
++
++	if (rq == this_rq())
++		__hrtick_restart(rq);
++	else
++		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++}
++
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense. Rely on vruntime for fairness.
++	 */
++	delay = max_t(u64, delay, 10000LL);
++	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
++		      HRTIMER_MODE_REL_PINNED_HARD);
++}
++#endif /* CONFIG_SMP */
++
++static void hrtick_rq_init(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
++#endif
++
++	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
++	rq->hrtick_timer.function = hrtick;
++}
++#else	/* CONFIG_SCHED_HRTICK */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	return 0;
++}
++
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void hrtick_rq_init(struct rq *rq)
++{
++}
++#endif	/* CONFIG_SCHED_HRTICK */
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++	p->normal_prio = normal_prio(p);
++	/*
++	 * If we are RT tasks or we were boosted to RT priority,
++	 * keep the priority unchanged. Otherwise, update priority
++	 * to the normal priority:
++	 */
++	if (!rt_prio(p->prio))
++		return p->normal_prio;
++	return p->prio;
++}
++
++/*
++ * activate_task - move a task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++	enqueue_task(p, rq, ENQUEUE_WAKEUP);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++
++	/*
++	 * If in_iowait is set, the code below may not trigger any cpufreq
++	 * utilization updates, so do it here explicitly with the IOWAIT flag
++	 * passed.
++	 */
++	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
++}
++
++/*
++ * deactivate_task - remove a task from the runqueue.
++ *
++ * Context: rq->lock
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++	dequeue_task(p, rq, DEQUEUE_SLEEP);
++	p->on_rq = 0;
++	cpufreq_update_util(rq, 0);
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
++	 * successfully executed on another CPU. We must ensure that updates of
++	 * per-task data have been completed by this moment.
++	 */
++	smp_wmb();
++
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++	WRITE_ONCE(p->cpu, cpu);
++#else
++	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
++#endif
++#endif
++}
++
++static inline bool is_migration_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++	return p->migration_disabled;
++#else
++	return false;
++#endif
++}
++
++#define SCA_CHECK		0x01
++#define SCA_MIGRATE_DISABLE	0x02
++#define SCA_MIGRATE_ENABLE	0x04
++
++#ifdef CONFIG_SMP
++
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++	/*
++	 * We should never call set_task_cpu() on a blocked task,
++	 * ttwu() will sort out the placement.
++	 */
++	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
++		     !p->on_rq);
++#ifdef CONFIG_LOCKDEP
++	/*
++	 * The caller should hold either p->pi_lock or rq->lock, when changing
++	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++	 *
++	 * sched_move_task() holds both and thus holding either pins the cgroup,
++	 * see task_group().
++	 */
++	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++				      lockdep_is_held(&task_rq(p)->lock)));
++#endif
++	/*
++	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
++	 */
++	WARN_ON_ONCE(!cpu_online(new_cpu));
++
++	WARN_ON_ONCE(is_migration_disabled(p));
++#endif
++	if (task_cpu(p) == new_cpu)
++		return;
++	trace_sched_migrate_task(p, new_cpu);
++	rseq_migrate(p);
++	perf_event_task_migrate(p);
++
++	__set_task_cpu(p, new_cpu);
++}
++
++static inline bool is_per_cpu_kthread(struct task_struct *p)
++{
++	return ((p->flags & PF_KTHREAD) && (1 == p->nr_cpus_allowed));
++}
++
++#define MDF_FORCE_ENABLED	0x80
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
++
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  const struct cpumask *new_mask,
++				  u32 flags);
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++
++	if (p->migration_disabled) {
++		p->migration_disabled++;
++		return;
++	}
++
++	preempt_disable();
++	this_rq()->nr_pinned++;
++	p->migration_disabled = 1;
++	p->migration_flags &= ~MDF_FORCE_ENABLED;
++
++	/*
++	 * Violates locking rules! see comment in __do_set_cpus_allowed().
++	 */
++	if (p->cpus_ptr == &p->cpus_mask)
++		__do_set_cpus_allowed(p, cpumask_of(smp_processor_id()), SCA_MIGRATE_DISABLE);
++
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++	if (0 == p->migration_disabled)
++		return;
++
++	if (p->migration_disabled > 1) {
++		p->migration_disabled--;
++		return;
++	}
++
++	/*
++	 * Ensure stop_task runs either before or after this, and that
++	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
++	 */
++	preempt_disable();
++	/*
++	 * Assumption: current should be running on an allowed CPU
++	 */
++	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
++	if (p->cpus_ptr != &p->cpus_mask)
++		__do_set_cpus_allowed(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
++	/*
++	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
++	 * regular cpus_mask, otherwise things that race (eg.
++	 * select_fallback_rq) get confused.
++	 */
++	barrier();
++	p->migration_disabled = 0;
++	this_rq()->nr_pinned--;
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
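++
++/*
++ * Typical usage (sketch): keep a section on the current CPU without
++ * disabling preemption:
++ *
++ *	migrate_disable();
++ *	... access per-CPU state that tolerates preemption ...
++ *	migrate_enable();
++ *
++ * The calls nest; only the outermost pair actually toggles cpus_ptr.
++ */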
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return rq->nr_pinned;
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++	/* When not in the task's cpumask, no point in looking further. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/* migrate_disabled() must be allowed to finish. */
++	if (is_migration_disabled(p))
++		return cpu_online(cpu);
++
++	/* Non-kernel threads are not allowed during either online or offline. */
++	if (!(p->flags & PF_KTHREAD))
++		return cpu_active(cpu);
++
++	/* KTHREAD_IS_PER_CPU is always allowed. */
++	if (kthread_is_per_cpu(p))
++		return cpu_online(cpu);
++
++	/* Regular kernel threads don't get to stay during offline. */
++	if (cpu_rq(cpu)->balance_push)
++		return false;
++
++	/* But are allowed during online. */
++	return cpu_online(cpu);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ *    stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ *    off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ *    it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ *    is done.
++ */
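++
++/*
++ * Sketch of the resulting call chain, as wired up by
++ * __set_cpus_allowed_ptr() below:
++ *
++ *	struct migration_arg arg = { p, dest_cpu };
++ *
++ *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++ *	  -> migration_cpu_stop()
++ *	       -> __migrate_task()
++ *	            -> move_queued_task()
++ */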
++
++/*
++ * move_queued_task - move a queued task to new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
++				   new_cpu)
++{
++	lockdep_assert_held(&rq->lock);
++
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++	dequeue_task(p, rq, 0);
++	set_task_cpu(p, new_cpu);
++	raw_spin_unlock(&rq->lock);
++
++	rq = cpu_rq(new_cpu);
++
++	raw_spin_lock(&rq->lock);
++	BUG_ON(task_cpu(p) != new_cpu);
++	enqueue_task(p, rq, 0);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++	check_preempt_curr(rq);
++
++	return rq;
++}
++
++struct migration_arg {
++	struct task_struct *task;
++	int dest_cpu;
++};
++
++/*
++ * Move (not current) task off this CPU, onto the destination CPU. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * moved it away from this CPU, or the CPU is going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ */
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
++				 dest_cpu)
++{
++	/* Affinity changed (again). */
++	if (!is_cpu_allowed(p, dest_cpu))
++		return rq;
++
++	update_rq_clock(rq);
++	return move_queued_task(rq, p, dest_cpu);
++}
++
++/*
++ * migration_cpu_stop - this will be executed by a highprio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++	struct migration_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++
++	/*
++	 * The original target CPU might have gone down and we might
++	 * be on another CPU but it doesn't matter.
++	 */
++	local_irq_disable();
++	/*
++	 * We need to explicitly wake pending tasks before running
++	 * __migrate_task() such that we will not miss enforcing cpus_ptr
++	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
++	 */
++	flush_smp_call_function_from_idle();
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++	/*
++	 * If task_rq(p) != rq, it cannot be migrated here, because we're
++	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
++	 * we're holding p->pi_lock.
++	 */
++	if (task_rq(p) == rq && task_on_rq_queued(p))
++		rq = __migrate_task(rq, p, arg->dest_cpu);
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock(&p->pi_lock);
++
++	local_irq_enable();
++	return 0;
++}
++
++static inline void
++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
++{
++	if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
++		p->cpus_ptr = new_mask;
++		return;
++	}
++
++	cpumask_copy(&p->cpus_mask, new_mask);
++	p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	if (flags & (SCA_MIGRATE_DISABLE | SCA_MIGRATE_ENABLE))
++		SCHED_WARN_ON(!p->on_cpu);
++	else
++		lockdep_assert_held(&p->pi_lock);
++
++	set_cpus_allowed_common(p, new_mask, flags);
++}
++
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	__do_set_cpus_allowed(p, new_mask, 0);
++}
++
++#endif
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++	return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * If @match_state is nonzero, it's the @p->state value just checked and
++ * not expected to change.  If it changes, i.e. @p might have woken up,
++ * then return zero.  When we succeed in waiting for @p to be off its CPU,
++ * we return a positive number (its total switch count).  If a second call
++ * a short while later returns the same number, the caller can be sure that
++ * @p has remained unscheduled the whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, long match_state)
++{
++	unsigned long flags;
++	bool running, on_rq;
++	unsigned long ncsw;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	for (;;) {
++		rq = task_rq(p);
++
++		/*
++		 * If the task is actively running on another CPU
++		 * still, just relax and busy-wait without holding
++		 * any locks.
++		 *
++		 * NOTE! Since we don't hold any locks, it's not
++		 * even sure that "rq" stays as the right runqueue!
++		 * But we don't care, since this will return false
++		 * if the runqueue has changed and p is actually now
++		 * running somewhere else!
++		 */
++		while (task_running(p) && p == rq->curr) {
++			if (match_state && unlikely(p->state != match_state))
++				return 0;
++			cpu_relax();
++		}
++
++		/*
++		 * Ok, time to look more closely! We need the rq
++		 * lock now, to be *sure*. If we're wrong, we'll
++		 * just go back and repeat.
++		 */
++		task_access_lock_irqsave(p, &lock, &flags);
++		trace_sched_wait_task(p);
++		running = task_running(p);
++		on_rq = p->on_rq;
++		ncsw = 0;
++		if (!match_state || p->state == match_state)
++			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++		task_access_unlock_irqrestore(p, lock, &flags);
++
++		/*
++		 * If it changed from the expected state, bail out now.
++		 */
++		if (unlikely(!ncsw))
++			break;
++
++		/*
++		 * Was it really running after all now that we
++		 * checked with the proper locks actually held?
++		 *
++		 * Oops. Go back and try again..
++		 */
++		if (unlikely(running)) {
++			cpu_relax();
++			continue;
++		}
++
++		/*
++		 * It's not enough that it's not actively running,
++		 * it must be off the runqueue _entirely_, and not
++		 * preempted!
++		 *
++		 * So if it was still runnable (but just not actively
++		 * running right now), it's preempted, and we should
++		 * yield - it could be a while.
++		 */
++		if (unlikely(on_rq)) {
++			ktime_t to = NSEC_PER_SEC / HZ;
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
++			continue;
++		}
++
++		/*
++		 * Ahh, all good. It wasn't running, and it wasn't
++		 * runnable, which means that it will never become
++		 * running in the future either. We're all done!
++		 */
++		break;
++	}
++
++	return ncsw;
++}
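++
++/*
++ * Typical caller pattern (sketch; cf. the ptrace attach path): call
++ * twice and compare switch counts to be sure @p stayed off the CPU:
++ *
++ *	ncsw = wait_task_inactive(p, TASK_TRACED);
++ *	... do something short ...
++ *	if (wait_task_inactive(p, TASK_TRACED) != ncsw)
++ *		... @p ran in between, retry ...
++ */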
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++	int cpu;
++
++	preempt_disable();
++	cpu = task_cpu(p);
++	if ((cpu != smp_processor_id()) && task_curr(p))
++		smp_send_reschedule(cpu);
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++
++/*
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
++ *
++ * A few notes on cpu_active vs cpu_online:
++ *
++ *  - cpu_active must be a subset of cpu_online
++ *
++ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
++ *    see __set_cpus_allowed_ptr(). At this point the newly online
++ *    CPU isn't yet part of the sched domains, and balancing will not
++ *    see it.
++ *
++ *  - on cpu-down we clear cpu_active() to mask the sched domains and
++ *    avoid the load balancer to place new tasks on the to be removed
++ *    CPU. Existing tasks will remain running there and will be taken
++ *    off.
++ *
++ * This means that fallback selection must not select !active CPUs,
++ * and can assume that any active CPU must be online. Conversely
++ * select_task_rq() below may allow selection of !active CPUs in order
++ * to satisfy the above rules.
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++	int nid = cpu_to_node(cpu);
++	const struct cpumask *nodemask = NULL;
++	enum { cpuset, possible, fail } state = cpuset;
++	int dest_cpu;
++
++	/*
++	 * If the node that the CPU is on has been offlined, cpu_to_node()
++	 * will return -1. There is no CPU on the node, and we should
++	 * select a CPU on another node.
++	 */
++	if (nid != -1) {
++		nodemask = cpumask_of_node(nid);
++
++		/* Look for allowed, online CPU in same node. */
++		for_each_cpu(dest_cpu, nodemask) {
++			if (!cpu_active(dest_cpu))
++				continue;
++			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
++				return dest_cpu;
++		}
++	}
++
++	for (;;) {
++		/* Any allowed, online CPU? */
++		for_each_cpu(dest_cpu, p->cpus_ptr) {
++			if (!is_cpu_allowed(p, dest_cpu))
++				continue;
++			goto out;
++		}
++
++		/* No more Mr. Nice Guy. */
++		switch (state) {
++		case cpuset:
++			if (IS_ENABLED(CONFIG_CPUSETS)) {
++				cpuset_cpus_allowed_fallback(p);
++				state = possible;
++				break;
++			}
++			fallthrough;
++		case possible:
++			/*
++			 * XXX When called from select_task_rq() we only
++			 * hold p->pi_lock and again violate locking order.
++			 *
++			 * More yuck to audit.
++			 */
++			do_set_cpus_allowed(p, cpu_possible_mask);
++			state = fail;
++			break;
++
++		case fail:
++			BUG();
++			break;
++		}
++	}
++
++out:
++	if (state != cpuset) {
++		/*
++		 * Don't tell them about moving exiting tasks or
++		 * kernel threads (both mm NULL), since they never
++		 * leave kernel.
++		 */
++		if (p->mm && printk_ratelimit()) {
++			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++					task_pid_nr(p), p->comm, cpu);
++		}
++	}
++
++	return dest_cpu;
++}
++
++static inline int select_task_rq(struct task_struct *p, struct rq *rq)
++{
++	cpumask_t chk_mask, tmp;
++
++	if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_online_mask)))
++		return select_fallback_rq(task_cpu(p), p);
++
++	if (
++#ifdef CONFIG_SCHED_SMT
++	    cpumask_and(&tmp, &chk_mask, &sched_sg_idle_mask) ||
++#endif
++	    cpumask_and(&tmp, &chk_mask, &sched_rq_watermark[IDLE_WM]) ||
++	    cpumask_and(&tmp, &chk_mask,
++			&sched_rq_watermark[task_sched_prio(p, rq) + 1]))
++		return best_mask_cpu(task_cpu(p), &tmp);
++
++	return best_mask_cpu(task_cpu(p), &chk_mask);
++}
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++	static struct lock_class_key stop_pi_lock;
++	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++	struct sched_param start_param = { .sched_priority = 0 };
++	struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++	if (stop) {
++		/*
++		 * Make it appear like a SCHED_FIFO task, it's something
++		 * userspace knows about and won't get confused by.
++		 *
++		 * Also, it will make PI more or less work without too
++		 * much confusion -- but then, stop work should not
++		 * rely on PI working anyway.
++		 */
++		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++
++		/*
++		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
++		 * adjust the effective priority of a task. As a result,
++		 * rt_mutex_setprio() can trigger (RT) balancing operations,
++		 * which can then trigger wakeups of the stop thread to push
++		 * around the current task.
++		 *
++		 * The stop task itself will never be part of the PI-chain, it
++		 * never blocks, therefore that ->pi_lock recursion is safe.
++		 * Tell lockdep about this by placing the stop->pi_lock in its
++		 * own class.
++		 */
++		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
++	}
++
++	cpu_rq(cpu)->stop = stop;
++
++	if (old_stop) {
++		/*
++		 * Reset it back to a normal scheduling policy so that
++		 * it can die in pieces.
++		 */
++		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++	}
++}
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  const struct cpumask *new_mask,
++				  u32 flags)
++{
++	const struct cpumask *cpu_valid_mask = cpu_active_mask;
++	int dest_cpu;
++	unsigned long irq_flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
++		/*
++		 * Kernel threads are allowed on online && !active CPUs,
++		 * however, during cpu-hot-unplug, even these might get pushed
++		 * away if not KTHREAD_IS_PER_CPU.
++		 *
++		 * Specifically, migration_disabled() tasks must not fail the
++		 * cpumask_any_and_distribute() pick below, esp. so on
++		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
++		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
++		 */
++		cpu_valid_mask = cpu_online_mask;
++	}
++
++	/*
++	 * Must re-check here, to close a race against __kthread_bind(),
++	 * sched_setaffinity() is not guaranteed to observe the flag.
++	 */
++	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (cpumask_equal(&p->cpus_mask, new_mask))
++		goto out;
++
++	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++	if (dest_cpu >= nr_cpu_ids) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	__do_set_cpus_allowed(p, new_mask, flags);
++
++	/* Can the task run on the task's current CPU? If so, we're done */
++	if (cpumask_test_cpu(task_cpu(p), new_mask))
++		goto out;
++
++	if (p->migration_disabled) {
++		if (p->cpus_ptr != &p->cpus_mask)
++			__do_set_cpus_allowed(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
++		p->migration_disabled = 0;
++		p->migration_flags |= MDF_FORCE_ENABLED;
++		/* When p is migrate_disabled, rq->lock should be held */
++		rq->nr_pinned--;
++	}
++
++	if (task_running(p) || p->state == TASK_WAKING) {
++		struct migration_arg arg = { p, dest_cpu };
++
++		/* Need help from migration thread: drop lock and wait. */
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++		return 0;
++	}
++	if (task_on_rq_queued(p)) {
++		/*
++		 * OK, since we're going to drop the lock immediately
++		 * afterwards anyway.
++		 */
++		update_rq_clock(rq);
++		rq = move_queued_task(rq, p, dest_cpu);
++		lock = &rq->lock;
++	}
++
++out:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++
++	return ret;
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	return __set_cpus_allowed_ptr(p, new_mask, 0);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
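++
++/*
++ * Usage sketch (hypothetical caller): pin @task to CPU 2:
++ *
++ *	if (set_cpus_allowed_ptr(task, cpumask_of(2)))
++ *		pr_warn("could not pin task\n");
++ */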
++
++#else /* CONFIG_SMP */
++
++static inline int select_task_rq(struct task_struct *p, struct rq *rq)
++{
++	return 0;
++}
++
++static inline int
++__set_cpus_allowed_ptr(struct task_struct *p,
++		       const struct cpumask *new_mask,
++		       u32 flags)
++{
++	return set_cpus_allowed_ptr(p, new_mask);
++}
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return false;
++}
++
++#endif /* !CONFIG_SMP */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq;
++
++	if (!schedstat_enabled())
++		return;
++
++	rq = this_rq();
++
++#ifdef CONFIG_SMP
++	if (cpu == rq->cpu)
++		__schedstat_inc(rq->ttwu_local);
++	else {
++		/*
++		 * Alt schedule FW ToDo:
++		 * how to do ttwu_wake_remote
++		 */
++	}
++#endif /* CONFIG_SMP */
++
++	__schedstat_inc(rq->ttwu_count);
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static inline void
++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	check_preempt_curr(rq);
++	p->state = TASK_RUNNING;
++	trace_sched_wakeup(p);
++}
++
++static inline void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	if (p->sched_contributes_to_load)
++		rq->nr_uninterruptible--;
++
++	if (
++#ifdef CONFIG_SMP
++	    !(wake_flags & WF_MIGRATED) &&
++#endif
++	    p->in_iowait) {
++		delayacct_blkio_end(p);
++		atomic_dec(&task_rq(p)->nr_iowait);
++	}
++
++	activate_task(p, rq);
++	ttwu_do_wakeup(rq, p, 0);
++}
++
++/*
++ * Consider @p being inside a wait loop:
++ *
++ *   for (;;) {
++ *      set_current_state(TASK_UNINTERRUPTIBLE);
++ *
++ *      if (CONDITION)
++ *         break;
++ *
++ *      schedule();
++ *   }
++ *   __set_current_state(TASK_RUNNING);
++ *
++ * with a wakeup arriving between set_current_state() and schedule(). In this
++ * case @p is still runnable, so all that needs doing is to change p->state
++ * back to TASK_RUNNING in an atomic manner.
++ *
++ * By taking task_rq(p)->lock we serialize against schedule(); if @p->on_rq
++ * then schedule() must still happen and p->state can be changed to
++ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
++ * need to do a full wakeup with enqueue.
++ *
++ * Returns: %true when the wakeup is done,
++ *          %false otherwise.
++ */
++static int ttwu_runnable(struct task_struct *p, int wake_flags)
++{
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	rq = __task_access_lock(p, &lock);
++	if (task_on_rq_queued(p)) {
++		/* check_preempt_curr() may use rq clock */
++		update_rq_clock(rq);
++		ttwu_do_wakeup(rq, p, wake_flags);
++		ret = 1;
++	}
++	__task_access_unlock(p, lock);
++
++	return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void *arg)
++{
++	struct llist_node *llist = arg;
++	struct rq *rq = this_rq();
++	struct task_struct *p, *t;
++	struct rq_flags rf;
++
++	if (!llist)
++		return;
++
++	/*
++	 * rq::ttwu_pending is a racy indication of out-standing wakeups.
++	 * Races such that false-negatives are possible, since they
++	 * are shorter lived than false-positives would be.
++	 */
++	WRITE_ONCE(rq->ttwu_pending, 0);
++
++	rq_lock_irqsave(rq, &rf);
++	update_rq_clock(rq);
++
++	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
++		if (WARN_ON_ONCE(p->on_cpu))
++			smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
++			set_task_cpu(p, cpu_of(rq));
++
++		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
++	}
++
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++void send_call_function_single_ipi(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (!set_nr_if_polling(rq->idle))
++		arch_send_call_function_single_ipi(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++/*
++ * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
++ * necessary. The wakee CPU on receipt of the IPI will queue the task
++ * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
++ * of the wakeup instead of the waker.
++ */
++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
++
++	WRITE_ONCE(rq->ttwu_pending, 1);
++	__smp_call_single_queue(cpu, &p->wake_entry.llist);
++}
++
++static inline bool ttwu_queue_cond(int cpu, int wake_flags)
++{
++	/*
++	 * Do not complicate things with the async wake_list while the CPU is
++	 * in hotplug state.
++	 */
++	if (!cpu_active(cpu))
++		return false;
++
++	/*
++	 * If the CPU does not share cache, then queue the task on the
++	 * remote rqs wakelist to avoid accessing remote data.
++	 */
++	if (!cpus_share_cache(smp_processor_id(), cpu))
++		return true;
++
++	/*
++	 * If the task is descheduling and the only running task on the
++	 * CPU then use the wakelist to offload the task activation to
++	 * the soon-to-be-idle CPU as the current CPU is likely busy.
++	 * nr_running is checked to avoid unnecessary task stacking.
++	 */
++	if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
++		return true;
++
++	return false;
++}
++
++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
++		if (WARN_ON_ONCE(cpu == smp_processor_id()))
++			return false;
++
++		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++		__ttwu_queue_wakelist(p, cpu, wake_flags);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_if_idle(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	rcu_read_lock();
++
++	if (!is_idle_task(rcu_dereference(rq->curr)))
++		goto out;
++
++	if (set_nr_if_polling(rq->idle)) {
++		trace_sched_wake_idle_without_ipi(cpu);
++	} else {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		if (is_idle_task(rq->curr))
++			smp_send_reschedule(cpu);
++		/* Else CPU is not idle, do nothing here */
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++
++out:
++	rcu_read_unlock();
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#else /* !CONFIG_SMP */
++
++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	return false;
++}
++
++#endif /* CONFIG_SMP */
++
++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (ttwu_queue_wakelist(p, cpu, wake_flags))
++		return;
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++	ttwu_do_activate(rq, p, wake_flags);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Notes on Program-Order guarantees on SMP systems.
++ *
++ *  MIGRATION
++ *
++ * The basic program-order guarantee on SMP systems is that when a task [t]
++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
++ * execution on its new CPU [c1].
++ *
++ * For migration (of runnable tasks) this is provided by the following means:
++ *
++ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
++ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
++ *     rq(c1)->lock (if not at the same time, then in that order).
++ *  C) LOCK of the rq(c1)->lock scheduling in task
++ *
++ * Transitivity guarantees that B happens after A and C after B.
++ * Note: we only require RCpc transitivity.
++ * Note: the CPU doing B need not be c0 or c1
++ *
++ * Example:
++ *
++ *   CPU0            CPU1            CPU2
++ *
++ *   LOCK rq(0)->lock
++ *   sched-out X
++ *   sched-in Y
++ *   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(0)->lock // orders against CPU0
++ *                                   dequeue X
++ *                                   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(1)->lock
++ *                                   enqueue X
++ *                                   UNLOCK rq(1)->lock
++ *
++ *                   LOCK rq(1)->lock // orders against CPU2
++ *                   sched-out Z
++ *                   sched-in X
++ *                   UNLOCK rq(1)->lock
++ *
++ *
++ *  BLOCKING -- aka. SLEEP + WAKEUP
++ *
++ * For blocking we (obviously) need to provide the same guarantee as for
++ * migration. However the means are completely different as there is no lock
++ * chain to provide order. Instead we do:
++ *
++ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
++ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
++ *
++ * Example:
++ *
++ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
++ *
++ *   LOCK rq(0)->lock LOCK X->pi_lock
++ *   dequeue X
++ *   sched-out X
++ *   smp_store_release(X->on_cpu, 0);
++ *
++ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
++ *                    X->state = WAKING
++ *                    set_task_cpu(X,2)
++ *
++ *                    LOCK rq(2)->lock
++ *                    enqueue X
++ *                    X->state = RUNNING
++ *                    UNLOCK rq(2)->lock
++ *
++ *                                          LOCK rq(2)->lock // orders against CPU1
++ *                                          sched-out Z
++ *                                          sched-in X
++ *                                          UNLOCK rq(2)->lock
++ *
++ *                    UNLOCK X->pi_lock
++ *   UNLOCK rq(0)->lock
++ *
++ *
++ * However; for wakeups there is a second guarantee we must provide, namely we
++ * must observe the state that led to our wakeup. That is, not only must our
++ * task observe its own prior state, it must also observe the stores prior to
++ * its wakeup.
++ *
++ * This means that any means of doing remote wakeups must order the CPU doing
++ * the wakeup against the CPU the task is going to end up running on. This,
++ * however, is already required for the regular Program-Order guarantee above,
++ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
++ *
++ */
++
++/**
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Conceptually does:
++ *
++ *   If (@state & @p->state) @p->state = TASK_RUNNING.
++ *
++ * If the task was not queued/runnable, also place it back on a runqueue.
++ *
++ * This function is atomic against schedule() which would dequeue the task.
++ *
++ * It issues a full memory barrier before accessing @p->state, see the comment
++ * with set_current_state().
++ *
++ * Uses p->pi_lock to serialize against concurrent wake-ups.
++ *
++ * Relies on p->pi_lock stabilizing:
++ *  - p->sched_class
++ *  - p->cpus_ptr
++ *  - p->sched_task_group
++ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
++ *
++ * Tries really hard to only take one task_rq(p)->lock for performance.
++ * Takes rq->lock in:
++ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
++ *  - ttwu_queue()       -- new rq, for enqueue of the task;
++ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
++ *
++ * As a consequence we race really badly with just about everything. See the
++ * many memory barriers and their comments for details.
++ *
++ * Return: %true if @p->state changes (an actual wakeup was done),
++ *	   %false otherwise.
++ */
++static int try_to_wake_up(struct task_struct *p, unsigned int state,
++			  int wake_flags)
++{
++	unsigned long flags;
++	int cpu, success = 0;
++
++	preempt_disable();
++	if (p == current) {
++		/*
++		 * We're waking current; this means 'p->on_rq' and 'task_cpu(p)
++		 * == smp_processor_id()'. Together this means we can special
++		 * case the whole 'p->on_rq && ttwu_runnable()' case below
++		 * without taking any locks.
++		 *
++		 * In particular:
++		 *  - we rely on Program-Order guarantees for all the ordering,
++		 *  - we're serialized against set_special_state() by virtue of
++		 *    it disabling IRQs (this allows not taking ->pi_lock).
++		 */
++		if (!(p->state & state))
++			goto out;
++
++		success = 1;
++		trace_sched_waking(p);
++		p->state = TASK_RUNNING;
++		trace_sched_wakeup(p);
++		goto out;
++	}
++
++	/*
++	 * If we are going to wake up a thread waiting for CONDITION we
++	 * need to ensure that CONDITION=1 done by the caller can not be
++	 * reordered with p->state check below. This pairs with smp_store_mb()
++	 * in set_current_state() that the waiting thread does.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	smp_mb__after_spinlock();
++	if (!(p->state & state))
++		goto unlock;
++
++	trace_sched_waking(p);
++
++	/* We're going to change ->state: */
++	success = 1;
++
++	/*
++	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++	 * in smp_cond_load_acquire() below.
++	 *
++	 * sched_ttwu_pending()			try_to_wake_up()
++	 *   STORE p->on_rq = 1			  LOAD p->state
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   UNLOCK rq->lock
++	 *
++	 * [task p]
++	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
++	 */
++	smp_rmb();
++	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
++		goto unlock;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++	 * possible to, falsely, observe p->on_cpu == 0.
++	 *
++	 * One must be running (->on_cpu == 1) in order to remove oneself
++	 * from the runqueue.
++	 *
++	 * __schedule() (switch to task 'p')	try_to_wake_up()
++	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (put 'p' to sleep)
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
++	 * schedule()'s deactivate_task() has 'happened' and p will no longer
++	 * care about its own p->state. See the comment in __schedule().
++	 */
++	smp_acquire__after_ctrl_dep();
++
++	/*
++	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
++	 * == 0), which means we need to do an enqueue, change p->state to
++	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
++	 * enqueue, such as ttwu_queue_wakelist().
++	 */
++	p->state = TASK_WAKING;
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, consider queueing p on the remote CPU's wake_list,
++	 * which potentially sends an IPI instead of spinning on p->on_cpu to
++	 * let the waker make forward progress. This is safe because IRQs are
++	 * disabled and the IPI will deliver after on_cpu is cleared.
++	 *
++	 * Ensure we load task_cpu(p) after p->on_cpu:
++	 *
++	 * set_task_cpu(p, cpu);
++	 *   STORE p->cpu = @cpu
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock
++	 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
++	 *   STORE p->on_cpu = 1                LOAD p->cpu
++	 *
++	 * to ensure we observe the correct CPU on which the task is currently
++	 * scheduling.
++	 */
++	if (smp_load_acquire(&p->on_cpu) &&
++	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
++		goto unlock;
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, wait until it's done referencing the task.
++	 *
++	 * Pairs with the smp_store_release() in finish_task().
++	 *
++	 * This ensures that tasks getting woken will be fully ordered against
++	 * their previous state and preserve Program Order.
++	 */
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++	sched_task_ttwu(p);
++
++	cpu = select_task_rq(p, this_rq());
++
++	if (cpu != task_cpu(p)) {
++		if (p->in_iowait) {
++			delayacct_blkio_end(p);
++			atomic_dec(&task_rq(p)->nr_iowait);
++		}
++
++		wake_flags |= WF_MIGRATED;
++		psi_ttwu_dequeue(p);
++		set_task_cpu(p, cpu);
++	}
++#else
++	cpu = task_cpu(p);
++#endif /* CONFIG_SMP */
++
++	ttwu_queue(p, cpu, wake_flags);
++unlock:
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++out:
++	if (success)
++		ttwu_stat(p, task_cpu(p), wake_flags);
++	preempt_enable();
++
++	return success;
++}
++
++/**
++ * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
++ * @p: Process for which the function is to be invoked, can be @current.
++ * @func: Function to invoke.
++ * @arg: Argument to function.
++ *
++ * If the specified task can be quickly locked into a definite state
++ * (either sleeping or on a given runqueue), arrange to keep it in that
++ * state while invoking @func(@arg).  This function can use ->on_rq and
++ * task_curr() to work out what the state is, if required.  Given that
++ * @func can be invoked with a runqueue lock held, it had better be quite
++ * lightweight.
++ *
++ * Returns:
++ *	@false if the task slipped out from under the locks.
++ *	@true if the task was locked onto a runqueue or is sleeping.
++ *		However, @func can override this by returning @false.
++ */
++bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
++{
++	struct rq_flags rf;
++	bool ret = false;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
++	if (p->on_rq) {
++		rq = __task_rq_lock(p, &rf);
++		if (task_rq(p) == rq)
++			ret = func(p, arg);
++		__task_rq_unlock(rq, &rf);
++	} else {
++		switch (p->state) {
++		case TASK_RUNNING:
++		case TASK_WAKING:
++			break;
++		default:
++			smp_rmb(); // See smp_rmb() comment in try_to_wake_up().
++			if (!p->on_rq)
++				ret = func(p, arg);
++		}
++	}
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
++	return ret;
++}
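++
++/*
++ * Sketch of a caller-supplied @func (hypothetical example): sample the
++ * CPU of a task pinned in a stable state:
++ *
++ *	static bool get_cpu_func(struct task_struct *t, void *arg)
++ *	{
++ *		*(int *)arg = task_cpu(t);
++ *		return true;
++ *	}
++ *
++ *	if (try_invoke_on_locked_down_task(p, get_cpu_func, &cpu))
++ *		... cpu is consistent with the state @p was locked in ...
++ */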
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * This function executes a full memory barrier before accessing the task state.
++ */
++int wake_up_process(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++	return try_to_wake_up(p, state, 0);
++}
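++
++/*
++ * Waker-side counterpart (sketch) of the wait loop documented above
++ * ttwu_runnable(): publish the condition, then wake:
++ *
++ *	CONDITION = 1;
++ *	wake_up_process(p);
++ *
++ * The full barrier implied by try_to_wake_up() orders the CONDITION
++ * store against the p->state load, pairing with set_current_state().
++ */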
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	p->on_rq			= 0;
++	p->on_cpu			= 0;
++	p->utime			= 0;
++	p->stime			= 0;
++	p->sched_time			= 0;
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++	INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++#ifdef CONFIG_COMPACTION
++	p->capture_control = NULL;
++#endif
++#ifdef CONFIG_SMP
++	p->wake_entry.u_flags = CSD_TYPE_TTWU;
++#endif
++}
++
++/*
++ * fork()/clone()-time setup:
++ */
++int sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	__sched_fork(clone_flags, p);
++	/*
++	 * We mark the process as NEW here. This guarantees that
++	 * nobody will actually run it, and a signal or other external
++	 * event cannot wake it up and insert it on the runqueue either.
++	 */
++	p->state = TASK_NEW;
++
++	/*
++	 * Make sure we do not leak PI boosting priority to the child.
++	 */
++	p->prio = current->normal_prio;
++
++	/*
++	 * Revert to default priority/policy on fork if requested.
++	 */
++	if (unlikely(p->sched_reset_on_fork)) {
++		if (task_has_rt_policy(p)) {
++			p->policy = SCHED_NORMAL;
++			p->static_prio = NICE_TO_PRIO(0);
++			p->rt_priority = 0;
++		} else if (PRIO_TO_NICE(p->static_prio) < 0)
++			p->static_prio = NICE_TO_PRIO(0);
++
++		p->prio = p->normal_prio = normal_prio(p);
++
++		/*
++		 * We don't need the reset flag anymore after the fork. It has
++		 * fulfilled its duty:
++		 */
++		p->sched_reset_on_fork = 0;
++	}
++
++	/*
++	 * The child is not yet in the pid-hash so no cgroup attach races,
++	 * and the cgroup is pinned to this child because cgroup_fork()
++	 * runs before sched_fork().
++	 *
++	 * Silence PROVE_RCU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
++	 * Share the timeslice between parent and child, thus the
++	 * total amount of pending timeslices in the system doesn't change,
++	 * resulting in more scheduling fairness.
++	 */
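++	/*
++	 * E.g. a parent with 4ms of slice left keeps 2ms and the child
++	 * starts with 2ms; should the halved slice fall below RESCHED_NS,
++	 * the child is refilled with a full sched_timeslice_ns below and
++	 * the parent is rescheduled.
++	 */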
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	rq->curr->time_slice /= 2;
++	p->time_slice = rq->curr->time_slice;
++#ifdef CONFIG_SCHED_HRTICK
++	hrtick_start(rq, rq->curr->time_slice);
++#endif
++
++	if (p->time_slice < RESCHED_NS) {
++		p->time_slice = sched_timeslice_ns;
++		resched_curr(rq);
++	}
++	sched_task_fork(p, rq);
++	raw_spin_unlock(&rq->lock);
++
++	rseq_migrate(p);
++	/*
++	 * We're setting the CPU for the first time, we don't migrate,
++	 * so use __set_task_cpu().
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++#ifdef CONFIG_SCHED_INFO
++	if (unlikely(sched_info_on()))
++		memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++	init_task_preempt_count(p);
++
++	return 0;
++}
++
++void sched_post_fork(struct task_struct *p) {}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++static bool __initdata __sched_schedstats = false;
++
++static void set_schedstats(bool enabled)
++{
++	if (enabled)
++		static_branch_enable(&sched_schedstats);
++	else
++		static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++	if (!schedstat_enabled()) {
++		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++		static_branch_enable(&sched_schedstats);
++	}
++}
++
++static int __init setup_schedstats(char *str)
++{
++	int ret = 0;
++	if (!str)
++		goto out;
++
++	/*
++	 * This code is called before jump labels have been set up, so we can't
++	 * change the static branch directly just yet.  Instead set a temporary
++	 * variable so init_schedstats() can do it later.
++	 */
++	if (!strcmp(str, "enable")) {
++		__sched_schedstats = true;
++		ret = 1;
++	} else if (!strcmp(str, "disable")) {
++		__sched_schedstats = false;
++		ret = 1;
++	}
++out:
++	if (!ret)
++		pr_warn("Unable to parse schedstats=\n");
++
++	return ret;
++}
++__setup("schedstats=", setup_schedstats);
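++
++/*
++ * Usage: boot with "schedstats=enable" to turn accounting on from early
++ * boot; at runtime the same switch is exposed through the
++ * kernel.sched_schedstats sysctl handled below.
++ */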
++
++static void __init init_schedstats(void)
++{
++	set_schedstats(__sched_schedstats);
++}
++
++#ifdef CONFIG_PROC_SYSCTL
++int sysctl_schedstats(struct ctl_table *table, int write,
++			 void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++	struct ctl_table t;
++	int err;
++	int state = static_branch_likely(&sched_schedstats);
++
++	if (write && !capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	t = *table;
++	t.data = &state;
++	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++	if (err < 0)
++		return err;
++	if (write)
++		set_schedstats(state);
++	return err;
++}
++#endif /* CONFIG_PROC_SYSCTL */
++#else  /* !CONFIG_SCHEDSTATS */
++static inline void init_schedstats(void) {}
++#endif /* CONFIG_SCHEDSTATS */
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	p->state = TASK_RUNNING;
++
++	rq = cpu_rq(select_task_rq(p, this_rq()));
++#ifdef CONFIG_SMP
++	rseq_migrate(p);
++	/*
++	 * Fork balancing, do it here and not earlier because:
++	 * - cpus_ptr can change in the fork path
++	 * - any previously selected CPU might disappear through hotplug
++	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
++	 * as we're not fully set-up yet.
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++#endif
++
++	raw_spin_lock(&rq->lock);
++
++	update_rq_clock(rq);
++	activate_task(p, rq);
++	trace_sched_wakeup_new(p);
++	check_preempt_curr(rq);
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
++
++void preempt_notifier_inc(void)
++{
++	static_branch_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++	static_branch_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++	if (!static_branch_unlikely(&preempt_notifier_key))
++		WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++	hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++	hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
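++
++/*
++ * Registration sketch (hypothetical ops; cf. KVM's vcpu preempt
++ * notifier):
++ *
++ *	static struct preempt_ops my_preempt_ops = {
++ *		.sched_in  = my_sched_in,
++ *		.sched_out = my_sched_out,
++ *	};
++ *
++ *	preempt_notifier_inc();
++ *	preempt_notifier_init(&notifier, &my_preempt_ops);
++ *	preempt_notifier_register(&notifier);
++ */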
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				   struct task_struct *next)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++	/*
++	 * Claim the task as running, we do this before switching to it
++	 * such that any running task will have this set.
++	 *
++	 * See the ttwu() WF_ON_CPU case and its ordering comment.
++	 */
++	WRITE_ONCE(next->on_cpu, 1);
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * This must be the very last reference to @prev from this CPU. After
++	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
++	 * must ensure this doesn't happen until the switch is completely
++	 * finished.
++	 *
++	 * In particular, the load of prev->state in finish_task_switch() must
++	 * happen before this.
++	 *
++	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++	 */
++	smp_store_release(&prev->on_cpu, 0);
++#else
++	prev->on_cpu = 0;
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
++{
++	void (*func)(struct rq *rq);
++	struct callback_head *next;
++
++	lockdep_assert_held(&rq->lock);
++
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
++
++		func(rq);
++	}
++}
++
++static void balance_push(struct rq *rq);
++
++struct callback_head balance_push_callback = {
++	.next = NULL,
++	.func = (void (*)(struct callback_head *))balance_push,
++};
++
++static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
++{
++	struct callback_head *head = rq->balance_callback;
++
++	if (head) {
++		lockdep_assert_held(&rq->lock);
++		rq->balance_callback = NULL;
++	}
++
++	return head;
++}
++
++static void __balance_callbacks(struct rq *rq)
++{
++	do_balance_callbacks(rq, splice_balance_callbacks(rq));
++}
++
++static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
++{
++	unsigned long flags;
++
++	if (unlikely(head)) {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		do_balance_callbacks(rq, head);
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++}
++
++#else
++
++static inline void __balance_callbacks(struct rq *rq)
++{
++}
++
++static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
++{
++	return NULL;
++}
++
++static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
++{
++}
++
++#endif
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++	/*
++	 * The runqueue lock will be released by the next
++	 * task (which is an invalid locking op but in the case
++	 * of the scheduler it's an obvious special-case), so we
++	 * do an early lockdep release here:
++	 */
++	spin_release(&rq->lock.dep_map, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++	/* this is a valid case when another task releases the spinlock */
++	rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq)
++{
++	/*
++	 * If we are tracking spinlock dependencies then we have to
++	 * fix up the runqueue lock - which gets 'carried over' from
++	 * prev into current:
++	 */
++	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++	__balance_callbacks(rq);
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++/*
++ * NOP if the arch has not defined these:
++ */
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_in();
++#endif
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++		    struct task_struct *next)
++{
++	kcov_prepare_switch(prev);
++	sched_info_switch(rq, prev, next);
++	perf_event_task_sched_out(prev, next);
++	rseq_preempt(prev);
++	fire_sched_out_preempt_notifiers(prev, next);
++	kmap_local_sched_out();
++	prepare_task(next);
++	prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock.  (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	long prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_task), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
++	 */
++	prev_state = prev->state;
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_task(prev);
++	finish_lock_switch(rq);
++	finish_arch_post_lock_switch();
++	kcov_finish_switch(current);
++	/*
++	 * kmap_local_sched_out() is invoked with rq::lock held and
++	 * interrupts disabled. There is no requirement for that, but the
++	 * sched out code does not have an interrupt enabled section.
++	 * Restoring the maps on sched in does not require interrupts being
++	 * disabled either.
++	 */
++	kmap_local_sched_in();
++
++	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * When switching through a kernel thread, the loop in
++	 * membarrier_{private,global}_expedited() may have observed that
++	 * kernel thread and not issued an IPI. It is therefore possible to
++	 * schedule between user->kernel->user threads without passing though
++	 * switch_mm(). Membarrier requires a barrier after storing to
++	 * rq->curr, before returning to userspace, so provide them here:
++	 *
++	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++	 *   provided by mmdrop(),
++	 * - a sync_core for SYNC_CORE.
++	 */
++	if (mm) {
++		membarrier_mm_sync_core_before_usermode(mm);
++		mmdrop(mm);
++	}
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/*
++		 * Remove function-return probe instances associated with this
++		 * task and put them back on the free list.
++		 */
++		kprobe_flush_task(prev);
++
++		/* Task is done with its stack. */
++		put_task_stack(prev);
++
++		put_task_struct_rcu_user(prev);
++	}
++
++	tick_nohz_task_switch();
++	return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq;
++
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock and lower preempt_count,
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	rq = finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++
++	calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++	       struct task_struct *next)
++{
++	prepare_task_switch(rq, prev, next);
++
++	/*
++	 * For paravirt, this is coupled with an exit in switch_to to
++	 * combine the page table reload and the switch backend into
++	 * one hypercall.
++	 */
++	arch_start_context_switch(prev);
++
++	/*
++	 * kernel -> kernel   lazy + transfer active
++	 *   user -> kernel   lazy + mmgrab() active
++	 *
++	 * kernel ->   user   switch + mmdrop() active
++	 *   user ->   user   switch
++	 */
++	if (!next->mm) {                                // to kernel
++		enter_lazy_tlb(prev->active_mm, next);
++
++		next->active_mm = prev->active_mm;
++		if (prev->mm)                           // from user
++			mmgrab(prev->active_mm);
++		else
++			prev->active_mm = NULL;
++	} else {                                        // to user
++		membarrier_switch_mm(rq, prev->active_mm, next->mm);
++		/*
++		 * sys_membarrier() requires an smp_mb() between setting
++		 * rq->curr / membarrier_switch_mm() and returning to userspace.
++		 *
++		 * The below provides this either through switch_mm(), or in
++		 * case 'prev->active_mm == next->mm' through
++		 * finish_task_switch()'s mmdrop().
++		 */
++		switch_mm_irqs_off(prev->active_mm, next->mm, next);
++
++		if (!prev->mm) {                        // from kernel
++			/* will mmdrop() in finish_task_switch(). */
++			rq->prev_mm = prev->active_mm;
++			prev->active_mm = NULL;
++		}
++	}
++
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	return finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++	unsigned long i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race.  The caller is responsible for using it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
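++
++/*
++ * Illustrative use only (not part of this patch): a halt-polling loop
++ * can keep spinning while nothing else wants the CPU, accepting the
++ * time-of-check-to-time-of-use race described above:
++ *
++ *	while (single_task_running() && !need_resched())
++ *		cpu_relax();
++ */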
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpuidle menu
++ * governor, are using nonsensical data: preferring shallow idle state
++ * selection for a CPU that has IO-wait which might not even end up running
++ * the task when it does become runnable.
++ */
++
++unsigned long nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait account is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only the one
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means, that when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU, it can wake to another CPU than it
++ * blocked on. This means the per CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned long nr_iowait(void)
++{
++	unsigned long i, sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += nr_iowait_cpu(i);
++
++	return sum;
++}
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache
++ * footprint.
++ */
++void sched_exec(void)
++{
++	struct task_struct *p = current;
++	unsigned long flags;
++	int dest_cpu;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = this_rq();
++
++	if (rq != task_rq(p) || rq->nr_running < 2)
++		goto unlock;
++
++	dest_cpu = select_task_rq(p, task_rq(p));
++	if (dest_cpu == smp_processor_id())
++		goto unlock;
++
++	if (likely(cpu_active(dest_cpu))) {
++		struct migration_arg arg = { p, dest_cpu };
++
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
++		return;
++	}
++unlock:
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++static inline void update_curr(struct rq *rq, struct task_struct *p)
++{
++	s64 ns = rq->clock_task - p->last_ran;
++
++	p->sched_time += ns;
++	account_group_exec_runtime(p, ns);
++
++	p->time_slice -= ns;
++	p->last_ran = rq->clock_task;
++}
++
++/*
++ * Return accounted runtime for the task.
++ * Return separately the current's pending runtime that has not been
++ * accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++	/*
++	 * 64-bit doesn't need locks to atomically read a 64-bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++	 * Reading ->on_cpu is racy, but this is ok.
++	 *
++	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_access_lock_irqsave(p, &lock, &flags);
++	/*
++	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
++	 * project cycles that may never be accounted to this
++	 * thread, breaking clock_gettime().
++	 */
++	if (p == rq->curr && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		update_curr(rq, p);
++	}
++	ns = tsk_seruntime(p);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++	return ns;
++}
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void scheduler_task_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	if (is_idle_task(p))
++		return;
++
++	update_curr(rq, p);
++	cpufreq_update_util(rq, 0);
++
++	/*
++	 * Tasks that have less than RESCHED_NS of time slice left will be
++	 * rescheduled.
++	 */
++	if (p->time_slice >= RESCHED_NS)
++		return;
++	set_tsk_need_resched(p);
++	set_preempt_need_resched();
++}
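++
++/*
++ * Worked example (numbers illustrative, assuming a 4ms time slice): a
++ * CPU-bound task burns its whole slice, so update_curr() drives
++ * p->time_slice below RESCHED_NS and the tick marks it for
++ * rescheduling; a task that ran for only 0.5ms keeps the remaining
++ * ~3.5ms and is left alone until a later tick or until it sleeps.
++ */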
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++	int cpu __maybe_unused = smp_processor_id();
++	struct rq *rq = cpu_rq(cpu);
++
++	arch_scale_freq_tick();
++	sched_clock_tick();
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	scheduler_task_tick(rq);
++	calc_global_load_tick(rq);
++	psi_task_tick(rq);
++
++	rq->last_tick = rq->clock;
++	raw_spin_unlock(&rq->lock);
++
++	perf_event_task_tick();
++}
++
++#ifdef CONFIG_SCHED_SMT
++static inline int active_load_balance_cpu_stop(void *data)
++{
++	struct rq *rq = this_rq();
++	struct task_struct *p = data;
++	cpumask_t tmp;
++	unsigned long flags;
++
++	local_irq_save(flags);
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++
++	rq->active_balance = 0;
++	/* _something_ may have changed the task, double check again */
++	if (task_on_rq_queued(p) && task_rq(p) == rq &&
++	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
++	    !is_migration_disabled(p)) {
++		int cpu = cpu_of(rq);
++		int dcpu = __best_mask_cpu(cpu, &tmp,
++					   per_cpu(sched_cpu_llc_mask, cpu));
++		rq = move_queued_task(rq, p, dcpu);
++	}
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock(&p->pi_lock);
++
++	local_irq_restore(flags);
++
++	return 0;
++}
++
++/* sg_balance_trigger - trigger sibling group balance for @cpu */
++static inline int sg_balance_trigger(const int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	struct task_struct *curr;
++	int res;
++
++	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++		return 0;
++	curr = rq->curr;
++	res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&
++	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&
++	      !is_migration_disabled(curr) && (!rq->active_balance);
++
++	if (res)
++		rq->active_balance = 1;
++
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	if (res)
++		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop,
++				    curr, &rq->active_balance_work);
++	return res;
++}
++
++/*
++ * sg_balance_check - sibling group balance check for run queue @rq
++ */
++static inline void sg_balance_check(struct rq *rq)
++{
++	cpumask_t chk;
++	int cpu;
++
++	/* exit when no sibling group is idle */
++	if (cpumask_empty(&sched_sg_idle_mask))
++		return;
++
++	cpu = cpu_of(rq);
++	/*
++	 * Only a CPU in a sibling idle group will do the checking, and then
++	 * find potential CPUs whose currently running task can be migrated
++	 */
++	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
++	    cpumask_andnot(&chk, cpu_online_mask, &sched_rq_pending_mask) &&
++	    cpumask_andnot(&chk, &chk, &sched_rq_watermark[IDLE_WM])) {
++		int i, tried = 0;
++
++		for_each_cpu_wrap(i, &chk, cpu) {
++			if (cpumask_subset(cpu_smt_mask(i), &chk)) {
++				if (sg_balance_trigger(i))
++					return;
++				if (tried)
++					return;
++				tried++;
++			}
++		}
++	}
++}
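++
++/*
++ * In short (a descriptive sketch of the above): a CPU whose whole SMT
++ * core is idle scans for cores on which every sibling is busy, and asks
++ * such a core, via the stopper, to push its lone running task over, so
++ * that task gets an idle core to itself.
++ */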
++#endif /* CONFIG_SCHED_SMT */
++
++#ifdef CONFIG_NO_HZ_FULL
++
++struct tick_work {
++	int			cpu;
++	atomic_t		state;
++	struct delayed_work	work;
++};
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE	0
++#define TICK_SCHED_REMOTE_OFFLINING	1
++#define TICK_SCHED_REMOTE_RUNNING	2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ *          TICK_SCHED_REMOTE_OFFLINE
++ *                    |   ^
++ *                    |   |
++ *                    |   | sched_tick_remote()
++ *                    |   |
++ *                    |   |
++ *                    +--TICK_SCHED_REMOTE_OFFLINING
++ *                    |   ^
++ *                    |   |
++ * sched_tick_start() |   | sched_tick_stop()
++ *                    |   |
++ *                    V   |
++ *          TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
++
++static struct tick_work __percpu *tick_work_cpu;
++
++static void sched_tick_remote(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct tick_work *twork = container_of(dwork, struct tick_work, work);
++	int cpu = twork->cpu;
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *curr;
++	unsigned long flags;
++	u64 delta;
++	int os;
++
++	/*
++	 * Handle the tick only if it appears the remote CPU is running in full
++	 * dynticks mode. The check is racy by nature, but missing a tick or
++	 * having one too much is no big deal because the scheduler tick updates
++	 * statistics and checks timeslices in a time-independent way, regardless
++	 * of when exactly it is running.
++	 */
++	if (!tick_nohz_tick_stopped_cpu(cpu))
++		goto out_requeue;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	curr = rq->curr;
++	if (cpu_is_offline(cpu))
++		goto out_unlock;
++
++	update_rq_clock(rq);
++	if (!is_idle_task(curr)) {
++		/*
++		 * Make sure the next tick runs within a reasonable
++		 * amount of time.
++		 */
++		delta = rq_clock_task(rq) - curr->last_ran;
++		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++	}
++	scheduler_task_tick(rq);
++
++	calc_load_nohz_remote(rq);
++out_unlock:
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out_requeue:
++	/*
++	 * Run the remote tick once per second (1Hz). This arbitrary
++	 * frequency is large enough to avoid overload but short enough
++	 * to keep scheduler internal stats reasonably up to date.  But
++	 * first update state to reflect hotplug activity if required.
++	 */
++	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++	if (os == TICK_SCHED_REMOTE_RUNNING)
++		queue_delayed_work(system_unbound_wq, dwork, HZ);
++}
++
++static void sched_tick_start(int cpu)
++{
++	int os;
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++	if (os == TICK_SCHED_REMOTE_OFFLINE) {
++		twork->cpu = cpu;
++		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void sched_tick_stop(int cpu)
++{
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	cancel_delayed_work_sync(&twork->work);
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __init sched_tick_offload_init(void)
++{
++	tick_work_cpu = alloc_percpu(struct tick_work);
++	BUG_ON(!tick_work_cpu);
++	return 0;
++}
++
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_tick_start(int cpu) { }
++static inline void sched_tick_stop(int cpu) { }
++#endif
++
++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
++				defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++	if (preempt_count() == val) {
++		unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++		current->preempt_disable_ip = ip;
++#endif
++		trace_preempt_off(CALLER_ADDR0, ip);
++	}
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++		return;
++#endif
++	__preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Spinlock count overflowing soon?
++	 */
++	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++				PREEMPT_MASK - 10);
++#endif
++	preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in equals the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++	if (preempt_count() == val)
++		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++		return;
++	/*
++	 * Is the spinlock portion underflowing?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++	/* Save this before calling printk(), since that will clobber it */
++	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++	if (oops_in_progress)
++		return;
++
++	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++		prev->comm, prev->pid, preempt_count());
++
++	debug_show_held_locks(prev);
++	print_modules();
++	if (irqs_disabled())
++		print_irqtrace_events(prev);
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++	    && in_atomic_preempt_off()) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++	if (panic_on_warn)
++		panic("scheduling while atomic\n");
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev, bool preempt)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++	if (task_stack_end_corrupted(prev))
++		panic("corrupted stack end detected inside scheduler\n");
++
++	if (task_scs_end_corrupted(prev))
++		panic("corrupted shadow stack detected inside scheduler\n");
++#endif
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++	if (!preempt && prev->state && prev->non_block_count) {
++		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
++			prev->comm, prev->pid, prev->non_block_count);
++		dump_stack();
++		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++	}
++#endif
++
++	if (unlikely(in_atomic_preempt_off())) {
++		__schedule_bug(prev);
++		preempt_count_set(PREEMPT_DISABLED);
++	}
++	rcu_sleep_check();
++	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
++
++	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++	schedstat_inc(this_rq()->sched_count);
++}
++
++/*
++ * Compile time debug macro
++ * #define ALT_SCHED_DEBUG
++ */
++
++#ifdef ALT_SCHED_DEBUG
++void alt_sched_debug(void)
++{
++	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
++	       sched_rq_pending_mask.bits[0],
++	       sched_rq_watermark[IDLE_WM].bits[0],
++	       sched_sg_idle_mask.bits[0]);
++}
++#else
++inline void alt_sched_debug(void) {}
++#endif
++
++#ifdef	CONFIG_SMP
++
++#define SCHED_RQ_NR_MIGRATION (32U)
++/*
++ * Migrate pending tasks in @rq to @dest_cpu
++ * Will try to migrate at most min(rq->nr_running / 2, SCHED_RQ_NR_MIGRATION)
++ * tasks to @dest_cpu
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
++{
++	struct task_struct *p, *skip = rq->curr;
++	int nr_migrated = 0;
++	int nr_tries = min(rq->nr_running / 2, SCHED_RQ_NR_MIGRATION);
++
++	while (skip != rq->idle && nr_tries &&
++	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
++		skip = sched_rq_next_task(p, rq);
++		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
++			__SCHED_DEQUEUE_TASK(p, rq, 0, );
++			set_task_cpu(p, dest_cpu);
++			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
++			nr_migrated++;
++		}
++		nr_tries--;
++	}
++
++	return nr_migrated;
++}
++
++static inline int take_other_rq_tasks(struct rq *rq, int cpu)
++{
++	struct cpumask *affinity_mask, *end_mask;
++
++	if (unlikely(!rq->online))
++		return 0;
++
++	if (cpumask_empty(&sched_rq_pending_mask))
++		return 0;
++
++	affinity_mask = per_cpu(sched_cpu_affinity_masks, cpu) + 1;
++	end_mask = per_cpu(sched_cpu_affinity_end_mask, cpu);
++	do {
++		int i;
++		for_each_cpu_and(i, &sched_rq_pending_mask, affinity_mask) {
++			int nr_migrated;
++			struct rq *src_rq;
++
++			src_rq = cpu_rq(i);
++			if (!do_raw_spin_trylock(&src_rq->lock))
++				continue;
++			spin_acquire(&src_rq->lock.dep_map,
++				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++#ifdef CONFIG_SMP
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++#endif
++				rq->nr_running += nr_migrated;
++#ifdef CONFIG_SMP
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++#endif
++				update_sched_rq_watermark(rq);
++				cpufreq_update_util(rq, 0);
++
++				spin_release(&src_rq->lock.dep_map, _RET_IP_);
++				do_raw_spin_unlock(&src_rq->lock);
++
++				return 1;
++			}
++
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
++		}
++	} while (++affinity_mask < end_mask);
++
++	return 0;
++}
++#endif
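++
++/*
++ * A sketch of the stealing order above (descriptive only):
++ * take_other_rq_tasks() walks the per-CPU affinity mask levels from the
++ * nearest (SMT siblings) outward, trylocks each source rq with pending
++ * tasks to avoid lock-ordering problems, and pulls up to
++ * min(nr_running / 2, SCHED_RQ_NR_MIGRATION) tasks whose affinity allows
++ * this CPU, returning 1 as soon as one pull succeeds.
++ */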
++
++/*
++ * Timeslices below RESCHED_NS are considered as good as expired as there's no
++ * point rescheduling when there's so little time left.
++ */
++static inline void check_curr(struct task_struct *p, struct rq *rq)
++{
++	if (unlikely(rq->idle == p))
++		return;
++
++	update_curr(rq, p);
++
++	if (p->time_slice < RESCHED_NS)
++		time_slice_expired(p, rq);
++}
++
++static inline struct task_struct *
++choose_next_task(struct rq *rq, int cpu, struct task_struct *prev)
++{
++	struct task_struct *next;
++
++	if (unlikely(rq->skip)) {
++		next = rq_runnable_task(rq);
++		if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++			if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++				rq->skip = NULL;
++				schedstat_inc(rq->sched_goidle);
++				return next;
++#ifdef	CONFIG_SMP
++			}
++			next = rq_runnable_task(rq);
++#endif
++		}
++		rq->skip = NULL;
++#ifdef CONFIG_HIGH_RES_TIMERS
++		hrtick_start(rq, next->time_slice);
++#endif
++		return next;
++	}
++
++	next = sched_rq_first_task(rq);
++	if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++		if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++			schedstat_inc(rq->sched_goidle);
++			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
++			return next;
++#ifdef	CONFIG_SMP
++		}
++		next = sched_rq_first_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
++#endif
++	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
++	 * next);*/
++	return next;
++}
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ *      paths. For example, see arch/x86/entry_64.S.
++ *
++ *      To drive preemption between tasks, the scheduler sets the flag in timer
++ *      interrupt handler scheduler_tick().
++ *
++ *   3. Wakeups don't really cause entry into schedule(). They add a
++ *      task to the run-queue and that's it.
++ *
++ *      Now, if the new task added to the run-queue preempts the current
++ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ *      called on the nearest possible occasion:
++ *
++ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
++ *
++ *         - in syscall or exception context, at the next outmost
++ *           preempt_enable(). (this might be as soon as the wake_up()'s
++ *           spin_unlock()!)
++ *
++ *         - in IRQ context, return from interrupt-handler to
++ *           preemptible context
++ *
++ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
++ *         then at the next:
++ *
++ *          - cond_resched() call
++ *          - explicit schedule() call
++ *          - return from syscall or exception to user-space
++ *          - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(bool preempt)
++{
++	struct task_struct *prev, *next;
++	unsigned long *switch_count;
++	unsigned long prev_state;
++	struct rq *rq;
++	int cpu;
++
++	cpu = smp_processor_id();
++	rq = cpu_rq(cpu);
++	prev = rq->curr;
++
++	schedule_debug(prev, preempt);
++
++	/* Bypass sched_feat(HRTICK) checking, which Alt schedule FW doesn't support */
++	hrtick_clear(rq);
++
++	local_irq_disable();
++	rcu_note_context_switch(preempt);
++
++	/*
++	 * Make sure that signal_pending_state()->signal_pending() below
++	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++	 * done by the caller to avoid the race with signal_wake_up():
++	 *
++	 * __set_current_state(@state)		signal_wake_up()
++	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
++	 *					  wake_up_state(p, state)
++	 *   LOCK rq->lock			    LOCK p->pi_state
++	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
++	 *     if (signal_pending_state())	    if (p->state & @state)
++	 *
++	 * Also, the membarrier system call requires a full memory barrier
++	 * after coming from user-space, before storing to rq->curr.
++	 */
++	raw_spin_lock(&rq->lock);
++	smp_mb__after_spinlock();
++
++	update_rq_clock(rq);
++
++	switch_count = &prev->nivcsw;
++	/*
++	 * We must load prev->state once (task_struct::state is volatile), such
++	 * that:
++	 *
++	 *  - we form a control dependency vs deactivate_task() below.
++	 *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
++	 */
++	prev_state = prev->state;
++	if (!preempt && prev_state && prev_state == prev->state) {
++		if (signal_pending_state(prev_state, prev)) {
++			prev->state = TASK_RUNNING;
++		} else {
++			prev->sched_contributes_to_load =
++				(prev_state & TASK_UNINTERRUPTIBLE) &&
++				!(prev_state & TASK_NOLOAD) &&
++				!(prev->flags & PF_FROZEN);
++
++			if (prev->sched_contributes_to_load)
++				rq->nr_uninterruptible++;
++
++			/*
++			 * __schedule()			ttwu()
++			 *   prev_state = prev->state;    if (p->on_rq && ...)
++			 *   if (prev_state)		    goto out;
++			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
++			 *				  p->state = TASK_WAKING
++			 *
++			 * Where __schedule() and ttwu() have matching control dependencies.
++			 *
++			 * After this, schedule() must not care about p->state any more.
++			 */
++			sched_task_deactivate(prev, rq);
++			deactivate_task(prev, rq);
++
++			if (prev->in_iowait) {
++				atomic_inc(&rq->nr_iowait);
++				delayacct_blkio_start();
++			}
++		}
++		switch_count = &prev->nvcsw;
++	}
++
++	check_curr(prev, rq);
++
++	next = choose_next_task(rq, cpu, prev);
++	clear_tsk_need_resched(prev);
++	clear_preempt_need_resched();
++
++	if (likely(prev != next)) {
++		next->last_ran = rq->clock_task;
++		rq->last_ts_switch = rq->clock;
++
++		rq->nr_switches++;
++		/*
++		 * RCU users of rcu_dereference(rq->curr) may not see
++		 * changes to task_struct made by choose_next_task().
++		 */
++		RCU_INIT_POINTER(rq->curr, next);
++		/*
++		 * The membarrier system call requires each architecture
++		 * to have a full memory barrier after updating
++		 * rq->curr, before returning to user-space.
++		 *
++		 * Here are the schemes providing that barrier on the
++		 * various architectures:
++		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
++		 *   switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
++		 * - finish_lock_switch() for weakly-ordered
++		 *   architectures where spin_unlock is a full barrier,
++		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
++		 *   is a RELEASE barrier),
++		 */
++		++*switch_count;
++
++		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
++
++		trace_sched_switch(preempt, prev, next);
++
++		/* Also unlocks the rq: */
++		rq = context_switch(rq, prev, next);
++	} else {
++		__balance_callbacks(rq);
++		raw_spin_unlock_irq(&rq->lock);
++	}
++
++#ifdef CONFIG_SCHED_SMT
++	sg_balance_check(rq);
++#endif
++}
++
++void __noreturn do_task_dead(void)
++{
++	/* Causes final put_task_struct in finish_task_switch(): */
++	set_special_state(TASK_DEAD);
++
++	/* Tell freezer to ignore us: */
++	current->flags |= PF_NOFREEZE;
++
++	__schedule(false);
++	BUG();
++
++	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++	for (;;)
++		cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++	unsigned int task_flags;
++
++	if (!tsk->state)
++		return;
++
++	task_flags = tsk->flags;
++	/*
++	 * If a worker went to sleep, notify and ask workqueue whether
++	 * it wants to wake up a task to maintain concurrency.
++	 * As this function is called inside the schedule() context,
++	 * we disable preemption to avoid it calling schedule() again
++	 * in the possible wakeup of a kworker and because wq_worker_sleeping()
++	 * requires it.
++	 */
++	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		preempt_disable();
++		if (task_flags & PF_WQ_WORKER)
++			wq_worker_sleeping(tsk);
++		else
++			io_wq_worker_sleeping(tsk);
++		preempt_enable_no_resched();
++	}
++
++	if (tsk_is_pi_blocked(tsk))
++		return;
++
++	/*
++	 * If we are going to sleep and we have plugged IO queued,
++	 * make sure to submit it to avoid deadlocks.
++	 */
++	if (blk_needs_flush_plug(tsk))
++		blk_schedule_flush_plug(tsk);
++}
++
++static void sched_update_worker(struct task_struct *tsk)
++{
++	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (tsk->flags & PF_WQ_WORKER)
++			wq_worker_running(tsk);
++		else
++			io_wq_worker_running(tsk);
++	}
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++	struct task_struct *tsk = current;
++
++	sched_submit_work(tsk);
++	do {
++		preempt_disable();
++		__schedule(false);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++	sched_update_worker(tsk);
++}
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (have scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++	/*
++	 * As this skips calling sched_submit_work(), which the idle task does
++	 * regardless because that function is a nop when the task is in a
++	 * TASK_RUNNING state, make sure this isn't used someplace that the
++	 * current task can be in any other state. Note, idle is always in the
++	 * TASK_RUNNING state.
++	 */
++	WARN_ON_ONCE(current->state);
++	do {
++		__schedule(false);
++	} while (need_resched());
++}
++
++#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
++asmlinkage __visible void __sched schedule_user(void)
++{
++	/*
++	 * If we come here after a random call to set_need_resched(),
++	 * or we have been woken up remotely but the IPI has not yet arrived,
++	 * we haven't yet exited the RCU idle mode. Do it here manually until
++	 * we find a better solution.
++	 *
++	 * NB: There are buggy callers of this function.  Ideally we
++	 * should warn if prev_state != CONTEXT_USER, but that will trigger
++	 * too frequently to make sense yet.
++	 */
++	enum ctx_state prev_state = exception_enter();
++	schedule();
++	exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	sched_preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
++
++static void __sched notrace preempt_schedule_common(void)
++{
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		__schedule(true);
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++
++		/*
++		 * Check again in case we missed a preemption opportunity
++		 * between schedule and now.
++		 */
++	} while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPTION
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++	/*
++	 * If there is a non-zero preempt_count or interrupts are disabled,
++	 * we do not want to preempt the current task. Just return.
++	 */
++	if (likely(!preemptible()))
++		return;
++
++	preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
++#endif
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++	enum ctx_state prev_ctx;
++
++	if (likely(!preemptible()))
++		return;
++
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		/*
++		 * Needs preempt disabled in case user_exit() is traced
++		 * and the tracer calls preempt_enable_notrace() causing
++		 * an infinite recursion.
++		 */
++		prev_ctx = exception_enter();
++		__schedule(true);
++		exception_exit(prev_ctx);
++
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++	} while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
++#endif
++
++#endif /* CONFIG_PREEMPTION */
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++
++#include <linux/entry-common.h>
++
++/*
++ * SC:cond_resched
++ * SC:might_resched
++ * SC:preempt_schedule
++ * SC:preempt_schedule_notrace
++ * SC:irqentry_exit_cond_resched
++ *
++ *
++ * NONE:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * VOLUNTARY:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- __cond_resched
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * FULL:
++ *   cond_resched               <- RET0
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- preempt_schedule
++ *   preempt_schedule_notrace   <- preempt_schedule_notrace
++ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
++ */
++
++enum {
++	preempt_dynamic_none = 0,
++	preempt_dynamic_voluntary,
++	preempt_dynamic_full,
++};
++
++static int preempt_dynamic_mode = preempt_dynamic_full;
++
++static int sched_dynamic_mode(const char *str)
++{
++	if (!strcmp(str, "none"))
++		return preempt_dynamic_none;
++
++	if (!strcmp(str, "voluntary"))
++		return preempt_dynamic_voluntary;
++
++	if (!strcmp(str, "full"))
++		return preempt_dynamic_full;
++
++	return -EINVAL;
++}
++
++static void sched_dynamic_update(int mode)
++{
++	/*
++	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
++	 * the ZERO state, which is invalid.
++	 */
++	static_call_update(cond_resched, __cond_resched);
++	static_call_update(might_resched, __cond_resched);
++	static_call_update(preempt_schedule, __preempt_schedule_func);
++	static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
++	static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
++
++	switch (mode) {
++	case preempt_dynamic_none:
++		static_call_update(cond_resched, __cond_resched);
++		static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
++		static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
++		static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
++		static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
++		pr_info("Dynamic Preempt: none\n");
++		break;
++
++	case preempt_dynamic_voluntary:
++		static_call_update(cond_resched, __cond_resched);
++		static_call_update(might_resched, __cond_resched);
++		static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
++		static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
++		static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
++		pr_info("Dynamic Preempt: voluntary\n");
++		break;
++
++	case preempt_dynamic_full:
++		static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0);
++		static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
++		static_call_update(preempt_schedule, __preempt_schedule_func);
++		static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
++		static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: full\n");
++		break;
++	}
++
++	preempt_dynamic_mode = mode;
++}
++
++static int __init setup_preempt_mode(char *str)
++{
++	int mode = sched_dynamic_mode(str);
++
++	if (mode < 0) {
++		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
++		return 1;
++	}
++
++	sched_dynamic_update(mode);
++	return 0;
++}
++__setup("preempt=", setup_preempt_mode);
++
++#ifdef CONFIG_SCHED_DEBUG
++
++static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
++				   size_t cnt, loff_t *ppos)
++{
++	char buf[16];
++	int mode;
++
++	if (cnt > 15)
++		cnt = 15;
++
++	if (copy_from_user(&buf, ubuf, cnt))
++		return -EFAULT;
++
++	buf[cnt] = 0;
++	mode = sched_dynamic_mode(strstrip(buf));
++	if (mode < 0)
++		return mode;
++
++	sched_dynamic_update(mode);
++
++	*ppos += cnt;
++
++	return cnt;
++}
++
++static int sched_dynamic_show(struct seq_file *m, void *v)
++{
++	static const char *preempt_modes[] = {
++		"none", "voluntary", "full"
++	};
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
++		if (preempt_dynamic_mode == i)
++			seq_puts(m, "(");
++		seq_puts(m, preempt_modes[i]);
++		if (preempt_dynamic_mode == i)
++			seq_puts(m, ")");
++
++		seq_puts(m, " ");
++	}
++
++	seq_puts(m, "\n");
++	return 0;
++}
++
++static int sched_dynamic_open(struct inode *inode, struct file *filp)
++{
++	return single_open(filp, sched_dynamic_show, NULL);
++}
++
++static const struct file_operations sched_dynamic_fops = {
++	.open		= sched_dynamic_open,
++	.write		= sched_dynamic_write,
++	.read		= seq_read,
++	.llseek		= seq_lseek,
++	.release	= single_release,
++};
++
++static __init int sched_init_debug_dynamic(void)
++{
++	debugfs_create_file("sched_preempt", 0644, NULL, NULL, &sched_dynamic_fops);
++	return 0;
++}
++late_initcall(sched_init_debug_dynamic);
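++
++/*
++ * Usage sketch (assuming CONFIG_PREEMPT_DYNAMIC and CONFIG_SCHED_DEBUG):
++ * pick a mode at boot with preempt=none|voluntary|full, or switch at
++ * runtime through the debugfs file created above:
++ *
++ *	# cat /sys/kernel/debug/sched_preempt
++ *	none voluntary (full)
++ *	# echo voluntary > /sys/kernel/debug/sched_preempt
++ */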
++
++#endif /* CONFIG_SCHED_DEBUG */
++#endif /* CONFIG_PREEMPT_DYNAMIC */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This will
++ * protect us against recursive calling from irq.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++	enum ctx_state prev_state;
++
++	/* Catch callers which need to be fixed */
++	BUG_ON(preempt_count() || !irqs_disabled());
++
++	prev_state = exception_enter();
++
++	do {
++		preempt_disable();
++		local_irq_enable();
++		__schedule(true);
++		local_irq_disable();
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++
++	exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++			  void *key)
++{
++	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
++	return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++static inline void check_task_changed(struct rq *rq, struct task_struct *p)
++{
++	/* Trigger resched if task sched_prio has been modified. */
++	if (task_on_rq_queued(p) && sched_task_need_requeue(p, rq)) {
++		requeue_task(p, rq);
++		check_preempt_curr(rq);
++	}
++}
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++	if (pi_task)
++		prio = min(prio, pi_task->prio);
++
++	return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++	return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++	int prio;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	/* XXX used to be waiter->prio, not waiter->task->prio */
++	prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++	/*
++	 * If nothing changed, bail early.
++	 */
++	if (p->pi_top_task == pi_task && prio == p->prio)
++		return;
++
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Set under pi_lock && rq->lock, such that the value can be used under
++	 * either lock.
++	 *
++	 * Note that there is a lot of trickery to make this pointer cache work
++	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++	 * ensure a task is de-boosted (pi_task is set to NULL) before the
++	 * task is allowed to run again (and can exit). This ensures the pointer
++	 * points to a blocked task -- which guarantees the task is present.
++	 */
++	p->pi_top_task = pi_task;
++
++	/*
++	 * For FIFO/RR we only need to set prio, if that matches we're done.
++	 */
++	if (prio == p->prio)
++		goto out_unlock;
++
++	/*
++	 * Idle task boosting is a no-no in general. There is one
++	 * exception, when PREEMPT_RT and NOHZ is active:
++	 *
++	 * The idle task calls get_next_timer_interrupt() and holds
++	 * the timer wheel base->lock on the CPU and another CPU wants
++	 * to access the timer (probably to cancel it). We can safely
++	 * ignore the boosting request, as the idle CPU runs this code
++	 * with interrupts disabled and will complete the lock
++	 * protected section without being interrupted. So there is no
++	 * real need to boost.
++	 */
++	if (unlikely(p == rq->idle)) {
++		WARN_ON(p != rq->curr);
++		WARN_ON(p->pi_blocked_on);
++		goto out_unlock;
++	}
++
++	trace_sched_pi_setprio(p, pi_task);
++	p->prio = prio;
++	update_task_priodl(p);
++
++	check_task_changed(rq, p);
++out_unlock:
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++
++	__balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++
++	preempt_enable();
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	return prio;
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++		return;
++	/*
++	 * We have to be careful, if called from sys_setpriority(),
++	 * the task might be in the middle of scheduling on another CPU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = __task_access_lock(p, &lock);
++
++	p->static_prio = NICE_TO_PRIO(nice);
++	/*
++	 * The RT priorities are set via sched_setscheduler(), but we still
++	 * allow the 'normal' nice value to be set - but as expected
++	 * it won't have any effect on scheduling while the task's policy
++	 * is not SCHED_NORMAL/SCHED_BATCH:
++	 */
++	if (task_has_rt_policy(p))
++		goto out_unlock;
++
++	p->prio = effective_prio(p);
++	update_task_priodl(p);
++
++	check_task_changed(rq, p);
++out_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++	/* Convert nice value [19,-20] to rlimit style value [1,40] */
++	int nice_rlim = nice_to_rlimit(nice);
++
++	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
++		capable(CAP_SYS_NICE));
++}
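++
++/*
++ * Worked example of the check above, assuming the standard
++ * nice_to_rlimit() mapping (MAX_NICE - nice + 1):
++ *
++ *	nice  19 -> rlimit  1
++ *	nice   0 -> rlimit 20
++ *	nice -20 -> rlimit 40
++ *
++ * so an RLIMIT_NICE of 40 permits the full range without CAP_SYS_NICE.
++ */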
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++	long nice, retval;
++
++	/*
++	 * Setpriority might change our priority at the same moment.
++	 * We don't have to worry. Conceptually one call occurs first
++	 * and we have a single winner.
++	 */
++
++	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++	nice = task_nice(current) + increment;
++
++	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++	if (increment < 0 && !can_nice(current, nice))
++		return -EPERM;
++
++	retval = security_task_setnice(current, nice);
++	if (retval)
++		return retval;
++
++	set_user_nice(current, nice);
++	return 0;
++}
++
++#endif
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (rq->curr != rq->idle)
++		return 0;
++
++	if (rq->nr_running)
++		return 0;
++
++#ifdef CONFIG_SMP
++	if (rq->ttwu_pending)
++		return 0;
++#endif
++
++	return 1;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the cpu @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++	return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++	return pid ? find_task_by_vpid(pid) : current;
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++static void __setscheduler_params(struct task_struct *p,
++		const struct sched_attr *attr)
++{
++	int policy = attr->sched_policy;
++
++	if (policy == SETPARAM_POLICY)
++		policy = p->policy;
++
++	p->policy = policy;
++
++	/*
++	 * allow the normal nice value to be set, but it will not have any
++	 * effect on scheduling while the task's policy is not
++	 * SCHED_NORMAL/SCHED_BATCH
++	 */
++	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++
++	/*
++	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
++	 * !rt_policy. Always setting this ensures that things like
++	 * getparam()/getattr() don't report silly values for !rt tasks.
++	 */
++	p->rt_priority = attr->sched_priority;
++	p->normal_prio = normal_prio(p);
++}
++
++/* Actually do priority change: must hold rq lock. */
++static void __setscheduler(struct rq *rq, struct task_struct *p,
++			   const struct sched_attr *attr, bool keep_boost)
++{
++	__setscheduler_params(p, attr);
++
++	/*
++	 * Keep a potential priority boosting if called from
++	 * sched_setscheduler().
++	 */
++	p->prio = normal_prio(p);
++	if (keep_boost)
++		p->prio = rt_effective_prio(p, p->prio);
++	update_task_priodl(p);
++}
++
++/*
++ * check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++	const struct cred *cred = current_cred(), *pcred;
++	bool match;
++
++	rcu_read_lock();
++	pcred = __task_cred(p);
++	match = (uid_eq(cred->euid, pcred->euid) ||
++		 uid_eq(cred->euid, pcred->uid));
++	rcu_read_unlock();
++	return match;
++}
++
++static int __sched_setscheduler(struct task_struct *p,
++				const struct sched_attr *attr,
++				bool user, bool pi)
++{
++	const struct sched_attr dl_squash_attr = {
++		.size		= sizeof(struct sched_attr),
++		.sched_policy	= SCHED_FIFO,
++		.sched_nice	= 0,
++		.sched_priority = 99,
++	};
++	int newprio = MAX_RT_PRIO - 1 - attr->sched_priority;
++	int retval, oldpolicy = -1;
++	int policy = attr->sched_policy;
++	struct callback_head *head;
++	unsigned long flags;
++	struct rq *rq;
++	int reset_on_fork;
++	raw_spinlock_t *lock;
++
++	/* The pi code expects interrupts enabled */
++	BUG_ON(pi && in_interrupt());
++
++	/*
++	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into prio 0 SCHED_FIFO
++	 */
++	if (unlikely(SCHED_DEADLINE == policy)) {
++		attr = &dl_squash_attr;
++		policy = attr->sched_policy;
++		newprio = MAX_RT_PRIO - 1 - attr->sched_priority;
++	}
++recheck:
++	/* Double check policy once rq lock held */
++	if (policy < 0) {
++		reset_on_fork = p->sched_reset_on_fork;
++		policy = oldpolicy = p->policy;
++	} else {
++		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++		if (policy > SCHED_IDLE)
++			return -EINVAL;
++	}
++
++	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++		return -EINVAL;
++
++	/*
++	 * Valid priorities for SCHED_FIFO and SCHED_RR are
++	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
++	 * SCHED_BATCH and SCHED_IDLE is 0.
++	 */
++	if (attr->sched_priority < 0 ||
++	    attr->sched_priority > MAX_RT_PRIO - 1)
++		return -EINVAL;
++	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++	    (attr->sched_priority != 0))
++		return -EINVAL;
++
++	/*
++	 * Allow unprivileged RT tasks to decrease priority:
++	 */
++	if (user && !capable(CAP_SYS_NICE)) {
++		if (SCHED_FIFO == policy || SCHED_RR == policy) {
++			unsigned long rlim_rtprio =
++					task_rlimit(p, RLIMIT_RTPRIO);
++
++			/* Can't set/change the rt policy */
++			if (policy != p->policy && !rlim_rtprio)
++				return -EPERM;
++
++			/* Can't increase priority */
++			if (attr->sched_priority > p->rt_priority &&
++			    attr->sched_priority > rlim_rtprio)
++				return -EPERM;
++		}
++
++		/* Can't change other user's priorities */
++		if (!check_same_owner(p))
++			return -EPERM;
++
++		/* Normal users shall not reset the sched_reset_on_fork flag */
++		if (p->sched_reset_on_fork && !reset_on_fork)
++			return -EPERM;
++	}
++
++	if (user) {
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	if (pi)
++		cpuset_read_lock();
++
++	/*
++	 * Make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	/*
++	 * To be able to change p->policy safely, task_access_lock()
++	 * must be held.
++	 * If task_access_lock() is used here: for a task p which is not
++	 * running, reading rq->stop is racy but acceptable, as ->stop
++	 * doesn't change much. An enhancement could be made to read
++	 * rq->stop safely.
++	 */
++	rq = __task_access_lock(p, &lock);
++
++	/*
++	 * Changing the policy of the stop threads is a very bad idea
++	 */
++	if (p == rq->stop) {
++		retval = -EINVAL;
++		goto unlock;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy)) {
++		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++			goto change;
++		if (!rt_policy(policy) &&
++		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
++			goto change;
++
++		p->sched_reset_on_fork = reset_on_fork;
++		retval = 0;
++		goto unlock;
++	}
++change:
++
++	/* Re-check policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		if (pi)
++			cpuset_read_unlock();
++		goto recheck;
++	}
++
++	p->sched_reset_on_fork = reset_on_fork;
++
++	if (pi) {
++		/*
++		 * Take priority boosted tasks into account. If the new
++		 * effective priority is unchanged, we just store the new
++		 * normal parameters and do not touch the scheduler class and
++		 * the runqueue. This will be done when the task deboosts
++		 * itself.
++		 */
++		if (rt_effective_prio(p, newprio) == p->prio) {
++			__setscheduler_params(p, attr);
++			retval = 0;
++			goto unlock;
++		}
++	}
++
++	__setscheduler(rq, p, attr, pi);
++
++	check_task_changed(rq, p);
++
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++	head = splice_balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	if (pi) {
++		cpuset_read_unlock();
++		rt_mutex_adjust_pi(p);
++	}
++
++	/* Run balance callbacks after we've adjusted the PI chain: */
++	balance_callbacks(rq, head);
++	preempt_enable();
++
++	return 0;
++
++unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++	if (pi)
++		cpuset_read_unlock();
++	return retval;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++			       const struct sched_param *param, bool check)
++{
++	struct sched_attr attr = {
++		.sched_policy   = policy,
++		.sched_priority = param->sched_priority,
++		.sched_nice     = PRIO_TO_NICE(p->static_prio),
++	};
++
++	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
++	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
++		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++		policy &= ~SCHED_RESET_ON_FORK;
++		attr.sched_policy = policy;
++	}
++
++	return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Use sched_set_fifo(), read its comment.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may be already dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++		       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, true);
++}
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, true, true);
++}
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, false, true);
++}
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission.  For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++			       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, false);
++}
++
++/*
++ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
++ * incapable of resource management, which is the one thing an OS really should
++ * be doing.
++ *
++ * This is of course the reason it is limited to privileged users only.
++ *
++ * Worse still; it is fundamentally impossible to compose static priority
++ * workloads. You cannot take two correctly working static prio workloads
++ * and smash them together and still expect them to work.
++ *
++ * For this reason 'all' FIFO tasks the kernel creates are basically at:
++ *
++ *   MAX_RT_PRIO / 2
++ *
++ * The administrator _MUST_ configure the system, the kernel simply doesn't
++ * know enough information to make a sensible choice.
++ */
++void sched_set_fifo(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo);
++
++/*
++ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
++ */
++void sched_set_fifo_low(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = 1 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo_low);
++
++void sched_set_normal(struct task_struct *p, int nice)
++{
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++		.sched_nice = nice,
++	};
++	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_normal);
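++/*
++ * A minimal usage sketch for the three helpers above, assuming a
++ * hypothetical driver thread example_thread_fn(); kernel code that
++ * needs elevated scheduling is expected to go through these helpers
++ * rather than hard-code a priority:
++ *
++ *	struct task_struct *tsk;
++ *
++ *	tsk = kthread_run(example_thread_fn, NULL, "example");
++ *	if (!IS_ERR(tsk))
++ *		sched_set_fifo(tsk);	// lands at MAX_RT_PRIO / 2
++ */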
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++	struct sched_param lparam;
++	struct task_struct *p;
++	int retval;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++		return -EFAULT;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setscheduler(p, policy, &lparam);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
++{
++	u32 size;
++	int ret;
++
++	/* Zero the full structure, so that a short copy will be nice: */
++	memset(attr, 0, sizeof(*attr));
++
++	ret = get_user(size, &uattr->size);
++	if (ret)
++		return ret;
++
++	/* ABI compatibility quirk: */
++	if (!size)
++		size = SCHED_ATTR_SIZE_VER0;
++
++	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
++		goto err_size;
++
++	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
++	if (ret) {
++		if (ret == -E2BIG)
++			goto err_size;
++		return ret;
++	}
++
++	/*
++	 * XXX: Do we want to be lenient like existing syscalls; or do we want
++	 * to be strict and return an error on out-of-bounds values?
++	 */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
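++/*
++ * A user-space sketch of the size handshake this copy implements
++ * (raw syscall, as glibc provides no sched_setattr() wrapper; the
++ * nice value is illustrative):
++ *
++ *	struct sched_attr attr = {
++ *		.size		= sizeof(attr),
++ *		.sched_policy	= SCHED_NORMAL,
++ *		.sched_nice	= 5,
++ *	};
++ *	syscall(SYS_sched_setattr, 0, &attr, 0);
++ *
++ * A size of 0 is treated as SCHED_ATTR_SIZE_VER0; a larger-than-known
++ * struct is accepted only if the bytes the kernel does not understand
++ * are zero, otherwise -E2BIG comes back with uattr->size rewritten to
++ * the kernel's sizeof(struct sched_attr).
++ */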
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setattr(p, &attr);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp = { .sched_priority = 0 };
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (!param || pid < 0)
++		goto out_nounlock;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	if (task_has_rt_policy(p))
++		lp.sched_priority = p->rt_priority;
++	rcu_read_unlock();
++
++	/*
++	 * This one might sleep; we cannot do it with a spinlock held ...
++	 */
++	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++	return retval;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/*
++ * Copy the kernel size attribute structure (which might be larger
++ * than what user-space knows about) to user-space.
++ *
++ * Note that all cases are valid: user-space buffer can be larger or
++ * smaller than the kernel-space buffer. The usual case is that both
++ * have the same size.
++ */
++static int
++sched_attr_copy_to_user(struct sched_attr __user *uattr,
++			struct sched_attr *kattr,
++			unsigned int usize)
++{
++	unsigned int ksize = sizeof(*kattr);
++
++	if (!access_ok(uattr, usize))
++		return -EFAULT;
++
++	/*
++	 * sched_getattr() ABI forwards and backwards compatibility:
++	 *
++	 * If usize == ksize then we just copy everything to user-space and all is good.
++	 *
++	 * If usize < ksize then we only copy as much as user-space has space for,
++	 * this keeps ABI compatibility as well. We skip the rest.
++	 *
++	 * If usize > ksize then user-space is using a newer version of the ABI,
++	 * parts of which the kernel doesn't know about. Just ignore it - tooling can
++	 * detect the kernel's knowledge of attributes from the attr->size value
++	 * which is set to ksize in this case.
++	 */
++	kattr->size = min(usize, ksize);
++
++	if (copy_to_user(uattr, kattr, kattr->size))
++		return -EFAULT;
++
++	return 0;
++}
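++/*
++ * Worked example of the rules above, with illustrative sizes: if the
++ * kernel's ksize is 56 and an old binary passes usize == 48, only the
++ * first 48 bytes are copied and attr->size reads back as 48; if a
++ * newer binary passes usize == 64, all 56 known bytes are copied and
++ * attr->size == 56 tells the tooling where the kernel stopped.
++ */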
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @usize: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++		unsigned int, usize, unsigned int, flags)
++{
++	struct sched_attr kattr = { };
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
++	    usize < SCHED_ATTR_SIZE_VER0 || flags)
++		return -EINVAL;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	kattr.sched_policy = p->policy;
++	if (p->sched_reset_on_fork)
++		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++	if (task_has_rt_policy(p))
++		kattr.sched_priority = p->rt_priority;
++	else
++		kattr.sched_nice = task_nice(p);
++
++#ifdef CONFIG_UCLAMP_TASK
++	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
++	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
++#endif
++
++	rcu_read_unlock();
++
++	return sched_attr_copy_to_user(uattr, &kattr, usize);
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++	cpumask_var_t cpus_allowed, new_mask;
++	struct task_struct *p;
++	int retval;
++
++	rcu_read_lock();
++
++	p = find_process_by_pid(pid);
++	if (!p) {
++		rcu_read_unlock();
++		return -ESRCH;
++	}
++
++	/* Prevent p going away */
++	get_task_struct(p);
++	rcu_read_unlock();
++
++	if (p->flags & PF_NO_SETAFFINITY) {
++		retval = -EINVAL;
++		goto out_put_task;
++	}
++	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_put_task;
++	}
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_free_cpus_allowed;
++	}
++	retval = -EPERM;
++	if (!check_same_owner(p)) {
++		rcu_read_lock();
++		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++			rcu_read_unlock();
++			goto out_free_new_mask;
++		}
++		rcu_read_unlock();
++	}
++
++	retval = security_task_setscheduler(p);
++	if (retval)
++		goto out_free_new_mask;
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	cpumask_and(new_mask, in_mask, cpus_allowed);
++
++again:
++	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
++
++	if (!retval) {
++		cpuset_cpus_allowed(p, cpus_allowed);
++		if (!cpumask_subset(new_mask, cpus_allowed)) {
++			/*
++			 * We must have raced with a concurrent cpuset
++			 * update. Just reset the cpus_allowed to the
++			 * cpuset's cpus_allowed
++			 */
++			cpumask_copy(new_mask, cpus_allowed);
++			goto again;
++		}
++	}
++out_free_new_mask:
++	free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++	free_cpumask_var(cpus_allowed);
++out_put_task:
++	put_task_struct(p);
++	return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++			     struct cpumask *new_mask)
++{
++	if (len < cpumask_size())
++		cpumask_clear(new_mask);
++	else if (len > cpumask_size())
++		len = cpumask_size();
++
++	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	cpumask_var_t new_mask;
++	int retval;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++	if (retval == 0)
++		retval = sched_setaffinity(pid, new_mask);
++	free_cpumask_var(new_mask);
++	return retval;
++}
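++/*
++ * User-space sketch of the syscall above via the glibc wrapper,
++ * pinning the calling thread to CPU 2 (illustrative only):
++ *
++ *	cpu_set_t set;
++ *
++ *	CPU_ZERO(&set);
++ *	CPU_SET(2, &set);
++ *	if (sched_setaffinity(0, sizeof(set), &set))
++ *		perror("sched_setaffinity");
++ */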
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++	struct task_struct *p;
++	raw_spinlock_t *lock;
++	unsigned long flags;
++	int retval;
++
++	rcu_read_lock();
++
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	task_access_lock_irqsave(p, &lock, &flags);
++	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++out_unlock:
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An
++ * error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	int ret;
++	cpumask_var_t mask;
++
++	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++		return -EINVAL;
++	if (len & (sizeof(unsigned long)-1))
++		return -EINVAL;
++
++	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	ret = sched_getaffinity(pid, mask);
++	if (ret == 0) {
++		unsigned int retlen = min_t(size_t, len, cpumask_size());
++
++		if (copy_to_user(user_mask_ptr, mask, retlen))
++			ret = -EFAULT;
++		else
++			ret = retlen;
++	}
++	free_cpumask_var(mask);
++
++	return ret;
++}
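++/*
++ * Note the asymmetry with the setter: the raw syscall returns the
++ * number of mask bytes copied out (retlen above), not 0, and the
++ * glibc sched_getaffinity() wrapper hides this. A raw-syscall sketch:
++ *
++ *	cpu_set_t set;
++ *	long n = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);
++ *	// n > 0 means the first n bytes of the mask are valid
++ */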
++
++static void do_sched_yield(void)
++{
++	struct rq *rq;
++	struct rq_flags rf;
++
++	if (!sched_yield_type)
++		return;
++
++	rq = this_rq_lock_irq(&rf);
++
++	schedstat_inc(rq->yld_count);
++
++	if (1 == sched_yield_type) {
++		if (!rt_task(current))
++			do_sched_yield_type_1(current, rq);
++	} else if (2 == sched_yield_type) {
++		if (rq->nr_running > 1)
++			rq->skip = current;
++	}
++
++	preempt_disable();
++	raw_spin_unlock_irq(&rq->lock);
++	sched_preempt_enable_no_resched();
++
++	schedule();
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++	do_sched_yield();
++	return 0;
++}
++
++#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
++int __sched __cond_resched(void)
++{
++	if (should_resched(0)) {
++		preempt_schedule_common();
++		return 1;
++	}
++#ifndef CONFIG_PREEMPT_RCU
++	rcu_all_qs();
++#endif
++	return 0;
++}
++EXPORT_SYMBOL(__cond_resched);
++#endif
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(cond_resched);
++
++DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(might_resched);
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held(lock);
++
++	if (spin_needbreak(lock) || resched) {
++		spin_unlock(lock);
++		if (resched)
++			preempt_schedule_common();
++		else
++			cpu_relax();
++		ret = 1;
++		spin_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
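++/*
++ * Typical use is via the cond_resched_lock() wrapper inside a long
++ * loop that must otherwise hold a spinlock; more_work() and
++ * process_one_item() below are hypothetical:
++ *
++ *	spin_lock(&mylock);
++ *	while (more_work()) {
++ *		process_one_item();
++ *		cond_resched_lock(&mylock);	// may drop and reacquire
++ *	}
++ *	spin_unlock(&mylock);
++ *
++ * Any state derived from the protected data must be revalidated after
++ * a return value of 1, since the lock may have been dropped.
++ */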
++
++int __cond_resched_rwlock_read(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_read(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		read_unlock(lock);
++		if (resched)
++			preempt_schedule_common();
++		else
++			cpu_relax();
++		ret = 1;
++		read_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_read);
++
++int __cond_resched_rwlock_write(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_write(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		write_unlock(lock);
++		if (resched)
++			preempt_schedule_common();
++		else
++			cpu_relax();
++		ret = 1;
++		write_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_write);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run, if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * 	yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++	set_current_state(TASK_RUNNING);
++	do_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * In Alt schedule FW, yield_to is not supported.
++ *
++ * Return:
++ *	true (>0) if we indeed boosted the target task.
++ *	false (0) if we failed to boost the target.
++ *	-ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
++int io_schedule_prepare(void)
++{
++	int old_iowait = current->in_iowait;
++
++	current->in_iowait = 1;
++	blk_schedule_flush_plug(current);
++
++	return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++	current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++	int token;
++	long ret;
++
++	token = io_schedule_prepare();
++	ret = schedule_timeout(timeout);
++	io_schedule_finish(token);
++
++	return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void __sched io_schedule(void)
++{
++	int token;
++
++	token = io_schedule_prepare();
++	schedule();
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
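++/*
++ * The prepare/finish pair lets sleeps other than a plain schedule()
++ * be accounted as iowait as well; a sketch mirroring what mainline's
++ * mutex_lock_io() does:
++ *
++ *	int tok = io_schedule_prepare();
++ *	mutex_lock(&mtx);	// sleeping here counts as iowait
++ *	io_schedule_finish(tok);
++ */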
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = MAX_RT_PRIO - 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
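++/*
++ * User-space sketch querying the valid range before picking an RT
++ * priority (glibc wrappers; the midpoint choice is illustrative):
++ *
++ *	int lo = sched_get_priority_min(SCHED_FIFO);	// 1
++ *	int hi = sched_get_priority_max(SCHED_FIFO);	// MAX_RT_PRIO - 1
++ *	struct sched_param sp = { .sched_priority = (lo + hi) / 2 };
++ *
++ *	sched_setscheduler(0, SCHED_FIFO, &sp);
++ */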
++
++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
++{
++	struct task_struct *p;
++	int retval;
++
++	alt_sched_debug();
++
++	if (pid < 0)
++		return -EINVAL;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++	rcu_read_unlock();
++
++	*t = ns_to_timespec64(sched_timeslice_ns);
++	return 0;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++		struct __kernel_timespec __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_timespec64(&t, interval);
++
++	return retval;
++}
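++/*
++ * User-space sketch (glibc wrapper); note that under this scheduler
++ * every task reports the same fixed timeslice rather than a
++ * per-policy round-robin quantum:
++ *
++ *	struct timespec ts;
++ *
++ *	if (sched_rr_get_interval(0, &ts) == 0)
++ *		printf("%ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
++ */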
++
++#ifdef CONFIG_COMPAT_32BIT_TIME
++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
++		struct old_timespec32 __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_old_timespec32(&t, interval);
++	return retval;
++}
++#endif
++
++void sched_show_task(struct task_struct *p)
++{
++	unsigned long free = 0;
++	int ppid;
++
++	if (!try_get_task_stack(p))
++		return;
++
++	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
++
++	if (p->state == TASK_RUNNING)
++		pr_cont("  running task    ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++	free = stack_not_used(p);
++#endif
++	ppid = 0;
++	rcu_read_lock();
++	if (pid_alive(p))
++		ppid = task_pid_nr(rcu_dereference(p->real_parent));
++	rcu_read_unlock();
++	pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
++		free, task_pid_nr(p), ppid,
++		(unsigned long)task_thread_info(p)->flags);
++
++	print_worker_info(KERN_INFO, p);
++	print_stop_info(KERN_INFO, p);
++	show_stack(p, NULL, KERN_INFO);
++	put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++	/* no filter, everything matches */
++	if (!state_filter)
++		return true;
++
++	/* filter, but doesn't match */
++	if (!(p->state & state_filter))
++		return false;
++
++	/*
++	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++	 * TASK_KILLABLE).
++	 */
++	if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
++		return false;
++
++	return true;
++}
++
++
++void show_state_filter(unsigned long state_filter)
++{
++	struct task_struct *g, *p;
++
++	rcu_read_lock();
++	for_each_process_thread(g, p) {
++		/*
++		 * reset the NMI-timeout, listing all files on a slow
++		 * console might take a lot of time:
++		 * Also, reset softlockup watchdogs on all CPUs, because
++		 * another CPU might be blocked waiting for us to process
++		 * an IPI.
++		 */
++		touch_nmi_watchdog();
++		touch_all_softlockup_watchdogs();
++		if (state_filter_match(state_filter, p))
++			sched_show_task(p);
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	/* TODO: Alt schedule FW should support this
++	if (!state_filter)
++		sysrq_sched_debug_show();
++	*/
++#endif
++	rcu_read_unlock();
++	/*
++	 * Only show locks if all tasks are dumped:
++	 */
++	if (!state_filter)
++		debug_show_all_locks();
++}
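++/*
++ * This is what backs the sysrq task dump; the full listing can be
++ * triggered from user space with:
++ *
++ *	echo t > /proc/sysrq-trigger
++ */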
++
++void dump_cpu_task(int cpu)
++{
++	pr_info("Task dump for CPU %d:\n", cpu);
++	sched_show_task(cpu_curr(cpu));
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: CPU the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void init_idle(struct task_struct *idle, int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	__sched_fork(0, idle);
++
++	raw_spin_lock_irqsave(&idle->pi_lock, flags);
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	idle->last_ran = rq->clock_task;
++	idle->state = TASK_RUNNING;
++	idle->flags |= PF_IDLE;
++	sched_queue_init_idle(rq, idle);
++
++	scs_task_reset(idle);
++	kasan_unpoison_task_stack(idle);
++
++#ifdef CONFIG_SMP
++	/*
++	 * It's possible that init_idle() gets called multiple times on a task,
++	 * in that case do_set_cpus_allowed() will not do the right thing.
++	 *
++	 * And since this is boot we can forgo the serialisation.
++	 */
++	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
++#endif
++
++	/* Silence PROVE_RCU */
++	rcu_read_lock();
++	__set_task_cpu(idle, cpu);
++	rcu_read_unlock();
++
++	rq->idle = idle;
++	rcu_assign_pointer(rq->curr, idle);
++	idle->on_cpu = 1;
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++	/* Set the preempt count _outside_ the spinlocks! */
++	init_idle_preempt_count(idle, cpu);
++
++	ftrace_graph_init_idle_task(idle, cpu);
++	vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++			      const struct cpumask __maybe_unused *trial)
++{
++	return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++		    const struct cpumask *cs_cpus_allowed)
++{
++	int ret = 0;
++
++	/*
++	 * Kthreads which disallow setaffinity shouldn't be moved
++	 * to a new cpuset; we don't want to change their CPU
++	 * affinity and isolating such threads by their set of
++	 * allowed nodes is unnecessary.  Thus, cpusets are not
++	 * applicable for such threads.  This prevents checking for
++	 * success of set_cpus_allowed_ptr() on all attached tasks
++	 * before cpus_mask may be changed.
++	 */
++	if (p->flags & PF_NO_SETAFFINITY)
++		ret = -EINVAL;
++
++	return ret;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Ensures that the idle task is using init_mm right before its CPU goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++	struct mm_struct *mm = current->active_mm;
++
++	BUG_ON(current != this_rq()->idle);
++
++	if (mm != &init_mm) {
++		switch_mm(mm, &init_mm, current);
++		finish_arch_post_lock_switch();
++	}
++
++	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
++}
++
++static int __balance_push_cpu_stop(void *arg)
++{
++	struct task_struct *p = arg;
++	struct rq *rq = this_rq();
++	struct rq_flags rf;
++	int cpu;
++
++	raw_spin_lock_irq(&p->pi_lock);
++	rq_lock(rq, &rf);
++
++	update_rq_clock(rq);
++
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		cpu = select_fallback_rq(rq->cpu, p);
++		rq = __migrate_task(rq, p, cpu);
++	}
++
++	rq_unlock(rq, &rf);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	put_task_struct(p);
++
++	return 0;
++}
++
++static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
++
++/*
++ * Ensure we only run per-cpu kthreads once the CPU goes !active.
++ */
++static void balance_push(struct rq *rq)
++{
++	struct task_struct *push_task = rq->curr;
++
++	lockdep_assert_held(&rq->lock);
++	SCHED_WARN_ON(rq->cpu != smp_processor_id());
++	/*
++	 * Ensure the thing is persistent until balance_push_set(.on = false);
++	 */
++	rq->balance_callback = &balance_push_callback;
++
++	/*
++	 * Both the cpu-hotplug and stop task are in this case and are
++	 * required to complete the hotplug process.
++	 *
++	 * XXX: the idle task does not match kthread_is_per_cpu() due to
++	 * histerical raisins.
++	 */
++	if (rq->idle == push_task ||
++	    ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
++	    is_migration_disabled(push_task)) {
++
++		/*
++		 * If this is the idle task on the outgoing CPU try to wake
++		 * up the hotplug control thread which might wait for the
++		 * last task to vanish. The rcuwait_active() check is
++		 * accurate here because the waiter is pinned on this CPU
++		 * and can't obviously be running in parallel.
++		 *
++		 * On RT kernels this also has to check whether there are
++		 * pinned and scheduled out tasks on the runqueue. They
++		 * need to leave the migrate disabled section first.
++		 */
++		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
++		    rcuwait_active(&rq->hotplug_wait)) {
++			raw_spin_unlock(&rq->lock);
++			rcuwait_wake_up(&rq->hotplug_wait);
++			raw_spin_lock(&rq->lock);
++		}
++		return;
++	}
++
++	get_task_struct(push_task);
++	/*
++	 * Temporarily drop rq->lock such that we can wake-up the stop task.
++	 * Both preemption and IRQs are still disabled.
++	 */
++	raw_spin_unlock(&rq->lock);
++	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
++			    this_cpu_ptr(&push_work));
++	/*
++	 * At this point need_resched() is true and we'll take the loop in
++	 * schedule(). The next pick is obviously going to be the stop task,
++	 * which is a per-CPU kthread and will push this task away.
++	 */
++	raw_spin_lock(&rq->lock);
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct rq_flags rf;
++
++	rq_lock_irqsave(rq, &rf);
++	rq->balance_push = on;
++	if (on) {
++		WARN_ON_ONCE(rq->balance_callback);
++		rq->balance_callback = &balance_push_callback;
++	} else if (rq->balance_callback == &balance_push_callback) {
++		rq->balance_callback = NULL;
++	}
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Invoked from a CPU's hotplug control thread after the CPU has been marked
++ * inactive. All tasks which are not per CPU kernel threads are either
++ * pushed off this CPU now via balance_push() or placed on a different CPU
++ * during wakeup. Wait until the CPU is quiescent.
++ */
++static void balance_hotplug_wait(void)
++{
++	struct rq *rq = this_rq();
++
++	rcuwait_wait_event(&rq->hotplug_wait,
++			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
++			   TASK_UNINTERRUPTIBLE);
++}
++
++#else
++
++static void balance_push(struct rq *rq)
++{
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++}
++
++static inline void balance_hotplug_wait(void)
++{
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static void set_rq_offline(struct rq *rq)
++{
++	if (rq->online)
++		rq->online = false;
++}
++
++static void set_rq_online(struct rq *rq)
++{
++	if (!rq->online)
++		rq->online = true;
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask.  If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore them to their original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++	if (cpuhp_tasks_frozen) {
++		/*
++		 * num_cpus_frozen tracks how many CPUs are involved in suspend
++		 * resume sequence. As long as this is not the last online
++		 * operation in the resume sequence, just build a single sched
++		 * domain, ignoring cpusets.
++		 */
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
++			return;
++		/*
++		 * This is the last CPU online operation. So fall through and
++		 * restore the original sched domains by considering the
++		 * cpuset configurations.
++		 */
++		cpuset_force_rebuild();
++	}
++
++	cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++	if (!cpuhp_tasks_frozen) {
++		cpuset_update_active_cpus();
++	} else {
++		num_cpus_frozen++;
++		partition_sched_domains(1, NULL, NULL);
++	}
++	return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/*
++	 * Make sure that when the hotplug state machine does a roll-back
++	 * we clear balance_push. Ideally that would happen earlier...
++	 */
++	balance_push_set(cpu, false);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going up, increment the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++		static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++	set_cpu_active(cpu, true);
++
++	if (sched_smp_initialized)
++		cpuset_cpu_active();
++
++	/*
++	 * Put the rq online, if not already. This happens:
++	 *
++	 * 1) In the early boot process, because we build the real domains
++	 *    after all cpus have been brought up.
++	 *
++	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++	 *    domains.
++	 */
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_online(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	int ret;
++
++	set_cpu_active(cpu, false);
++
++	/*
++	 * From this point forward, this CPU will refuse to run any task that
++	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
++	 * push those tasks away until this gets cleared, see
++	 * sched_cpu_dying().
++	 */
++	balance_push_set(cpu, true);
++
++	/*
++	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++	 * users of this state to go away such that all new such users will
++	 * observe it.
++	 *
++	 * Specifically, we rely on ttwu to no longer target this CPU, see
++	 * ttwu_queue_cond() and is_cpu_allowed().
++	 *
++	 * Do sync before parking smpboot threads to take care of the rcu boost case.
++	 */
++	synchronize_rcu();
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	update_rq_clock(rq);
++	set_rq_offline(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going down, decrement the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_dec_cpuslocked(&sched_smt_present);
++		if (!static_branch_likely(&sched_smt_present))
++			cpumask_clear(&sched_sg_idle_mask);
++	}
++#endif
++
++	if (!sched_smp_initialized)
++		return 0;
++
++	ret = cpuset_cpu_inactive(cpu);
++	if (ret) {
++		balance_push_set(cpu, false);
++		set_cpu_active(cpu, true);
++		return ret;
++	}
++
++	return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++	sched_rq_cpu_starting(cpu);
++	sched_tick_start(cpu);
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Invoked immediately before the stopper thread is invoked to bring the
++ * CPU down completely. At this point all per CPU kthreads except the
++ * hotplug thread (current) and the stopper thread (inactive) have been
++ * either parked or have been unbound from the outgoing CPU. Ensure that
++ * any of those which might be on the way out are gone.
++ *
++ * If after this point a bound task is being woken on this CPU then the
++ * responsible hotplug callback has failed to do its job.
++ * sched_cpu_dying() will catch it with the appropriate fireworks.
++ */
++int sched_cpu_wait_empty(unsigned int cpu)
++{
++	balance_hotplug_wait();
++	return 0;
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta we
++ * might have. Called from the CPU stopper task after ensuring that the
++ * stopper is the last running task on the CPU, so nr_active count is
++ * stable. We need to take the teardown thread which is calling this into
++ * account, so we hand in adjust = 1 to the load calculation.
++ *
++ * Also see the comment "Global load-average calculations".
++ */
++static void calc_load_migrate(struct rq *rq)
++{
++	long delta = calc_load_fold_active(rq, 1);
++
++	if (delta)
++		atomic_long_add(delta, &calc_load_tasks);
++}
++
++static void dump_rq_tasks(struct rq *rq, const char *loglvl)
++{
++	struct task_struct *g, *p;
++	int cpu = cpu_of(rq);
++
++	lockdep_assert_held(&rq->lock);
++
++	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
++	for_each_process_thread(g, p) {
++		if (task_cpu(p) != cpu)
++			continue;
++
++		if (!task_on_rq_queued(p))
++			continue;
++
++		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
++	}
++}
++
++int sched_cpu_dying(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/* Handle pending wakeups and then migrate everything off */
++	sched_tick_stop(cpu);
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
++		WARN(true, "Dying CPU not properly vacated!");
++		dump_rq_tasks(rq, KERN_WARNING);
++	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	/*
++	 * Now that the CPU is offline, make sure we're welcome
++	 * to new tasks once we come back up.
++	 */
++	balance_push_set(cpu, false);
++
++	calc_load_migrate(rq);
++	hrtick_clear(rq);
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++static void sched_init_topology_cpumask_early(void)
++{
++	int cpu;
++	cpumask_t *tmp;
++
++	for_each_possible_cpu(cpu) {
++		/* init affinity masks */
++		tmp = per_cpu(sched_cpu_affinity_masks, cpu);
++
++		cpumask_copy(tmp, cpumask_of(cpu));
++		tmp++;
++		cpumask_copy(tmp, cpu_possible_mask);
++		cpumask_clear_cpu(cpu, tmp);
++		per_cpu(sched_cpu_affinity_end_mask, cpu) = ++tmp;
++		/* init topo masks */
++		tmp = per_cpu(sched_cpu_topo_masks, cpu);
++
++		cpumask_copy(tmp, cpumask_of(cpu));
++		tmp++;
++		cpumask_copy(tmp, cpu_possible_mask);
++		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
++		/*per_cpu(sd_llc_id, cpu) = cpu;*/
++	}
++}
++
++#define TOPOLOGY_CPUMASK(name, mask, last) \
++	if (cpumask_and(chk, chk, mask)) {					\
++		cpumask_copy(topo, mask);					\
++		printk(KERN_INFO "sched: cpu#%02d affinity: 0x%08lx topo: 0x%08lx - "#name,\
++		       cpu, (chk++)->bits[0], (topo++)->bits[0]);		\
++	}									\
++	if (!last)								\
++		cpumask_complement(chk, mask)
++
++static void sched_init_topology_cpumask(void)
++{
++	int cpu;
++	cpumask_t *chk, *topo;
++
++	for_each_online_cpu(cpu) {
++		/* take chance to reset time slice for idle tasks */
++		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
++
++		chk = per_cpu(sched_cpu_affinity_masks, cpu) + 1;
++		topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++
++		cpumask_complement(chk, cpumask_of(cpu));
++#ifdef CONFIG_SCHED_SMT
++		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
++#endif
++		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++		per_cpu(sched_cpu_llc_mask, cpu) = topo;
++		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
++
++		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
++
++		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
++
++		per_cpu(sched_cpu_affinity_end_mask, cpu) = chk;
++		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
++		       cpu, per_cpu(sd_llc_id, cpu),
++		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
++			      per_cpu(sched_cpu_topo_masks, cpu)));
++	}
++}
++#endif
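++/*
++ * On a hypothetical 4-CPU box with SMT siblings 0-1 and 2-3 in a
++ * single package, the loop above would leave cpu#0 with roughly:
++ *
++ *	affinity idx 0: 0x1  - cpu#0 itself
++ *	affinity idx 1: 0x2  - its SMT sibling
++ *	affinity idx 2: 0xc  - the rest of the coregroup
++ *
++ * the "core" and "others" levels being skipped because no CPUs remain,
++ * so CPU selection fans out from the closest cache level first.
++ */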
++
++void __init sched_init_smp(void)
++{
++	/* Move init over to a non-isolated CPU */
++	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
++		BUG();
++
++	sched_init_topology_cpumask();
++
++	sched_smp_initialized = true;
++}
++#else
++void __init sched_init_smp(void)
++{
++	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++	return in_lock_functions(addr) ||
++		(addr >= (unsigned long)__sched_text_start
++		&& addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++	struct cgroup_subsys_state css;
++
++	struct rcu_head rcu;
++	struct list_head list;
++
++	struct task_group *parent;
++	struct list_head siblings;
++	struct list_head children;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	unsigned long		shares;
++#endif
++};
++
++/*
++ * Default task group.
++ * Every task in the system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++	int i;
++	struct rq *rq;
++
++	printk(KERN_INFO ALT_SCHED_VERSION_MSG);
++
++	wait_bit_init();
++
++#ifdef CONFIG_SMP
++	for (i = 0; i < SCHED_BITS; i++)
++		cpumask_copy(&sched_rq_watermark[i], cpu_present_mask);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++	task_group_cache = KMEM_CACHE(task_group, 0);
++
++	list_add(&root_task_group.list, &task_groups);
++	INIT_LIST_HEAD(&root_task_group.children);
++	INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++	for_each_possible_cpu(i) {
++		rq = cpu_rq(i);
++
++		sched_queue_init(rq);
++		rq->watermark = IDLE_WM;
++		rq->skip = NULL;
++
++		raw_spin_lock_init(&rq->lock);
++		rq->nr_running = rq->nr_uninterruptible = 0;
++		rq->calc_load_active = 0;
++		rq->calc_load_update = jiffies + LOAD_FREQ;
++#ifdef CONFIG_SMP
++		rq->online = false;
++		rq->cpu = i;
++
++#ifdef CONFIG_SCHED_SMT
++		rq->active_balance = 0;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
++#endif
++		rq->balance_callback = NULL;
++#ifdef CONFIG_HOTPLUG_CPU
++		rcuwait_init(&rq->hotplug_wait);
++#endif
++#endif /* CONFIG_SMP */
++		rq->nr_switches = 0;
++
++		hrtick_rq_init(rq);
++		atomic_set(&rq->nr_iowait, 0);
++	}
++#ifdef CONFIG_SMP
++	/* Set rq->online for cpu 0 */
++	cpu_rq(0)->online = true;
++#endif
++	/*
++	 * The boot idle thread does lazy MMU switching as well:
++	 */
++	mmgrab(&init_mm);
++	enter_lazy_tlb(&init_mm, current);
++
++	/*
++	 * Make us the idle thread. Technically, schedule() should not be
++	 * called from this thread, however somewhere below it might be,
++	 * but because we are the idle thread, we just pick up running again
++	 * when this runqueue becomes "idle".
++	 */
++	init_idle(current, smp_processor_id());
++
++	calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++	idle_thread_set_boot_cpu();
++
++	sched_init_topology_cpumask_early();
++#endif /* SMP */
++
++	init_schedstats();
++
++	psi_init();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++static inline int preempt_count_equals(int preempt_offset)
++{
++	int nested = preempt_count() + rcu_preempt_depth();
++
++	return (nested == preempt_offset);
++}
++
++void __might_sleep(const char *file, int line, int preempt_offset)
++{
++	/*
++	 * Blocking primitives will set (and therefore destroy) current->state,
++	 * since we will exit with TASK_RUNNING make sure we enter with it,
++	 * otherwise we will destroy state.
++	 */
++	WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
++			"do not call blocking ops when !TASK_RUNNING; "
++			"state=%lx set at [<%p>] %pS\n",
++			current->state,
++			(void *)current->task_state_change,
++			(void *)current->task_state_change);
++
++	___might_sleep(file, line, preempt_offset);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++void ___might_sleep(const char *file, int line, int preempt_offset)
++{
++	/* Ratelimiting timestamp: */
++	static unsigned long prev_jiffy;
++
++	unsigned long preempt_disable_ip;
++
++	/* WARN_ON_ONCE() by default, no rate limit required: */
++	rcu_sleep_check();
++
++	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
++	     !is_idle_task(current) && !current->non_block_count) ||
++	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++	    oops_in_progress)
++		return;
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	/* Save this before calling printk(), since that will clobber it: */
++	preempt_disable_ip = get_preempt_disable_ip(current);
++
++	printk(KERN_ERR
++		"BUG: sleeping function called from invalid context at %s:%d\n",
++			file, line);
++	printk(KERN_ERR
++		"in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(), current->non_block_count,
++			current->pid, current->comm);
++
++	if (task_stack_end_corrupted(current))
++		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
++
++	debug_show_held_locks(current);
++	if (irqs_disabled())
++		print_irqtrace_events(current);
++#ifdef CONFIG_DEBUG_PREEMPT
++	if (!preempt_count_equals(preempt_offset)) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++#endif
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(___might_sleep);
++
++void __cant_sleep(const char *file, int line, int preempt_offset)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > preempt_offset)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
++	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(),
++			current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_sleep);
++
++#ifdef CONFIG_SMP
++void __cant_migrate(const char *file, int line)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (is_migration_disabled(current))
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > 0)
++		return;
++
++	if (current->migration_flags & MDF_FORCE_ENABLED)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
++	       current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++	struct task_struct *g, *p;
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++	};
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		/*
++		 * Only normalize user tasks:
++		 */
++		if (p->flags & PF_KTHREAD)
++			continue;
++
++		if (!rt_task(p)) {
++			/*
++			 * Renice negative nice level userspace
++			 * tasks back to 0:
++			 */
++			if (task_nice(p) < 0)
++				set_user_nice(p, 0);
++			continue;
++		}
++
++		__sched_setscheduler(p, &attr, false, false);
++	}
++	read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * ia64_set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack.  It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner.  This function
++ * must be called with all CPUs synchronised and interrupts disabled; the
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++	cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++	struct task_group *tg;
++
++	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++	if (!tg)
++		return ERR_PTR(-ENOMEM);
++
++	return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	/* Now it should be safe to free those cfs_rqs */
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++	/* Wait for possible concurrent references to cfs_rqs to complete */
++	call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++void sched_offline_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++	struct task_group *parent = css_tg(parent_css);
++	struct task_group *tg;
++
++	if (!parent) {
++		/* This is early initialization for the top cgroup */
++		return &root_task_group.css;
++	}
++
++	tg = sched_create_group(parent);
++	if (IS_ERR(tg))
++		return ERR_PTR(-ENOMEM);
++	return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++	struct task_group *parent = css_tg(css->parent);
++
++	if (parent)
++		sched_online_group(tg, parent);
++	return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	sched_offline_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_free_group(tg);
++}
++
++static void cpu_cgroup_fork(struct task_struct *task)
++{
++}
++
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++	return 0;
++}
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static DEFINE_MUTEX(shares_mutex);
++
++int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++	/*
++	 * We can't change the weight of the root cgroup.
++	 */
++	if (&root_task_group == tg)
++		return -EINVAL;
++
++	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
++
++	mutex_lock(&shares_mutex);
++	if (tg->shares == shares)
++		goto done;
++
++	tg->shares = shares;
++done:
++	mutex_unlock(&shares_mutex);
++	return 0;
++}
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cftype, u64 shareval)
++{
++	if (shareval > scale_load_down(ULONG_MAX))
++		shareval = MAX_SHARES;
++	return sched_group_set_shares(css_tg(css), scale_load(shareval));
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	struct task_group *tg = css_tg(css);
++
++	return (u64) scale_load_down(tg->shares);
++}
++#endif
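++/*
++ * These hooks keep the cgroup-v1 "cpu.shares" file functional, e.g.:
++ *
++ *	echo 2048 > /sys/fs/cgroup/cpu/mygroup/cpu.shares
++ *
++ * but note that sched_group_set_shares() above only stores the value;
++ * unlike CFS, this scheduler does not currently weight task selection
++ * by it.
++ */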
++
++static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	{
++		.name = "shares",
++		.read_u64 = cpu_shares_read_u64,
++		.write_u64 = cpu_shares_write_u64,
++	},
++#endif
++	{ }	/* Terminate */
++};
++
++
++static struct cftype cpu_files[] = {
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show = cpu_extra_stat_show,
++	.fork		= cpu_cgroup_fork,
++	.can_attach	= cpu_cgroup_can_attach,
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
++#endif	/* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+new file mode 100644
+index 000000000000..1212a031700e
+--- /dev/null
++++ b/kernel/sched/alt_debug.c
+@@ -0,0 +1,31 @@
++/*
++ * kernel/sched/alt_debug.c
++ *
++ * Print the alt scheduler debugging details
++ *
++ * Author: Alfred Chen
++ * Date  : 2020
++ */
++#include "sched.h"
++
++/*
++ * This allows printing both to /proc/sched_debug and
++ * to the console
++ */
++#define SEQ_printf(m, x...)			\
++ do {						\
++	if (m)					\
++		seq_printf(m, x);		\
++	else					\
++		pr_cont(x);			\
++ } while (0)
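++/*
++ * Example: one debug helper serves both sinks; called with a seq_file
++ * from /proc it appends there, called with m == NULL it prints to the
++ * console:
++ *
++ *	SEQ_printf(m, "%s (%d)\n", p->comm, task_pid_nr(p));
++ */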
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++			  struct seq_file *m)
++{
++	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
++						get_nr_threads(p));
++}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+new file mode 100644
+index 000000000000..2a6a0530fbb7
+--- /dev/null
++++ b/kernel/sched/alt_sched.h
+@@ -0,0 +1,686 @@
++#ifndef ALT_SCHED_H
++#define ALT_SCHED_H
++
++#include <linux/sched.h>
++
++#include <linux/sched/clock.h>
++#include <linux/sched/cpufreq.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/init.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/signal.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/sysctl.h>
++#include <linux/sched/task.h>
++#include <linux/sched/topology.h>
++#include <linux/sched/wake_q.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <linux/cgroup.h>
++#include <linux/cpufreq.h>
++#include <linux/cpuidle.h>
++#include <linux/cpuset.h>
++#include <linux/ctype.h>
++#include <linux/debugfs.h>
++#include <linux/kthread.h>
++#include <linux/livepatch.h>
++#include <linux/membarrier.h>
++#include <linux/proc_fs.h>
++#include <linux/psi.h>
++#include <linux/slab.h>
++#include <linux/stop_machine.h>
++#include <linux/suspend.h>
++#include <linux/swait.h>
++#include <linux/syscalls.h>
++#include <linux/tsacct_kern.h>
++
++#include <asm/tlb.h>
++
++#ifdef CONFIG_PARAVIRT
++# include <asm/paravirt.h>
++#endif
++
++#include "cpupri.h"
++
++#include <trace/events/sched.h>
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
++#else
++# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
++#endif
++
++/*
++ * Increase resolution of nice-level calculations for 64-bit architectures.
++ * The extra resolution improves shares distribution and load balancing of
++ * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
++ * hierarchies, especially on larger systems. This is not a user-visible change
++ * and does not change the user-interface for setting shares/weights.
++ *
++ * We increase resolution only if we have enough bits to allow this increased
++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
++ * are pretty high and the returns do not justify the increased costs.
++ *
++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
++ * increase coverage and consistency always enable it on 64-bit platforms.
++ */
++#ifdef CONFIG_64BIT
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
++# define scale_load_down(w) \
++({ \
++	unsigned long __w = (w); \
++	if (__w) \
++		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
++	__w; \
++})
++#else
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		(w)
++# define scale_load_down(w)	(w)
++#endif
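++/*
++ * Worked example on 64-bit, where SCHED_FIXEDPOINT_SHIFT is 10:
++ * scale_load(2) == 2048 and scale_load_down(2048) == 2, while
++ * scale_load_down(1) clamps to 2, so a non-zero high-resolution
++ * weight can never scale down to the problematic values 0 or 1.
++ */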
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
++
++/*
++ * A weight of 0 or 1 can cause arithmetic problems.
++ * The weight of a cfs_rq is the sum of the weights of the entities
++ * queued on it, so the weight of an entity should not be too large,
++ * and neither should the shares value of a task group.
++ * (The default weight is 1024 - so there's no practical
++ *  limitation from this.)
++ */
++#define MIN_SHARES		(1UL <<  1)
++#define MAX_SHARES		(1UL << 18)
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED	1
++#define TASK_ON_RQ_MIGRATING	2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++	return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/*
++ * wake flags
++ */
++#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
++#define WF_FORK		0x02		/* child wakeup after fork */
++#define WF_MIGRATED	0x04		/* internal use, task got migrated */
++#define WF_ON_CPU	0x08		/* Wakee is on_rq */
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++	/* runqueue lock: */
++	raw_spinlock_t lock;
++
++	struct task_struct __rcu *curr;
++	struct task_struct *idle, *stop, *skip;
++	struct mm_struct *prev_mm;
++
++#ifdef CONFIG_SCHED_BMQ
++	struct bmq queue;
++#endif
++#ifdef CONFIG_SCHED_PDS
++	struct skiplist_node sl_header;
++#endif
++	unsigned long watermark;
++
++	/* switch count */
++	u64 nr_switches;
++
++	atomic_t nr_iowait;
++
++#ifdef CONFIG_MEMBARRIER
++	int membarrier_state;
++#endif
++
++#ifdef CONFIG_SMP
++	int cpu;		/* cpu of this runqueue */
++	bool online;
++
++	unsigned int		ttwu_pending;
++	unsigned char		nohz_idle_balance;
++	unsigned char		idle_balance;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	struct sched_avg	avg_irq;
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++	int active_balance;
++	struct cpu_stop_work	active_balance_work;
++#endif
++	struct callback_head	*balance_callback;
++	unsigned char		balance_push;
++#ifdef CONFIG_HOTPLUG_CPU
++	struct rcuwait		hotplug_wait;
++#endif
++	unsigned int		nr_pinned;
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++	u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++	/* calc_load related fields */
++	unsigned long calc_load_update;
++	long calc_load_active;
++
++	u64 clock, last_tick;
++	u64 last_ts_switch;
++	u64 clock_task;
++
++	unsigned int  nr_running;
++	unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++	call_single_data_t hrtick_csd;
++#endif
++	struct hrtimer		hrtick_timer;
++	ktime_t			hrtick_time;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++	/* latency stats */
++	struct sched_info rq_sched_info;
++	unsigned long long rq_cpu_time;
++	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++	/* sys_sched_yield() stats */
++	unsigned int yld_count;
++
++	/* schedule() stats */
++	unsigned int sched_switch;
++	unsigned int sched_count;
++	unsigned int sched_goidle;
++
++	/* try_to_wake_up() stats */
++	unsigned int ttwu_count;
++	unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_CPU_IDLE
++	/* Must be inspected within a rcu lock section */
++	struct cpuidle_state *idle_state;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++#ifdef CONFIG_SMP
++	call_single_data_t	nohz_csd;
++#endif
++	atomic_t		nohz_flags;
++#endif /* CONFIG_NO_HZ_COMMON */
++};
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
++#define this_rq()		this_cpu_ptr(&runqueues)
++#define task_rq(p)		cpu_rq(task_cpu(p))
++#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
++#define raw_rq()		raw_cpu_ptr(&runqueues)
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern bool sched_smp_initialized;
++
++enum {
++	ITSELF_LEVEL_SPACE_HOLDER,
++#ifdef CONFIG_SCHED_SMT
++	SMT_LEVEL_SPACE_HOLDER,
++#endif
++	COREGROUP_LEVEL_SPACE_HOLDER,
++	CORE_LEVEL_SPACE_HOLDER,
++	OTHER_LEVEL_SPACE_HOLDER,
++	NR_CPU_AFFINITY_LEVELS
++};
++
++DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++
++static inline int __best_mask_cpu(int cpu, const cpumask_t *cpumask,
++				  const cpumask_t *mask)
++{
++#if NR_CPUS <= 64
++	unsigned long t;
++
++	while ((t = cpumask->bits[0] & mask->bits[0]) == 0UL)
++		mask++;
++
++	return __ffs(t);
++#else
++	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++		mask++;
++	return cpu;
++#endif
++}
++
++static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
++{
++#if NR_CPUS <= 64
++	unsigned long llc_match;
++	cpumask_t *chk = per_cpu(sched_cpu_llc_mask, cpu);
++
++	if ((llc_match = mask->bits[0] & chk->bits[0])) {
++		unsigned long match;
++
++		chk = per_cpu(sched_cpu_topo_masks, cpu);
++		if (mask->bits[0] & chk->bits[0])
++			return cpu;
++
++#ifdef CONFIG_SCHED_SMT
++		chk++;
++		if ((match = mask->bits[0] & chk->bits[0]))
++			return __ffs(match);
++#endif
++
++		return __ffs(llc_match);
++	}
++
++	return __best_mask_cpu(cpu, mask, chk + 1);
++#else
++	cpumask_t llc_match;
++	cpumask_t *chk = per_cpu(sched_cpu_llc_mask, cpu);
++
++	if (cpumask_and(&llc_match, mask, chk)) {
++		cpumask_t tmp;
++
++		chk = per_cpu(sched_cpu_topo_masks, cpu);
++		if (cpumask_test_cpu(cpu, mask))
++			return cpu;
++
++#ifdef CONFIG_SCHED_SMT
++		chk++;
++		if (cpumask_and(&tmp, mask, chk))
++			return cpumask_any(&tmp);
++#endif
++
++		return cpumask_any(&llc_match);
++	}
++
++	return __best_mask_cpu(cpu, mask, chk + 1);
++#endif
++}
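++
++/*
++ * Note on the walk order above: the per-CPU sched_cpu_topo_masks[]
++ * array is ordered from the CPU itself outwards (itself, then SMT
++ * siblings when CONFIG_SCHED_SMT, then coregroup, core and the rest),
++ * so best_mask_cpu() prefers the current CPU, then an SMT sibling,
++ * then any CPU sharing the LLC, and only falls back to
++ * __best_mask_cpu() to widen the search one affinity level at a time.
++ */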
++
++extern void flush_smp_call_function_from_idle(void);
++
++#else  /* !CONFIG_SMP */
++static inline void flush_smp_call_function_from_idle(void) { }
++#endif
++
++#ifndef arch_scale_freq_tick
++static __always_inline
++void arch_scale_freq_tick(void)
++{
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++	return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++	return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++	/*
++	 * Relax the lockdep_assert_held() check: as in VRQ, a call to
++	 * sched_info_xxxx() may be made without holding rq->lock
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++	/*
++	 * Relax the lockdep_assert_held() check: as in VRQ, a call to
++	 * sched_info_xxxx() may be made without holding rq->lock
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP  - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP		0x01
++
++#define ENQUEUE_WAKEUP		0x01
++
++
++/*
++ * Below are scheduler APIs used by other kernel code.
++ * They use a dummy rq_flags.
++ * TODO: BMQ needs to support these APIs for compatibility with the
++ * mainline scheduler code.
++ */
++struct rq_flags {
++	unsigned long flags;
++};
++
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock);
++
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock);
++
++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++}
++
++static inline void
++rq_lock(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock(&rq->lock);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline void
++rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline struct rq *
++this_rq_lock_irq(struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	local_irq_disable();
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	return rq;
++}
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++	return rq->curr == p;
++}
++
++static inline bool task_running(struct task_struct *p)
++{
++	return p->on_cpu;
++}
++
++extern int task_running_nice(struct task_struct *p);
++
++extern struct static_key_false sched_schedstats;
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++	rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	WARN_ON(!rcu_read_lock_held());
++	return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	return NULL;
++}
++#endif
++
++static inline int cpu_of(const struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	return rq->cpu;
++#else
++	return 0;
++#endif
++}
++
++#include "stats.h"
++
++#ifdef CONFIG_NO_HZ_COMMON
++#define NOHZ_BALANCE_KICK_BIT	0
++#define NOHZ_STATS_KICK_BIT	1
++
++#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
++#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
++
++#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
++
++#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
++
++/* TODO: needed?
++extern void nohz_balance_exit_idle(struct rq *rq);
++#else
++static inline void nohz_balance_exit_idle(struct rq *rq) { }
++*/
++#endif
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++	u64			total;
++	u64			tick_delta;
++	u64			irq_start_time;
++	struct u64_stats_sync	sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted and would never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++	unsigned int seq;
++	u64 total;
++
++	do {
++		seq = __u64_stats_fetch_begin(&irqtime->sync);
++		total = irqtime->total;
++	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++	return total;
++}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
++
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time (or there are RT tasks only).
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid.  Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++	struct update_util_data *data;
++
++	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++						  cpu_of(rq)));
++	if (data)
++		data->func(data, rq_clock(rq), flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++extern int __init sched_tick_offload_init(void);
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++#endif
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant()	(true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant()	(false)
++#endif
++
++extern void schedule_idle(void);
++
++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
++
++/*
++ * !! For sched_setattr_nocheck() (kernel) only !!
++ *
++ * This is actually gross. :(
++ *
++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
++ * tasks, but still be able to sleep. We need this on platforms that cannot
++ * atomically change clock frequency. Remove once fast switching is
++ * available on such platforms.
++ *
++ * SUGOV stands for SchedUtil GOVernor.
++ */
++#define SCHED_FLAG_SUGOV	0x10000000
++
++#ifdef CONFIG_MEMBARRIER
++/*
++ * The scheduler provides memory barriers required by membarrier between:
++ * - prior user-space memory accesses and store to rq->membarrier_state,
++ * - store to rq->membarrier_state and following user-space memory accesses.
++ * In the same way it provides those guarantees around store to rq->curr.
++ */
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++	int membarrier_state;
++
++	if (prev_mm == next_mm)
++		return;
++
++	membarrier_state = atomic_read(&next_mm->membarrier_state);
++	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
++		return;
++
++	WRITE_ONCE(rq->membarrier_state, membarrier_state);
++}
++#else
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++}
++#endif
++
++#ifdef CONFIG_NUMA
++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
++#else
++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return nr_cpu_ids;
++}
++#endif
++
++void swake_up_all_locked(struct swait_queue_head *q);
++void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++#endif /* ALT_SCHED_H */
+diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
+new file mode 100644
+index 000000000000..aba3c98759f8
+--- /dev/null
++++ b/kernel/sched/bmq.h
+@@ -0,0 +1,14 @@
++#ifndef BMQ_H
++#define BMQ_H
++
++/* bits:
++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
++#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
++#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
++
++struct bmq {
++	DECLARE_BITMAP(bitmap, SCHED_BITS);
++	struct list_head heads[SCHED_BITS];
++};
++
++#endif
+diff --git a/kernel/sched/bmq_imp.h b/kernel/sched/bmq_imp.h
+new file mode 100644
+index 000000000000..7c71f1141d00
+--- /dev/null
++++ b/kernel/sched/bmq_imp.h
+@@ -0,0 +1,203 @@
++#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++/*
++ * BMQ only routines
++ */
++#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
++#define boost_threshold(p)	(sched_timeslice_ns >>\
++				 (15 - MAX_PRIORITY_ADJ -  (p)->boost_prio))
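++
++/*
++ * Worked example (assuming MAX_PRIORITY_ADJ == 7, matching the
++ * [-7 ... 7] boost range documented at task_prio() below): at
++ * boost_prio == 0 the threshold is sched_timeslice_ns >> 8. Each boost
++ * level already gained (boost_prio moving towards -7) halves the
++ * threshold, so further boosting becomes progressively harder, while a
++ * deboosted task (boost_prio towards +7) gets a large threshold and
++ * re-boosts easily.
++ */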
++
++static inline void boost_task(struct task_struct *p)
++{
++	int limit;
++
++	switch (p->policy) {
++	case SCHED_NORMAL:
++		limit = -MAX_PRIORITY_ADJ;
++		break;
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		limit = 0;
++		break;
++	default:
++		return;
++	}
++
++	if (p->boost_prio > limit)
++		p->boost_prio--;
++}
++
++static inline void deboost_task(struct task_struct *p)
++{
++	if (p->boost_prio < MAX_PRIORITY_ADJ)
++		p->boost_prio++;
++}
++
++/*
++ * Common interfaces
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++	if (task_has_rt_policy(p))
++		return MAX_RT_PRIO - 1 - p->rt_priority;
++
++	return p->static_prio + MAX_PRIORITY_ADJ;
++}
++
++static inline int task_sched_prio(struct task_struct *p, struct rq *rq)
++{
++	return (p->prio < MAX_RT_PRIO)? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
++}
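++
++/*
++ * Worked example (assuming MAX_PRIORITY_ADJ == 7, per the boost range
++ * in task_prio() below): a nice-0 task has static_prio 120, so
++ * normal_prio() yields 127; with boost_prio == 0 its queue index is
++ * 50 + (127 + 0) / 2 = 113. The full non-RT range is [100 ... 126],
++ * leaving index 127 (IDLE_TASK_SCHED_PRIO) for the idle task.
++ */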
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq);
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
++		if (SCHED_RR != p->policy)
++			deboost_task(p);
++		requeue_task(p, rq);
++	}
++}
++
++inline int task_running_nice(struct task_struct *p)
++{
++	return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
++}
++
++static inline void update_task_priodl(struct task_struct *p) {}
++
++static inline unsigned long sched_queue_watermark(struct rq *rq)
++{
++	return find_first_bit(rq->queue.bitmap, SCHED_BITS);
++}
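++
++/*
++ * The watermark is the index of the lowest-numbered (highest-priority)
++ * non-empty queue. Since the idle task is always queued at
++ * IDLE_TASK_SCHED_PRIO, find_first_bit() never returns SCHED_BITS on
++ * an initialized rq.
++ */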
++
++static inline void sched_queue_init(struct rq *rq)
++{
++	struct bmq *q = &rq->queue;
++	int i;
++
++	bitmap_zero(q->bitmap, SCHED_BITS);
++	for(i = 0; i < SCHED_BITS; i++)
++		INIT_LIST_HEAD(&q->heads[i]);
++}
++
++static inline void sched_queue_init_idle(struct rq *rq, struct task_struct *idle)
++{
++	struct bmq *q = &rq->queue;
++
++	idle->bmq_idx = IDLE_TASK_SCHED_PRIO;
++	INIT_LIST_HEAD(&q->heads[idle->bmq_idx]);
++	list_add(&idle->bmq_node, &q->heads[idle->bmq_idx]);
++	set_bit(idle->bmq_idx, q->bitmap);
++}
++
++/*
++ * This routine is used by the BMQ scheduler only and assumes the idle
++ * task is always queued in the bmq
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_BITS);
++	const struct list_head *head = &rq->queue.heads[idx];
++
++	return list_first_entry(head, struct task_struct, bmq_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	unsigned long idx = p->bmq_idx;
++	struct list_head *head = &rq->queue.heads[idx];
++
++	if (list_is_last(&p->bmq_node, head)) {
++		idx = find_next_bit(rq->queue.bitmap, SCHED_BITS, idx + 1);
++		head = &rq->queue.heads[idx];
++
++		return list_first_entry(head, struct task_struct, bmq_node);
++	}
++
++	return list_next_entry(p, bmq_node);
++}
++
++#define __SCHED_DEQUEUE_TASK(p, rq, flags, func)	\
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);		\
++	sched_info_dequeued(rq, p);			\
++							\
++	list_del(&p->bmq_node);				\
++	if (list_empty(&rq->queue.heads[p->bmq_idx])) {	\
++		clear_bit(p->bmq_idx, rq->queue.bitmap);\
++		func;					\
++	}
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
++	sched_info_queued(rq, p);					\
++	psi_enqueue(p, flags);						\
++									\
++	p->bmq_idx = task_sched_prio(p, rq);				\
++	list_add_tail(&p->bmq_node, &rq->queue.heads[p->bmq_idx]);	\
++	set_bit(p->bmq_idx, rq->queue.bitmap)
++
++#define __SCHED_REQUEUE_TASK(p, rq, func)				\
++{									\
++	int idx = task_sched_prio(p, rq);				\
++\
++	list_del(&p->bmq_node);						\
++	list_add_tail(&p->bmq_node, &rq->queue.heads[idx]);		\
++	if (idx != p->bmq_idx) {					\
++		if (list_empty(&rq->queue.heads[p->bmq_idx]))		\
++			clear_bit(p->bmq_idx, rq->queue.bitmap);	\
++		p->bmq_idx = idx;					\
++		set_bit(p->bmq_idx, rq->queue.bitmap);			\
++		func;							\
++	}								\
++}
++
++static inline bool sched_task_need_requeue(struct task_struct *p, struct rq *rq)
++{
++	return (task_sched_prio(p, rq) != p->bmq_idx);
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = (p->boost_prio < 0) ?
++		p->boost_prio + MAX_PRIORITY_ADJ : MAX_PRIORITY_ADJ;
++}
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ *
++ * sched policy         return value   kernel prio    user prio/nice/boost
++ *
++ * normal, batch, idle     [0 ... 53]  [100 ... 139]          0/[-20 ... 19]/[-7 ... 7]
++ * fifo, rr             [-1 ... -100]     [99 ... 0]  [0 ... 99]
++ */
++int task_prio(const struct task_struct *p)
++{
++	if (p->prio < MAX_RT_PRIO)
++		return (p->prio - MAX_RT_PRIO);
++	return (p->prio - MAX_RT_PRIO + p->boost_prio);
++}
++
++static void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++#ifdef CONFIG_SMP
++static void sched_task_ttwu(struct task_struct *p)
++{
++	if(this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
++		boost_task(p);
++}
++#endif
++
++static void sched_task_deactivate(struct task_struct *p, struct rq *rq)
++{
++	if (rq_switch_time(rq) < boost_threshold(p))
++		boost_task(p);
++}
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 50cbad89f7fa..fb703fd370fd 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -57,6 +57,13 @@ struct sugov_cpu {
+ 	unsigned long		bw_dl;
+ 	unsigned long		max;
+ 
++#ifdef CONFIG_SCHED_ALT
++	/* For general cpu load util */
++	s32			load_history;
++	u64			load_block;
++	u64			load_stamp;
++#endif
++
+ 	/* The field below is for single-CPU policies only: */
+ #ifdef CONFIG_NO_HZ_COMMON
+ 	unsigned long		saved_idle_calls;
+@@ -171,6 +178,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+ 	return cpufreq_driver_resolve_freq(policy, freq);
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+ {
+ 	struct rq *rq = cpu_rq(sg_cpu->cpu);
+@@ -182,6 +190,55 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+ 					  FREQUENCY_UTIL, NULL);
+ }
+ 
++#else /* CONFIG_SCHED_ALT */
++
++#define SG_CPU_LOAD_HISTORY_BITS	(sizeof(s32) * 8ULL)
++#define SG_CPU_UTIL_SHIFT		(8)
++#define SG_CPU_LOAD_HISTORY_SHIFT	(SG_CPU_LOAD_HISTORY_BITS - 1 - SG_CPU_UTIL_SHIFT)
++#define SG_CPU_LOAD_HISTORY_TO_UTIL(l)	(((l) >> SG_CPU_LOAD_HISTORY_SHIFT) & 0xff)
++
++#define LOAD_BLOCK(t)		((t) >> 17)
++#define LOAD_HALF_BLOCK(t)	((t) >> 16)
++#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
++#define LOAD_BLOCK_BIT(b)	(1UL << (SG_CPU_LOAD_HISTORY_BITS - 1 - (b)))
++#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
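++
++/*
++ * Sketch of the encoding above (assuming nanosecond timestamps, as
++ * delivered by the rq clock): one LOAD_BLOCK spans 2^17 ns (~131 us)
++ * and load_history is a 32-bit busy/idle bitmap of recent blocks with
++ * the current block in the top bit. SG_CPU_LOAD_HISTORY_TO_UTIL()
++ * reads the 8 bits just below it as a 0-255 sample (recent blocks
++ * carry more weight), which sugov_get_util() then scales by
++ * max >> SG_CPU_UTIL_SHIFT.
++ */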
++
++static void sugov_get_util(struct sugov_cpu *sg_cpu)
++{
++	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
++
++	sg_cpu->max = max;
++	sg_cpu->bw_dl = 0;
++	sg_cpu->util = SG_CPU_LOAD_HISTORY_TO_UTIL(sg_cpu->load_history) *
++		(max >> SG_CPU_UTIL_SHIFT);
++}
++
++static inline void sugov_cpu_load_update(struct sugov_cpu *sg_cpu, u64 time)
++{
++	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(sg_cpu->load_stamp),
++			SG_CPU_LOAD_HISTORY_BITS - 1);
++	u64 prev = !!(sg_cpu->load_history & CURRENT_LOAD_BIT);
++	u64 curr = !!cpu_rq(sg_cpu->cpu)->nr_running;
++
++	if (delta) {
++		sg_cpu->load_history = sg_cpu->load_history >> delta;
++
++		if (delta <= SG_CPU_UTIL_SHIFT) {
++			sg_cpu->load_block += (~BLOCK_MASK(sg_cpu->load_stamp)) * prev;
++			if (!!LOAD_HALF_BLOCK(sg_cpu->load_block) ^ curr)
++				sg_cpu->load_history ^= LOAD_BLOCK_BIT(delta);
++		}
++
++		sg_cpu->load_block = BLOCK_MASK(time) * prev;
++	} else {
++		sg_cpu->load_block += (time - sg_cpu->load_stamp) * prev;
++	}
++	if (prev ^ curr)
++		sg_cpu->load_history ^= CURRENT_LOAD_BIT;
++	sg_cpu->load_stamp = time;
++}
++#endif /* CONFIG_SCHED_ALT */
++
+ /**
+  * sugov_iowait_reset() - Reset the IO boost status of a CPU.
+  * @sg_cpu: the sugov data for the CPU to boost
+@@ -322,13 +379,19 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+  */
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
+ 		sg_cpu->sg_policy->limits_changed = true;
++#endif
+ }
+ 
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+ 					      u64 time, unsigned int flags)
+ {
++#ifdef CONFIG_SCHED_ALT
++	sugov_cpu_load_update(sg_cpu, time);
++#endif /* CONFIG_SCHED_ALT */
++
+ 	sugov_iowait_boost(sg_cpu, time, flags);
+ 	sg_cpu->last_update = time;
+ 
+@@ -446,6 +509,10 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
+ 
+ 	raw_spin_lock(&sg_policy->update_lock);
+ 
++#ifdef CONFIG_SCHED_ALT
++	sugov_cpu_load_update(sg_cpu, time);
++#endif /* CONFIG_SCHED_ALT */
++
+ 	sugov_iowait_boost(sg_cpu, time, flags);
+ 	sg_cpu->last_update = time;
+ 
+@@ -603,6 +670,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ 	}
+ 
+ 	ret = sched_setattr_nocheck(thread, &attr);
++
+ 	if (ret) {
+ 		kthread_stop(thread);
+ 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+@@ -835,6 +903,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
+ cpufreq_governor_init(schedutil_gov);
+ 
+ #ifdef CONFIG_ENERGY_MODEL
++#ifndef CONFIG_SCHED_ALT
+ static void rebuild_sd_workfn(struct work_struct *work)
+ {
+ 	rebuild_sched_domains_energy();
+@@ -858,4 +927,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ 	}
+ 
+ }
++#else /* CONFIG_SCHED_ALT */
++void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
++				  struct cpufreq_governor *old_gov)
++{
++}
++#endif
+ #endif
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 5f611658eeab..631276f56ba0 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -123,7 +123,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
+ 	p->utime += cputime;
+ 	account_group_user_time(p, cputime);
+ 
+-	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
+ 
+ 	/* Add user time to cpustat. */
+ 	task_group_account_field(p, index, cputime);
+@@ -147,7 +147,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
+ 	p->gtime += cputime;
+ 
+ 	/* Add guest time to cpustat. */
+-	if (task_nice(p) > 0) {
++	if (task_running_nice(p)) {
+ 		cpustat[CPUTIME_NICE] += cputime;
+ 		cpustat[CPUTIME_GUEST_NICE] += cputime;
+ 	} else {
+@@ -270,7 +270,7 @@ static inline u64 account_other_time(u64 max)
+ #ifdef CONFIG_64BIT
+ static inline u64 read_sum_exec_runtime(struct task_struct *t)
+ {
+-	return t->se.sum_exec_runtime;
++	return tsk_seruntime(t);
+ }
+ #else
+ static u64 read_sum_exec_runtime(struct task_struct *t)
+@@ -280,7 +280,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
+ 	struct rq *rq;
+ 
+ 	rq = task_rq_lock(t, &rf);
+-	ns = t->se.sum_exec_runtime;
++	ns = tsk_seruntime(t);
+ 	task_rq_unlock(rq, t, &rf);
+ 
+ 	return ns;
+@@ -612,7 +612,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ 	struct task_cputime cputime = {
+-		.sum_exec_runtime = p->se.sum_exec_runtime,
++		.sum_exec_runtime = tsk_seruntime(p),
+ 	};
+ 
+ 	task_cputime(p, &cputime.utime, &cputime.stime);
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 7199e6f23789..bbdd227da3a4 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -397,6 +397,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ 		do_idle();
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * idle-task scheduling class.
+  */
+@@ -510,3 +511,4 @@ DEFINE_SCHED_CLASS(idle) = {
+ 	.switched_to		= switched_to_idle,
+ 	.update_curr		= update_curr_idle,
+ };
++#endif
+diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
+new file mode 100644
+index 000000000000..623908cf4380
+--- /dev/null
++++ b/kernel/sched/pds.h
+@@ -0,0 +1,9 @@
++#ifndef PDS_H
++#define PDS_H
++
++/* bits:
++ * RT(0-99), deadline levels (nice width / 2), cpu idle task */
++#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + 1)
++#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
++
++#endif
+diff --git a/kernel/sched/pds_imp.h b/kernel/sched/pds_imp.h
+new file mode 100644
+index 000000000000..335ce3a8e3ec
+--- /dev/null
++++ b/kernel/sched/pds_imp.h
+@@ -0,0 +1,279 @@
++#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++static const u64 user_prio2deadline[NICE_WIDTH] = {
++/* -20 */	  4194304,   4613734,   5075107,   5582617,   6140878,
++/* -15 */	  6754965,   7430461,   8173507,   8990857,   9889942,
++/* -10 */	 10878936,  11966829,  13163511,  14479862,  15927848,
++/*  -5 */	 17520632,  19272695,  21199964,  23319960,  25651956,
++/*   0 */	 28217151,  31038866,  34142752,  37557027,  41312729,
++/*   5 */	 45444001,  49988401,  54987241,  60485965,  66534561,
++/*  10 */	 73188017,  80506818,  88557499,  97413248, 107154572,
++/*  15 */	117870029, 129657031, 142622734, 156885007, 172573507
++};
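++
++/*
++ * Each entry above is ~10% larger than the previous one (4194304 * 1.1
++ * = 4613734.4, ...), so the deadline offset roughly doubles every
++ * seven nice levels: from ~4.2 ms at nice -20 up to ~172.6 ms at
++ * nice +19, expressed in rq clock nanoseconds.
++ */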
++
++static const unsigned char dl_level_map[] = {
++/*       0               4               8              12           */
++	19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 18,
++/*      16              20              24              28           */
++	18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 17, 17, 17, 17, 17,
++/*      32              36              40              44           */
++	17, 17, 17, 17, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 15, 15,
++/*      48              52              56              60           */
++	15, 15, 15, 14, 14, 14, 14, 14, 14, 13, 13, 13, 13, 12, 12, 12,
++/*      64              68              72              76           */
++	12, 11, 11, 11, 10, 10, 10,  9,  9,  8,  7,  6,  5,  4,  3,  2,
++/*      80              84              88              92           */
++	 1,  0
++};
++
++/* DEFAULT_SCHED_PRIO:
++ * the sched prio of a freshly queued nice-0 task (static_prio 120):
++ * dl_level_map[(user_prio2deadline[39] - user_prio2deadline[20]) >> 21] =
++ * dl_level_map[68] =
++ * 10
++ */
++#define DEFAULT_SCHED_PRIO (MAX_RT_PRIO + 10)
++
++static inline int normal_prio(struct task_struct *p)
++{
++	if (task_has_rt_policy(p))
++		return MAX_RT_PRIO - 1 - p->rt_priority;
++
++	return MAX_RT_PRIO;
++}
++
++static inline int
++task_sched_prio(const struct task_struct *p, const struct rq *rq)
++{
++	size_t delta;
++
++	if (p == rq->idle)
++		return IDLE_TASK_SCHED_PRIO;
++
++	if (p->prio < MAX_RT_PRIO)
++		return p->prio;
++
++	delta = (rq->clock + user_prio2deadline[39] - p->deadline) >> 21;
++	delta = min((size_t)delta, ARRAY_SIZE(dl_level_map) - 1);
++
++	return MAX_RT_PRIO + dl_level_map[delta];
++}
++
++int task_running_nice(struct task_struct *p)
++{
++	return task_sched_prio(p, task_rq(p)) > DEFAULT_SCHED_PRIO;
++}
++
++static inline void update_task_priodl(struct task_struct *p)
++{
++	p->priodl = (((u64) (p->prio))<<56) | ((p->deadline)>>8);
++}
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq);
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	/*printk(KERN_INFO "sched: time_slice_expired(%d) - %px\n", cpu_of(rq), p);*/
++	p->time_slice = sched_timeslice_ns;
++
++	if (p->prio >= MAX_RT_PRIO)
++		p->deadline = rq->clock +
++			user_prio2deadline[p->static_prio - MAX_RT_PRIO];
++	update_task_priodl(p);
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++		requeue_task(p, rq);
++}
++
++/*
++ * pds_skiplist_task_search -- search function used in PDS run queue skip list
++ * node insert operation.
++ * @it: iterator pointer to the node in the skip list
++ * @node: pointer to the skiplist_node to be inserted
++ *
++ * Returns true if the key of @it is less than or equal to the key of
++ * @node, otherwise false.
++ */
++static inline bool
++pds_skiplist_task_search(struct skiplist_node *it, struct skiplist_node *node)
++{
++	return (skiplist_entry(it, struct task_struct, sl_node)->priodl <=
++		skiplist_entry(node, struct task_struct, sl_node)->priodl);
++}
++
++/*
++ * Define the skip list insert function for PDS
++ */
++DEFINE_SKIPLIST_INSERT_FUNC(pds_skiplist_insert, pds_skiplist_task_search);
++
++/*
++ * Init the queue structure in rq
++ */
++static inline void sched_queue_init(struct rq *rq)
++{
++	INIT_SKIPLIST_NODE(&rq->sl_header);
++}
++
++/*
++ * Init the idle task and put it into the queue structure of rq
++ * IMPORTANT: may be called multiple times for a single cpu
++ */
++static inline void sched_queue_init_idle(struct rq *rq, struct task_struct *idle)
++{
++	/*printk(KERN_INFO "sched: init(%d) - %px\n", cpu_of(rq), idle);*/
++	int default_prio = idle->prio;
++
++	idle->prio = MAX_PRIO;
++	idle->deadline = 0ULL;
++	update_task_priodl(idle);
++
++	INIT_SKIPLIST_NODE(&rq->sl_header);
++
++	idle->sl_node.level = idle->sl_level;
++	pds_skiplist_insert(&rq->sl_header, &idle->sl_node);
++
++	idle->prio = default_prio;
++}
++
++/*
++ * This routine assumes that the idle task is always in the queue
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	struct skiplist_node *node = rq->sl_header.next[0];
++
++	BUG_ON(node == &rq->sl_header);
++	return skiplist_entry(node, struct task_struct, sl_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	struct skiplist_node *next = p->sl_node.next[0];
++
++	BUG_ON(next == &rq->sl_header);
++	return skiplist_entry(next, struct task_struct, sl_node);
++}
++
++static inline unsigned long sched_queue_watermark(struct rq *rq)
++{
++	return task_sched_prio(sched_rq_first_task(rq), rq);
++}
++
++#define __SCHED_DEQUEUE_TASK(p, rq, flags, func)		\
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);			\
++	sched_info_dequeued(rq, p);				\
++								\
++	if (skiplist_del_init(&rq->sl_header, &p->sl_node)) {	\
++		func;						\
++	}
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
++	sched_info_queued(rq, p);					\
++	psi_enqueue(p, flags);						\
++									\
++	p->sl_node.level = p->sl_level;					\
++	pds_skiplist_insert(&rq->sl_header, &p->sl_node)
++
++/*
++ * Requeue a task @p to @rq
++ */
++#define __SCHED_REQUEUE_TASK(p, rq, func)					\
++{\
++	bool b_first = skiplist_del_init(&rq->sl_header, &p->sl_node);		\
++\
++	p->sl_node.level = p->sl_level;						\
++	if (pds_skiplist_insert(&rq->sl_header, &p->sl_node) || b_first) {	\
++		func;								\
++	}									\
++}
++
++static inline bool sched_task_need_requeue(struct task_struct *p, struct rq *rq)
++{
++	struct skiplist_node *node;
++
++	node = p->sl_node.prev[0];
++	if (node != &rq->sl_header &&
++	    skiplist_entry(node, struct task_struct, sl_node)->priodl > p->priodl)
++		return true;
++
++	node = p->sl_node.next[0];
++	if (node != &rq->sl_header &&
++	    skiplist_entry(node, struct task_struct, sl_node)->priodl < p->priodl)
++		return true;
++
++	return false;
++}
++
++/*
++ * pds_skiplist_random_level -- Returns a pseudo-random level number for skip
++ * list node which is used in PDS run queue.
++ *
++ * __ffs() is used to satisfy p = 0.5 between each level, and most
++ * platforms have an instruction (known as ctz/clz) to accelerate it.
++ *
++ * The skiplist level for a task is populated when the task is created
++ * and doesn't change during the task's lifetime. When the task is
++ * being inserted into a run queue, this skiplist level is set to the
++ * task's sl_node->level, and the skiplist insert function may change
++ * it based on the current level of the skip list.
++ */
++static inline int pds_skiplist_random_level(const struct task_struct *p)
++{
++	/*
++	 * 1. Some architectures don't have better than microsecond resolution
++	 * so mask out ~microseconds as a factor of the random seed for skiplist
++	 * insertion.
++	 * 2. Use address of task structure pointer as another factor of the
++	 * random seed for task burst forking scenario.
++	 */
++	unsigned long randseed = (task_rq(p)->clock ^ (unsigned long)p) >> 10;
++
++	randseed &= __GENMASK(NUM_SKIPLIST_LEVEL - 1, 0);
++	if (randseed)
++		return __ffs(randseed);
++
++	return (NUM_SKIPLIST_LEVEL - 1);
++}
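++
++/*
++ * Distribution check: __ffs() of a uniform random seed yields level k
++ * with probability 2^-(k+1), i.e. the p = 0.5 geometric distribution a
++ * skip list expects; an all-zero seed falls through to the top level
++ * NUM_SKIPLIST_LEVEL - 1.
++ */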
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	p->sl_level = pds_skiplist_random_level(p);
++	if (p->prio >= MAX_RT_PRIO)
++		p->deadline = rq->clock +
++			user_prio2deadline[p->static_prio - MAX_RT_PRIO];
++	update_task_priodl(p);
++}
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ *
++ * sched policy         return value   kernel prio    user prio/nice
++ *
++ * normal, batch, idle     [0 ... 39]            100          0/[-20 ... 19]
++ * fifo, rr             [-1 ... -100]     [99 ... 0]  [0 ... 99]
++ */
++int task_prio(const struct task_struct *p)
++{
++	int ret;
++
++	if (p->prio < MAX_RT_PRIO)
++		return (p->prio - MAX_RT_PRIO);
++
++	preempt_disable();
++	ret = task_sched_prio(p, this_rq()) - MAX_RT_PRIO;
++	preempt_enable();
++
++	return ret;
++}
++
++static void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	time_slice_expired(p, rq);
++}
++
++#ifdef CONFIG_SMP
++static void sched_task_ttwu(struct task_struct *p) {}
++#endif
++static void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index 2c613e1cff3a..0103b2a7201d 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -270,6 +270,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
+ 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * sched_entity:
+  *
+@@ -387,8 +388,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ 
+ 	return 0;
+ }
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * thermal:
+  *
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 795e43e02afc..856163dac896 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -1,13 +1,15 @@
+ #ifdef CONFIG_SMP
+ #include "sched-pelt.h"
+ 
++#ifndef CONFIG_SCHED_ALT
+ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
+ 
+ static inline u64 thermal_load_avg(struct rq *rq)
+@@ -42,6 +44,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * When a task is dequeued, its estimated utilization should not be update if
+  * its util_avg has not been updated at least once.
+@@ -162,9 +165,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+ 	return rq_clock_pelt(rq_of(cfs_rq));
+ }
+ #endif
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #else
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline int
+ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -182,6 +187,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ {
+ 	return 0;
+ }
++#endif
+ 
+ static inline int
+ update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 10a1522b1e30..1a74a266340b 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2,6 +2,10 @@
+ /*
+  * Scheduler internal types and methods:
+  */
++#ifdef CONFIG_SCHED_ALT
++#include "alt_sched.h"
++#else
++
+ #include <linux/sched.h>
+ 
+ #include <linux/sched/autogroup.h>
+@@ -2720,3 +2724,9 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
+ 
+ void swake_up_all_locked(struct swait_queue_head *q);
+ void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (task_nice(p) > 0);
++}
++#endif /* !CONFIG_SCHED_ALT */
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 750fb3c67eed..108422ebc7bf 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -22,8 +22,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 	} else {
+ 		struct rq *rq;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		struct sched_domain *sd;
+ 		int dcount = 0;
++#endif
+ #endif
+ 		cpu = (unsigned long)(v - 2);
+ 		rq = cpu_rq(cpu);
+@@ -40,6 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		/* domain-specific stats */
+ 		rcu_read_lock();
+ 		for_each_domain(cpu, sd) {
+@@ -68,6 +71,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 			    sd->ttwu_move_balance);
+ 		}
+ 		rcu_read_unlock();
++#endif
+ #endif
+ 	}
+ 	return 0;
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 09d35044bd88..2c146b042b51 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -4,6 +4,7 @@
+  */
+ #include "sched.h"
+ 
++#ifndef CONFIG_SCHED_ALT
+ DEFINE_MUTEX(sched_domains_mutex);
+ 
+ /* Protected by sched_domains_mutex: */
+@@ -1241,8 +1242,10 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
+  */
+ 
+ static int default_relax_domain_level = -1;
++#endif /* CONFIG_SCHED_ALT */
+ int sched_domain_level_max;
+ 
++#ifndef CONFIG_SCHED_ALT
+ static int __init setup_relax_domain_level(char *str)
+ {
+ 	if (kstrtoint(str, 0, &default_relax_domain_level))
+@@ -1472,6 +1475,7 @@ sd_init(struct sched_domain_topology_level *tl,
+ 
+ 	return sd;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ /*
+  * Topology list, bottom-up.
+@@ -1501,6 +1505,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
+ 	sched_domain_topology = tl;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA
+ 
+ static const struct cpumask *sd_numa_mask(int cpu)
+@@ -2371,3 +2376,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+ 	mutex_unlock(&sched_domains_mutex);
+ }
++#else /* CONFIG_SCHED_ALT */
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++			     struct sched_domain_attr *dattr_new)
++{}
++
++#ifdef CONFIG_NUMA
++int __read_mostly		node_reclaim_distance = RECLAIM_DISTANCE;
++
++int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return best_mask_cpu(cpu, cpus);
++}
++#endif /* CONFIG_NUMA */
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 62fbd09b5dc1..19f9a6185db3 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -120,6 +120,10 @@ static unsigned long long_max = LONG_MAX;
+ static int one_hundred = 100;
+ static int two_hundred = 200;
+ static int one_thousand = 1000;
++#ifdef CONFIG_SCHED_ALT
++static int __maybe_unused zero = 0;
++extern int sched_yield_type;
++#endif
+ #ifdef CONFIG_PRINTK
+ static int ten_thousand = 10000;
+ #endif
+@@ -184,7 +188,7 @@ static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT;
+ int sysctl_legacy_va_layout;
+ #endif
+ 
+-#ifdef CONFIG_SCHED_DEBUG
++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_ALT)
+ static int min_sched_granularity_ns = 100000;		/* 100 usecs */
+ static int max_sched_granularity_ns = NSEC_PER_SEC;	/* 1 second */
+ static int min_wakeup_granularity_ns;			/* 0 usecs */
+@@ -1652,6 +1656,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
+ }
+ 
+ static struct ctl_table kern_table[] = {
++#ifdef CONFIG_SCHED_ALT
++/* In ALT, only "sched_schedstats" is supported */
++#ifdef CONFIG_SCHED_DEBUG
++#ifdef CONFIG_SMP
++#ifdef CONFIG_SCHEDSTATS
++	{
++		.procname	= "sched_schedstats",
++		.data		= NULL,
++		.maxlen		= sizeof(unsigned int),
++		.mode		= 0644,
++		.proc_handler	= sysctl_schedstats,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_ONE,
++	},
++#endif /* CONFIG_SCHEDSTATS */
++#endif /* CONFIG_SMP */
++#endif /* CONFIG_SCHED_DEBUG */
++#else  /* !CONFIG_SCHED_ALT */
+ 	{
+ 		.procname	= "sched_child_runs_first",
+ 		.data		= &sysctl_sched_child_runs_first,
+@@ -1854,6 +1876,7 @@ static struct ctl_table kern_table[] = {
+ 		.extra2		= SYSCTL_ONE,
+ 	},
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+ #ifdef CONFIG_PROVE_LOCKING
+ 	{
+ 		.procname	= "prove_locking",
+@@ -2430,6 +2453,17 @@ static struct ctl_table kern_table[] = {
+ 		.proc_handler	= proc_dointvec,
+ 	},
+ #endif
++#ifdef CONFIG_SCHED_ALT
++	{
++		.procname	= "yield_type",
++		.data		= &sched_yield_type,
++		.maxlen		= sizeof (int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.extra1		= &zero,
++		.extra2		= &two,
++	},
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ 	{
+ 		.procname	= "spin_retry",
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 5c9d968187ae..fe47db46303c 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1940,8 +1940,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+ 	int ret = 0;
+ 	u64 slack;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	slack = current->timer_slack_ns;
+ 	if (dl_task(current) || rt_task(current))
++#endif
+ 		slack = 0;
+ 
+ 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 9abe15255bc4..691db8192ddb 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -216,7 +216,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
+ 	u64 stime, utime;
+ 
+ 	task_cputime(p, &utime, &stime);
+-	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
++	store_samples(samples, stime, utime, tsk_seruntime(p));
+ }
+ 
+ static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
+@@ -801,6 +801,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
+ 	}
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void check_dl_overrun(struct task_struct *tsk)
+ {
+ 	if (tsk->dl.dl_overrun) {
+@@ -808,6 +809,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+ 		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+ 	}
+ }
++#endif
+ 
+ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
+ {
+@@ -835,8 +837,10 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	u64 samples[CPUCLOCK_MAX];
+ 	unsigned long soft;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk))
+ 		check_dl_overrun(tsk);
++#endif
+ 
+ 	if (expiry_cache_is_inactive(pct))
+ 		return;
+@@ -850,7 +854,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
+ 	if (soft != RLIM_INFINITY) {
+ 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
+-		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
++		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
+ 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+ 
+ 		/* At the hard limit, send SIGKILL. No further action. */
+@@ -1086,8 +1090,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
+ 			return true;
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk) && tsk->dl.dl_overrun)
+ 		return true;
++#endif
+ 
+ 	return false;
+ }
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index 73ef12092250..24bf8ef1249a 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1052,10 +1052,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ 	/* Make this a -deadline thread */
+ 	static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_ALT
++		/* No deadline on BMQ/PDS, use RR */
++		.sched_policy = SCHED_RR,
++#else
+ 		.sched_policy = SCHED_DEADLINE,
+ 		.sched_runtime = 100000ULL,
+ 		.sched_deadline = 10000000ULL,
+ 		.sched_period = 10000000ULL
++#endif
+ 	};
+ 	struct wakeup_test_data *x = data;
+ 

diff --git a/5021_BMQ-and-PDS-gentoo-defaults-v5.12-r0.patch b/5021_BMQ-and-PDS-gentoo-defaults-v5.12-r0.patch
new file mode 100644
index 0000000..d449eec
--- /dev/null
+++ b/5021_BMQ-and-PDS-gentoo-defaults-v5.12-r0.patch
@@ -0,0 +1,13 @@
+--- a/init/Kconfig	2021-04-27 07:38:30.556467045 -0400
++++ b/init/Kconfig	2021-04-27 07:39:32.956412800 -0400
+@@ -780,8 +780,9 @@ config GENERIC_SCHED_CLOCK
+ menu "Scheduler features"
+ 
+ menuconfig SCHED_ALT
++	depends on X86_64
+ 	bool "Alternative CPU Schedulers"
+-	default y
++	default n
+ 	help
+ 	  This feature enable alternative CPU scheduler"
+ 


^ permalink raw reply related	[flat|nested] 33+ messages in thread
* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-04-18 22:03 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-04-18 22:03 UTC (permalink / raw
  To: gentoo-commits

commit:     29f16b3ccb72dce3d1a56939bfbbd085bd8ff16a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 18 22:01:47 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Apr 18 22:01:47 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=29f16b3c

Update CPU Opt patch (Adds rocketlake support)

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5013_enable-cpu-optimizations-for-gcc10.patch | 271 ++++++++++++++++++--------
 1 file changed, 189 insertions(+), 82 deletions(-)

diff --git a/5013_enable-cpu-optimizations-for-gcc10.patch b/5013_enable-cpu-optimizations-for-gcc10.patch
index c90b586..1868f23 100644
--- a/5013_enable-cpu-optimizations-for-gcc10.patch
+++ b/5013_enable-cpu-optimizations-for-gcc10.patch
@@ -1,64 +1,82 @@
-From 4666424a864159b4de572c90adb2c3e1fcdd5890 Mon Sep 17 00:00:00 2001
+From 59db769ad69e080c512b3890e1d27d6120f4a1a4 Mon Sep 17 00:00:00 2001
 From: graysky <graysky@archlinux.us>
-Date: Fri, 13 Nov 2020 15:45:08 -0500
-Subject: [PATCH]more-uarches-for-gcc-v10-and-kernel-5.8+
+Date: Mon, 12 Apr 2021 07:09:27 -0400
+Subject: [PATCH] more uarches for kernel 5.8+
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
 
 WARNING
-This patch works with gcc versions 10.1+ and with kernel version 5.8+ and should
+This patch works with all gcc versions 9.0+ and with kernel version 5.8+ and should
 NOT be applied when compiling on older versions of gcc due to key name changes
 of the march flags introduced with the version 4.9 release of gcc.[1]
 
-Use the older version of this patch hosted on the same github for older
-versions of gcc.
-
 FEATURES
 This patch adds additional CPU options to the Linux kernel accessible under:
  Processor type and features  --->
   Processor family --->
 
-The expanded microarchitectures include:
-* AMD Improved K8-family
-* AMD K10-family
-* AMD Family 10h (Barcelona)
-* AMD Family 14h (Bobcat)
-* AMD Family 16h (Jaguar)
-* AMD Family 15h (Bulldozer)
-* AMD Family 15h (Piledriver)
-* AMD Family 15h (Steamroller)
-* AMD Family 15h (Excavator)
-* AMD Family 17h (Zen)
-* AMD Family 17h (Zen 2)
-* Intel Silvermont low-power processors
-* Intel Goldmont low-power processors (Apollo Lake and Denverton)
-* Intel Goldmont Plus low-power processors (Gemini Lake)
-* Intel 1st Gen Core i3/i5/i7 (Nehalem)
-* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
-* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
-* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
-* Intel 4th Gen Core i3/i5/i7 (Haswell)
-* Intel 5th Gen Core i3/i5/i7 (Broadwell)
-* Intel 6th Gen Core i3/i5/i7 (Skylake)
-* Intel 6th Gen Core i7/i9 (Skylake X)
-* Intel 8th Gen Core i3/i5/i7 (Cannon Lake)
-* Intel 10th Gen Core i7/i9 (Ice Lake)
-* Intel Xeon (Cascade Lake)
-* Intel Xeon (Cooper Lake)
-* Intel 3rd Gen 10nm++  i3/i5/i7/i9-family (Tiger Lake)
+With the release of gcc 11.0, several generic 64-bit levels are offered which
+are good for supported Intel or AMD CPUs:
+• x86-64-v2
+• x86-64-v3
+• x86-64-v4
+
+Users of glibc 2.33 and above can see which level is supported by current
+hardware by running:
+  /lib/ld-linux-x86-64.so.2 --help | grep supported
+
+Alternatively, compare the flags from /proc/cpuinfo to this list.[2]
+
+CPU-specific microarchitectures include:
+• AMD Improved K8-family
+• AMD K10-family
+• AMD Family 10h (Barcelona)
+• AMD Family 14h (Bobcat)
+• AMD Family 16h (Jaguar)
+• AMD Family 15h (Bulldozer)
+• AMD Family 15h (Piledriver)
+• AMD Family 15h (Steamroller)
+• AMD Family 15h (Excavator)
+• AMD Family 17h (Zen)
+• AMD Family 17h (Zen 2)
+• AMD Family 19h (Zen 3)†
+• Intel Silvermont low-power processors
+• Intel Goldmont low-power processors (Apollo Lake and Denverton)
+• Intel Goldmont Plus low-power processors (Gemini Lake)
+• Intel 1st Gen Core i3/i5/i7 (Nehalem)
+• Intel 1.5 Gen Core i3/i5/i7 (Westmere)
+• Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
+• Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
+• Intel 4th Gen Core i3/i5/i7 (Haswell)
+• Intel 5th Gen Core i3/i5/i7 (Broadwell)
+• Intel 6th Gen Core i3/i5/i7 (Skylake)
+• Intel 6th Gen Core i7/i9 (Skylake X)
+• Intel 8th Gen Core i3/i5/i7 (Cannon Lake)
+• Intel 10th Gen Core i7/i9 (Ice Lake)
+• Intel Xeon (Cascade Lake)
+• Intel Xeon (Cooper Lake)*
+• Intel 3rd Gen 10nm++ i3/i5/i7/i9-family (Tiger Lake)*
+• Intel 3rd Gen 10nm++ Xeon (Sapphire Rapids)‡
+• Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)‡
+• Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡
+
+Notes: If not otherwise noted, gcc >=9.1 is required for support.
+       *Requires gcc >=10.1  †Requires gcc >=10.3  ‡Requires gcc >=11.0
 
 It also offers to compile passing the 'native' option which, "selects the CPU
 to generate code for at compilation time by determining the processor type of
 the compiling machine. Using -march=native enables all instruction subsets
 supported by the local machine and will produce code optimized for the local
-machine under the constraints of the selected instruction set."[2]
+machine under the constraints of the selected instruction set."[3]
 
-Do NOT try using the 'native' option on AMD Piledriver, Steamroller, or
-Excavator CPUs (-march=bdver{2,3,4} flag). The build will error out due the
-kernel's objtool issue with these.[3a,b]
+Users of Intel CPUs should select the 'Intel-Native' option and users of AMD
+CPUs should select the 'AMD-Native' option.
 
-MINOR NOTES
-This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
-changes. Note that upstream is using the deprecated 'match=atom' flags when I
-believe it should use the newer 'march=bonnell' flag for atom processors.[4]
+MINOR NOTES RELATING TO INTEL ATOM PROCESSORS
+This patch also changes -march=atom to -march=bonnell in accordance with the
+gcc v4.9 changes. Upstream is using the deprecated -march=atom flag when I
+believe it should use the newer -march=bonnell flag for atom processors.[4]
 
 It is not recommended to compile on Atom-CPUs with the 'native' option.[5] The
 recommendation is to use the 'atom' option instead.
@@ -72,28 +90,26 @@ https://github.com/graysky2/kernel_gcc_patch
 
 REQUIREMENTS
 linux version >=5.8
-gcc version >=10.1
+gcc version >=9.0
 
 ACKNOWLEDGMENTS
 This patch builds on the seminal work by Jeroen.[6]
 
 REFERENCES
 1.  https://gcc.gnu.org/gcc-4.9/changes.html
-2.  https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
-3a. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95671#c11
-3b. https://github.com/graysky2/kernel_gcc_patch/issues/55
+2.  https://gitlab.com/x86-psABIs/x86-64-ABI/-/commit/77566eb03bc6a326811cb7e9
+3.  https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html#index-x86-Options
 4.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
 5.  https://github.com/graysky2/kernel_gcc_patch/issues/15
 6.  http://www.linuxforge.net/docs/linux/linux-gcc.php
-
 ---
- arch/x86/Kconfig.cpu            | 258 ++++++++++++++++++++++++++++++--
- arch/x86/Makefile               |  39 ++++-
- arch/x86/include/asm/vermagic.h |  56 +++++++
- 3 files changed, 336 insertions(+), 17 deletions(-)
+ arch/x86/Kconfig.cpu            | 332 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  47 ++++-
+ arch/x86/include/asm/vermagic.h |  66 +++++++
+ 3 files changed, 428 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 814fe0d349b0..134390e619bb 100644
+index 814fe0d349b0..872b9cf598e3 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
 @@ -157,7 +157,7 @@ config MPENTIUM4
@@ -114,7 +130,7 @@ index 814fe0d349b0..134390e619bb 100644
  	depends on X86_32
  	help
  	  Select this for an AMD Athlon K7-family processor.  Enables use of
-@@ -173,12 +173,90 @@ config MK7
+@@ -173,12 +173,98 @@ config MK7
  	  flags to GCC.
  
  config MK8
@@ -202,11 +218,19 @@ index 814fe0d349b0..134390e619bb 100644
 +	  Select this for AMD Family 17h Zen 2 processors.
 +
 +	  Enables -march=znver2
++
++config MZEN3
++	bool "AMD Zen 3"
++	depends on GCC_VERSION > 100300
++	help
++	  Select this for AMD Family 19h Zen 3 processors.
++
++	  Enables -march=znver3
 +
  config MCRUSOE
  	bool "Crusoe"
  	depends on X86_32
-@@ -270,7 +348,7 @@ config MPSC
+@@ -270,7 +356,7 @@ config MPSC
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
  
  config MCORE2
@@ -215,7 +239,7 @@ index 814fe0d349b0..134390e619bb 100644
  	help
  
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,6 +356,8 @@ config MCORE2
+@@ -278,6 +364,8 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
  
@@ -224,7 +248,7 @@ index 814fe0d349b0..134390e619bb 100644
  config MATOM
  	bool "Intel Atom"
  	help
-@@ -287,6 +367,150 @@ config MATOM
+@@ -287,6 +375,182 @@ config MATOM
  	  accordingly optimized code. Use a recent GCC with specific Atom
  	  support in order to fully benefit from selecting this option.
  
@@ -356,6 +380,7 @@ index 814fe0d349b0..134390e619bb 100644
 +
 +config MCOOPERLAKE
 +	bool "Intel Cooper Lake"
++	depends on GCC_VERSION > 100100
 +	select X86_P6_NOP
 +	help
 +
@@ -365,22 +390,77 @@ index 814fe0d349b0..134390e619bb 100644
 +
 +config MTIGERLAKE
 +	bool "Intel Tiger Lake"
++	depends on GCC_VERSION > 100100
 +	select X86_P6_NOP
 +	help
 +
 +	  Select this for third-generation 10 nm process processors in the Tiger Lake family.
 +
 +	  Enables -march=tigerlake
++
++config MSAPPHIRERAPIDS
++	bool "Intel Sapphire Rapids"
++	depends on GCC_VERSION > 110000
++	select X86_P6_NOP
++	help
++
++	  Select this for fourth-generation 10 nm process processors in the Sapphire Rapids family.
++
++	  Enables -march=sapphirerapids
++
++config MROCKETLAKE
++	bool "Intel Rocket Lake"
++	depends on GCC_VERSION > 110000
++	select X86_P6_NOP
++	help
++
++	  Select this for eleventh-generation processors in the Rocket Lake family.
++
++	  Enables -march=rocketlake
++
++config MALDERLAKE
++	bool "Intel Alder Lake"
++	depends on GCC_VERSION > 110000
++	select X86_P6_NOP
++	help
++
++	  Select this for twelfth-generation processors in the Alder Lake family.
++
++	  Enables -march=alderlake
 +
  config GENERIC_CPU
  	bool "Generic-x86-64"
  	depends on X86_64
-@@ -294,6 +518,16 @@ config GENERIC_CPU
+@@ -294,6 +558,50 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
  
-+config MNATIVE
-+	bool "Native optimizations autodetected by GCC"
++config GENERIC_CPU2
++	bool "Generic-x86-64-v2"
++	depends on GCC_VERSION > 110000
++	depends on X86_64
++	help
++	  Generic x86-64 CPU.
++	  Run equally well on all x86-64 CPUs with min support of x86-64-v2.
++
++config GENERIC_CPU3
++	bool "Generic-x86-64-v3"
++	depends on GCC_VERSION > 110000
++	depends on X86_64
++	help
++	  Generic x86-64-v3 CPU with v3 instructions.
++	  Run equally well on all x86-64 CPUs with min support of x86-64-v3.
++
++config GENERIC_CPU4
++	bool "Generic-x86-64-v4"
++	depends on GCC_VERSION > 110000
++	depends on X86_64
++	help
++	  Generic x86-64 CPU with v4 instructions.
++	  Run equally well on all x86-64 CPUs with min support of x86-64-v4.
++
++config MNATIVE_INTEL
++	bool "Intel-Native optimizations autodetected by GCC"
 +	help
 +
 +	  GCC 4.2 and above support -march=native, which automatically detects
@@ -388,70 +468,80 @@ index 814fe0d349b0..134390e619bb 100644
 +	  for AMD CPUs.  Intel Only!
 +
 +	  Enables -march=native
++
++config MNATIVE_AMD
++	bool "AMD-Native optimizations autodetected by GCC"
++	help
++
++	  GCC 4.2 and above support -march=native, which automatically detects
++	  the optimum settings to use based on your processor. Do NOT use this
++	  for Intel CPUs.  AMD Only!
++
++	  Enables -march=native
 +
  endchoice
  
  config X86_GENERIC
-@@ -318,7 +552,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4
  	default "4" if MELAN || M486SX || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
  
-@@ -336,11 +570,11 @@ config X86_ALIGNMENT_16
+@@ -336,11 +644,11 @@ config X86_ALIGNMENT_16
  
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL
  
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
  
  config X86_USE_3DNOW
  	def_bool y
-@@ -360,26 +594,26 @@ config X86_USE_3DNOW
+@@ -360,26 +668,26 @@ config X86_USE_3DNOW
  config X86_P6_NOP
  	def_bool y
  	depends on X86_64
 -	depends on (MCORE2 || MPENTIUM4 || MPSC)
-+	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
++	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL)
  
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
  
  config X86_CMPXCHG64
  	def_bool y
 -	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
-+	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
++	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
  
  # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
  	def_bool y
 -	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
++	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
  
  config X86_MINIMUM_CPU_FAMILY
  	int
  	default "64" if X86_64
 -	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
-+	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
++	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
  	default "5" if X86_32 && X86_CMPXCHG64
  	default "4"
  
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 7116da3980be..50c8af35092b 100644
+index 9a85eae37b17..facf9a278fe3 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -110,11 +110,40 @@ else
+@@ -113,11 +113,48 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
          cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
          cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
@@ -473,9 +563,11 @@ index 7116da3980be..50c8af35092b 100644
 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
 +        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
-+        cflags-$(CONFIG_MZEN2) +=  $(call cc-option,-march=znver2)
++        cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
++        cflags-$(CONFIG_MZEN3) += $(call cc-option,-march=znver3)
 +
-+        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
++        cflags-$(CONFIG_MNATIVE_INTEL) += $(call cc-option,-march=native)
++        cflags-$(CONFIG_MNATIVE_AMD) += $(call cc-option,-march=native)
 +        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
 +        cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
 +        cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
@@ -494,19 +586,27 @@ index 7116da3980be..50c8af35092b 100644
 +        cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
 +        cflags-$(CONFIG_MCOOPERLAKE) += $(call cc-option,-march=cooperlake)
 +        cflags-$(CONFIG_MTIGERLAKE) += $(call cc-option,-march=tigerlake)
++        cflags-$(CONFIG_MSAPPHIRERAPIDS) += $(call cc-option,-march=sapphirerapids)
++        cflags-$(CONFIG_MROCKETLAKE) += $(call cc-option,-march=rocketlake)
++        cflags-$(CONFIG_MALDERLAKE) += $(call cc-option,-march=alderlake)
++        cflags-$(CONFIG_GENERIC_CPU2) += $(call cc-option,-march=x86-64-v2)
++        cflags-$(CONFIG_GENERIC_CPU3) += $(call cc-option,-march=x86-64-v3)
++        cflags-$(CONFIG_GENERIC_CPU4) += $(call cc-option,-march=x86-64-v4)
          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
          KBUILD_CFLAGS += $(cflags-y)
  
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..14c222e78213 100644
+index 75884d2cdec3..4e6a08d4c7e5 100644
 --- a/arch/x86/include/asm/vermagic.h
 +++ b/arch/x86/include/asm/vermagic.h
-@@ -17,6 +17,40 @@
+@@ -17,6 +17,48 @@
  #define MODULE_PROC_FAMILY "586MMX "
  #elif defined CONFIG_MCORE2
  #define MODULE_PROC_FAMILY "CORE2 "
-+#elif defined CONFIG_MNATIVE
-+#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNATIVE_INTEL
++#define MODULE_PROC_FAMILY "NATIVE_INTEL "
++#elif defined CONFIG_MNATIVE_AMD
++#define MODULE_PROC_FAMILY "NATIVE_AMD "
 +#elif defined CONFIG_MNEHALEM
 +#define MODULE_PROC_FAMILY "NEHALEM "
 +#elif defined CONFIG_MWESTMERE
@@ -539,10 +639,16 @@ index 75884d2cdec3..14c222e78213 100644
 +#define MODULE_PROC_FAMILY "COOPERLAKE "
 +#elif defined CONFIG_MTIGERLAKE
 +#define MODULE_PROC_FAMILY "TIGERLAKE "
++#elif defined CONFIG_MSAPPHIRERAPIDS
++#define MODULE_PROC_FAMILY "SAPPHIRERAPIDS "
++#elif defined CONFIG_MROCKETLAKE
++#define MODULE_PROC_FAMILY "ROCKETLAKE "
++#elif defined CONFIG_MALDERLAKE
++#define MODULE_PROC_FAMILY "ALDERLAKE "
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -35,6 +69,28 @@
+@@ -35,6 +77,30 @@
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -568,10 +674,11 @@ index 75884d2cdec3..14c222e78213 100644
 +#define MODULE_PROC_FAMILY "ZEN "
 +#elif defined CONFIG_MZEN2
 +#define MODULE_PROC_FAMILY "ZEN2 "
++#elif defined CONFIG_MZEN3
++#define MODULE_PROC_FAMILY "ZEN3 "
  #elif defined CONFIG_MELAN
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
 -- 
-2.30.1
-
+2.31.1
 


* [gentoo-commits] proj/linux-patches:5.12 commit in: /
@ 2021-03-23 12:29 Mike Pagano
  0 siblings, 0 replies; 33+ messages in thread
From: Mike Pagano @ 2021-03-23 12:29 UTC (permalink / raw
  To: gentoo-commits

commit:     b19549e0681e5bd37ae375f321a201ea4127a8d7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Mar 23 12:28:22 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Mar 23 12:28:22 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b19549e0

Update for CPU Optimization patch for 5.12.X gcc v10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5013_enable-cpu-optimizations-for-gcc10.patch | 283 ++++++++------------------
 1 file changed, 83 insertions(+), 200 deletions(-)

diff --git a/5013_enable-cpu-optimizations-for-gcc10.patch b/5013_enable-cpu-optimizations-for-gcc10.patch
index 0fc0a64..c90b586 100644
--- a/5013_enable-cpu-optimizations-for-gcc10.patch
+++ b/5013_enable-cpu-optimizations-for-gcc10.patch
@@ -1,8 +1,7 @@
 From 4666424a864159b4de572c90adb2c3e1fcdd5890 Mon Sep 17 00:00:00 2001
 From: graysky <graysky@archlinux.us>
 Date: Fri, 13 Nov 2020 15:45:08 -0500
-Subject: [PATCH] 
- enable_additional_cpu_optimizations_for_gcc_v10.1+_kernel_v5.8+.patch
+Subject: [PATCH] more-uarches-for-gcc-v10-and-kernel-5.8+
 
 WARNING
 This patch works with gcc versions 10.1+ and with kernel version 5.8+ and should
@@ -86,30 +85,20 @@ REFERENCES
 4.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
 5.  https://github.com/graysky2/kernel_gcc_patch/issues/15
 6.  http://www.linuxforge.net/docs/linux/linux-gcc.php
+
 ---
- arch/x86/Kconfig.cpu            | 301 ++++++++++++++++++++++++++++----
- arch/x86/Makefile               |  53 +++++-
- arch/x86/Makefile_32.cpu        |  32 +++-
- arch/x86/include/asm/vermagic.h |  56 ++++++
- 4 files changed, 407 insertions(+), 35 deletions(-)
+ arch/x86/Kconfig.cpu            | 258 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  39 ++++-
+ arch/x86/include/asm/vermagic.h |  56 +++++++
+ 3 files changed, 336 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 814fe0d349b0..7b08e87fe797 100644
+index 814fe0d349b0..134390e619bb 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
-@@ -123,6 +123,7 @@ config MPENTIUMM
- config MPENTIUM4
- 	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
- 	depends on X86_32
-+	select X86_P6_NOP
- 	help
- 	  Select this for Intel Pentium 4 chips.  This includes the
- 	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
-@@ -155,9 +156,8 @@ config MPENTIUM4
- 		-Paxville
- 		-Dempsey
+@@ -157,7 +157,7 @@ config MPENTIUM4
+ 
  
--
  config MK6
 -	bool "K6/K6-II/K6-III"
 +	bool "AMD K6/K6-II/K6-III"
@@ -147,7 +136,7 @@ index 814fe0d349b0..7b08e87fe797 100644
 +	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
 +	help
 +	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
-+		Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
 +	  Enables use of some extended instructions, and passes appropriate
 +	  optimization flags to GCC.
 +
@@ -217,52 +206,33 @@ index 814fe0d349b0..7b08e87fe797 100644
  config MCRUSOE
  	bool "Crusoe"
  	depends on X86_32
-@@ -260,6 +338,7 @@ config MVIAC7
- 
- config MPSC
- 	bool "Intel P4 / older Netburst based Xeon"
-+	select X86_P6_NOP
- 	depends on X86_64
- 	help
- 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
-@@ -269,8 +348,19 @@ config MPSC
- 	  using the cpu family field
+@@ -270,7 +348,7 @@ config MPSC
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
  
-+config MATOM
-+	bool "Intel Atom"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for the Intel Atom platform. Intel Atom CPUs have an
-+	  in-order pipelining architecture and thus can benefit from
-+	  accordingly optimized code. Use a recent GCC with specific Atom
-+	  support in order to fully benefit from selecting this option.
-+
  config MCORE2
 -	bool "Core 2/newer Xeon"
 +	bool "Intel Core 2"
-+	select X86_P6_NOP
  	help
  
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,14 +368,151 @@ config MCORE2
+@@ -278,6 +356,8 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
  
--config MATOM
--	bool "Intel Atom"
 +	  Enables -march=core2
 +
+ config MATOM
+ 	bool "Intel Atom"
+ 	help
+@@ -287,6 +367,150 @@ config MATOM
+ 	  accordingly optimized code. Use a recent GCC with specific Atom
+ 	  support in order to fully benefit from selecting this option.
+ 
 +config MNEHALEM
 +	bool "Intel Nehalem"
 +	select X86_P6_NOP
- 	help
- 
--	  Select this for the Intel Atom platform. Intel Atom CPUs have an
--	  in-order pipelining architecture and thus can benefit from
--	  accordingly optimized code. Use a recent GCC with specific Atom
--	  support in order to fully benefit from selecting this option.
++	help
++
 +	  Select this for 1st Gen Core processors in the Nehalem family.
 +
 +	  Enables -march=nehalem
@@ -401,112 +371,96 @@ index 814fe0d349b0..7b08e87fe797 100644
 +	  Select this for third-generation 10 nm process processors in the Tiger Lake family.
 +
 +	  Enables -march=tigerlake
- 
++
  config GENERIC_CPU
  	bool "Generic-x86-64"
-@@ -294,6 +521,19 @@ config GENERIC_CPU
+ 	depends on X86_64
+@@ -294,6 +518,16 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
  
 +config MNATIVE
-+ bool "Native optimizations autodetected by GCC"
-+ help
++	bool "Native optimizations autodetected by GCC"
++	help
 +
-+   GCC 4.2 and above support -march=native, which automatically detects
-+   the optimum settings to use based on your processor. -march=native
-+   also detects and applies additional settings beyond -march specific
-+   to your CPU, (eg. -msse4). Unless you have a specific reason not to
-+   (e.g. distcc cross-compiling), you should probably be using
-+   -march=native rather than anything listed below.
++	  GCC 4.2 and above support -march=native, which automatically detects
++	  the optimum settings to use based on your processor. Do NOT use this
++	  for AMD CPUs.  Intel Only!
 +
-+   Enables -march=native
++	  Enables -march=native
 +
  endchoice
  
  config X86_GENERIC
-@@ -318,7 +558,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,7 +552,7 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || X86_GENERIC || GENERIC_CPU
  	default "4" if MELAN || M486SX || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
  
-@@ -336,35 +576,36 @@ config X86_ALIGNMENT_16
+@@ -336,11 +570,11 @@ config X86_ALIGNMENT_16
  
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
  
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MATOM || MNATIVE
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
  
  config X86_USE_3DNOW
  	def_bool y
- 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
- 
--#
--# P6_NOPs are a relatively minor optimization that require a family >=
--# 6 processor, except that it is broken on certain VIA chips.
--# Furthermore, AMD chips prefer a totally different sequence of NOPs
--# (which work on all CPUs).  In addition, it looks like Virtual PC
--# does not understand them.
--#
--# As a result, disallow these if we're not compiling for X86_64 (these
--# NOPs do work on all x86-64 capable chips); the list of processors in
--# the right-hand clause are the cores that benefit from this optimization.
--#
+@@ -360,26 +594,26 @@ config X86_USE_3DNOW
  config X86_P6_NOP
--	def_bool y
--	depends on X86_64
+ 	def_bool y
+ 	depends on X86_64
 -	depends on (MCORE2 || MPENTIUM4 || MPSC)
-+	default n
-+	bool "Support for P6_NOPs on Intel chips"
-+	depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS  || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
-+	help
-+	P6_NOPs are a relatively minor optimization that require a family >=
-+	6 processor, except that it is broken on certain VIA chips.
-+	Furthermore, AMD chips prefer a totally different sequence of NOPs
-+	(which work on all CPUs).  In addition, it looks like Virtual PC
-+	does not understand them.
-+
-+	As a result, disallow these if we're not compiling for X86_64 (these
-+	NOPs do work on all x86-64 capable chips); the list of processors in
-+	the right-hand clause are the cores that benefit from this optimization.
-+
-+	Say Y if you have Intel CPU newer than Pentium Pro, N otherwise.
++	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
  
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE) || X86_64
  
  config X86_CMPXCHG64
  	def_bool y
-@@ -374,7 +615,7 @@ config X86_CMPXCHG64
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
++	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
  	def_bool y
 -	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
++	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
  
  config X86_MINIMUM_CPU_FAMILY
  	int
+ 	default "64" if X86_64
+-	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
++	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
+ 	default "5" if X86_32 && X86_CMPXCHG64
+ 	default "4"
+ 
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 154259f18b8b..405b1f2b3c65 100644
+index 7116da3980be..50c8af35092b 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -115,13 +115,60 @@ else
- 	KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
- 
+@@ -110,11 +110,40 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
-+        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
          cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-+        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+-
+-        cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+-	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+-		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3)
 +        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
 +        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
 +        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
@@ -519,102 +473,30 @@ index 154259f18b8b..405b1f2b3c65 100644
 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
 +        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
-+        cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
-         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
- 
-         cflags-$(CONFIG_MCORE2) += \
--                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
--	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
--		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-+                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
-+        cflags-$(CONFIG_MNEHALEM) += \
-+                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
-+        cflags-$(CONFIG_MWESTMERE) += \
-+                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
-+        cflags-$(CONFIG_MSILVERMONT) += \
-+                $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
-+        cflags-$(CONFIG_MGOLDMONT) += \
-+                $(call cc-option,-march=goldmont,$(call cc-option,-mtune=goldmont))
-+        cflags-$(CONFIG_MGOLDMONTPLUS) += \
-+                $(call cc-option,-march=goldmont-plus,$(call cc-option,-mtune=goldmont-plus))
-+        cflags-$(CONFIG_MSANDYBRIDGE) += \
-+                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
-+        cflags-$(CONFIG_MIVYBRIDGE) += \
-+                $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
-+        cflags-$(CONFIG_MHASWELL) += \
-+                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
-+        cflags-$(CONFIG_MBROADWELL) += \
-+                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
-+        cflags-$(CONFIG_MSKYLAKE) += \
-+                $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
-+        cflags-$(CONFIG_MSKYLAKEX) += \
-+                $(call cc-option,-march=skylake-avx512,$(call cc-option,-mtune=skylake-avx512))
-+        cflags-$(CONFIG_MCANNONLAKE) += \
-+                $(call cc-option,-march=cannonlake,$(call cc-option,-mtune=cannonlake))
-+        cflags-$(CONFIG_MICELAKE) += \
-+                $(call cc-option,-march=icelake-client,$(call cc-option,-mtune=icelake-client))
-+        cflags-$(CONFIG_MCASCADELAKE) += \
-+                $(call cc-option,-march=cascadelake,$(call cc-option,-mtune=cascadelake))
-+        cflags-$(CONFIG_MCOOPERLAKE) += \
-+                $(call cc-option,-march=cooperlake,$(call cc-option,-mtune=cooperlake))
-+        cflags-$(CONFIG_MTIGERLAKE) += \
-+                $(call cc-option,-march=tigerlake,$(call cc-option,-mtune=tigerlake))
-+        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
-+                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
++        cflags-$(CONFIG_MZEN2) +=  $(call cc-option,-march=znver2)
++
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
++        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
++        cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
++        cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
++        cflags-$(CONFIG_MWESTMERE) += $(call cc-option,-march=westmere)
++        cflags-$(CONFIG_MSILVERMONT) += $(call cc-option,-march=silvermont)
++        cflags-$(CONFIG_MGOLDMONT) += $(call cc-option,-march=goldmont)
++        cflags-$(CONFIG_MGOLDMONTPLUS) += $(call cc-option,-march=goldmont-plus)
++        cflags-$(CONFIG_MSANDYBRIDGE) += $(call cc-option,-march=sandybridge)
++        cflags-$(CONFIG_MIVYBRIDGE) += $(call cc-option,-march=ivybridge)
++        cflags-$(CONFIG_MHASWELL) += $(call cc-option,-march=haswell)
++        cflags-$(CONFIG_MBROADWELL) += $(call cc-option,-march=broadwell)
++        cflags-$(CONFIG_MSKYLAKE) += $(call cc-option,-march=skylake)
++        cflags-$(CONFIG_MSKYLAKEX) += $(call cc-option,-march=skylake-avx512)
++        cflags-$(CONFIG_MCANNONLAKE) += $(call cc-option,-march=cannonlake)
++        cflags-$(CONFIG_MICELAKE) += $(call cc-option,-march=icelake-client)
++        cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
++        cflags-$(CONFIG_MCOOPERLAKE) += $(call cc-option,-march=cooperlake)
++        cflags-$(CONFIG_MTIGERLAKE) += $(call cc-option,-march=tigerlake)
          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
          KBUILD_CFLAGS += $(cflags-y)
  
-diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
-index cd3056759880..cb0a4c6bd987 100644
---- a/arch/x86/Makefile_32.cpu
-+++ b/arch/x86/Makefile_32.cpu
-@@ -24,7 +24,19 @@ cflags-$(CONFIG_MK6)		+= -march=k6
- # Please note, that patches that add -march=athlon-xp and friends are pointless.
- # They make zero difference whatsosever to performance at this time.
- cflags-$(CONFIG_MK7)		+= -march=athlon
-+cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
- cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
-+cflags-$(CONFIG_MK8SSE3)		+= $(call cc-option,-march=k8-sse3,-march=athlon)
-+cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
-+cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
-+cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
-+cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
-+cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
-+cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
-+cflags-$(CONFIG_MSTEAMROLLER)	+= $(call cc-option,-march=bdver3,-march=athlon)
-+cflags-$(CONFIG_MEXCAVATOR)	+= $(call cc-option,-march=bdver4,-march=athlon)
-+cflags-$(CONFIG_MZEN)	+= $(call cc-option,-march=znver1,-march=athlon)
-+cflags-$(CONFIG_MZEN2)	+= $(call cc-option,-march=znver2,-march=athlon)
- cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
- cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
- cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
-@@ -33,8 +45,24 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) -falign-fu
- cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
- cflags-$(CONFIG_MVIAC7)		+= -march=i686
- cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
--cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
--	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-+cflags-$(CONFIG_MNEHALEM)	+= -march=i686 $(call tune,nehalem)
-+cflags-$(CONFIG_MWESTMERE)	+= -march=i686 $(call tune,westmere)
-+cflags-$(CONFIG_MSILVERMONT)	+= -march=i686 $(call tune,silvermont)
-+cflags-$(CONFIG_MGOLDMONT)	+= -march=i686 $(call tune,goldmont)
-+cflags-$(CONFIG_MGOLDMONTPLUS)	+= -march=i686 $(call tune,goldmont-plus)
-+cflags-$(CONFIG_MSANDYBRIDGE)	+= -march=i686 $(call tune,sandybridge)
-+cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
-+cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
-+cflags-$(CONFIG_MBROADWELL)	+= -march=i686 $(call tune,broadwell)
-+cflags-$(CONFIG_MSKYLAKE)	+= -march=i686 $(call tune,skylake)
-+cflags-$(CONFIG_MSKYLAKEX)	+= -march=i686 $(call tune,skylake-avx512)
-+cflags-$(CONFIG_MCANNONLAKE)	+= -march=i686 $(call tune,cannonlake)
-+cflags-$(CONFIG_MICELAKE)	+= -march=i686 $(call tune,icelake-client)
-+cflags-$(CONFIG_MCASCADELAKE)	+= -march=i686 $(call tune,cascadelake)
-+cflags-$(CONFIG_MCOOPERLAKE)	+= -march=i686 $(call tune,cooperlake)
-+cflags-$(CONFIG_MTIGERLAKE)	+= -march=i686 $(call tune,tigerlake)
-+cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
-+	$(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
- 
- # AMD Elan support
- cflags-$(CONFIG_MELAN)		+= -march=i486
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
 index 75884d2cdec3..14c222e78213 100644
 --- a/arch/x86/include/asm/vermagic.h
@@ -690,5 +572,6 @@ index 75884d2cdec3..14c222e78213 100644
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
 -- 
-2.29.2
+2.30.1
+
 



end of thread, other threads:[~2021-07-20 15:49 UTC | newest]

Thread overview: 33+ messages
2021-06-18 12:21 [gentoo-commits] proj/linux-patches:5.12 commit in: / Mike Pagano
  -- strict thread matches above, loose matches on Subject: below --
2021-07-20 15:49 Alice Ferrazzi
2021-07-19 11:16 Mike Pagano
2021-07-14 16:18 Mike Pagano
2021-07-13 12:36 Mike Pagano
2021-07-11 14:42 Mike Pagano
2021-07-07 13:12 Mike Pagano
2021-07-04 15:43 Mike Pagano
2021-07-01 14:28 Mike Pagano
2021-06-30 14:22 Mike Pagano
2021-06-23 15:15 Mike Pagano
2021-06-18 12:00 Mike Pagano
2021-06-18 11:35 Mike Pagano
2021-06-16 12:25 Mike Pagano
2021-06-11 13:21 Mike Pagano
2021-06-10 12:14 Mike Pagano
2021-06-08 22:15 Mike Pagano
2021-06-08 16:48 Mike Pagano
2021-06-08 16:26 Mike Pagano
2021-06-03 10:22 Alice Ferrazzi
2021-05-28 12:17 Alice Ferrazzi
2021-05-26 12:08 Mike Pagano
2021-05-24 17:26 Mike Pagano
2021-05-22 16:50 Mike Pagano
2021-05-19 12:25 Mike Pagano
2021-05-14 14:02 Alice Ferrazzi
2021-05-12 12:30 Mike Pagano
2021-05-07 13:15 Alice Ferrazzi
2021-05-02 16:05 Mike Pagano
2021-04-30 18:53 Mike Pagano
2021-04-27 11:53 Mike Pagano
2021-04-18 22:03 Mike Pagano
2021-03-23 12:29 Mike Pagano
