From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.15 commit in: /
Date: Thu, 12 Apr 2018 12:20:11 +0000 (UTC)
Message-ID: <1523535602.b57564a42c932434a1633d677be3e73f9716454d.mpagano@gentoo>

commit:     b57564a42c932434a1633d677be3e73f9716454d
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Thu Apr 12 12:20:02 2018 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Thu Apr 12 12:20:02 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b57564a4

Linux patch 4.15.17

 0000_README              |    4 +
 1016_linux-4.15.17.patch | 6212 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6216 insertions(+)
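
For anyone applying this by hand rather than through gentoo-sources, here is a minimal sketch of layering the new patch onto a 4.15.16 tree. The patch file name comes from the diffstat above; the surrounding paths are illustrative, and -p1 matches the a/ and b/ prefixes used throughout the patch:

    cd linux-4.15.16
    # dry-run first to confirm the patch applies cleanly
    patch -p1 --dry-run < /path/to/linux-patches/1016_linux-4.15.17.patch
    patch -p1 < /path/to/linux-patches/1016_linux-4.15.17.patch
    # the Makefile should now report SUBLEVEL = 17
    grep -E '^(PATCHLEVEL|SUBLEVEL)' Makefile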

diff --git a/0000_README b/0000_README
index ba8435c..f973683 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch:  1015_linux-4.15.16.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.15.16
 
+Patch:  1016_linux-4.15.17.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.15.17
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1016_linux-4.15.17.patch b/1016_linux-4.15.17.patch
new file mode 100644
index 0000000..1b23b1b
--- /dev/null
+++ b/1016_linux-4.15.17.patch
@@ -0,0 +1,6212 @@
+diff --git a/Makefile b/Makefile
+index b28f0f721ec7..cfff73b62eb5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 15
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
+index 9319e1f0f1d8..379b4a03cfe2 100644
+--- a/arch/arm/boot/dts/ls1021a.dtsi
++++ b/arch/arm/boot/dts/ls1021a.dtsi
+@@ -155,7 +155,7 @@
+ 		};
+ 
+ 		esdhc: esdhc@1560000 {
+-			compatible = "fsl,esdhc";
++			compatible = "fsl,ls1021a-esdhc", "fsl,esdhc";
+ 			reg = <0x0 0x1560000 0x0 0x10000>;
+ 			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ 			clock-frequency = <0>;
+diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
+index b1ac80fba578..301417ae2ba8 100644
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -194,26 +194,29 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+ {
+ 	unsigned long flags;
+-	u64 asid;
++	u64 asid, old_active_asid;
+ 
+ 	asid = atomic64_read(&mm->context.id);
+ 
+ 	/*
+ 	 * The memory ordering here is subtle.
+-	 * If our ASID matches the current generation, then we update
+-	 * our active_asids entry with a relaxed xchg. Racing with a
+-	 * concurrent rollover means that either:
++	 * If our active_asids is non-zero and the ASID matches the current
++	 * generation, then we update the active_asids entry with a relaxed
++	 * cmpxchg. Racing with a concurrent rollover means that either:
+ 	 *
+-	 * - We get a zero back from the xchg and end up waiting on the
++	 * - We get a zero back from the cmpxchg and end up waiting on the
+ 	 *   lock. Taking the lock synchronises with the rollover and so
+ 	 *   we are forced to see the updated generation.
+ 	 *
+-	 * - We get a valid ASID back from the xchg, which means the
++	 * - We get a valid ASID back from the cmpxchg, which means the
+ 	 *   relaxed xchg in flush_context will treat us as reserved
+ 	 *   because atomic RmWs are totally ordered for a given location.
+ 	 */
+-	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
+-	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
++	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
++	if (old_active_asid &&
++	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
++	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
++				     old_active_asid, asid))
+ 		goto switch_mm_fastpath;
+ 
+ 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index 55520cec8b27..6cf0e4cb7b97 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -37,7 +37,13 @@ struct cpu_signature {
+ 
+ struct device;
+ 
+-enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
++enum ucode_state {
++	UCODE_OK	= 0,
++	UCODE_NEW,
++	UCODE_UPDATED,
++	UCODE_NFOUND,
++	UCODE_ERROR,
++};
+ 
+ struct microcode_ops {
+ 	enum ucode_state (*request_microcode_user) (int cpu,
+@@ -54,7 +60,7 @@ struct microcode_ops {
+ 	 * are being called.
+ 	 * See also the "Synchronization" section in microcode_core.c.
+ 	 */
+-	int (*apply_microcode) (int cpu);
++	enum ucode_state (*apply_microcode) (int cpu);
+ 	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
+ };
+ 
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 44c2c4ec6d60..a5fc8f8bfb83 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -969,4 +969,5 @@ bool xen_set_default_idle(void);
+ 
+ void stop_this_cpu(void *dummy);
+ void df_debug(struct pt_regs *regs, long error_code);
++void microcode_check(void);
+ #endif /* _ASM_X86_PROCESSOR_H */
+diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
+index f5d92bc3b884..2c4d5ece7456 100644
+--- a/arch/x86/kernel/aperture_64.c
++++ b/arch/x86/kernel/aperture_64.c
+@@ -30,6 +30,7 @@
+ #include <asm/dma.h>
+ #include <asm/amd_nb.h>
+ #include <asm/x86_init.h>
++#include <linux/crash_dump.h>
+ 
+ /*
+  * Using 512M as goal, in case kexec will load kernel_big
+@@ -56,6 +57,33 @@ int fallback_aper_force __initdata;
+ 
+ int fix_aperture __initdata = 1;
+ 
++#ifdef CONFIG_PROC_VMCORE
++/*
++ * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
++ * use the same range because it will remain configured in the northbridge.
++ * Trying to dump this area via /proc/vmcore may crash the machine, so exclude
++ * it from vmcore.
++ */
++static unsigned long aperture_pfn_start, aperture_page_count;
++
++static int gart_oldmem_pfn_is_ram(unsigned long pfn)
++{
++	return likely((pfn < aperture_pfn_start) ||
++		      (pfn >= aperture_pfn_start + aperture_page_count));
++}
++
++static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
++{
++	aperture_pfn_start = aper_base >> PAGE_SHIFT;
++	aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
++	WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
++}
++#else
++static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
++{
++}
++#endif
++
+ /* This code runs before the PCI subsystem is initialized, so just
+    access the northbridge directly. */
+ 
+@@ -435,8 +463,16 @@ int __init gart_iommu_hole_init(void)
+ 
+ out:
+ 	if (!fix && !fallback_aper_force) {
+-		if (last_aper_base)
++		if (last_aper_base) {
++			/*
++			 * If this is the kdump kernel, the first kernel
++			 * may have allocated the range over its e820 RAM
++			 * and fixed up the northbridge
++			 */
++			exclude_from_vmcore(last_aper_base, last_aper_order);
++
+ 			return 1;
++		}
+ 		return 0;
+ 	}
+ 
+@@ -473,6 +509,14 @@ int __init gart_iommu_hole_init(void)
+ 		return 0;
+ 	}
+ 
++	/*
++	 * If this is the kdump kernel _and_ the first kernel did not
++	 * configure the aperture in the northbridge, this range may
++	 * overlap with the first kernel's memory. We can't access the
++	 * range through vmcore even though it should be part of the dump.
++	 */
++	exclude_from_vmcore(aper_alloc, aper_order);
++
+ 	/* Fix up the north bridges */
+ 	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
+ 		int bus, dev_base, dev_limit;
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 824aee0117bb..348cf4821240 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1749,3 +1749,33 @@ static int __init init_cpu_syscore(void)
+ 	return 0;
+ }
+ core_initcall(init_cpu_syscore);
++
++/*
++ * The microcode loader calls this upon late microcode load to recheck features,
++ * only when microcode has been updated. Caller holds microcode_mutex and CPU
++ * hotplug lock.
++ */
++void microcode_check(void)
++{
++	struct cpuinfo_x86 info;
++
++	perf_check_microcode();
++
++	/* Reload CPUID max function as it might've changed. */
++	info.cpuid_level = cpuid_eax(0);
++
++	/*
++	 * Copy all capability leafs to pick up the synthetic ones so that
++	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
++	 * get overwritten in get_cpu_cap().
++	 */
++	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
++
++	get_cpu_cap(&info);
++
++	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
++		return;
++
++	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
++	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
++}
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 330b8462d426..48179928ff38 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
+ 		return -EINVAL;
+ 
+ 	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
+-	if (ret != UCODE_OK)
++	if (ret > UCODE_UPDATED)
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -498,7 +498,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
+ 	return patch_size;
+ }
+ 
+-static int apply_microcode_amd(int cpu)
++static enum ucode_state apply_microcode_amd(int cpu)
+ {
+ 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+ 	struct microcode_amd *mc_amd;
+@@ -512,7 +512,7 @@ static int apply_microcode_amd(int cpu)
+ 
+ 	p = find_patch(cpu);
+ 	if (!p)
+-		return 0;
++		return UCODE_NFOUND;
+ 
+ 	mc_amd  = p->data;
+ 	uci->mc = p->data;
+@@ -523,13 +523,13 @@ static int apply_microcode_amd(int cpu)
+ 	if (rev >= mc_amd->hdr.patch_id) {
+ 		c->microcode = rev;
+ 		uci->cpu_sig.rev = rev;
+-		return 0;
++		return UCODE_OK;
+ 	}
+ 
+ 	if (__apply_microcode_amd(mc_amd)) {
+ 		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
+ 			cpu, mc_amd->hdr.patch_id);
+-		return -1;
++		return UCODE_ERROR;
+ 	}
+ 	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
+ 		mc_amd->hdr.patch_id);
+@@ -537,7 +537,7 @@ static int apply_microcode_amd(int cpu)
+ 	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
+ 	c->microcode = mc_amd->hdr.patch_id;
+ 
+-	return 0;
++	return UCODE_UPDATED;
+ }
+ 
+ static int install_equiv_cpu_table(const u8 *buf)
+@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+ static enum ucode_state
+ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
+ {
++	struct ucode_patch *p;
+ 	enum ucode_state ret;
+ 
+ 	/* free old equiv table */
+ 	free_equiv_cpu_table();
+ 
+ 	ret = __load_microcode_amd(family, data, size);
+-
+-	if (ret != UCODE_OK)
++	if (ret != UCODE_OK) {
+ 		cleanup();
++		return ret;
++	}
+ 
+-#ifdef CONFIG_X86_32
+-	/* save BSP's matching patch for early load */
+-	if (save) {
+-		struct ucode_patch *p = find_patch(0);
+-		if (p) {
+-			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+-			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
+-							       PATCH_MAX_SIZE));
+-		}
++	p = find_patch(0);
++	if (!p) {
++		return ret;
++	} else {
++		if (boot_cpu_data.microcode == p->patch_id)
++			return ret;
++
++		ret = UCODE_NEW;
+ 	}
+-#endif
++
++	/* save BSP's matching patch for early load */
++	if (!save)
++		return ret;
++
++	memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
++	memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
++
+ 	return ret;
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index e4fc595cd6ea..021c90464cc2 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -22,13 +22,16 @@
+ #define pr_fmt(fmt) "microcode: " fmt
+ 
+ #include <linux/platform_device.h>
++#include <linux/stop_machine.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/miscdevice.h>
+ #include <linux/capability.h>
+ #include <linux/firmware.h>
+ #include <linux/kernel.h>
++#include <linux/delay.h>
+ #include <linux/mutex.h>
+ #include <linux/cpu.h>
++#include <linux/nmi.h>
+ #include <linux/fs.h>
+ #include <linux/mm.h>
+ 
+@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
+  */
+ static DEFINE_MUTEX(microcode_mutex);
+ 
++/*
++ * Serialize late loading so that CPUs get updated one-by-one.
++ */
++static DEFINE_SPINLOCK(update_lock);
++
+ struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];
+ 
+ struct cpu_info_ctx {
+@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
+ 	return ret;
+ }
+ 
+-struct apply_microcode_ctx {
+-	int err;
+-};
+-
+ static void apply_microcode_local(void *arg)
+ {
+-	struct apply_microcode_ctx *ctx = arg;
++	enum ucode_state *err = arg;
+ 
+-	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
++	*err = microcode_ops->apply_microcode(smp_processor_id());
+ }
+ 
+ static int apply_microcode_on_target(int cpu)
+ {
+-	struct apply_microcode_ctx ctx = { .err = 0 };
++	enum ucode_state err;
+ 	int ret;
+ 
+-	ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
+-	if (!ret)
+-		ret = ctx.err;
+-
++	ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
++	if (!ret) {
++		if (err == UCODE_ERROR)
++			ret = 1;
++	}
+ 	return ret;
+ }
+ 
+@@ -489,31 +494,124 @@ static void __exit microcode_dev_exit(void)
+ /* fake device for request_firmware */
+ static struct platform_device	*microcode_pdev;
+ 
+-static int reload_for_cpu(int cpu)
++/*
++ * Late loading dance. Why the heavy-handed stomp_machine effort?
++ *
++ * - HT siblings must be idle and not execute other code while the other sibling
++ *   is loading microcode in order to avoid any negative interactions caused by
++ *   the loading.
++ *
++ * - In addition, microcode update on the cores must be serialized until this
++ *   requirement can be relaxed in the future. Right now, this is conservative
++ *   and good.
++ */
++#define SPINUNIT 100 /* 100 nsec */
++
++static int check_online_cpus(void)
+ {
+-	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+-	enum ucode_state ustate;
+-	int err = 0;
++	if (num_online_cpus() == num_present_cpus())
++		return 0;
+ 
+-	if (!uci->valid)
+-		return err;
++	pr_err("Not all CPUs online, aborting microcode update.\n");
+ 
+-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
+-	if (ustate == UCODE_OK)
+-		apply_microcode_on_target(cpu);
+-	else
+-		if (ustate == UCODE_ERROR)
+-			err = -EINVAL;
+-	return err;
++	return -EINVAL;
++}
++
++static atomic_t late_cpus_in;
++static atomic_t late_cpus_out;
++
++static int __wait_for_cpus(atomic_t *t, long long timeout)
++{
++	int all_cpus = num_online_cpus();
++
++	atomic_inc(t);
++
++	while (atomic_read(t) < all_cpus) {
++		if (timeout < SPINUNIT) {
++			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
++				all_cpus - atomic_read(t));
++			return 1;
++		}
++
++		ndelay(SPINUNIT);
++		timeout -= SPINUNIT;
++
++		touch_nmi_watchdog();
++	}
++	return 0;
++}
++
++/*
++ * Returns:
++ * < 0 - on error
++ *   0 - no update done
++ *   1 - microcode was updated
++ */
++static int __reload_late(void *info)
++{
++	int cpu = smp_processor_id();
++	enum ucode_state err;
++	int ret = 0;
++
++	/*
++	 * Wait for all CPUs to arrive. A load will not be attempted unless all
++	 * CPUs show up.
++	 * */
++	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
++		return -1;
++
++	spin_lock(&update_lock);
++	apply_microcode_local(&err);
++	spin_unlock(&update_lock);
++
++	if (err > UCODE_NFOUND) {
++		pr_warn("Error reloading microcode on CPU %d\n", cpu);
++		return -1;
++	/* siblings return UCODE_OK because their engine got updated already */
++	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
++		ret = 1;
++	} else {
++		return ret;
++	}
++
++	/*
++	 * Increase the wait timeout to a safe value here since we're
++	 * serializing the microcode update and that could take a while on a
++	 * large number of CPUs. And that is fine as the *actual* timeout will
++	 * be determined by the last CPU finished updating and thus cut short.
++	 */
++	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
++		panic("Timeout during microcode update!\n");
++
++	return ret;
++}
++
++/*
++ * Reload microcode late on all CPUs. Wait for a sec until they
++ * all gather together.
++ */
++static int microcode_reload_late(void)
++{
++	int ret;
++
++	atomic_set(&late_cpus_in,  0);
++	atomic_set(&late_cpus_out, 0);
++
++	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
++	if (ret > 0)
++		microcode_check();
++
++	return ret;
+ }
+ 
+ static ssize_t reload_store(struct device *dev,
+ 			    struct device_attribute *attr,
+ 			    const char *buf, size_t size)
+ {
++	enum ucode_state tmp_ret = UCODE_OK;
++	int bsp = boot_cpu_data.cpu_index;
+ 	unsigned long val;
+-	int cpu;
+-	ssize_t ret = 0, tmp_ret;
++	ssize_t ret = 0;
+ 
+ 	ret = kstrtoul(buf, 0, &val);
+ 	if (ret)
+@@ -522,23 +620,24 @@ static ssize_t reload_store(struct device *dev,
+ 	if (val != 1)
+ 		return size;
+ 
++	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
++	if (tmp_ret != UCODE_NEW)
++		return size;
++
+ 	get_online_cpus();
+-	mutex_lock(&microcode_mutex);
+-	for_each_online_cpu(cpu) {
+-		tmp_ret = reload_for_cpu(cpu);
+-		if (tmp_ret != 0)
+-			pr_warn("Error reloading microcode on CPU %d\n", cpu);
+ 
+-		/* save retval of the first encountered reload error */
+-		if (!ret)
+-			ret = tmp_ret;
+-	}
+-	if (!ret)
+-		perf_check_microcode();
++	ret = check_online_cpus();
++	if (ret)
++		goto put;
++
++	mutex_lock(&microcode_mutex);
++	ret = microcode_reload_late();
+ 	mutex_unlock(&microcode_mutex);
++
++put:
+ 	put_online_cpus();
+ 
+-	if (!ret)
++	if (ret >= 0)
+ 		ret = size;
+ 
+ 	return ret;
+@@ -606,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
+ 	if (system_state != SYSTEM_RUNNING)
+ 		return UCODE_NFOUND;
+ 
+-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
+-						     refresh_fw);
+-
+-	if (ustate == UCODE_OK) {
++	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
++	if (ustate == UCODE_NEW) {
+ 		pr_debug("CPU%d updated upon init\n", cpu);
+ 		apply_microcode_on_target(cpu);
+ 	}
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index a15db2b4e0d6..32b8e5724f96 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
+ 	if (!mc)
+ 		return 0;
+ 
++	/*
++	 * Save us the MSR write below - which is a particular expensive
++	 * operation - when the other hyperthread has updated the microcode
++	 * already.
++	 */
++	rev = intel_get_microcode_revision();
++	if (rev >= mc->hdr.rev) {
++		uci->cpu_sig.rev = rev;
++		return UCODE_OK;
++	}
++
++	/*
++	 * Writeback and invalidate caches before updating microcode to avoid
++	 * internal issues depending on what the microcode is updating.
++	 */
++	native_wbinvd();
++
+ 	/* write microcode via MSR 0x79 */
+ 	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+ 
+@@ -772,27 +789,44 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
+ 	return 0;
+ }
+ 
+-static int apply_microcode_intel(int cpu)
++static enum ucode_state apply_microcode_intel(int cpu)
+ {
++	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
++	struct cpuinfo_x86 *c = &cpu_data(cpu);
+ 	struct microcode_intel *mc;
+-	struct ucode_cpu_info *uci;
+-	struct cpuinfo_x86 *c;
+ 	static int prev_rev;
+ 	u32 rev;
+ 
+ 	/* We should bind the task to the CPU */
+ 	if (WARN_ON(raw_smp_processor_id() != cpu))
+-		return -1;
++		return UCODE_ERROR;
+ 
+-	uci = ucode_cpu_info + cpu;
+-	mc = uci->mc;
++	/* Look for a newer patch in our cache: */
++	mc = find_patch(uci);
+ 	if (!mc) {
+-		/* Look for a newer patch in our cache: */
+-		mc = find_patch(uci);
++		mc = uci->mc;
+ 		if (!mc)
+-			return 0;
++			return UCODE_NFOUND;
+ 	}
+ 
++	/*
++	 * Save us the MSR write below - which is a particular expensive
++	 * operation - when the other hyperthread has updated the microcode
++	 * already.
++	 */
++	rev = intel_get_microcode_revision();
++	if (rev >= mc->hdr.rev) {
++		uci->cpu_sig.rev = rev;
++		c->microcode = rev;
++		return UCODE_OK;
++	}
++
++	/*
++	 * Writeback and invalidate caches before updating microcode to avoid
++	 * internal issues depending on what the microcode is updating.
++	 */
++	native_wbinvd();
++
+ 	/* write microcode via MSR 0x79 */
+ 	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+ 
+@@ -801,7 +835,7 @@ static int apply_microcode_intel(int cpu)
+ 	if (rev != mc->hdr.rev) {
+ 		pr_err("CPU%d update to revision 0x%x failed\n",
+ 		       cpu, mc->hdr.rev);
+-		return -1;
++		return UCODE_ERROR;
+ 	}
+ 
+ 	if (rev != prev_rev) {
+@@ -813,12 +847,10 @@ static int apply_microcode_intel(int cpu)
+ 		prev_rev = rev;
+ 	}
+ 
+-	c = &cpu_data(cpu);
+-
+ 	uci->cpu_sig.rev = rev;
+ 	c->microcode = rev;
+ 
+-	return 0;
++	return UCODE_UPDATED;
+ }
+ 
+ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
+@@ -830,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
+ 	unsigned int leftover = size;
+ 	unsigned int curr_mc_size = 0, new_mc_size = 0;
+ 	unsigned int csig, cpf;
++	enum ucode_state ret = UCODE_OK;
+ 
+ 	while (leftover) {
+ 		struct microcode_header_intel mc_header;
+@@ -871,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
+ 			new_mc  = mc;
+ 			new_mc_size = mc_size;
+ 			mc = NULL;	/* trigger new vmalloc */
++			ret = UCODE_NEW;
+ 		}
+ 
+ 		ucode_ptr += mc_size;
+@@ -900,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
+ 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
+ 		 cpu, new_rev, uci->cpu_sig.rev);
+ 
+-	return UCODE_OK;
++	return ret;
+ }
+ 
+ static int get_ucode_fw(void *to, const void *from, size_t n)
+diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c
+index 2cfcfe4f6b2a..dd2ad82eee80 100644
+--- a/arch/x86/xen/mmu_hvm.c
++++ b/arch/x86/xen/mmu_hvm.c
+@@ -75,6 +75,6 @@ void __init xen_hvm_init_mmu_ops(void)
+ 	if (is_pagetable_dying_supported())
+ 		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
+ #ifdef CONFIG_PROC_VMCORE
+-	register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
++	WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram));
+ #endif
+ }
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+index da1525ec4c87..d819dc77fe65 100644
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -775,10 +775,11 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ 	unsigned long flags;
+ 	int i;
+ 
++	spin_lock_irqsave(&bfqd->lock, flags);
++
+ 	if (!entity) /* root group */
+-		return;
++		goto put_async_queues;
+ 
+-	spin_lock_irqsave(&bfqd->lock, flags);
+ 	/*
+ 	 * Empty all service_trees belonging to this group before
+ 	 * deactivating the group itself.
+@@ -809,6 +810,8 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ 	}
+ 
+ 	__bfq_deactivate_entity(entity, false);
++
++put_async_queues:
+ 	bfq_put_async_queues(bfqd, bfqg);
+ 
+ 	spin_unlock_irqrestore(&bfqd->lock, flags);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 5629f18b51bd..ab88ff3314a7 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1996,7 +1996,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
+ {
+ 	blk_mq_debugfs_unregister_hctx(hctx);
+ 
+-	blk_mq_tag_idle(hctx);
++	if (blk_mq_hw_queue_mapped(hctx))
++		blk_mq_tag_idle(hctx);
+ 
+ 	if (set->ops->exit_request)
+ 		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
+@@ -2388,6 +2389,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+ 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
+ 
+ 	blk_mq_sysfs_unregister(q);
++
++	/* protect against switching io scheduler  */
++	mutex_lock(&q->sysfs_lock);
+ 	for (i = 0; i < set->nr_hw_queues; i++) {
+ 		int node;
+ 
+@@ -2432,6 +2436,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+ 		}
+ 	}
+ 	q->nr_hw_queues = i;
++	mutex_unlock(&q->sysfs_lock);
+ 	blk_mq_sysfs_register(q);
+ }
+ 
+@@ -2603,9 +2608,27 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+ 
+ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+ {
+-	if (set->ops->map_queues)
++	if (set->ops->map_queues) {
++		int cpu;
++		/*
++		 * transport .map_queues is usually done in the following
++		 * way:
++		 *
++		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
++		 * 	mask = get_cpu_mask(queue)
++		 * 	for_each_cpu(cpu, mask)
++		 * 		set->mq_map[cpu] = queue;
++		 * }
++		 *
++		 * When we need to remap, the table has to be cleared for
++		 * killing stale mapping since one CPU may not be mapped
++		 * to any hw queue.
++		 */
++		for_each_possible_cpu(cpu)
++			set->mq_map[cpu] = 0;
++
+ 		return set->ops->map_queues(set);
+-	else
++	} else
+ 		return blk_mq_map_queues(set);
+ }
+ 
+diff --git a/crypto/Makefile b/crypto/Makefile
+index d674884b2d51..daa69360e054 100644
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
+ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
+ CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
+ obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
++CFLAGS_aes_generic.o := $(call cc-ifversion, -ge, 0701, -Os) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
+ obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
+ obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
+ obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 0972ec0e2eb8..f53ccc680238 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events,
+ static bool device_id_scheme = false;
+ module_param(device_id_scheme, bool, 0444);
+ 
+-static bool only_lcd = false;
+-module_param(only_lcd, bool, 0444);
++static int only_lcd = -1;
++module_param(only_lcd, int, 0444);
+ 
+ static int register_count;
+ static DEFINE_MUTEX(register_count_mutex);
+@@ -2136,6 +2136,16 @@ int acpi_video_register(void)
+ 		goto leave;
+ 	}
+ 
++	/*
++	 * We're seeing a lot of bogus backlight interfaces on newer machines
++	 * without a LCD such as desktops, servers and HDMI sticks. Checking
++	 * the lcd flag fixes this, so enable this on any machines which are
++	 * win8 ready (where we also prefer the native backlight driver, so
++	 * normally the acpi_video code should not register there anyways).
++	 */
++	if (only_lcd == -1)
++		only_lcd = acpi_osi_is_win8();
++
+ 	dmi_check_system(video_dmi_table);
+ 
+ 	ret = acpi_bus_register_driver(&acpi_video_bus);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 0252c9b9af3d..d9f38c645e4a 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1516,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
+ 	}
+ 
+ 	acpi_handle_info(ec->handle,
+-			 "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
++			 "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+ 			 ec->gpe, ec->command_addr, ec->data_addr);
+ 	return ret;
+ }
+diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
+index 6c7dd7af789e..dd70d6c2bca0 100644
+--- a/drivers/acpi/ec_sys.c
++++ b/drivers/acpi/ec_sys.c
+@@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
++	if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
+ 		goto error;
+ 	if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
+ 				 &first_ec->global_lock))
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 7f43423de43c..1d0a501bc7f0 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -159,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {}
+    -------------------------------------------------------------------------- */
+ struct acpi_ec {
+ 	acpi_handle handle;
+-	unsigned long gpe;
++	u32 gpe;
+ 	unsigned long command_addr;
+ 	unsigned long data_addr;
+ 	bool global_lock;
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 0c80bea05bcb..b4501873354e 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1032,15 +1032,12 @@ static int genpd_prepare(struct device *dev)
+ static int genpd_finish_suspend(struct device *dev, bool poweroff)
+ {
+ 	struct generic_pm_domain *genpd;
+-	int ret;
++	int ret = 0;
+ 
+ 	genpd = dev_to_genpd(dev);
+ 	if (IS_ERR(genpd))
+ 		return -EINVAL;
+ 
+-	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+-		return 0;
+-
+ 	if (poweroff)
+ 		ret = pm_generic_poweroff_noirq(dev);
+ 	else
+@@ -1048,10 +1045,18 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
+ 	if (ret)
+ 		return ret;
+ 
++	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
++		return 0;
++
+ 	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
+ 		ret = pm_runtime_force_suspend(dev);
+-		if (ret)
++		if (ret) {
++			if (poweroff)
++				pm_generic_restore_noirq(dev);
++			else
++				pm_generic_resume_noirq(dev);
+ 			return ret;
++		}
+ 	}
+ 
+ 	genpd_lock(genpd);
+@@ -1085,7 +1090,7 @@ static int genpd_suspend_noirq(struct device *dev)
+ static int genpd_resume_noirq(struct device *dev)
+ {
+ 	struct generic_pm_domain *genpd;
+-	int ret = 0;
++	int ret;
+ 
+ 	dev_dbg(dev, "%s()\n", __func__);
+ 
+@@ -1094,21 +1099,20 @@ static int genpd_resume_noirq(struct device *dev)
+ 		return -EINVAL;
+ 
+ 	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+-		return 0;
++		return pm_generic_resume_noirq(dev);
+ 
+ 	genpd_lock(genpd);
+ 	genpd_sync_power_on(genpd, true, 0);
+ 	genpd->suspended_count--;
+ 	genpd_unlock(genpd);
+ 
+-	if (genpd->dev_ops.stop && genpd->dev_ops.start)
++	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
+ 		ret = pm_runtime_force_resume(dev);
++		if (ret)
++			return ret;
++	}
+ 
+-	ret = pm_generic_resume_noirq(dev);
+-	if (ret)
+-		return ret;
+-
+-	return ret;
++	return pm_generic_resume_noirq(dev);
+ }
+ 
+ /**
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 2f57e8b88a7a..0fec82469536 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
++	{ USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
+ 
+ 	/* Broadcom BCM2035 */
+diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
+index 707c2d1b84c7..7d98f9a17636 100644
+--- a/drivers/bluetooth/hci_bcm.c
++++ b/drivers/bluetooth/hci_bcm.c
+@@ -379,7 +379,7 @@ static int bcm_close(struct hci_uart *hu)
+ 		pm_runtime_disable(bdev->dev);
+ 		pm_runtime_set_suspended(bdev->dev);
+ 
+-		if (device_can_wakeup(bdev->dev)) {
++		if (bdev->irq > 0) {
+ 			devm_free_irq(bdev->dev, bdev->irq, bdev);
+ 			device_init_wakeup(bdev->dev, false);
+ 		}
+@@ -577,11 +577,9 @@ static int bcm_suspend_device(struct device *dev)
+ 	}
+ 
+ 	/* Suspend the device */
+-	if (bdev->device_wakeup) {
+-		gpiod_set_value(bdev->device_wakeup, false);
+-		bt_dev_dbg(bdev, "suspend, delaying 15 ms");
+-		mdelay(15);
+-	}
++	gpiod_set_value(bdev->device_wakeup, false);
++	bt_dev_dbg(bdev, "suspend, delaying 15 ms");
++	mdelay(15);
+ 
+ 	return 0;
+ }
+@@ -592,11 +590,9 @@ static int bcm_resume_device(struct device *dev)
+ 
+ 	bt_dev_dbg(bdev, "");
+ 
+-	if (bdev->device_wakeup) {
+-		gpiod_set_value(bdev->device_wakeup, true);
+-		bt_dev_dbg(bdev, "resume, delaying 15 ms");
+-		mdelay(15);
+-	}
++	gpiod_set_value(bdev->device_wakeup, true);
++	bt_dev_dbg(bdev, "resume, delaying 15 ms");
++	mdelay(15);
+ 
+ 	/* When this executes, the device has woken up already */
+ 	if (bdev->is_suspended && bdev->hu) {
+@@ -632,7 +628,7 @@ static int bcm_suspend(struct device *dev)
+ 	if (pm_runtime_active(dev))
+ 		bcm_suspend_device(dev);
+ 
+-	if (device_may_wakeup(dev)) {
++	if (device_may_wakeup(dev) && bdev->irq > 0) {
+ 		error = enable_irq_wake(bdev->irq);
+ 		if (!error)
+ 			bt_dev_dbg(bdev, "BCM irq: enabled");
+@@ -662,7 +658,7 @@ static int bcm_resume(struct device *dev)
+ 	if (!bdev->hu)
+ 		goto unlock;
+ 
+-	if (device_may_wakeup(dev)) {
++	if (device_may_wakeup(dev) && bdev->irq > 0) {
+ 		disable_irq_wake(bdev->irq);
+ 		bt_dev_dbg(bdev, "BCM irq: disabled");
+ 	}
+@@ -779,8 +775,7 @@ static int bcm_get_resources(struct bcm_device *dev)
+ 
+ 	dev->clk = devm_clk_get(dev->dev, NULL);
+ 
+-	dev->device_wakeup = devm_gpiod_get_optional(dev->dev,
+-						     "device-wakeup",
++	dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
+ 						     GPIOD_OUT_LOW);
+ 	if (IS_ERR(dev->device_wakeup))
+ 		return PTR_ERR(dev->device_wakeup);
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 5294442505cb..0f1dc35e7078 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -328,7 +328,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
+ }
+ EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
+ 
+-static bool tpm_validate_command(struct tpm_chip *chip,
++static int tpm_validate_command(struct tpm_chip *chip,
+ 				 struct tpm_space *space,
+ 				 const u8 *cmd,
+ 				 size_t len)
+@@ -340,10 +340,10 @@ static bool tpm_validate_command(struct tpm_chip *chip,
+ 	unsigned int nr_handles;
+ 
+ 	if (len < TPM_HEADER_SIZE)
+-		return false;
++		return -EINVAL;
+ 
+ 	if (!space)
+-		return true;
++		return 0;
+ 
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) {
+ 		cc = be32_to_cpu(header->ordinal);
+@@ -352,7 +352,7 @@ static bool tpm_validate_command(struct tpm_chip *chip,
+ 		if (i < 0) {
+ 			dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
+ 				cc);
+-			return false;
++			return -EOPNOTSUPP;
+ 		}
+ 
+ 		attrs = chip->cc_attrs_tbl[i];
+@@ -362,11 +362,11 @@ static bool tpm_validate_command(struct tpm_chip *chip,
+ 			goto err_len;
+ 	}
+ 
+-	return true;
++	return 0;
+ err_len:
+ 	dev_dbg(&chip->dev,
+ 		"%s: insufficient command length %zu", __func__, len);
+-	return false;
++	return -EINVAL;
+ }
+ 
+ /**
+@@ -391,8 +391,20 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ 	unsigned long stop;
+ 	bool need_locality;
+ 
+-	if (!tpm_validate_command(chip, space, buf, bufsiz))
+-		return -EINVAL;
++	rc = tpm_validate_command(chip, space, buf, bufsiz);
++	if (rc == -EINVAL)
++		return rc;
++	/*
++	 * If the command is not implemented by the TPM, synthesize a
++	 * response with a TPM2_RC_COMMAND_CODE return for user-space.
++	 */
++	if (rc == -EOPNOTSUPP) {
++		header->length = cpu_to_be32(sizeof(*header));
++		header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
++		header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
++						  TSS2_RESMGR_TPM_RC_LAYER);
++		return bufsiz;
++	}
+ 
+ 	if (bufsiz > TPM_BUFSIZE)
+ 		bufsiz = TPM_BUFSIZE;
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index 528cffbd49d3..f6f56dfda6c7 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -93,12 +93,17 @@ enum tpm2_structures {
+ 	TPM2_ST_SESSIONS	= 0x8002,
+ };
+ 
++/* Indicates from what layer of the software stack the error comes from */
++#define TSS2_RC_LAYER_SHIFT	 16
++#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
++
+ enum tpm2_return_codes {
+ 	TPM2_RC_SUCCESS		= 0x0000,
+ 	TPM2_RC_HASH		= 0x0083, /* RC_FMT1 */
+ 	TPM2_RC_HANDLE		= 0x008B,
+ 	TPM2_RC_INITIALIZE	= 0x0100, /* RC_VER1 */
+ 	TPM2_RC_DISABLED	= 0x0120,
++	TPM2_RC_COMMAND_CODE    = 0x0143,
+ 	TPM2_RC_TESTING		= 0x090A, /* RC_WARN */
+ 	TPM2_RC_REFERENCE_H0	= 0x0910,
+ };
+diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
+index 4ed516cb7276..b49942b9fe50 100644
+--- a/drivers/clk/clk-divider.c
++++ b/drivers/clk/clk-divider.c
+@@ -118,12 +118,11 @@ static unsigned int _get_val(const struct clk_div_table *table,
+ unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+ 				  unsigned int val,
+ 				  const struct clk_div_table *table,
+-				  unsigned long flags)
++				  unsigned long flags, unsigned long width)
+ {
+-	struct clk_divider *divider = to_clk_divider(hw);
+ 	unsigned int div;
+ 
+-	div = _get_div(table, val, flags, divider->width);
++	div = _get_div(table, val, flags, width);
+ 	if (!div) {
+ 		WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
+ 			"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+@@ -145,7 +144,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
+ 	val &= div_mask(divider->width);
+ 
+ 	return divider_recalc_rate(hw, parent_rate, val, divider->table,
+-				   divider->flags);
++				   divider->flags, divider->width);
+ }
+ 
+ static bool _is_valid_table_div(const struct clk_div_table *table,
+diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c
+index a1c1f684ad58..9f46cf9dcc65 100644
+--- a/drivers/clk/hisilicon/clkdivider-hi6220.c
++++ b/drivers/clk/hisilicon/clkdivider-hi6220.c
+@@ -56,7 +56,7 @@ static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw,
+ 	val &= div_mask(dclk->width);
+ 
+ 	return divider_recalc_rate(hw, parent_rate, val, dclk->table,
+-				   CLK_DIVIDER_ROUND_CLOSEST);
++				   CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
+ }
+ 
+ static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
+index 44a5a535ca63..5144360e2c80 100644
+--- a/drivers/clk/meson/clk-mpll.c
++++ b/drivers/clk/meson/clk-mpll.c
+@@ -98,7 +98,7 @@ static void params_from_rate(unsigned long requested_rate,
+ 		*sdm = SDM_DEN - 1;
+ 	} else {
+ 		*n2 = div;
+-		*sdm = DIV_ROUND_UP(rem * SDM_DEN, requested_rate);
++		*sdm = DIV_ROUND_UP_ULL((u64)rem * SDM_DEN, requested_rate);
+ 	}
+ }
+ 
+diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
+index 7b359afd620e..a6438f50e6db 100644
+--- a/drivers/clk/nxp/clk-lpc32xx.c
++++ b/drivers/clk/nxp/clk-lpc32xx.c
+@@ -956,7 +956,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
+ 	val &= div_mask(divider->width);
+ 
+ 	return divider_recalc_rate(hw, parent_rate, val, divider->table,
+-				   divider->flags);
++				   divider->flags, divider->width);
+ }
+ 
+ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
+index 53484912301e..928fcc16ee27 100644
+--- a/drivers/clk/qcom/clk-regmap-divider.c
++++ b/drivers/clk/qcom/clk-regmap-divider.c
+@@ -59,7 +59,7 @@ static unsigned long div_recalc_rate(struct clk_hw *hw,
+ 	div &= BIT(divider->width) - 1;
+ 
+ 	return divider_recalc_rate(hw, parent_rate, div, NULL,
+-				   CLK_DIVIDER_ROUND_CLOSEST);
++				   CLK_DIVIDER_ROUND_CLOSEST, divider->width);
+ }
+ 
+ const struct clk_ops clk_regmap_div_ops = {
+diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+index 5cedcd0d8be8..aeafa7a4fff5 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
++++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+@@ -493,8 +493,8 @@ static SUNXI_CCU_MUX_WITH_GATE(tcon0_clk, "tcon0", tcon0_parents,
+ 				 0x118, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
+ 
+ static const char * const tcon1_parents[] = { "pll-video1" };
+-static SUNXI_CCU_MUX_WITH_GATE(tcon1_clk, "tcon1", tcon1_parents,
+-				 0x11c, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
++static SUNXI_CCU_M_WITH_MUX_GATE(tcon1_clk, "tcon1", tcon1_parents,
++				 0x11c, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT);
+ 
+ static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", 0x130, BIT(16), 0);
+ 
+diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
+index baa3cf96507b..302a18efd39f 100644
+--- a/drivers/clk/sunxi-ng/ccu_div.c
++++ b/drivers/clk/sunxi-ng/ccu_div.c
+@@ -71,7 +71,7 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
+ 						  parent_rate);
+ 
+ 	val = divider_recalc_rate(hw, parent_rate, val, cd->div.table,
+-				  cd->div.flags);
++				  cd->div.flags, cd->div.width);
+ 
+ 	if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ 		val /= cd->fixed_post_div;
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index da7fdb4b661a..37381238bf69 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -41,11 +41,9 @@
+ #define POWERNV_MAX_PSTATES	256
+ #define PMSR_PSAFE_ENABLE	(1UL << 30)
+ #define PMSR_SPR_EM_DISABLE	(1UL << 31)
+-#define PMSR_MAX(x)		((x >> 32) & 0xFF)
++#define MAX_PSTATE_SHIFT	32
+ #define LPSTATE_SHIFT		48
+ #define GPSTATE_SHIFT		56
+-#define GET_LPSTATE(x)		(((x) >> LPSTATE_SHIFT) & 0xFF)
+-#define GET_GPSTATE(x)		(((x) >> GPSTATE_SHIFT) & 0xFF)
+ 
+ #define MAX_RAMP_DOWN_TIME				5120
+ /*
+@@ -94,6 +92,7 @@ struct global_pstate_info {
+ };
+ 
+ static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
++u32 pstate_sign_prefix;
+ static bool rebooting, throttled, occ_reset;
+ 
+ static const char * const throttle_reason[] = {
+@@ -148,6 +147,20 @@ static struct powernv_pstate_info {
+ 	bool wof_enabled;
+ } powernv_pstate_info;
+ 
++static inline int extract_pstate(u64 pmsr_val, unsigned int shift)
++{
++	int ret = ((pmsr_val >> shift) & 0xFF);
++
++	if (!ret)
++		return ret;
++
++	return (pstate_sign_prefix | ret);
++}
++
++#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
++#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
++#define extract_max_pstate(x)  extract_pstate(x, MAX_PSTATE_SHIFT)
++
+ /* Use following macros for conversions between pstate_id and index */
+ static inline int idx_to_pstate(unsigned int i)
+ {
+@@ -278,6 +291,9 @@ static int init_powernv_pstates(void)
+ 
+ 	powernv_pstate_info.nr_pstates = nr_pstates;
+ 	pr_debug("NR PStates %d\n", nr_pstates);
++
++	pstate_sign_prefix = pstate_min & ~0xFF;
++
+ 	for (i = 0; i < nr_pstates; i++) {
+ 		u32 id = be32_to_cpu(pstate_ids[i]);
+ 		u32 freq = be32_to_cpu(pstate_freqs[i]);
+@@ -438,17 +454,10 @@ struct powernv_smp_call_data {
+ static void powernv_read_cpu_freq(void *arg)
+ {
+ 	unsigned long pmspr_val;
+-	s8 local_pstate_id;
+ 	struct powernv_smp_call_data *freq_data = arg;
+ 
+ 	pmspr_val = get_pmspr(SPRN_PMSR);
+-
+-	/*
+-	 * The local pstate id corresponds bits 48..55 in the PMSR.
+-	 * Note: Watch out for the sign!
+-	 */
+-	local_pstate_id = (pmspr_val >> 48) & 0xFF;
+-	freq_data->pstate_id = local_pstate_id;
++	freq_data->pstate_id = extract_local_pstate(pmspr_val);
+ 	freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
+ 
+ 	pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
+@@ -522,7 +531,7 @@ static void powernv_cpufreq_throttle_check(void *data)
+ 	chip = this_cpu_read(chip_info);
+ 
+ 	/* Check for Pmax Capping */
+-	pmsr_pmax = (s8)PMSR_MAX(pmsr);
++	pmsr_pmax = extract_max_pstate(pmsr);
+ 	pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
+ 	if (pmsr_pmax_idx != powernv_pstate_info.max) {
+ 		if (chip->throttled)
+@@ -645,8 +654,8 @@ void gpstate_timer_handler(struct timer_list *t)
+ 	 * value. Hence, read from PMCR to get correct data.
+ 	 */
+ 	val = get_pmspr(SPRN_PMCR);
+-	freq_data.gpstate_id = (s8)GET_GPSTATE(val);
+-	freq_data.pstate_id = (s8)GET_LPSTATE(val);
++	freq_data.gpstate_id = extract_global_pstate(val);
++	freq_data.pstate_id = extract_local_pstate(val);
+ 	if (freq_data.gpstate_id  == freq_data.pstate_id) {
+ 		reset_gpstates(policy);
+ 		spin_unlock(&gpstates->gpstate_lock);
+diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
+index eeaf27859d80..ea83d0bff0e9 100644
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -256,10 +256,6 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
+ 	if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
+ 		return true;
+ 
+-	/* CCM - fix CBC MAC mismatch in special case */
+-	if (is_ccm && decrypt && !req->assoclen)
+-		return true;
+-
+ 	return false;
+ }
+ 
+@@ -330,7 +326,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
+ 	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ 	sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
+ 
+-	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
++	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ 				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ 				 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ 				 SA_CIPHER_ALG_AES,
+diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
+index c44954e274bc..33256b4a302e 100644
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -570,15 +570,14 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
+ 				struct pd_uinfo *pd_uinfo,
+ 				struct ce_pd *pd)
+ {
+-	struct aead_request *aead_req;
+-	struct crypto4xx_ctx *ctx;
++	struct aead_request *aead_req = container_of(pd_uinfo->async_req,
++		struct aead_request, base);
+ 	struct scatterlist *dst = pd_uinfo->dest_va;
++	size_t cp_len = crypto_aead_authsize(
++		crypto_aead_reqtfm(aead_req));
++	u32 icv[cp_len];
+ 	int err = 0;
+ 
+-	aead_req = container_of(pd_uinfo->async_req, struct aead_request,
+-				base);
+-	ctx  = crypto_tfm_ctx(aead_req->base.tfm);
+-
+ 	if (pd_uinfo->using_sd) {
+ 		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ 					  pd->pd_ctl_len.bf.pkt_len,
+@@ -590,38 +589,39 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
+ 
+ 	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
+ 		/* append icv at the end */
+-		size_t cp_len = crypto_aead_authsize(
+-			crypto_aead_reqtfm(aead_req));
+-		u32 icv[cp_len];
+-
+ 		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
+ 					   cp_len);
+ 
+ 		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
+ 					 cp_len, 1);
++	} else {
++		/* check icv at the end */
++		scatterwalk_map_and_copy(icv, aead_req->src,
++			aead_req->assoclen + aead_req->cryptlen -
++			cp_len, cp_len, 0);
++
++		crypto4xx_memcpy_from_le32(icv, icv, cp_len);
++
++		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
++			err = -EBADMSG;
+ 	}
+ 
+ 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+ 
+ 	if (pd->pd_ctl.bf.status & 0xff) {
+-		if (pd->pd_ctl.bf.status & 0x1) {
+-			/* authentication error */
+-			err = -EBADMSG;
+-		} else {
+-			if (!__ratelimit(&dev->aead_ratelimit)) {
+-				if (pd->pd_ctl.bf.status & 2)
+-					pr_err("pad fail error\n");
+-				if (pd->pd_ctl.bf.status & 4)
+-					pr_err("seqnum fail\n");
+-				if (pd->pd_ctl.bf.status & 8)
+-					pr_err("error _notify\n");
+-				pr_err("aead return err status = 0x%02x\n",
+-					pd->pd_ctl.bf.status & 0xff);
+-				pr_err("pd pad_ctl = 0x%08x\n",
+-					pd->pd_ctl.bf.pd_pad_ctl);
+-			}
+-			err = -EINVAL;
++		if (!__ratelimit(&dev->aead_ratelimit)) {
++			if (pd->pd_ctl.bf.status & 2)
++				pr_err("pad fail error\n");
++			if (pd->pd_ctl.bf.status & 4)
++				pr_err("seqnum fail\n");
++			if (pd->pd_ctl.bf.status & 8)
++				pr_err("error _notify\n");
++			pr_err("aead return err status = 0x%02x\n",
++				pd->pd_ctl.bf.status & 0xff);
++			pr_err("pd pad_ctl = 0x%08x\n",
++				pd->pd_ctl.bf.pd_pad_ctl);
+ 		}
++		err = -EINVAL;
+ 	}
+ 
+ 	if (pd_uinfo->state & PD_ENTRY_BUSY)
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 99c4021fc33b..fe2af6aa88fc 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -996,7 +996,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
+ 	if (df->governor == governor) {
+ 		ret = 0;
+ 		goto out;
+-	} else if (df->governor->immutable || governor->immutable) {
++	} else if ((df->governor && df->governor->immutable) ||
++					governor->immutable) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
+index ec5d695bbb72..3c68bb525d5d 100644
+--- a/drivers/edac/mv64x60_edac.c
++++ b/drivers/edac/mv64x60_edac.c
+@@ -758,7 +758,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
+ 		/* Non-ECC RAM? */
+ 		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+ 		res = -ENODEV;
+-		goto err2;
++		goto err;
+ 	}
+ 
+ 	edac_dbg(3, "init mci\n");
+diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
+index b5adb79a631a..d16e9d4a129b 100644
+--- a/drivers/gpio/gpio-thunderx.c
++++ b/drivers/gpio/gpio-thunderx.c
+@@ -553,8 +553,10 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
+ 	txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
+ 						   0, 0, of_node_to_fwnode(dev->of_node),
+ 						   &thunderx_gpio_irqd_ops, txgpio);
+-	if (!txgpio->irqd)
++	if (!txgpio->irqd) {
++		err = -ENOMEM;
+ 		goto out;
++	}
+ 
+ 	/* Push on irq_data and the domain for each line. */
+ 	for (i = 0; i < ngpio; i++) {
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 7a5cf5b08c54..ec6e922123cb 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -2468,7 +2468,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
+  */
+ int gpiod_direction_output(struct gpio_desc *desc, int value)
+ {
+-	struct gpio_chip *gc = desc->gdev->chip;
++	struct gpio_chip *gc;
+ 	int ret;
+ 
+ 	VALIDATE_DESC(desc);
+@@ -2485,6 +2485,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
+ 		return -EIO;
+ 	}
+ 
++	gc = desc->gdev->chip;
+ 	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+ 		/* First see if we can enable open drain in hardware */
+ 		ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+@@ -3646,7 +3647,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
+ 		return desc;
+ 	}
+ 
+-	status = gpiod_request(desc, con_id);
++	/* If a connection label was passed use that, else use the device name as label */
++	status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
+ 	if (status < 0)
+ 		return ERR_PTR(status);
+ 
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+index 7f5359a97ef2..885d9d802670 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+@@ -648,6 +648,12 @@ int smu7_init(struct pp_hwmgr *hwmgr)
+ 
+ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
+ {
++	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
++
++	smu_free_memory(hwmgr->device, (void *) smu_data->header_buffer.handle);
++	if (!cgs_is_virtualization_enabled(hwmgr->device))
++		smu_free_memory(hwmgr->device, (void *) smu_data->smu_buffer.handle);
++
+ 	kfree(hwmgr->smu_backend);
+ 	hwmgr->smu_backend = NULL;
+ 	cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index fd23023df7c1..3a0728a212fb 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1107,6 +1107,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+ }
+ 
+ static const u8 cnp_ddc_pin_map[] = {
++	[0] = 0, /* N/A */
+ 	[DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
+ 	[DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
+ 	[DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
+@@ -1115,9 +1116,14 @@ static const u8 cnp_ddc_pin_map[] = {
+ 
+ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
+ {
+-	if (HAS_PCH_CNP(dev_priv) &&
+-	    vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
+-		return cnp_ddc_pin_map[vbt_pin];
++	if (HAS_PCH_CNP(dev_priv)) {
++		if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
++			return cnp_ddc_pin_map[vbt_pin];
++		} else {
++			DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
++			return 0;
++		}
++	}
+ 
+ 	return vbt_pin;
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index 05022ea2a007..bfb3d689f47d 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -125,11 +125,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
+ {
+ 	struct msm_drm_private *priv = dev->dev_private;
+ 	struct platform_device *pdev = priv->gpu_pdev;
+-	struct msm_gpu *gpu = platform_get_drvdata(priv->gpu_pdev);
++	struct msm_gpu *gpu = NULL;
+ 	int ret;
+ 
++	if (pdev)
++		gpu = platform_get_drvdata(pdev);
++
+ 	if (!gpu) {
+-		dev_err(dev->dev, "no adreno device\n");
++		dev_err_once(dev->dev, "no GPU device was found\n");
+ 		return NULL;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
+index fe15aa64086f..71fe60e5f01f 100644
+--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
+@@ -698,7 +698,7 @@ static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
+ 	val &= div_mask(width);
+ 
+ 	return divider_recalc_rate(hw, parent_rate, val, NULL,
+-				   postdiv->flags);
++				   postdiv->flags, width);
+ }
+ 
+ static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
+diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
+index 62e38fa8cda2..e362a932fe8c 100644
+--- a/drivers/hwmon/ina2xx.c
++++ b/drivers/hwmon/ina2xx.c
+@@ -95,18 +95,20 @@ enum ina2xx_ids { ina219, ina226 };
+ 
+ struct ina2xx_config {
+ 	u16 config_default;
+-	int calibration_factor;
++	int calibration_value;
+ 	int registers;
+ 	int shunt_div;
+ 	int bus_voltage_shift;
+ 	int bus_voltage_lsb;	/* uV */
+-	int power_lsb;		/* uW */
++	int power_lsb_factor;
+ };
+ 
+ struct ina2xx_data {
+ 	const struct ina2xx_config *config;
+ 
+ 	long rshunt;
++	long current_lsb_uA;
++	long power_lsb_uW;
+ 	struct mutex config_lock;
+ 	struct regmap *regmap;
+ 
+@@ -116,21 +118,21 @@ struct ina2xx_data {
+ static const struct ina2xx_config ina2xx_config[] = {
+ 	[ina219] = {
+ 		.config_default = INA219_CONFIG_DEFAULT,
+-		.calibration_factor = 40960000,
++		.calibration_value = 4096,
+ 		.registers = INA219_REGISTERS,
+ 		.shunt_div = 100,
+ 		.bus_voltage_shift = 3,
+ 		.bus_voltage_lsb = 4000,
+-		.power_lsb = 20000,
++		.power_lsb_factor = 20,
+ 	},
+ 	[ina226] = {
+ 		.config_default = INA226_CONFIG_DEFAULT,
+-		.calibration_factor = 5120000,
++		.calibration_value = 2048,
+ 		.registers = INA226_REGISTERS,
+ 		.shunt_div = 400,
+ 		.bus_voltage_shift = 0,
+ 		.bus_voltage_lsb = 1250,
+-		.power_lsb = 25000,
++		.power_lsb_factor = 25,
+ 	},
+ };
+ 
+@@ -169,12 +171,16 @@ static u16 ina226_interval_to_reg(int interval)
+ 	return INA226_SHIFT_AVG(avg_bits);
+ }
+ 
++/*
++ * Calibration register is set to the best value, which eliminates
++ * truncation errors on calculating current register in hardware.
++ * According to datasheet (eq. 3) the best values are 2048 for
++ * ina226 and 4096 for ina219. They are hardcoded as calibration_value.
++ */
+ static int ina2xx_calibrate(struct ina2xx_data *data)
+ {
+-	u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
+-				    data->rshunt);
+-
+-	return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
++	return regmap_write(data->regmap, INA2XX_CALIBRATION,
++			    data->config->calibration_value);
+ }
+ 
+ /*
+@@ -187,10 +193,6 @@ static int ina2xx_init(struct ina2xx_data *data)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	/*
+-	 * Set current LSB to 1mA, shunt is in uOhms
+-	 * (equation 13 in datasheet).
+-	 */
+ 	return ina2xx_calibrate(data);
+ }
+ 
+@@ -268,15 +270,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
+ 		val = DIV_ROUND_CLOSEST(val, 1000);
+ 		break;
+ 	case INA2XX_POWER:
+-		val = regval * data->config->power_lsb;
++		val = regval * data->power_lsb_uW;
+ 		break;
+ 	case INA2XX_CURRENT:
+-		/* signed register, LSB=1mA (selected), in mA */
+-		val = (s16)regval;
++		/* signed register, result in mA */
++		val = regval * data->current_lsb_uA;
++		val = DIV_ROUND_CLOSEST(val, 1000);
+ 		break;
+ 	case INA2XX_CALIBRATION:
+-		val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
+-					regval);
++		val = regval;
+ 		break;
+ 	default:
+ 		/* programmer goofed */
+@@ -304,9 +306,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
+ 			ina2xx_get_value(data, attr->index, regval));
+ }
+ 
+-static ssize_t ina2xx_set_shunt(struct device *dev,
+-				struct device_attribute *da,
+-				const char *buf, size_t count)
++/*
++ * To keep the calibration register value fixed, the product of
++ * current_lsb and shunt_resistor must also stay fixed, equal to
++ * shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 to preserve
++ * the scale.
++ */
++static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
++{
++	unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
++						  data->config->shunt_div);
++	if (val <= 0 || val > dividend)
++		return -EINVAL;
++
++	mutex_lock(&data->config_lock);
++	data->rshunt = val;
++	data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
++	data->power_lsb_uW = data->config->power_lsb_factor *
++			     data->current_lsb_uA;
++	mutex_unlock(&data->config_lock);
++
++	return 0;
++}
++
++static ssize_t ina2xx_store_shunt(struct device *dev,
++				  struct device_attribute *da,
++				  const char *buf, size_t count)
+ {
+ 	unsigned long val;
+ 	int status;
+@@ -316,18 +341,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
+ 	if (status < 0)
+ 		return status;
+ 
+-	if (val == 0 ||
+-	    /* Values greater than the calibration factor make no sense. */
+-	    val > data->config->calibration_factor)
+-		return -EINVAL;
+-
+-	mutex_lock(&data->config_lock);
+-	data->rshunt = val;
+-	status = ina2xx_calibrate(data);
+-	mutex_unlock(&data->config_lock);
++	status = ina2xx_set_shunt(data, val);
+ 	if (status < 0)
+ 		return status;
+-
+ 	return count;
+ }
+ 
+@@ -387,7 +403,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
+ 
+ /* shunt resistance */
+ static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
+-			  ina2xx_show_value, ina2xx_set_shunt,
++			  ina2xx_show_value, ina2xx_store_shunt,
+ 			  INA2XX_CALIBRATION);
+ 
+ /* update interval (ina226 only) */
+@@ -448,10 +464,7 @@ static int ina2xx_probe(struct i2c_client *client,
+ 			val = INA2XX_RSHUNT_DEFAULT;
+ 	}
+ 
+-	if (val <= 0 || val > data->config->calibration_factor)
+-		return -ENODEV;
+-
+-	data->rshunt = val;
++	ina2xx_set_shunt(data, val);
+ 
+ 	ina2xx_regmap_config.max_register = data->config->registers;
+ 
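
The rework above fixes the calibration register at the datasheet optimum and moves all scaling into software: ina2xx_set_shunt() derives a current LSB from the shunt value so the raw register reading is multiplied out in the driver. A minimal userspace sketch of that arithmetic, assuming an ina226 with a hypothetical 10 mOhm shunt (the helper mirrors DIV_ROUND_CLOSEST; nothing here is kernel API):

#include <stdio.h>

/* Userspace stand-in for the kernel's DIV_ROUND_CLOSEST() on positives. */
static long div_round_closest(long x, long d)
{
	return (x + d / 2) / d;
}

int main(void)
{
	const long shunt_div = 400;		/* ina226 entry above */
	const long power_lsb_factor = 25;	/* ina226 entry above */
	long rshunt = 10000;			/* assumed 10 mOhm shunt, in uOhm */

	/* Same arithmetic as ina2xx_set_shunt() with the fixed calibration. */
	long dividend = div_round_closest(1000000000L, shunt_div);
	long current_lsb_uA = div_round_closest(dividend, rshunt);
	long power_lsb_uW = power_lsb_factor * current_lsb_uA;

	printf("current LSB = %ld uA, power LSB = %ld uW\n",
	       current_lsb_uA, power_lsb_uW);
	return 0;
}

For this assumed shunt the program prints a 250 uA current LSB and a 6250 uW power LSB, matching the ina226 datasheet relationship between the 2048 calibration value and a 10 mOhm resistor.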
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 67aece2f5d8d..2fb7f2586353 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -4449,6 +4449,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
+ 			id_stats->qp_type	= id->qp_type;
+ 
+ 			i_id++;
++			nlmsg_end(skb, nlh);
+ 		}
+ 
+ 		cb->args[1] = 0;
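
The one-line cma fix above calls nlmsg_end() once per RDMA CM ID, so each netlink message's length header is finalized before the next message is appended; without it, a dump effectively stops at the first entry. A toy length-prefixed record stream showing the same begin/fill/end discipline (illustrative only, not the netlink API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t buf[256];
static size_t pos;

static size_t rec_begin(void)
{
	size_t hdr = pos;
	uint32_t zero = 0;

	memcpy(buf + pos, &zero, 4);	/* length placeholder */
	pos += 4;
	return hdr;
}

static void rec_put(const void *data, size_t len)
{
	memcpy(buf + pos, data, len);
	pos += len;
}

static void rec_end(size_t hdr)
{
	uint32_t len = (uint32_t)(pos - hdr);

	memcpy(buf + hdr, &len, 4);	/* the nlmsg_end() analogue */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		size_t hdr = rec_begin();

		rec_put(&i, sizeof(i));
		rec_end(hdr);		/* omitting this truncates the dump */
	}
	printf("stream uses %zu bytes\n", pos);
	return 0;
}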
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 722235bed075..d6fa38f8604f 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -914,13 +914,14 @@ static ssize_t ucma_query_path(struct ucma_context *ctx,
+ 
+ 		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
+ 					   IB_PATH_BIDIRECTIONAL;
+-		if (rec->rec_type == SA_PATH_REC_TYPE_IB) {
+-			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
+-		} else {
++		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
+ 			struct sa_path_rec ib;
+ 
+ 			sa_convert_path_opa_to_ib(&ib, rec);
+ 			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
++
++		} else {
++			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
+ 		}
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 8e18445714a9..0d925b3d3d47 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2463,11 +2463,14 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 		roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
+ 	}
+ 
+-	roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+-		       V2_QPC_BYTE_140_RR_MAX_S,
+-		       ilog2((unsigned int)attr->max_dest_rd_atomic));
+-	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+-		       V2_QPC_BYTE_140_RR_MAX_S, 0);
++	if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
++	     attr->max_dest_rd_atomic) {
++		roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
++			       V2_QPC_BYTE_140_RR_MAX_S,
++			       fls(attr->max_dest_rd_atomic - 1));
++		roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
++			       V2_QPC_BYTE_140_RR_MAX_S, 0);
++	}
+ 
+ 	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ 		       V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
+@@ -2557,12 +2560,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
+ 		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
+ 
+-	roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+-		       V2_QPC_BYTE_208_SR_MAX_S,
+-		       ilog2((unsigned int)attr->max_rd_atomic));
+-	roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+-		       V2_QPC_BYTE_208_SR_MAX_S, 0);
+-
+ 	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ 		       V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
+ 	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+@@ -2766,6 +2763,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
+ 	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+ 		       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+ 
++	if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
++		roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
++			       V2_QPC_BYTE_208_SR_MAX_S,
++			       fls(attr->max_rd_atomic - 1));
++		roce_set_field(qpc_mask->byte_208_irrl,
++			       V2_QPC_BYTE_208_SR_MAX_M,
++			       V2_QPC_BYTE_208_SR_MAX_S, 0);
++	}
+ 	return 0;
+ }
+ 
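
Both hns_roce hunks above guard against a zero operand and switch from ilog2() to fls(value - 1), which gives the ceiling rather than the floor of log2 -- the QP context needs enough bits to cover the requested depth, not the largest power of two below it. A quick userspace check of that identity, using __builtin_clz() as a stand-in for the kernel's fls():

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): index of the highest set
 * bit, 1-based; fls(0) == 0.
 */
static int fls_(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	/* fls(x - 1) == ceil(log2(x)) for x >= 1, i.e. enough bits to
	 * hold x outstanding requests.
	 */
	for (unsigned int x = 1; x <= 8; x++)
		printf("x=%u  fls(x-1)=%d\n", x, fls_(x - 1));
	return 0;
}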
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 77870f9e1736..726d7143475f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -125,7 +125,8 @@ static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
+  * @conn_ird: connection IRD
+  * @conn_ord: connection ORD
+  */
+-static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
++static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird,
++				 u32 conn_ord)
+ {
+ 	if (conn_ird > I40IW_MAX_IRD_SIZE)
+ 		conn_ird = I40IW_MAX_IRD_SIZE;
+@@ -3849,7 +3850,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	}
+ 
+ 	cm_node->apbvt_set = true;
+-	i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
++	i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
+ 	if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
+ 	    !cm_node->ord_size)
+ 		cm_node->ord_size = 1;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index da9821a10e0d..1b9ca09d3cee 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -3928,8 +3928,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
+ 		hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
+ 		hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
+ 
+-		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
+-		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
++		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
++			roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
++		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
++			roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
+ 		hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
+ 			hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
+ 		hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
+index 029083cb81d5..4b65e4140bd7 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
+@@ -97,6 +97,7 @@
+ #define RDMA_OPCODE_MASK        0x0f
+ #define RDMA_READ_REQ_OPCODE    1
+ #define Q2_BAD_FRAME_OFFSET     72
++#define Q2_FPSN_OFFSET          64
+ #define CQE_MAJOR_DRV           0x8000
+ 
+ #define I40IW_TERM_SENT 0x01
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+index 796a815b53fd..f64b6700f43f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+@@ -1378,7 +1378,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
+ 	u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
+ 	u32 rcv_wnd = hw_host_ctx[23];
+ 	/* first partial seq # in q2 */
+-	u32 fps = qp->q2_buf[16];
++	u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
+ 	struct list_head *rxlist = &pfpdu->rxlist;
+ 	struct list_head *plist;
+ 
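
The Q2_FPSN_OFFSET fix above matters because q2_buf is addressed as bytes: indexing it with [16] read a single byte at offset 16, while the first partial sequence number is a 32-bit value at byte offset 64. A standalone illustration of the pitfall (hypothetical buffer layout; memcpy replaces the driver's cast to keep the example alignment-safe):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define Q2_FPSN_OFFSET 64	/* byte offset, as in the patch above */

int main(void)
{
	uint8_t q2_buf[128];
	uint32_t fpsn = 0x12345678;
	uint32_t wrong, right;

	memset(q2_buf, 0, sizeof(q2_buf));
	memcpy(q2_buf + Q2_FPSN_OFFSET, &fpsn, sizeof(fpsn));

	/* Old, broken read: one byte at offset 16 -- wrong place, wrong width. */
	wrong = q2_buf[16];
	/* Fixed read: a full 32-bit load from byte offset 64. */
	memcpy(&right, q2_buf + Q2_FPSN_OFFSET, sizeof(right));

	printf("wrong=0x%x right=0x%x\n", wrong, right);
	return 0;
}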
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 262c1aa2e028..f8b06102cc5d 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -682,7 +682,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 						MLX5_RX_HASH_SRC_PORT_TCP |
+ 						MLX5_RX_HASH_DST_PORT_TCP |
+ 						MLX5_RX_HASH_SRC_PORT_UDP |
+-						MLX5_RX_HASH_DST_PORT_UDP;
++						MLX5_RX_HASH_DST_PORT_UDP |
++						MLX5_RX_HASH_INNER;
+ 			resp.response_length += sizeof(resp.rss_caps);
+ 		}
+ 	} else {
+diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
+index 97d71e49c092..88fa4d44ab5f 100644
+--- a/drivers/infiniband/sw/rdmavt/cq.c
++++ b/drivers/infiniband/sw/rdmavt/cq.c
+@@ -198,7 +198,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	/* Allocate the completion queue structure. */
+-	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
++	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
+ 	if (!cq)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -214,7 +214,9 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
+ 		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
+ 	else
+ 		sz += sizeof(struct ib_wc) * (entries + 1);
+-	wc = vmalloc_user(sz);
++	wc = udata ?
++		vmalloc_user(sz) :
++		vzalloc_node(sz, rdi->dparms.node);
+ 	if (!wc) {
+ 		ret = ERR_PTR(-ENOMEM);
+ 		goto bail_cq;
+@@ -369,7 +371,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+ 		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
+ 	else
+ 		sz += sizeof(struct ib_wc) * (cqe + 1);
+-	wc = vmalloc_user(sz);
++	wc = udata ?
++		vmalloc_user(sz) :
++		vzalloc_node(sz, rdi->dparms.node);
+ 	if (!wc)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 71ea9e26666c..c075d6850ed3 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -766,12 +766,14 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
+ 	skb_orphan(skb);
+ 	skb_dst_drop(skb);
+ 
+-	if (netif_queue_stopped(dev))
+-		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+-				     IB_CQ_REPORT_MISSED_EVENTS)) {
++	if (netif_queue_stopped(dev)) {
++		rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
++				      IB_CQ_REPORT_MISSED_EVENTS);
++		if (unlikely(rc < 0))
+ 			ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
++		else if (rc)
+ 			napi_schedule(&priv->send_napi);
+-		}
++	}
+ 
+ 	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
+ 	if (unlikely(rc)) {
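
The rewritten block above distinguishes the three outcomes of ib_req_notify_cq() with IB_CQ_REPORT_MISSED_EVENTS -- negative means the request failed, positive means completions were missed and polling must be rescheduled, zero means the CQ is cleanly armed -- where the old code warned and rescheduled on any non-zero value. A minimal sketch of that dispatch, with a fake notify function standing in for the verb:

#include <stdio.h>

/* Hypothetical stand-in: < 0 error, 0 armed cleanly, > 0 missed events. */
static int fake_req_notify_cq(int scenario)
{
	return scenario;
}

static void notify_and_dispatch(int scenario)
{
	int rc = fake_req_notify_cq(scenario);

	if (rc < 0)
		printf("rc=%d: warn, request notify failed\n", rc);
	else if (rc > 0)
		printf("rc=%d: missed events, schedule NAPI poll\n", rc);
	else
		printf("rc=0: CQ armed, nothing missed\n");
}

int main(void)
{
	notify_and_dispatch(-1);
	notify_and_dispatch(0);
	notify_and_dispatch(1);
	return 0;
}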
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index e6151a29c412..28658080e761 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -644,7 +644,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
+ 
+ 	if (netif_queue_stopped(dev))
+ 		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+-				     IB_CQ_REPORT_MISSED_EVENTS))
++				     IB_CQ_REPORT_MISSED_EVENTS) < 0)
+ 			ipoib_warn(priv, "request notify on send CQ failed\n");
+ 
+ 	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index 69d0b8cbc71f..ecec8eb17f28 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -878,8 +878,10 @@ static int __maybe_unused goodix_suspend(struct device *dev)
+ 	int error;
+ 
+ 	/* We need gpio pins to suspend/resume */
+-	if (!ts->gpiod_int || !ts->gpiod_rst)
++	if (!ts->gpiod_int || !ts->gpiod_rst) {
++		disable_irq(client->irq);
+ 		return 0;
++	}
+ 
+ 	wait_for_completion(&ts->firmware_loading_complete);
+ 
+@@ -919,8 +921,10 @@ static int __maybe_unused goodix_resume(struct device *dev)
+ 	struct goodix_ts_data *ts = i2c_get_clientdata(client);
+ 	int error;
+ 
+-	if (!ts->gpiod_int || !ts->gpiod_rst)
++	if (!ts->gpiod_int || !ts->gpiod_rst) {
++		enable_irq(client->irq);
+ 		return 0;
++	}
+ 
+ 	/*
+ 	 * Exit sleep mode by outputting HIGH level to INT pin
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 980ae8e7df30..45ced9e48c5d 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -1331,6 +1331,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
+ 	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
+ 	void __iomem *redist_base;
+ 
++	/* A GICC entry without ACPI_MADT_ENABLED set is not usable, so skip it */
++	if (!(gicc->flags & ACPI_MADT_ENABLED))
++		return 0;
++
+ 	redist_base = ioremap(gicc->gicr_base_address, size);
+ 	if (!redist_base)
+ 		return -ENOMEM;
+@@ -1380,6 +1384,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
+ 	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
+ 		return 0;
+ 
++	/*
++	 * It is perfectly valid for firmware to pass a disabled GICC entry;
++	 * the driver should not treat that as an error. Skip the entry
++	 * instead of failing the probe.
++	 */
++	if (!(gicc->flags & ACPI_MADT_ENABLED))
++		return 0;
++
+ 	return -ENODEV;
+ }
+ 
+diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
+index cf6d0c455518..e66ef4373b1e 100644
+--- a/drivers/irqchip/irq-ompic.c
++++ b/drivers/irqchip/irq-ompic.c
+@@ -171,9 +171,9 @@ static int __init ompic_of_init(struct device_node *node,
+ 
+ 	/* Setup the device */
+ 	ompic_base = ioremap(res.start, resource_size(&res));
+-	if (IS_ERR(ompic_base)) {
++	if (!ompic_base) {
+ 		pr_err("ompic: unable to map registers");
+-		return PTR_ERR(ompic_base);
++		return -ENOMEM;
+ 	}
+ 
+ 	irq = irq_of_parse_and_map(node, 0);
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index a0cc1bc6d884..6cc6c0f9c3a9 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -525,15 +525,21 @@ struct open_bucket {
+ 
+ /*
+  * We keep multiple buckets open for writes, and try to segregate different
+- * write streams for better cache utilization: first we look for a bucket where
+- * the last write to it was sequential with the current write, and failing that
+- * we look for a bucket that was last used by the same task.
++ * write streams for better cache utilization: first we try to segregate
++ * flash-only volume write streams from cached-device write streams, then
++ * we look for a bucket where the last write to it was sequential with the
++ * current write, and failing that we look for a bucket that was last used
++ * by the same task.
+  *
+  * The idea is that if you've got multiple tasks pulling data into the cache at the
+  * same time, you'll get better cache utilization if you try to segregate their
+  * data and preserve locality.
+  *
+- * For example, say you've starting Firefox at the same time you're copying a
++ * For example, dirty sectors of a flash-only volume are not reclaimable; if
++ * they share a bucket with dirty sectors of a cached device, that bucket
++ * stays marked dirty and won't be reclaimed, even though the cached device's
++ * dirty data has already been written back to the backing device.
++ *
++ * And say you're starting Firefox at the same time you're copying a
+  * bunch of files. Firefox will likely end up being fairly hot and stay in the
+  * cache awhile, but the data you copied might not be; if you wrote all that
+  * data to the same buckets it'd get invalidated at the same time.
+@@ -550,7 +556,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ 	struct open_bucket *ret, *ret_task = NULL;
+ 
+ 	list_for_each_entry_reverse(ret, &c->data_buckets, list)
+-		if (!bkey_cmp(&ret->key, search))
++		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
++		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
++			continue;
++		else if (!bkey_cmp(&ret->key, search))
+ 			goto found;
+ 		else if (ret->last_write_point == write_point)
+ 			ret_task = ret;
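
The pick_data_bucket() change above adds a hard first filter: an open bucket is considered only if its flash-only flag matches that of the write, so flash-only and cached-device data can never share a bucket; the older preferences (same key, then same task) apply only within a class. A compact sketch of that selection order with simplified types, not bcache's:

#include <stdbool.h>
#include <stdio.h>

/* Toy open bucket: which stream class it serves, its last key, and the
 * task that last wrote it. Purely illustrative.
 */
struct bucket {
	bool flash_only;
	int last_key;
	long task;
};

static struct bucket *pick(struct bucket *b, int n, bool flash_only,
			   int key, long task)
{
	struct bucket *same_task = NULL;

	for (int i = 0; i < n; i++) {
		if (b[i].flash_only != flash_only)
			continue;		/* never mix the two classes */
		if (b[i].last_key == key)
			return &b[i];		/* sequential with last write */
		if (b[i].task == task)
			same_task = &b[i];	/* fall back to same task */
	}
	return same_task;			/* NULL: open a fresh bucket */
}

int main(void)
{
	struct bucket open_buckets[] = {
		{ .flash_only = true,  .last_key = 10, .task = 1 },
		{ .flash_only = false, .last_key = 10, .task = 1 },
	};

	/* A cached-device write for key 10 must land in bucket 1, not 0. */
	printf("picked bucket %ld\n",
	       (long)(pick(open_buckets, 2, false, 10, 1) - open_buckets));
	return 0;
}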
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index 643c3021624f..d1faaba6b93f 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -576,6 +576,7 @@ static void cache_lookup(struct closure *cl)
+ {
+ 	struct search *s = container_of(cl, struct search, iop.cl);
+ 	struct bio *bio = &s->bio.bio;
++	struct cached_dev *dc;
+ 	int ret;
+ 
+ 	bch_btree_op_init(&s->op, -1);
+@@ -588,6 +589,27 @@ static void cache_lookup(struct closure *cl)
+ 		return;
+ 	}
+ 
++	/*
++	 * We might hit an error when searching the btree; in that case ret
++	 * is negative, and we must not recover data from the backing device
++	 * (when the cache device is dirty) because we don't know whether all
++	 * the bkeys the read request covers are clean.
++	 *
++	 * Note that when this happens, s->iop.status still holds its initial
++	 * value from before s->bio.bio was submitted.
++	 */
++	if (ret < 0) {
++		BUG_ON(ret == -EINTR);
++		if (s->d && s->d->c &&
++				!UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
++			dc = container_of(s->d, struct cached_dev, disk);
++			if (dc && atomic_read(&dc->has_dirty))
++				s->recoverable = false;
++		}
++		if (!s->iop.status)
++			s->iop.status = BLK_STS_IOERR;
++	}
++
+ 	closure_return(cl);
+ }
+ 
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 14bdaf1cef2c..47785eb22aab 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -906,6 +906,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
+ 
+ 	mutex_lock(&bch_register_lock);
+ 
++	cancel_delayed_work_sync(&dc->writeback_rate_update);
++	if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
++		kthread_stop(dc->writeback_thread);
++		dc->writeback_thread = NULL;
++	}
++
+ 	memset(&dc->sb.set_uuid, 0, 16);
+ 	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
+ 
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index a8589d96ef72..2bacadf50247 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -332,6 +332,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
+ 	struct vb2_buffer *vb;
+ 	int ret;
+ 
++	/* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */
++	num_buffers = min_t(unsigned int, num_buffers,
++			    VB2_MAX_FRAME - q->num_buffers);
++
+ 	for (buffer = 0; buffer < num_buffers; ++buffer) {
+ 		/* Allocate videobuf buffer structures */
+ 		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index bf93e8b0b191..b8fa17a759dd 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -805,6 +805,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
+ 		slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ 		break;
+ 	case INTEL_MRFLD_SDIO:
++		/* Advertise 2.0v for compatibility with the SDIO card's OCR */
++		slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
+ 		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
+ 					 MMC_CAP_POWER_OFF_CARD;
+ 		break;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index d24306b2b839..3a5f305fd442 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1470,6 +1470,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ 	if (mode != MMC_POWER_OFF) {
+ 		switch (1 << vdd) {
+ 		case MMC_VDD_165_195:
++		/*
++		 * Without a regulator, SDHCI does not support 2.0v,
++		 * so we only get here if the driver deliberately
++		 * added the 2.0v range to ocr_avail. Map it to 1.8v
++		 * for the purpose of turning on the power.
++		 */
++		case MMC_VDD_20_21:
+ 			pwr = SDHCI_POWER_180;
+ 			break;
+ 		case MMC_VDD_29_30:
+diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
+index 1cb3f7758fb6..766b2c385682 100644
+--- a/drivers/mtd/tests/oobtest.c
++++ b/drivers/mtd/tests/oobtest.c
+@@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum)
+ 		ops.datbuf    = NULL;
+ 		ops.oobbuf    = readbuf;
+ 		err = mtd_read_oob(mtd, addr, &ops);
++		if (mtd_is_bitflip(err))
++			err = 0;
++
+ 		if (err || ops.oobretlen != use_len) {
+ 			pr_err("error: readoob failed at %#llx\n",
+ 			       (long long)addr);
+@@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum)
+ 			ops.datbuf    = NULL;
+ 			ops.oobbuf    = readbuf;
+ 			err = mtd_read_oob(mtd, addr, &ops);
++			if (mtd_is_bitflip(err))
++				err = 0;
++
+ 			if (err || ops.oobretlen != mtd->oobavail) {
+ 				pr_err("error: readoob failed at %#llx\n",
+ 						(long long)addr);
+@@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
+ 
+ 	/* read entire block's OOB at one go */
+ 	err = mtd_read_oob(mtd, addr, &ops);
++	if (mtd_is_bitflip(err))
++		err = 0;
++
+ 	if (err || ops.oobretlen != len) {
+ 		pr_err("error: readoob failed at %#llx\n",
+ 		       (long long)addr);
+@@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void)
+ 	pr_info("attempting to start read past end of OOB\n");
+ 	pr_info("an error is expected...\n");
+ 	err = mtd_read_oob(mtd, addr0, &ops);
++	if (mtd_is_bitflip(err))
++		err = 0;
++
+ 	if (err) {
+ 		pr_info("error occurred as expected\n");
+ 		err = 0;
+@@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void)
+ 		pr_info("attempting to read past end of device\n");
+ 		pr_info("an error is expected...\n");
+ 		err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
++		if (mtd_is_bitflip(err))
++			err = 0;
++
+ 		if (err) {
+ 			pr_info("error occurred as expected\n");
+ 			err = 0;
+@@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void)
+ 		pr_info("attempting to read past end of device\n");
+ 		pr_info("an error is expected...\n");
+ 		err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
++		if (mtd_is_bitflip(err))
++			err = 0;
++
+ 		if (err) {
+ 			pr_info("error occurred as expected\n");
+ 			err = 0;
+@@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void)
+ 		ops.datbuf    = NULL;
+ 		ops.oobbuf    = readbuf;
+ 		err = mtd_read_oob(mtd, addr, &ops);
++		if (mtd_is_bitflip(err))
++			err = 0;
++
+ 		if (err)
+ 			goto out;
+ 		if (memcmpshow(addr, readbuf, writebuf,
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index c669554d70bb..b7b113018853 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1528,39 +1528,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 			goto err_close;
+ 	}
+ 
+-	/* If the mode uses primary, then the following is handled by
+-	 * bond_change_active_slave().
+-	 */
+-	if (!bond_uses_primary(bond)) {
+-		/* set promiscuity level to new slave */
+-		if (bond_dev->flags & IFF_PROMISC) {
+-			res = dev_set_promiscuity(slave_dev, 1);
+-			if (res)
+-				goto err_close;
+-		}
+-
+-		/* set allmulti level to new slave */
+-		if (bond_dev->flags & IFF_ALLMULTI) {
+-			res = dev_set_allmulti(slave_dev, 1);
+-			if (res)
+-				goto err_close;
+-		}
+-
+-		netif_addr_lock_bh(bond_dev);
+-
+-		dev_mc_sync_multiple(slave_dev, bond_dev);
+-		dev_uc_sync_multiple(slave_dev, bond_dev);
+-
+-		netif_addr_unlock_bh(bond_dev);
+-	}
+-
+-	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+-		/* add lacpdu mc addr to mc list */
+-		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+-
+-		dev_mc_add(slave_dev, lacpdu_multicast);
+-	}
+-
+ 	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
+ 	if (res) {
+ 		netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
+@@ -1725,6 +1692,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 		goto err_upper_unlink;
+ 	}
+ 
++	/* If the mode uses primary, then the following is handled by
++	 * bond_change_active_slave().
++	 */
++	if (!bond_uses_primary(bond)) {
++		/* set promiscuity level to new slave */
++		if (bond_dev->flags & IFF_PROMISC) {
++			res = dev_set_promiscuity(slave_dev, 1);
++			if (res)
++				goto err_sysfs_del;
++		}
++
++		/* set allmulti level to new slave */
++		if (bond_dev->flags & IFF_ALLMULTI) {
++			res = dev_set_allmulti(slave_dev, 1);
++			if (res) {
++				if (bond_dev->flags & IFF_PROMISC)
++					dev_set_promiscuity(slave_dev, -1);
++				goto err_sysfs_del;
++			}
++		}
++
++		netif_addr_lock_bh(bond_dev);
++		dev_mc_sync_multiple(slave_dev, bond_dev);
++		dev_uc_sync_multiple(slave_dev, bond_dev);
++		netif_addr_unlock_bh(bond_dev);
++
++		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
++			/* add lacpdu mc addr to mc list */
++			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
++
++			dev_mc_add(slave_dev, lacpdu_multicast);
++		}
++	}
++
+ 	bond->slave_cnt++;
+ 	bond_compute_features(bond);
+ 	bond_set_carrier(bond);
+@@ -1748,6 +1749,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 	return 0;
+ 
+ /* Undo stages on error */
++err_sysfs_del:
++	bond_sysfs_slave_del(new_slave);
++
+ err_upper_unlink:
+ 	bond_upper_dev_unlink(bond, new_slave);
+ 
+@@ -1755,9 +1759,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 	netdev_rx_handler_unregister(slave_dev);
+ 
+ err_detach:
+-	if (!bond_uses_primary(bond))
+-		bond_hw_addr_flush(bond_dev, slave_dev);
+-
+ 	vlan_vids_del_by_dev(slave_dev, bond_dev);
+ 	if (rcu_access_pointer(bond->primary_slave) == new_slave)
+ 		RCU_INIT_POINTER(bond->primary_slave, NULL);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+index 14d7e673c656..129b914a434c 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+@@ -2619,8 +2619,8 @@ void t4vf_sge_stop(struct adapter *adapter)
+ int t4vf_sge_init(struct adapter *adapter)
+ {
+ 	struct sge_params *sge_params = &adapter->params.sge;
+-	u32 fl0 = sge_params->sge_fl_buffer_size[0];
+-	u32 fl1 = sge_params->sge_fl_buffer_size[1];
++	u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
++	u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
+ 	struct sge *s = &adapter->sge;
+ 
+ 	/*
+@@ -2628,9 +2628,20 @@ int t4vf_sge_init(struct adapter *adapter)
+ 	 * the Physical Function Driver.  Ideally we should be able to deal
+ 	 * with _any_ configuration.  Practice is different ...
+ 	 */
+-	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
++
++	/* We only bother using the Large Page logic if the Large Page Buffer
++	 * is larger than our Page Size Buffer.
++	 */
++	if (fl_large_pg <= fl_small_pg)
++		fl_large_pg = 0;
++
++	/* The Page Size Buffer must be exactly equal to our Page Size and the
++	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
++	 */
++	if (fl_small_pg != PAGE_SIZE ||
++	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
+ 		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+-			fl0, fl1);
++			fl_small_pg, fl_large_pg);
+ 		return -EINVAL;
+ 	}
+ 	if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
+@@ -2642,8 +2653,8 @@ int t4vf_sge_init(struct adapter *adapter)
+ 	/*
+ 	 * Now translate the adapter parameters into our internal forms.
+ 	 */
+-	if (fl1)
+-		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
++	if (fl_large_pg)
++		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+ 	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
+ 			? 128 : 64);
+ 	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
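
The validation above relies on the classic test x & (x - 1), which is zero exactly when x is 0 or a power of two; that is why fl_large_pg is first zeroed when it isn't strictly larger than the page-size buffer, letting one check accept both the "disabled" and the "power of two" cases. A standalone demonstration (the sample values are arbitrary):

#include <stdio.h>

/* Zero for x == 0 or any power of two -- the same test the FL buffer
 * validation above applies to fl_large_pg.
 */
static int is_zero_or_pow2(unsigned int x)
{
	return (x & (x - 1)) == 0;
}

int main(void)
{
	unsigned int vals[] = { 0, 1, 4096, 65536, 12288 };

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("%u -> %s\n", vals[i],
		       is_zero_or_pow2(vals[i]) ? "ok" : "rejected");
	return 0;
}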
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 59ed806a52c3..3a3c7fe50e13 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -2189,6 +2189,10 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
+ {
+ 	struct hclge_vport *vport = hclge_get_vport(handle);
+ 	struct hclge_dev *hdev = vport->back;
++	struct phy_device *phydev = hdev->hw.mac.phydev;
++
++	if (phydev)
++		return phydev->autoneg;
+ 
+ 	hclge_query_autoneg_result(hdev);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+index 7069e9408d7d..22be638be40f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+@@ -17,6 +17,7 @@
+ #define HCLGE_PHY_SUPPORTED_FEATURES	(SUPPORTED_Autoneg | \
+ 					 SUPPORTED_TP | \
+ 					 SUPPORTED_Pause | \
++					 SUPPORTED_Asym_Pause | \
+ 					 PHY_10BT_FEATURES | \
+ 					 PHY_100BT_FEATURES | \
+ 					 PHY_1000BT_FEATURES)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+index 59415090ff0f..a685368ab25b 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+@@ -1055,6 +1055,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+ 	u64 rx_bytes = 0;
+ 	u64 tx_pkts = 0;
+ 	u64 rx_pkts = 0;
++	u64 tx_drop = 0;
++	u64 rx_drop = 0;
+ 
+ 	for (idx = 0; idx < queue_num; idx++) {
+ 		/* fetch the tx stats */
+@@ -1063,6 +1065,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+ 			start = u64_stats_fetch_begin_irq(&ring->syncp);
+ 			tx_bytes += ring->stats.tx_bytes;
+ 			tx_pkts += ring->stats.tx_pkts;
++			tx_drop += ring->stats.tx_busy;
++			tx_drop += ring->stats.sw_err_cnt;
+ 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ 
+ 		/* fetch the rx stats */
+@@ -1071,6 +1075,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+ 			start = u64_stats_fetch_begin_irq(&ring->syncp);
+ 			rx_bytes += ring->stats.rx_bytes;
+ 			rx_pkts += ring->stats.rx_pkts;
++			rx_drop += ring->stats.non_vld_descs;
++			rx_drop += ring->stats.err_pkt_len;
++			rx_drop += ring->stats.l2_err;
+ 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ 	}
+ 
+@@ -1086,8 +1093,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+ 	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+ 
+ 	stats->tx_errors = netdev->stats.tx_errors;
+-	stats->rx_dropped = netdev->stats.rx_dropped;
+-	stats->tx_dropped = netdev->stats.tx_dropped;
++	stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
++	stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
+ 	stats->collisions = netdev->stats.collisions;
+ 	stats->rx_over_errors = netdev->stats.rx_over_errors;
+ 	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
+@@ -1317,6 +1324,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
+ 		return ret;
+ 	}
+ 
++	netdev->mtu = new_mtu;
++
+ 	/* if the netdev was running earlier, bring it up again */
+ 	if (if_running && hns3_nic_net_open(netdev))
+ 		ret = -EINVAL;
+@@ -2785,8 +2794,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
+ 			h->ae_algo->ops->reset_queue(h, i);
+ 
+ 		hns3_fini_ring(priv->ring_data[i].ring);
++		devm_kfree(priv->dev, priv->ring_data[i].ring);
+ 		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
++		devm_kfree(priv->dev,
++			   priv->ring_data[i + h->kinfo.num_tqps].ring);
+ 	}
++	devm_kfree(priv->dev, priv->ring_data);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
+index a21470c72da3..8974be4011e5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
+@@ -23,7 +23,8 @@ struct hns3_stats {
+ #define HNS3_TQP_STAT(_string, _member)	{			\
+ 	.stats_string = _string,				\
+ 	.stats_size = FIELD_SIZEOF(struct ring_stats, _member),	\
+-	.stats_offset = offsetof(struct hns3_enet_ring, stats),	\
++	.stats_offset = offsetof(struct hns3_enet_ring, stats) +\
++			offsetof(struct ring_stats, _member),   \
+ }								\
+ 
+ static const struct hns3_stats hns3_txq_stats[] = {
+@@ -455,13 +456,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
+ 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ 	struct hns3_enet_ring *ring;
+ 	u8 *stat;
+-	u32 i;
++	int i, j;
+ 
+ 	/* get stats for Tx */
+ 	for (i = 0; i < kinfo->num_tqps; i++) {
+ 		ring = nic_priv->ring_data[i].ring;
+-		for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) {
+-			stat = (u8 *)ring + hns3_txq_stats[i].stats_offset;
++		for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
++			stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
+ 			*data++ = *(u64 *)stat;
+ 		}
+ 	}
+@@ -469,8 +470,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
+ 	/* get stats for Rx */
+ 	for (i = 0; i < kinfo->num_tqps; i++) {
+ 		ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
+-		for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) {
+-			stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset;
++		for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
++			stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
+ 			*data++ = *(u64 *)stat;
+ 		}
+ 	}
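
Two bugs are fixed above: the stats walk reused i for both the queue loop and the counter loop, and HNS3_TQP_STAT recorded only the offset of the embedded stats struct, so every counter read the first member. The offsetof() chain is the interesting part; a standalone demo with cut-down stand-in structures (the concrete offsets are illustrative, not the driver's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	uint64_t tx_pkts;
	uint64_t tx_bytes;
};

struct enet_ring {
	int id;
	struct ring_stats stats;
};

int main(void)
{
	struct enet_ring ring = { .id = 7,
				  .stats = { .tx_pkts = 42, .tx_bytes = 4200 } };

	/* Broken form: only the offset of the embedded stats struct, so
	 * every counter lookup landed on the first member (tx_pkts).
	 */
	size_t broken = offsetof(struct enet_ring, stats);
	/* Fixed form: add the member's offset within ring_stats as well. */
	size_t fixed = offsetof(struct enet_ring, stats) +
		       offsetof(struct ring_stats, tx_bytes);

	printf("tx_bytes via broken offset: %llu\n",
	       (unsigned long long)*(uint64_t *)((char *)&ring + broken));
	printf("tx_bytes via fixed offset:  %llu\n",
	       (unsigned long long)*(uint64_t *)((char *)&ring + fixed));
	return 0;
}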
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index b65f5f3ac034..6e064c04ce0b 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -2484,6 +2484,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
+ 	struct ibmvnic_sub_crq_queue *scrq = instance;
+ 	struct ibmvnic_adapter *adapter = scrq->adapter;
+ 
++	/* When booting a kdump kernel we can hit pending interrupts
++	 * prior to completing driver initialization.
++	 */
++	if (unlikely(adapter->state != VNIC_OPEN))
++		return IRQ_NONE;
++
+ 	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
+ 
+ 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
+diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+index 7b2a4eba92e2..0b23bf6d7873 100644
+--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
++++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+@@ -1796,7 +1796,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
+ 
+ 	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+ 
+-	if (netif_running(adapter->netdev)) {
++	/* We don't use netif_running() because it may be true prior to
++	 * ndo_open() returning, so we can't assume it means all our open
++	 * tasks have finished, since we're not holding the rtnl_lock here.
++	 */
++	if (adapter->state == __I40EVF_RUNNING) {
+ 		set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+ 		netif_carrier_off(adapter->netdev);
+ 		netif_tx_disable(adapter->netdev);
+@@ -1854,6 +1858,7 @@ static void i40evf_reset_task(struct work_struct *work)
+ 	struct i40evf_mac_filter *f;
+ 	u32 reg_val;
+ 	int i = 0, err;
++	bool running;
+ 
+ 	while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
+ 				&adapter->crit_section))
+@@ -1913,7 +1918,13 @@ static void i40evf_reset_task(struct work_struct *work)
+ 	}
+ 
+ continue_reset:
+-	if (netif_running(netdev)) {
++	/* We don't use netif_running() because it may be true prior to
++	 * ndo_open() returning, so we can't assume it means all our open
++	 * tasks have finished, since we're not holding the rtnl_lock here.
++	 */
++	running = (adapter->state == __I40EVF_RUNNING);
++
++	if (running) {
+ 		netif_carrier_off(netdev);
+ 		netif_tx_stop_all_queues(netdev);
+ 		adapter->link_up = false;
+@@ -1964,7 +1975,10 @@ static void i40evf_reset_task(struct work_struct *work)
+ 
+ 	mod_timer(&adapter->watchdog_timer, jiffies + 2);
+ 
+-	if (netif_running(adapter->netdev)) {
++	/* We were running when the reset started, so we need to restore some
++	 * state here.
++	 */
++	if (running) {
+ 		/* allocate transmit descriptors */
+ 		err = i40evf_setup_all_tx_resources(adapter);
+ 		if (err)
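
The i40evf change above replaces netif_running() -- which can read true before ndo_open() has finished -- with a snapshot of the adapter's own state, taken once and reused, so the teardown and restore halves of a single reset always agree on whether the interface was up. A minimal sketch of that snapshot pattern (hypothetical types, not driver code):

#include <stdbool.h>
#include <stdio.h>

enum state { STATE_DOWN, STATE_RUNNING };

struct adapter {
	enum state state;
};

static void reset_task(struct adapter *ad)
{
	/* Sample the predicate once; re-reading it later could give a
	 * different answer mid-reset.
	 */
	bool running = (ad->state == STATE_RUNNING);

	if (running)
		puts("stop queues, drop carrier");

	puts("... reset hardware ...");

	if (running)
		puts("reallocate rings, restart queues");
}

int main(void)
{
	struct adapter ad = { .state = STATE_RUNNING };

	reset_task(&ad);
	return 0;
}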
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 9efe1771423c..523e1108c9df 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	INIT_WORK(&hw->restart_work, sky2_restart);
+ 
+ 	pci_set_drvdata(pdev, hw);
+-	pdev->d3_delay = 150;
++	pdev->d3_delay = 200;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+index 5f41dc92aa68..752a72499b4f 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+@@ -156,57 +156,63 @@ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(netdev);
++	struct mlx4_en_port_profile *prof = priv->prof;
+ 	struct mlx4_en_dev *mdev = priv->mdev;
++	u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
+ 
+ 	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ 		return 1;
+ 
+ 	if (priv->cee_config.pfc_state) {
+ 		int tc;
++		rx_ppp = prof->rx_ppp;
++		tx_ppp = prof->tx_ppp;
+ 
+-		priv->prof->rx_pause = 0;
+-		priv->prof->tx_pause = 0;
+ 		for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
+ 			u8 tc_mask = 1 << tc;
+ 
+ 			switch (priv->cee_config.dcb_pfc[tc]) {
+ 			case pfc_disabled:
+-				priv->prof->tx_ppp &= ~tc_mask;
+-				priv->prof->rx_ppp &= ~tc_mask;
++				tx_ppp &= ~tc_mask;
++				rx_ppp &= ~tc_mask;
+ 				break;
+ 			case pfc_enabled_full:
+-				priv->prof->tx_ppp |= tc_mask;
+-				priv->prof->rx_ppp |= tc_mask;
++				tx_ppp |= tc_mask;
++				rx_ppp |= tc_mask;
+ 				break;
+ 			case pfc_enabled_tx:
+-				priv->prof->tx_ppp |= tc_mask;
+-				priv->prof->rx_ppp &= ~tc_mask;
++				tx_ppp |= tc_mask;
++				rx_ppp &= ~tc_mask;
+ 				break;
+ 			case pfc_enabled_rx:
+-				priv->prof->tx_ppp &= ~tc_mask;
+-				priv->prof->rx_ppp |= tc_mask;
++				tx_ppp &= ~tc_mask;
++				rx_ppp |= tc_mask;
+ 				break;
+ 			default:
+ 				break;
+ 			}
+ 		}
+-		en_dbg(DRV, priv, "Set pfc on\n");
++		rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
++		tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
+ 	} else {
+-		priv->prof->rx_pause = 1;
+-		priv->prof->tx_pause = 1;
+-		en_dbg(DRV, priv, "Set pfc off\n");
++		rx_ppp = 0;
++		tx_ppp = 0;
++		rx_pause = prof->rx_pause;
++		tx_pause = prof->tx_pause;
+ 	}
+ 
+ 	if (mlx4_SET_PORT_general(mdev->dev, priv->port,
+ 				  priv->rx_skb_size + ETH_FCS_LEN,
+-				  priv->prof->tx_pause,
+-				  priv->prof->tx_ppp,
+-				  priv->prof->rx_pause,
+-				  priv->prof->rx_ppp)) {
++				  tx_pause, tx_ppp, rx_pause, rx_ppp)) {
+ 		en_err(priv, "Failed setting pause params\n");
+ 		return 1;
+ 	}
+ 
++	prof->tx_ppp = tx_ppp;
++	prof->rx_ppp = rx_ppp;
++	prof->tx_pause = tx_pause;
++	prof->rx_pause = rx_pause;
++
+ 	return 0;
+ }
+ 
+@@ -310,6 +316,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
+ 		}
+ 
+ 		switch (ets->tc_tsa[i]) {
++		case IEEE_8021QAZ_TSA_VENDOR:
+ 		case IEEE_8021QAZ_TSA_STRICT:
+ 			break;
+ 		case IEEE_8021QAZ_TSA_ETS:
+@@ -347,6 +354,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
+ 	/* higher TC means higher priority => lower pg */
+ 	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
+ 		switch (ets->tc_tsa[i]) {
++		case IEEE_8021QAZ_TSA_VENDOR:
++			pg[i] = MLX4_EN_TC_VENDOR;
++			tc_tx_bw[i] = MLX4_EN_BW_MAX;
++			break;
+ 		case IEEE_8021QAZ_TSA_STRICT:
+ 			pg[i] = num_strict++;
+ 			tc_tx_bw[i] = MLX4_EN_BW_MAX;
+@@ -403,6 +414,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	struct mlx4_en_port_profile *prof = priv->prof;
+ 	struct mlx4_en_dev *mdev = priv->mdev;
++	u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
+ 	int err;
+ 
+ 	en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
+@@ -411,23 +423,26 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
+ 			pfc->mbc,
+ 			pfc->delay);
+ 
+-	prof->rx_pause = !pfc->pfc_en;
+-	prof->tx_pause = !pfc->pfc_en;
+-	prof->rx_ppp = pfc->pfc_en;
+-	prof->tx_ppp = pfc->pfc_en;
++	rx_pause = prof->rx_pause && !pfc->pfc_en;
++	tx_pause = prof->tx_pause && !pfc->pfc_en;
++	rx_ppp = pfc->pfc_en;
++	tx_ppp = pfc->pfc_en;
+ 
+ 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ 				    priv->rx_skb_size + ETH_FCS_LEN,
+-				    prof->tx_pause,
+-				    prof->tx_ppp,
+-				    prof->rx_pause,
+-				    prof->rx_ppp);
+-	if (err)
++				    tx_pause, tx_ppp, rx_pause, rx_ppp);
++	if (err) {
+ 		en_err(priv, "Failed setting pause params\n");
+-	else
+-		mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+-						prof->rx_ppp, prof->rx_pause,
+-						prof->tx_ppp, prof->tx_pause);
++		return err;
++	}
++
++	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
++					rx_ppp, rx_pause, tx_ppp, tx_pause);
++
++	prof->tx_ppp = tx_ppp;
++	prof->rx_ppp = rx_ppp;
++	prof->rx_pause = rx_pause;
++	prof->tx_pause = tx_pause;
+ 
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index bf1f04164885..c5ab626f4cba 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -1046,27 +1046,32 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	struct mlx4_en_dev *mdev = priv->mdev;
++	u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
+ 	int err;
+ 
+ 	if (pause->autoneg)
+ 		return -EINVAL;
+ 
+-	priv->prof->tx_pause = pause->tx_pause != 0;
+-	priv->prof->rx_pause = pause->rx_pause != 0;
++	tx_pause = !!(pause->tx_pause);
++	rx_pause = !!(pause->rx_pause);
++	rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
++	tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
++
+ 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ 				    priv->rx_skb_size + ETH_FCS_LEN,
+-				    priv->prof->tx_pause,
+-				    priv->prof->tx_ppp,
+-				    priv->prof->rx_pause,
+-				    priv->prof->rx_ppp);
+-	if (err)
+-		en_err(priv, "Failed setting pause params\n");
+-	else
+-		mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+-						priv->prof->rx_ppp,
+-						priv->prof->rx_pause,
+-						priv->prof->tx_ppp,
+-						priv->prof->tx_pause);
++				    tx_pause, tx_ppp, rx_pause, rx_ppp);
++	if (err) {
++		en_err(priv, "Failed setting pause params, err = %d\n", err);
++		return err;
++	}
++
++	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
++					rx_ppp, rx_pause, tx_ppp, tx_pause);
++
++	priv->prof->tx_pause = tx_pause;
++	priv->prof->rx_pause = rx_pause;
++	priv->prof->tx_ppp = tx_ppp;
++	priv->prof->rx_ppp = rx_ppp;
+ 
+ 	return err;
+ }
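
The mlx4 pause hunks above all apply the same two rules: global pause and per-priority flow control (PFC) never end up enabled together, and the cached profile is only updated after the firmware call succeeds, so a failed call leaves the old settings intact. A tiny sketch of the exclusivity rule (hypothetical helper, not mlx4 API; the ppp fields are per-priority enable bitmaps):

#include <stdbool.h>
#include <stdio.h>

struct pause_cfg {
	bool rx_pause, tx_pause;
	unsigned char rx_ppp, tx_ppp;
};

/* Global pause and PFC are mutually exclusive. */
static bool cfg_valid(const struct pause_cfg *c)
{
	bool pause = c->rx_pause || c->tx_pause;
	bool pfc = c->rx_ppp || c->tx_ppp;

	return !(pause && pfc);
}

int main(void)
{
	struct pause_cfg ok  = { .rx_pause = true };
	struct pause_cfg bad = { .rx_pause = true, .rx_ppp = 0x08 };

	printf("pause only: %s\n", cfg_valid(&ok) ? "valid" : "invalid");
	printf("pause+pfc:  %s\n", cfg_valid(&bad) ? "valid" : "invalid");
	return 0;
}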
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
+index 2c2965497ed3..d25e16d2c319 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
+@@ -163,9 +163,9 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
+ 		params->udp_rss = 0;
+ 	}
+ 	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
+-		params->prof[i].rx_pause = 1;
++		params->prof[i].rx_pause = !(pfcrx || pfctx);
+ 		params->prof[i].rx_ppp = pfcrx;
+-		params->prof[i].tx_pause = 1;
++		params->prof[i].tx_pause = !(pfcrx || pfctx);
+ 		params->prof[i].tx_ppp = pfctx;
+ 		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+ 		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 99051a294fa6..21bc17fa3854 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -3336,6 +3336,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
+ #ifdef CONFIG_MLX4_EN_DCB
+ 	if (!mlx4_is_slave(priv->mdev->dev)) {
++		u8 prio;
++
++		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
++			priv->ets.prio_tc[prio] = prio;
++			priv->ets.tc_tsa[prio]  = IEEE_8021QAZ_TSA_VENDOR;
++		}
++
+ 		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
+ 			DCB_CAP_DCBX_VER_IEEE;
+ 		priv->flags |= MLX4_EN_DCB_ENABLED;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index 2b72677eccd4..7db3d0d9bfce 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -479,6 +479,7 @@ struct mlx4_en_frag_info {
+ #define MLX4_EN_BW_MIN 1
+ #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
+ 
++#define MLX4_EN_TC_VENDOR 0
+ #define MLX4_EN_TC_ETS 7
+ 
+ enum dcb_pfc_type {
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index 606a0e0beeae..29e50f787349 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -5088,6 +5088,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
+ 						 &tracker->res_tree[RES_FS_RULE]);
+ 					list_del(&fs_rule->com.list);
+ 					spin_unlock_irq(mlx4_tlock(dev));
++					kfree(fs_rule->mirr_mbox);
+ 					kfree(fs_rule);
+ 					state = 0;
+ 					break;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index ea5fff2c3143..f909d0dbae10 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -492,6 +492,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
+ 	return mlx5e_ethtool_get_coalesce(priv, coal);
+ }
+ 
++#define MLX5E_MAX_COAL_TIME		MLX5_MAX_CQ_PERIOD
++#define MLX5E_MAX_COAL_FRAMES		MLX5_MAX_CQ_COUNT
++
+ static void
+ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+ {
+@@ -526,6 +529,20 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ 		return -EOPNOTSUPP;
+ 
++	if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
++	    coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
++		netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
++			    __func__, MLX5E_MAX_COAL_TIME);
++		return -ERANGE;
++	}
++
++	if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
++	    coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
++		netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
++			    __func__, MLX5E_MAX_COAL_FRAMES);
++		return -ERANGE;
++	}
++
+ 	mutex_lock(&priv->state_lock);
+ 	new_channels.params = priv->channels.params;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 0d352d4cf48c..f5a704c7d143 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -2715,6 +2715,9 @@ int mlx5e_open(struct net_device *netdev)
+ 		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
+ 	mutex_unlock(&priv->state_lock);
+ 
++	if (mlx5e_vxlan_allowed(priv->mdev))
++		udp_tunnel_get_rx_info(netdev);
++
+ 	return err;
+ }
+ 
+@@ -4075,7 +4078,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ 			    struct mlx5e_params *params,
+ 			    u16 max_channels)
+ {
+-	u8 cq_period_mode = 0;
++	u8 rx_cq_period_mode;
+ 	u32 link_speed = 0;
+ 	u32 pci_bw = 0;
+ 
+@@ -4111,12 +4114,12 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ 	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+ 
+ 	/* CQ moderation params */
+-	cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
++	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ 			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ 			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ 	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+-	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+-	mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
++	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
++	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ 
+ 	/* TX inline */
+ 	params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+@@ -4428,12 +4431,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
+ #ifdef CONFIG_MLX5_CORE_EN_DCB
+ 	mlx5e_dcbnl_init_app(priv);
+ #endif
+-	/* Device already registered: sync netdev system state */
+-	if (mlx5e_vxlan_allowed(mdev)) {
+-		rtnl_lock();
+-		udp_tunnel_get_rx_info(netdev);
+-		rtnl_unlock();
+-	}
+ 
+ 	queue_work(priv->wq, &priv->set_rx_mode_work);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 3409d86eb06b..dfa8c6a28a6c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -44,6 +44,11 @@
+ #include "en_tc.h"
+ #include "fs_core.h"
+ 
++#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
++	max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
++#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
++	max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)
++
+ static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
+ 
+ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+@@ -231,7 +236,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
+ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
+ {
+ #if IS_ENABLED(CONFIG_IPV6)
+-	unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms,
++	unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
+ 						DELAY_PROBE_TIME);
+ #else
+ 	unsigned long ipv6_interval = ~0UL;
+@@ -367,7 +372,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
+ 	case NETEVENT_NEIGH_UPDATE:
+ 		n = ptr;
+ #if IS_ENABLED(CONFIG_IPV6)
+-		if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
++		if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
+ #else
+ 		if (n->tbl != &arp_tbl)
+ #endif
+@@ -415,7 +420,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
+ 		 * done per device delay prob time parameter.
+ 		 */
+ #if IS_ENABLED(CONFIG_IPV6)
+-		if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
++		if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
+ #else
+ 		if (!p->dev || p->tbl != &arp_tbl)
+ #endif
+@@ -611,7 +616,6 @@ static int mlx5e_rep_open(struct net_device *dev)
+ 	struct mlx5e_priv *priv = netdev_priv(dev);
+ 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ 	struct mlx5_eswitch_rep *rep = rpriv->rep;
+-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ 	int err;
+ 
+ 	mutex_lock(&priv->state_lock);
+@@ -619,8 +623,9 @@ static int mlx5e_rep_open(struct net_device *dev)
+ 	if (err)
+ 		goto unlock;
+ 
+-	if (!mlx5_eswitch_set_vport_state(esw, rep->vport,
+-					  MLX5_ESW_VPORT_ADMIN_STATE_UP))
++	if (!mlx5_modify_vport_admin_state(priv->mdev,
++			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
++			rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
+ 		netif_carrier_on(dev);
+ 
+ unlock:
+@@ -633,11 +638,12 @@ static int mlx5e_rep_close(struct net_device *dev)
+ 	struct mlx5e_priv *priv = netdev_priv(dev);
+ 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ 	struct mlx5_eswitch_rep *rep = rpriv->rep;
+-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ 	int ret;
+ 
+ 	mutex_lock(&priv->state_lock);
+-	(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
++	mlx5_modify_vport_admin_state(priv->mdev,
++			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
++			rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ 	ret = mlx5e_close_locked(dev);
+ 	mutex_unlock(&priv->state_lock);
+ 	return ret;
+@@ -823,9 +829,9 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
+ 					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ 					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ 
+-	params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
++	params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
+ 	params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
+-	params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
++	params->log_rq_size = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
+ 
+ 	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 36611b64a91c..f7d9aab2b3b6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1196,7 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
+ 					 u32 cqe_bcnt,
+ 					 struct sk_buff *skb)
+ {
++	struct hwtstamp_config *tstamp;
+ 	struct net_device *netdev;
++	struct mlx5e_priv *priv;
+ 	char *pseudo_header;
+ 	u32 qpn;
+ 	u8 *dgid;
+@@ -1215,6 +1217,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
+ 		return;
+ 	}
+ 
++	priv = mlx5i_epriv(netdev);
++	tstamp = &priv->tstamp;
++
+ 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
+ 	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
+ 	if ((!g) || dgid[0] != 0xff)
+@@ -1235,7 +1240,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
+ 	skb->ip_summed = CHECKSUM_COMPLETE;
+ 	skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ 
+-	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
++	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
+ 		skb_hwtstamps(skb)->hwtstamp =
+ 				mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 55979ec2e88a..dfab6b08db70 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -495,7 +495,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+ 		tbl = &arp_tbl;
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	else if (m_neigh->family == AF_INET6)
+-		tbl = ipv6_stub->nd_tbl;
++		tbl = &nd_tbl;
+ #endif
+ 	else
+ 		return;
+@@ -2102,19 +2102,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
+ 	if (err != -EAGAIN)
+ 		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ 
++	if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
++	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
++		kvfree(parse_attr);
++
+ 	err = rhashtable_insert_fast(&tc->ht, &flow->node,
+ 				     tc->ht_params);
+-	if (err)
+-		goto err_del_rule;
++	if (err) {
++		mlx5e_tc_del_flow(priv, flow);
++		kfree(flow);
++	}
+ 
+-	if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
+-	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+-		kvfree(parse_attr);
+ 	return err;
+ 
+-err_del_rule:
+-	mlx5e_tc_del_flow(priv, flow);
+-
+ err_free:
+ 	kvfree(parse_attr);
+ 	kfree(flow);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index a1296a62497d..71153c0f1605 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -36,6 +36,9 @@
+ #include <linux/mlx5/vport.h>
+ #include "mlx5_core.h"
+ 
++/* Mutex to hold while enabling or disabling RoCE */
++static DEFINE_MUTEX(mlx5_roce_en_lock);
++
+ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
+ 				   u16 vport, u32 *out, int outlen)
+ {
+@@ -998,17 +1001,35 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
+ 
+ int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
+ {
+-	if (atomic_inc_return(&mdev->roce.roce_en) != 1)
+-		return 0;
+-	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
++	int err = 0;
++
++	mutex_lock(&mlx5_roce_en_lock);
++	if (!mdev->roce.roce_en)
++		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
++
++	if (!err)
++		mdev->roce.roce_en++;
++	mutex_unlock(&mlx5_roce_en_lock);
++
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
+ 
+ int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
+ {
+-	if (atomic_dec_return(&mdev->roce.roce_en) != 0)
+-		return 0;
+-	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
++	int err = 0;
++
++	mutex_lock(&mlx5_roce_en_lock);
++	if (mdev->roce.roce_en) {
++		mdev->roce.roce_en--;
++		if (mdev->roce.roce_en == 0)
++			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
++
++		if (err)
++			mdev->roce.roce_en++;
++	}
++	mutex_unlock(&mlx5_roce_en_lock);
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
+ 
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+index 14a6d1ba51a9..54fe044ceef8 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+@@ -68,10 +68,11 @@
+ /* CPP address to retrieve the data from */
+ #define NSP_BUFFER		0x10
+ #define   NSP_BUFFER_CPP	GENMASK_ULL(63, 40)
+-#define   NSP_BUFFER_PCIE	GENMASK_ULL(39, 38)
+-#define   NSP_BUFFER_ADDRESS	GENMASK_ULL(37, 0)
++#define   NSP_BUFFER_ADDRESS	GENMASK_ULL(39, 0)
+ 
+ #define NSP_DFLT_BUFFER		0x18
++#define   NSP_DFLT_BUFFER_CPP	GENMASK_ULL(63, 40)
++#define   NSP_DFLT_BUFFER_ADDRESS	GENMASK_ULL(39, 0)
+ 
+ #define NSP_DFLT_BUFFER_CONFIG	0x20
+ #define   NSP_DFLT_BUFFER_SIZE_MB	GENMASK_ULL(7, 0)
+@@ -412,8 +413,8 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+ 	if (err < 0)
+ 		return err;
+ 
+-	cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
+-	cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
++	cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8;
++	cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg);
+ 
+ 	if (in_buf && in_size) {
+ 		err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index dd713dff8d22..3a0c450552d6 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -8699,12 +8699,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_out_msi_5;
+ 	}
+ 
++	pci_set_drvdata(pdev, dev);
++
+ 	rc = register_netdev(dev);
+ 	if (rc < 0)
+ 		goto err_out_cnt_6;
+ 
+-	pci_set_drvdata(pdev, dev);
+-
+ 	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
+ 		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
+ 		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 6dde9a0cfe76..9b70a3af678e 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -464,7 +464,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	po->chan.mtu = dst_mtu(&rt->dst);
+ 	if (!po->chan.mtu)
+ 		po->chan.mtu = PPP_MRU;
+-	ip_rt_put(rt);
+ 	po->chan.mtu -= PPTP_HEADER_OVERHEAD;
+ 
+ 	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 56c701b73c12..befed2d22bf4 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1197,11 +1197,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+ 		goto err_dev_open;
+ 	}
+ 
+-	netif_addr_lock_bh(dev);
+-	dev_uc_sync_multiple(port_dev, dev);
+-	dev_mc_sync_multiple(port_dev, dev);
+-	netif_addr_unlock_bh(dev);
+-
+ 	err = vlan_vids_add_by_dev(port_dev, dev);
+ 	if (err) {
+ 		netdev_err(dev, "Failed to add vlan ids to device %s\n",
+@@ -1241,6 +1236,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+ 		goto err_option_port_add;
+ 	}
+ 
++	netif_addr_lock_bh(dev);
++	dev_uc_sync_multiple(port_dev, dev);
++	dev_mc_sync_multiple(port_dev, dev);
++	netif_addr_unlock_bh(dev);
++
+ 	port->index = -1;
+ 	list_add_tail_rcu(&port->list, &team->port_list);
+ 	team_port_enable(team, port);
+@@ -1265,8 +1265,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+ 	vlan_vids_del_by_dev(port_dev, dev);
+ 
+ err_vids_add:
+-	dev_uc_unsync(port_dev, dev);
+-	dev_mc_unsync(port_dev, dev);
+ 	dev_close(port_dev);
+ 
+ err_dev_open:
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index ec56ff29aac4..02048263c1fb 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -2863,8 +2863,7 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
+ 	if (ret < 0) {
+ 		netdev_warn(dev->net,
+ 			    "lan78xx_setup_irq_domain() failed : %d", ret);
+-		kfree(pdata);
+-		return ret;
++		goto out1;
+ 	}
+ 
+ 	dev->net->hard_header_len += TX_OVERHEAD;
+@@ -2872,14 +2871,32 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
+ 
+ 	/* Init all registers */
+ 	ret = lan78xx_reset(dev);
++	if (ret) {
++		netdev_warn(dev->net, "Registers INIT FAILED....");
++		goto out2;
++	}
+ 
+ 	ret = lan78xx_mdio_init(dev);
++	if (ret) {
++		netdev_warn(dev->net, "MDIO INIT FAILED.....");
++		goto out2;
++	}
+ 
+ 	dev->net->flags |= IFF_MULTICAST;
+ 
+ 	pdata->wol = WAKE_MAGIC;
+ 
+ 	return ret;
++
++out2:
++	lan78xx_remove_irq_domain(dev);
++
++out1:
++	netdev_warn(dev->net, "Bind routine FAILED");
++	cancel_work_sync(&pdata->set_multicast);
++	cancel_work_sync(&pdata->set_vlan);
++	kfree(pdata);
++	return ret;
+ }
+ 
+ static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
+@@ -2891,6 +2908,8 @@ static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
+ 	lan78xx_remove_mdio(dev);
+ 
+ 	if (pdata) {
++		cancel_work_sync(&pdata->set_multicast);
++		cancel_work_sync(&pdata->set_vlan);
+ 		netif_dbg(dev, ifdown, dev->net, "free pdata");
+ 		kfree(pdata);
+ 		pdata = NULL;
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 139c61c8244a..ac40924fe437 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -578,12 +578,13 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
+ 	if (!IS_ERR(neigh)) {
+ 		sock_confirm_neigh(skb, neigh);
+ 		ret = neigh_output(neigh, skb);
++		rcu_read_unlock_bh();
++		return ret;
+ 	}
+ 
+ 	rcu_read_unlock_bh();
+ err:
+-	if (unlikely(ret < 0))
+-		vrf_tx_error(skb->dev, skb);
++	vrf_tx_error(skb->dev, skb);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+index ecc96312a370..6fe0c6abe0d6 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+@@ -142,15 +142,25 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
+ 	if (!rt2x00dev->ops->hw->set_rts_threshold &&
+ 	    (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
+ 						IEEE80211_TX_RC_USE_CTS_PROTECT))) {
+-		if (rt2x00queue_available(queue) <= 1)
+-			goto exit_fail;
++		if (rt2x00queue_available(queue) <= 1) {
++			/*
++			 * Recheck for full queue under lock to avoid race
++			 * conditions with rt2x00lib_txdone().
++			 */
++			spin_lock(&queue->tx_lock);
++			if (rt2x00queue_threshold(queue))
++				rt2x00queue_pause_queue(queue);
++			spin_unlock(&queue->tx_lock);
++
++			goto exit_free_skb;
++		}
+ 
+ 		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
+-			goto exit_fail;
++			goto exit_free_skb;
+ 	}
+ 
+ 	if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
+-		goto exit_fail;
++		goto exit_free_skb;
+ 
+ 	/*
+ 	 * Pausing queue has to be serialized with rt2x00lib_txdone(). Note
+@@ -164,10 +174,6 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
+ 
+ 	return;
+ 
+- exit_fail:
+-	spin_lock(&queue->tx_lock);
+-	rt2x00queue_pause_queue(queue);
+-	spin_unlock(&queue->tx_lock);
+  exit_free_skb:
+ 	ieee80211_free_txskb(hw, skb);
+ }
+diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
+index 6d02c660b4ab..037defd10b91 100644
+--- a/drivers/net/wireless/ti/wl1251/main.c
++++ b/drivers/net/wireless/ti/wl1251/main.c
+@@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
+ 		WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+ 
+ 		enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
+-		wl1251_acx_arp_ip_filter(wl, enable, addr);
+-
++		ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
+ 		if (ret < 0)
+ 			goto out_sleep;
+ 	}
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 894c2ccb3891..8b5e640d8686 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -869,32 +869,41 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
+ 		goto out_unlock;
+ 	}
+ 
++	if (!try_module_get(ops->module)) {
++		ret = -EBUSY;
++		goto out_unlock;
++	}
++
+ 	ret = nvmf_check_required_opts(opts, ops->required_opts);
+ 	if (ret)
+-		goto out_unlock;
++		goto out_module_put;
+ 	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
+ 				ops->allowed_opts | ops->required_opts);
+ 	if (ret)
+-		goto out_unlock;
++		goto out_module_put;
+ 
+ 	ctrl = ops->create_ctrl(dev, opts);
+ 	if (IS_ERR(ctrl)) {
+ 		ret = PTR_ERR(ctrl);
+-		goto out_unlock;
++		goto out_module_put;
+ 	}
+ 
+ 	if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
+ 		dev_warn(ctrl->device,
+ 			"controller returned incorrect NQN: \"%s\".\n",
+ 			ctrl->subsys->subnqn);
++		module_put(ops->module);
+ 		up_read(&nvmf_transports_rwsem);
+ 		nvme_delete_ctrl_sync(ctrl);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
++	module_put(ops->module);
+ 	up_read(&nvmf_transports_rwsem);
+ 	return ctrl;
+ 
++out_module_put:
++	module_put(ops->module);
+ out_unlock:
+ 	up_read(&nvmf_transports_rwsem);
+ out_free_opts:
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
+index 9ba614953607..25b19f722f5b 100644
+--- a/drivers/nvme/host/fabrics.h
++++ b/drivers/nvme/host/fabrics.h
+@@ -108,6 +108,7 @@ struct nvmf_ctrl_options {
+  *			       fabric implementation of NVMe fabrics.
+  * @entry:		Used by the fabrics library to add the new
+  *			registration entry to its linked-list internal tree.
++ * @module:             Transport module reference
+  * @name:		Name of the NVMe fabric driver implementation.
+  * @required_opts:	sysfs command-line options that must be specified
+  *			when adding a new NVMe controller.
+@@ -126,6 +127,7 @@ struct nvmf_ctrl_options {
+  */
+ struct nvmf_transport_ops {
+ 	struct list_head	entry;
++	struct module		*module;
+ 	const char		*name;
+ 	int			required_opts;
+ 	int			allowed_opts;
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 794e66e4aa20..306aee47c8ce 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -3380,6 +3380,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
+ 
+ static struct nvmf_transport_ops nvme_fc_transport = {
+ 	.name		= "fc",
++	.module		= THIS_MODULE,
+ 	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
+ 	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
+ 	.create_ctrl	= nvme_fc_create_ctrl,
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 2a0bba7f50cf..d49b1e74f304 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -2018,6 +2018,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+ 
+ static struct nvmf_transport_ops nvme_rdma_transport = {
+ 	.name		= "rdma",
++	.module		= THIS_MODULE,
+ 	.required_opts	= NVMF_OPT_TRADDR,
+ 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
+ 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index 6a018a0bd6ce..cd1adb9e7e9d 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -204,6 +204,10 @@ struct fcloop_lport {
+ 	struct completion unreg_done;
+ };
+ 
++struct fcloop_lport_priv {
++	struct fcloop_lport *lport;
++};
++
+ struct fcloop_rport {
+ 	struct nvme_fc_remote_port *remoteport;
+ 	struct nvmet_fc_target_port *targetport;
+@@ -370,6 +374,7 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
+ 
+ 	spin_lock(&tfcp_req->reqlock);
+ 	fcpreq = tfcp_req->fcpreq;
++	tfcp_req->fcpreq = NULL;
+ 	spin_unlock(&tfcp_req->reqlock);
+ 
+ 	if (tport->remoteport && fcpreq) {
+@@ -611,11 +616,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ 
+ 	if (!tfcp_req)
+ 		/* abort has already been called */
+-		return;
+-
+-	if (rport->targetport)
+-		nvmet_fc_rcv_fcp_abort(rport->targetport,
+-					&tfcp_req->tgt_fcp_req);
++		goto finish;
+ 
+ 	/* break initiator/target relationship for io */
+ 	spin_lock(&tfcp_req->reqlock);
+@@ -623,6 +624,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ 	tfcp_req->fcpreq = NULL;
+ 	spin_unlock(&tfcp_req->reqlock);
+ 
++	if (rport->targetport)
++		nvmet_fc_rcv_fcp_abort(rport->targetport,
++					&tfcp_req->tgt_fcp_req);
++
++finish:
+ 	/* post the aborted io completion */
+ 	fcpreq->status = -ECANCELED;
+ 	schedule_work(&inireq->iniwork);
+@@ -657,7 +663,8 @@ fcloop_nport_get(struct fcloop_nport *nport)
+ static void
+ fcloop_localport_delete(struct nvme_fc_local_port *localport)
+ {
+-	struct fcloop_lport *lport = localport->private;
++	struct fcloop_lport_priv *lport_priv = localport->private;
++	struct fcloop_lport *lport = lport_priv->lport;
+ 
+ 	/* release any threads waiting for the unreg to complete */
+ 	complete(&lport->unreg_done);
+@@ -697,7 +704,7 @@ static struct nvme_fc_port_template fctemplate = {
+ 	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
+ 	.dma_boundary		= FCLOOP_DMABOUND_4G,
+ 	/* sizes of additional private data for data structures */
+-	.local_priv_sz		= sizeof(struct fcloop_lport),
++	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
+ 	.remote_priv_sz		= sizeof(struct fcloop_rport),
+ 	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
+ 	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
+@@ -728,11 +735,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
+ 	struct fcloop_ctrl_options *opts;
+ 	struct nvme_fc_local_port *localport;
+ 	struct fcloop_lport *lport;
+-	int ret;
++	struct fcloop_lport_priv *lport_priv;
++	unsigned long flags;
++	int ret = -ENOMEM;
++
++	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
++	if (!lport)
++		return -ENOMEM;
+ 
+ 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ 	if (!opts)
+-		return -ENOMEM;
++		goto out_free_lport;
+ 
+ 	ret = fcloop_parse_options(opts, buf);
+ 	if (ret)
+@@ -752,23 +765,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
+ 
+ 	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
+ 	if (!ret) {
+-		unsigned long flags;
+-
+ 		/* success */
+-		lport = localport->private;
++		lport_priv = localport->private;
++		lport_priv->lport = lport;
++
+ 		lport->localport = localport;
+ 		INIT_LIST_HEAD(&lport->lport_list);
+ 
+ 		spin_lock_irqsave(&fcloop_lock, flags);
+ 		list_add_tail(&lport->lport_list, &fcloop_lports);
+ 		spin_unlock_irqrestore(&fcloop_lock, flags);
+-
+-		/* mark all of the input buffer consumed */
+-		ret = count;
+ 	}
+ 
+ out_free_opts:
+ 	kfree(opts);
++out_free_lport:
++	/* free only if we're going to fail */
++	if (ret)
++		kfree(lport);
++
+ 	return ret ? ret : count;
+ }
+ 
+@@ -790,6 +805,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
+ 
+ 	wait_for_completion(&lport->unreg_done);
+ 
++	kfree(lport);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 1e21b286f299..fdfcc961029f 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -686,6 +686,7 @@ static struct nvmet_fabrics_ops nvme_loop_ops = {
+ 
+ static struct nvmf_transport_ops nvme_loop_transport = {
+ 	.name		= "loop",
++	.module		= THIS_MODULE,
+ 	.create_ctrl	= nvme_loop_create_ctrl,
+ };
+ 
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index 9c1ca29c60b7..6b52ea1440a6 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -46,6 +46,9 @@
+ #define BYT_TRIG_POS		BIT(25)
+ #define BYT_TRIG_LVL		BIT(24)
+ #define BYT_DEBOUNCE_EN		BIT(20)
++#define BYT_GLITCH_FILTER_EN	BIT(19)
++#define BYT_GLITCH_F_SLOW_CLK	BIT(17)
++#define BYT_GLITCH_F_FAST_CLK	BIT(16)
+ #define BYT_PULL_STR_SHIFT	9
+ #define BYT_PULL_STR_MASK	(3 << BYT_PULL_STR_SHIFT)
+ #define BYT_PULL_STR_2K		(0 << BYT_PULL_STR_SHIFT)
+@@ -1579,6 +1582,9 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
+ 	 */
+ 	value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
+ 		   BYT_TRIG_LVL);
++	/* Enable glitch filtering */
++	value |= BYT_GLITCH_FILTER_EN | BYT_GLITCH_F_SLOW_CLK |
++		 BYT_GLITCH_F_FAST_CLK;
+ 
+ 	writel(value, reg);
+ 
+diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
+index d51ebd1da65e..9dc7590e07cb 100644
+--- a/drivers/power/supply/axp288_charger.c
++++ b/drivers/power/supply/axp288_charger.c
+@@ -785,6 +785,14 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
+ 	return 0;
+ }
+ 
++static void axp288_charger_cancel_work(void *data)
++{
++	struct axp288_chrg_info *info = data;
++
++	cancel_work_sync(&info->otg.work);
++	cancel_work_sync(&info->cable.work);
++}
++
+ static int axp288_charger_probe(struct platform_device *pdev)
+ {
+ 	int ret, i, pirq;
+@@ -836,6 +844,11 @@ static int axp288_charger_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	/* Cancel our work on cleanup, register this before the notifiers */
++	ret = devm_add_action(dev, axp288_charger_cancel_work, info);
++	if (ret)
++		return ret;
++
+ 	/* Register for extcon notification */
+ 	INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
+ 	info->cable.nb[0].notifier_call = axp288_charger_handle_cable0_evt;
+diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
+index 0e358d4b6738..8ff9dc3fe5bf 100644
+--- a/drivers/rtc/rtc-ac100.c
++++ b/drivers/rtc/rtc-ac100.c
+@@ -137,13 +137,15 @@ static unsigned long ac100_clkout_recalc_rate(struct clk_hw *hw,
+ 		div = (reg >> AC100_CLKOUT_PRE_DIV_SHIFT) &
+ 			((1 << AC100_CLKOUT_PRE_DIV_WIDTH) - 1);
+ 		prate = divider_recalc_rate(hw, prate, div,
+-					    ac100_clkout_prediv, 0);
++					    ac100_clkout_prediv, 0,
++					    AC100_CLKOUT_PRE_DIV_WIDTH);
+ 	}
+ 
+ 	div = (reg >> AC100_CLKOUT_DIV_SHIFT) &
+ 		(BIT(AC100_CLKOUT_DIV_WIDTH) - 1);
+ 	return divider_recalc_rate(hw, prate, div, NULL,
+-				   CLK_DIVIDER_POWER_OF_TWO);
++				   CLK_DIVIDER_POWER_OF_TWO,
++				   AC100_CLKOUT_DIV_WIDTH);
+ }
+ 
+ static long ac100_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 9c50d2d9f27c..785d1c55d152 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+ 		 */
+ 		switch (session->state) {
+ 		case ISCSI_STATE_FAILED:
++			/*
++			 * cmds should fail during shutdown if the session
++			 * state is bad, allowing completion to happen
++			 */
++			if (unlikely(system_state != SYSTEM_RUNNING)) {
++				reason = FAILURE_SESSION_FAILED;
++				sc->result = DID_NO_CONNECT << 16;
++				break;
++			}
+ 		case ISCSI_STATE_IN_RECOVERY:
+ 			reason = FAILURE_SESSION_IN_RECOVERY;
+ 			sc->result = DID_IMM_RETRY << 16;
+@@ -1978,6 +1987,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+ 	}
+ 
+ 	if (session->state != ISCSI_STATE_LOGGED_IN) {
++		/*
++		 * During shutdown, if the session is prematurely disconnected,
++		 * recovery won't happen and there will be hung cmds. Not
++		 * handling cmds would trigger EH, which is also bad in this
++		 * case. Instead, handle the cmd, allow completion to happen,
++		 * and let the upper layer deal with the result.
++		 */
++		if (unlikely(system_state != SYSTEM_RUNNING)) {
++			sc->result = DID_NO_CONNECT << 16;
++			ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
++			rc = BLK_EH_HANDLED;
++			goto done;
++		}
+ 		/*
+ 		 * We are probably in the middle of iscsi recovery so let
+ 		 * that complete and handle the error.
+@@ -2082,7 +2104,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+ 		task->last_timeout = jiffies;
+ 	spin_unlock(&session->frwd_lock);
+ 	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
+-		     "timer reset" : "nh");
++		     "timer reset" : "shutdown or nh");
+ 	return rc;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
+diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
+index 0bb9eefc08c8..5d7254aa2dd2 100644
+--- a/drivers/scsi/libsas/sas_event.c
++++ b/drivers/scsi/libsas/sas_event.c
+@@ -29,7 +29,8 @@
+ 
+ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
+ {
+-	int rc = 0;
++	/* it's added to the defer_q when draining, so return success */
++	int rc = 1;
+ 
+ 	if (!test_bit(SAS_HA_REGISTERED, &ha->state))
+ 		return 0;
+@@ -44,19 +45,15 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
+ 	return rc;
+ }
+ 
+-static int sas_queue_event(int event, unsigned long *pending,
+-			    struct sas_work *work,
++static int sas_queue_event(int event, struct sas_work *work,
+ 			    struct sas_ha_struct *ha)
+ {
+-	int rc = 0;
++	unsigned long flags;
++	int rc;
+ 
+-	if (!test_and_set_bit(event, pending)) {
+-		unsigned long flags;
+-
+-		spin_lock_irqsave(&ha->lock, flags);
+-		rc = sas_queue_work(ha, work);
+-		spin_unlock_irqrestore(&ha->lock, flags);
+-	}
++	spin_lock_irqsave(&ha->lock, flags);
++	rc = sas_queue_work(ha, work);
++	spin_unlock_irqrestore(&ha->lock, flags);
+ 
+ 	return rc;
+ }
+@@ -66,6 +63,7 @@ void __sas_drain_work(struct sas_ha_struct *ha)
+ {
+ 	struct workqueue_struct *wq = ha->core.shost->work_q;
+ 	struct sas_work *sw, *_sw;
++	int ret;
+ 
+ 	set_bit(SAS_HA_DRAINING, &ha->state);
+ 	/* flush submitters */
+@@ -78,7 +76,10 @@ void __sas_drain_work(struct sas_ha_struct *ha)
+ 	clear_bit(SAS_HA_DRAINING, &ha->state);
+ 	list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
+ 		list_del_init(&sw->drain_node);
+-		sas_queue_work(ha, sw);
++		ret = sas_queue_work(ha, sw);
++		if (ret != 1)
++			sas_free_event(to_asd_sas_event(&sw->work));
++
+ 	}
+ 	spin_unlock_irq(&ha->lock);
+ }
+@@ -119,29 +120,68 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
+ 		if (!test_and_clear_bit(ev, &d->pending))
+ 			continue;
+ 
+-		sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
++		sas_queue_event(ev, &d->disc_work[ev].work, ha);
+ 	}
+ 	mutex_unlock(&ha->disco_mutex);
+ }
+ 
++
++static void sas_port_event_worker(struct work_struct *work)
++{
++	struct asd_sas_event *ev = to_asd_sas_event(work);
++
++	sas_port_event_fns[ev->event](work);
++	sas_free_event(ev);
++}
++
++static void sas_phy_event_worker(struct work_struct *work)
++{
++	struct asd_sas_event *ev = to_asd_sas_event(work);
++
++	sas_phy_event_fns[ev->event](work);
++	sas_free_event(ev);
++}
++
+ static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+ {
++	struct asd_sas_event *ev;
+ 	struct sas_ha_struct *ha = phy->ha;
++	int ret;
+ 
+ 	BUG_ON(event >= PORT_NUM_EVENTS);
+ 
+-	return sas_queue_event(event, &phy->port_events_pending,
+-			       &phy->port_events[event].work, ha);
++	ev = sas_alloc_event(phy);
++	if (!ev)
++		return -ENOMEM;
++
++	INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
++
++	ret = sas_queue_event(event, &ev->work, ha);
++	if (ret != 1)
++		sas_free_event(ev);
++
++	return ret;
+ }
+ 
+ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+ {
++	struct asd_sas_event *ev;
+ 	struct sas_ha_struct *ha = phy->ha;
++	int ret;
+ 
+ 	BUG_ON(event >= PHY_NUM_EVENTS);
+ 
+-	return sas_queue_event(event, &phy->phy_events_pending,
+-			       &phy->phy_events[event].work, ha);
++	ev = sas_alloc_event(phy);
++	if (!ev)
++		return -ENOMEM;
++
++	INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
++
++	ret = sas_queue_event(event, &ev->work, ha);
++	if (ret != 1)
++		sas_free_event(ev);
++
++	return ret;
+ }
+ 
+ int sas_init_events(struct sas_ha_struct *sas_ha)
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index 3183d63de4da..39e42744aa33 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
+ 	phy->phy->minimum_linkrate = dr->pmin_linkrate;
+ 	phy->phy->maximum_linkrate = dr->pmax_linkrate;
+ 	phy->phy->negotiated_linkrate = phy->linkrate;
++	phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
+ 
+  skip:
+ 	if (new_phy)
+@@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
+ 	res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
+ 			            resp, RPEL_RESP_SIZE);
+ 
+-	if (!res)
++	if (res)
+ 		goto out;
+ 
+ 	phy->invalid_dword_count = scsi_to_u32(&resp[12]);
+@@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
+ 	phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
+ 
+  out:
++	kfree(req);
+ 	kfree(resp);
+ 	return res;
+ 
+diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
+index 64fa6f53cb8b..e04f6d6f5aff 100644
+--- a/drivers/scsi/libsas/sas_init.c
++++ b/drivers/scsi/libsas/sas_init.c
+@@ -39,6 +39,7 @@
+ #include "../scsi_sas_internal.h"
+ 
+ static struct kmem_cache *sas_task_cache;
++static struct kmem_cache *sas_event_cache;
+ 
+ struct sas_task *sas_alloc_task(gfp_t flags)
+ {
+@@ -364,8 +365,6 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
+ 		struct asd_sas_phy *phy = ha->sas_phy[i];
+ 
+ 		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+-		phy->port_events_pending = 0;
+-		phy->phy_events_pending = 0;
+ 		phy->frame_rcvd_size = 0;
+ 	}
+ }
+@@ -555,20 +554,42 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
+ }
+ EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
+ 
++
++struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
++{
++	gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
++
++	return kmem_cache_zalloc(sas_event_cache, flags);
++}
++
++void sas_free_event(struct asd_sas_event *event)
++{
++	kmem_cache_free(sas_event_cache, event);
++}
++
+ /* ---------- SAS Class register/unregister ---------- */
+ 
+ static int __init sas_class_init(void)
+ {
+ 	sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
+ 	if (!sas_task_cache)
+-		return -ENOMEM;
++		goto out;
++
++	sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
++	if (!sas_event_cache)
++		goto free_task_kmem;
+ 
+ 	return 0;
++free_task_kmem:
++	kmem_cache_destroy(sas_task_cache);
++out:
++	return -ENOMEM;
+ }
+ 
+ static void __exit sas_class_exit(void)
+ {
+ 	kmem_cache_destroy(sas_task_cache);
++	kmem_cache_destroy(sas_event_cache);
+ }
+ 
+ MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
+diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
+index c07e08136491..d8826a747690 100644
+--- a/drivers/scsi/libsas/sas_internal.h
++++ b/drivers/scsi/libsas/sas_internal.h
+@@ -61,6 +61,9 @@ int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
+ int  sas_register_phys(struct sas_ha_struct *sas_ha);
+ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
+ 
++struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
++void sas_free_event(struct asd_sas_event *event);
++
+ int  sas_register_ports(struct sas_ha_struct *sas_ha);
+ void sas_unregister_ports(struct sas_ha_struct *sas_ha);
+ 
+@@ -99,6 +102,9 @@ void sas_hae_reset(struct work_struct *work);
+ 
+ void sas_free_device(struct kref *kref);
+ 
++extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
++extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];
++
+ #ifdef CONFIG_SCSI_SAS_HOST_SMP
+ extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
+ #else
+diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
+index cdee446c29e1..59f82929b0a3 100644
+--- a/drivers/scsi/libsas/sas_phy.c
++++ b/drivers/scsi/libsas/sas_phy.c
+@@ -35,7 +35,6 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
+ 	struct asd_sas_event *ev = to_asd_sas_event(work);
+ 	struct asd_sas_phy *phy = ev->phy;
+ 
+-	clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
+ 	phy->error = 0;
+ 	sas_deform_port(phy, 1);
+ }
+@@ -45,7 +44,6 @@ static void sas_phye_oob_done(struct work_struct *work)
+ 	struct asd_sas_event *ev = to_asd_sas_event(work);
+ 	struct asd_sas_phy *phy = ev->phy;
+ 
+-	clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
+ 	phy->error = 0;
+ }
+ 
+@@ -58,8 +56,6 @@ static void sas_phye_oob_error(struct work_struct *work)
+ 	struct sas_internal *i =
+ 		to_sas_internal(sas_ha->core.shost->transportt);
+ 
+-	clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
+-
+ 	sas_deform_port(phy, 1);
+ 
+ 	if (!port && phy->enabled && i->dft->lldd_control_phy) {
+@@ -88,8 +84,6 @@ static void sas_phye_spinup_hold(struct work_struct *work)
+ 	struct sas_internal *i =
+ 		to_sas_internal(sas_ha->core.shost->transportt);
+ 
+-	clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
+-
+ 	phy->error = 0;
+ 	i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
+ }
+@@ -99,8 +93,6 @@ static void sas_phye_resume_timeout(struct work_struct *work)
+ 	struct asd_sas_event *ev = to_asd_sas_event(work);
+ 	struct asd_sas_phy *phy = ev->phy;
+ 
+-	clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
+-
+ 	/* phew, lldd got the phy back in the nick of time */
+ 	if (!phy->suspended) {
+ 		dev_info(&phy->phy->dev, "resume timeout cancelled\n");
+@@ -119,39 +111,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
+ {
+ 	int i;
+ 
+-	static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
+-		[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
+-		[PHYE_OOB_DONE] = sas_phye_oob_done,
+-		[PHYE_OOB_ERROR] = sas_phye_oob_error,
+-		[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
+-		[PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
+-
+-	};
+-
+-	static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
+-		[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
+-		[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
+-		[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
+-		[PORTE_TIMER_EVENT] = sas_porte_timer_event,
+-		[PORTE_HARD_RESET] = sas_porte_hard_reset,
+-	};
+-
+ 	/* Now register the phys. */
+ 	for (i = 0; i < sas_ha->num_phys; i++) {
+-		int k;
+ 		struct asd_sas_phy *phy = sas_ha->sas_phy[i];
+ 
+ 		phy->error = 0;
+ 		INIT_LIST_HEAD(&phy->port_phy_el);
+-		for (k = 0; k < PORT_NUM_EVENTS; k++) {
+-			INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
+-			phy->port_events[k].phy = phy;
+-		}
+-
+-		for (k = 0; k < PHY_NUM_EVENTS; k++) {
+-			INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
+-			phy->phy_events[k].phy = phy;
+-		}
+ 
+ 		phy->port = NULL;
+ 		phy->ha = sas_ha;
+@@ -179,3 +144,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
+ 
+ 	return 0;
+ }
++
++const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
++	[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
++	[PHYE_OOB_DONE] = sas_phye_oob_done,
++	[PHYE_OOB_ERROR] = sas_phye_oob_error,
++	[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
++	[PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
++
++};
+diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
+index d3c5297c6c89..93266283f51f 100644
+--- a/drivers/scsi/libsas/sas_port.c
++++ b/drivers/scsi/libsas/sas_port.c
+@@ -261,8 +261,6 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
+ 	struct asd_sas_event *ev = to_asd_sas_event(work);
+ 	struct asd_sas_phy *phy = ev->phy;
+ 
+-	clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
+-
+ 	sas_form_port(phy);
+ }
+ 
+@@ -273,8 +271,6 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
+ 	unsigned long flags;
+ 	u32 prim;
+ 
+-	clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
+-
+ 	spin_lock_irqsave(&phy->sas_prim_lock, flags);
+ 	prim = phy->sas_prim;
+ 	spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
+@@ -288,8 +284,6 @@ void sas_porte_link_reset_err(struct work_struct *work)
+ 	struct asd_sas_event *ev = to_asd_sas_event(work);
+ 	struct asd_sas_phy *phy = ev->phy;
+ 
+-	clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
+-
+ 	sas_deform_port(phy, 1);
+ }
+ 
+@@ -298,8 +292,6 @@ void sas_porte_timer_event(struct work_struct *work)
+ 	struct asd_sas_event *ev = to_asd_sas_event(work);
+ 	struct asd_sas_phy *phy = ev->phy;
+ 
+-	clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
+-
+ 	sas_deform_port(phy, 1);
+ }
+ 
+@@ -308,8 +300,6 @@ void sas_porte_hard_reset(struct work_struct *work)
+ 	struct asd_sas_event *ev = to_asd_sas_event(work);
+ 	struct asd_sas_phy *phy = ev->phy;
+ 
+-	clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
+-
+ 	sas_deform_port(phy, 1);
+ }
+ 
+@@ -353,3 +343,11 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
+ 			sas_deform_port(sas_ha->sas_phy[i], 0);
+ 
+ }
++
++const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
++	[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
++	[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
++	[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
++	[PORTE_TIMER_EVENT] = sas_porte_timer_event,
++	[PORTE_HARD_RESET] = sas_porte_hard_reset,
++};
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index cc54bdb5c712..d4129469a73c 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -6822,7 +6822,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
+ 	u32 pd_seq_map_sz;
+ 
+ 	instance = pci_get_drvdata(pdev);
+-	instance->unload = 1;
+ 	host = instance->host;
+ 	fusion = instance->ctrl_context;
+ 
+@@ -6833,6 +6832,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
+ 	if (instance->fw_crash_state != UNAVAILABLE)
+ 		megasas_free_host_crash_buffer(instance);
+ 	scsi_remove_host(instance->host);
++	instance->unload = 1;
+ 
+ 	if (megasas_wait_for_adapter_operational(instance))
+ 		goto skip_firing_dcmds;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
+index bfad9bfc313f..f2ffde430ec1 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
+@@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
+ /*
+  * This function will Populate Driver Map using firmware raid map
+  */
+-void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
++static int MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+ {
+ 	struct fusion_context *fusion = instance->ctrl_context;
+ 	struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
+@@ -259,7 +259,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+ 		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
+ 		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
+ 			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
+-			return;
++			return 1;
+ 		}
+ 
+ 		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+@@ -285,6 +285,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+ 			fusion->ld_map[(instance->map_id & 1)];
+ 		pFwRaidMap = &fw_map_old->raidMap;
+ 		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
++		if (ld_count > MAX_LOGICAL_DRIVES) {
++			dev_dbg(&instance->pdev->dev,
++				"LD count exposed in RAID map is not valid\n");
++			return 1;
++		}
++
+ 		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+ 		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+ 		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
+@@ -300,6 +306,8 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+ 			sizeof(struct MR_DEV_HANDLE_INFO) *
+ 			MAX_RAIDMAP_PHYSICAL_DEVICES);
+ 	}
++
++	return 0;
+ }
+ 
+ /*
+@@ -317,8 +325,8 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+ 	u16 ld;
+ 	u32 expected_size;
+ 
+-
+-	MR_PopulateDrvRaidMap(instance);
++	if (MR_PopulateDrvRaidMap(instance))
++		return 0;
+ 
+ 	fusion = instance->ctrl_context;
+ 	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 741b0a28c2e3..fecc19eb1d25 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -4761,19 +4761,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ 		return 0;
+ 	}
+ 
+-	/*
+-	 * Bug work around for firmware SATL handling.  The loop
+-	 * is based on atomic operations and ensures consistency
+-	 * since we're lockless at this point
+-	 */
+-	do {
+-		if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+-			scmd->result = SAM_STAT_BUSY;
+-			scmd->scsi_done(scmd);
+-			return 0;
+-		}
+-	} while (_scsih_set_satl_pending(scmd, true));
+-
+ 	sas_target_priv_data = sas_device_priv_data->sas_target;
+ 
+ 	/* invalid device handle */
+@@ -4799,6 +4786,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ 	    sas_device_priv_data->block)
+ 		return SCSI_MLQUEUE_DEVICE_BUSY;
+ 
++	/*
++	 * Bug work around for firmware SATL handling.  The loop
++	 * is based on atomic operations and ensures consistency
++	 * since we're lockless at this point
++	 */
++	do {
++		if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
++			scmd->result = SAM_STAT_BUSY;
++			scmd->scsi_done(scmd);
++			return 0;
++		}
++	} while (_scsih_set_satl_pending(scmd, true));
++
+ 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
+ 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
+@@ -4826,6 +4826,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ 	if (!smid) {
+ 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ 		    ioc->name, __func__);
++		_scsih_set_satl_pending(scmd, false);
+ 		goto out;
+ 	}
+ 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+@@ -4857,6 +4858,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ 		pcie_device = sas_target_priv_data->pcie_dev;
+ 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
+ 			mpt3sas_base_free_smid(ioc, smid);
++			_scsih_set_satl_pending(scmd, false);
+ 			goto out;
+ 		}
+ 	} else
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index bf34e9b238af..e83e93dc0859 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -797,11 +797,21 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
+ 		goto stop_dma;
+ 	}
+ 
+-	/* wait for tx fifo to be emptied / rx fifo to be filled */
++	/* wait for tx/rx DMA completion */
+ 	ret = sh_msiof_wait_for_completion(p);
+ 	if (ret)
+ 		goto stop_reset;
+ 
++	if (!rx) {
++		reinit_completion(&p->done);
++		sh_msiof_write(p, IER, IER_TEOFE);
++
++		/* wait for tx fifo to be emptied */
++		ret = sh_msiof_wait_for_completion(p);
++		if (ret)
++			goto stop_reset;
++	}
++
+ 	/* clear status bits */
+ 	sh_msiof_reset_str(p);
+ 
+diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+index 51823ce71773..a013f7eb208f 100644
+--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
++++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+@@ -529,19 +529,20 @@ EXPORT_SYMBOL(cfs_cpt_spread_node);
+ int
+ cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
+ {
+-	int cpu = smp_processor_id();
+-	int cpt = cptab->ctb_cpu2cpt[cpu];
++	int cpu;
++	int cpt;
+ 
+-	if (cpt < 0) {
+-		if (!remap)
+-			return cpt;
++	preempt_disable();
++	cpu = smp_processor_id();
++	cpt = cptab->ctb_cpu2cpt[cpu];
+ 
++	if (cpt < 0 && remap) {
+ 		/* don't return negative value for safety of upper layer,
+ 		 * instead we shadow the unknown cpu to a valid partition ID
+ 		 */
+ 		cpt = cpu % cptab->ctb_nparts;
+ 	}
+-
++	preempt_enable();
+ 	return cpt;
+ }
+ EXPORT_SYMBOL(cfs_cpt_current);
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index a415d87f22d2..3ab96d0f705e 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -805,6 +805,13 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+ 		int ret;
+ 		DEFINE_WAIT(__wait);
+ 
++		/*
++		 * Don't leave commands partially set up because the unmap
++		 * thread might need the blocks to make forward progress.
++		 */
++		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
++		tcmu_cmd_reset_dbi_cur(tcmu_cmd);
++
+ 		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);
+ 
+ 		pr_debug("sleeping for ring space\n");
+diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
+index 2d855a96cdd9..761d0559c268 100644
+--- a/drivers/thermal/hisi_thermal.c
++++ b/drivers/thermal/hisi_thermal.c
+@@ -527,7 +527,7 @@ static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor,
+ static int hisi_thermal_probe(struct platform_device *pdev)
+ {
+ 	struct hisi_thermal_data *data;
+-	int const (*platform_probe)(struct hisi_thermal_data *);
++	int (*platform_probe)(struct hisi_thermal_data *);
+ 	struct device *dev = &pdev->dev;
+ 	int ret;
+ 
+diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
+index 8ee38f55c7f3..43b90fd577e4 100644
+--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
++++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
+@@ -319,17 +319,21 @@ static int int3400_thermal_probe(struct platform_device *pdev)
+ 
+ 	result = sysfs_create_group(&pdev->dev.kobj, &uuid_attribute_group);
+ 	if (result)
+-		goto free_zone;
++		goto free_rel_misc;
+ 
+ 	result = acpi_install_notify_handler(
+ 			priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify,
+ 			(void *)priv);
+ 	if (result)
+-		goto free_zone;
++		goto free_sysfs;
+ 
+ 	return 0;
+ 
+-free_zone:
++free_sysfs:
++	sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
++free_rel_misc:
++	if (!priv->rel_misc_dev_res)
++		acpi_thermal_rel_misc_device_remove(priv->adev->handle);
+ 	thermal_zone_device_unregister(priv->thermal);
+ free_art_trt:
+ 	kfree(priv->trts);
+diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
+index b4d3116cfdaf..3055f9a12a17 100644
+--- a/drivers/thermal/power_allocator.c
++++ b/drivers/thermal/power_allocator.c
+@@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
+ 	struct thermal_instance *instance;
+ 	struct power_allocator_params *params = tz->governor_data;
+ 
++	mutex_lock(&tz->lock);
+ 	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ 		if ((instance->trip != params->trip_max_desired_temperature) ||
+ 		    (!cdev_is_power_actor(instance->cdev)))
+@@ -534,6 +535,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
+ 		mutex_unlock(&instance->cdev->lock);
+ 		thermal_cdev_update(instance->cdev);
+ 	}
++	mutex_unlock(&tz->lock);
+ }
+ 
+ /**
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 5131bdc9e765..db33fc50bfaa 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -1451,6 +1451,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
+  *	in which case an opening port goes back to closed and a closing port
+  *	is simply put into closed state (any further frames from the other
+  *	end will get a DM response)
++ *
++ *	Some control dlci can stay in ADM mode with other dlci working just
++ *	fine. In that case we can just keep the control dlci open after the
++ *	DLCI_OPENING retries time out.
+  */
+ 
+ static void gsm_dlci_t1(struct timer_list *t)
+@@ -1464,8 +1468,15 @@ static void gsm_dlci_t1(struct timer_list *t)
+ 		if (dlci->retries) {
+ 			gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ 			mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+-		} else
++		} else if (!dlci->addr && gsm->control == (DM | PF)) {
++			if (debug & 8)
++				pr_info("DLCI %d opening in ADM mode.\n",
++					dlci->addr);
++			gsm_dlci_open(dlci);
++		} else {
+ 			gsm_dlci_close(dlci);
++		}
++
+ 		break;
+ 	case DLCI_CLOSING:
+ 		dlci->retries--;
+@@ -1483,8 +1494,8 @@ static void gsm_dlci_t1(struct timer_list *t)
+  *	@dlci: DLCI to open
+  *
+  *	Commence opening a DLCI from the Linux side. We issue SABM messages
+- *	to the modem which should then reply with a UA, at which point we
+- *	will move into open state. Opening is done asynchronously with retry
++ *	to the modem which should then reply with a UA or ADM, at which point
++ *	we will move into open state. Opening is done asynchronously with retry
+  *	running off timers and the responses.
+  */
+ 
+diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
+index 1bef39828ca7..571ce1f69d8d 100644
+--- a/drivers/tty/serdev/core.c
++++ b/drivers/tty/serdev/core.c
+@@ -54,6 +54,11 @@ static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 	int rc;
+ 
+ 	/* TODO: platform modalias */
++
++	/* ACPI enumerated controllers do not have a modalias */
++	if (!dev->of_node && dev->type == &serdev_ctrl_type)
++		return 0;
++
+ 	rc = acpi_device_uevent_modalias(dev, env);
+ 	if (rc != -ENODEV)
+ 		return rc;
+diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
+index 48d5327d38d4..fe5cdda80b2c 100644
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -124,6 +124,13 @@ hv_uio_probe(struct hv_device *dev,
+ 	if (ret)
+ 		goto fail;
+ 
++	/* Communicating with the host has to be via shared memory, not hypercall */
++	if (!dev->channel->offermsg.monitor_allocated) {
++		dev_err(&dev->device, "vmbus channel requires hypercall\n");
++		ret = -ENOTSUPP;
++		goto fail_close;
++	}
++
+ 	dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+ 	set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
+ 
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 5636c7ca8eba..0020ae906bf9 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -618,7 +618,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+ 
+ 	if (!len && vq->busyloop_timeout) {
+ 		/* Both tx vq and rx socket were polled here */
+-		mutex_lock(&vq->mutex);
++		mutex_lock_nested(&vq->mutex, 1);
+ 		vhost_disable_notify(&net->dev, vq);
+ 
+ 		preempt_disable();
+@@ -751,7 +751,7 @@ static void handle_rx(struct vhost_net *net)
+ 	struct iov_iter fixup;
+ 	__virtio16 num_buffers;
+ 
+-	mutex_lock(&vq->mutex);
++	mutex_lock_nested(&vq->mutex, 0);
+ 	sock = vq->private_data;
+ 	if (!sock)
+ 		goto out;
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 5727b186b3ca..a5622a8364cb 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -213,8 +213,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
+ 	if (mask)
+ 		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+ 	if (mask & POLLERR) {
+-		if (poll->wqh)
+-			remove_wait_queue(poll->wqh, &poll->wait);
++		vhost_poll_stop(poll);
+ 		ret = -EINVAL;
+ 	}
+ 
+@@ -1257,14 +1256,12 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
+ /* Caller should have vq mutex and device mutex */
+ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
+ {
+-	if (vq->iotlb) {
+-		/* When device IOTLB was used, the access validation
+-		 * will be validated during prefetching.
+-		 */
+-		return 1;
+-	}
+-	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
+-		vq_log_access_ok(vq, vq->log_base);
++	int ret = vq_log_access_ok(vq, vq->log_base);
++
++	if (ret || vq->iotlb)
++		return ret;
++
++	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
+ }
+ EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
+ 
+diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
+index d7c239ea3d09..f5574060f9c8 100644
+--- a/drivers/video/backlight/corgi_lcd.c
++++ b/drivers/video/backlight/corgi_lcd.c
+@@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data)
+ 	struct spi_message msg;
+ 	struct spi_transfer xfer = {
+ 		.len		= 1,
+-		.cs_change	= 1,
++		.cs_change	= 0,
+ 		.tx_buf		= lcd->buf,
+ 	};
+ 
+diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
+index eab1f842f9c0..e4bd63e9db6b 100644
+--- a/drivers/video/backlight/tdo24m.c
++++ b/drivers/video/backlight/tdo24m.c
+@@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi)
+ 
+ 	spi_message_init(m);
+ 
+-	x->cs_change = 1;
++	x->cs_change = 0;
+ 	x->tx_buf = &lcd->buf[0];
+ 	spi_message_add_tail(x, m);
+ 
+diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
+index 6a41ea92737a..4dc5ee8debeb 100644
+--- a/drivers/video/backlight/tosa_lcd.c
++++ b/drivers/video/backlight/tosa_lcd.c
+@@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
+ 	struct spi_message msg;
+ 	struct spi_transfer xfer = {
+ 		.len		= 1,
+-		.cs_change	= 1,
++		.cs_change	= 0,
+ 		.tx_buf		= buf,
+ 	};
+ 
+diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
+index da653a080394..54127905bfe7 100644
+--- a/drivers/video/fbdev/vfb.c
++++ b/drivers/video/fbdev/vfb.c
+@@ -239,8 +239,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
+  */
+ static int vfb_set_par(struct fb_info *info)
+ {
++	switch (info->var.bits_per_pixel) {
++	case 1:
++		info->fix.visual = FB_VISUAL_MONO01;
++		break;
++	case 8:
++		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
++		break;
++	case 16:
++	case 24:
++	case 32:
++		info->fix.visual = FB_VISUAL_TRUECOLOR;
++		break;
++	}
++
+ 	info->fix.line_length = get_line_length(info->var.xres_virtual,
+ 						info->var.bits_per_pixel);
++
+ 	return 0;
+ }
+ 
+@@ -450,6 +465,8 @@ static int vfb_probe(struct platform_device *dev)
+ 		goto err2;
+ 	platform_set_drvdata(dev, info);
+ 
++	vfb_set_par(info);
++
+ 	fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
+ 		videomemorysize >> 10);
+ 	return 0;
+diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
+index 36be987ff9ef..c2f4ff516230 100644
+--- a/drivers/watchdog/dw_wdt.c
++++ b/drivers/watchdog/dw_wdt.c
+@@ -127,14 +127,27 @@ static int dw_wdt_start(struct watchdog_device *wdd)
+ 
+ 	dw_wdt_set_timeout(wdd, wdd->timeout);
+ 
+-	set_bit(WDOG_HW_RUNNING, &wdd->status);
+-
+ 	writel(WDOG_CONTROL_REG_WDT_EN_MASK,
+ 	       dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+ 
+ 	return 0;
+ }
+ 
++static int dw_wdt_stop(struct watchdog_device *wdd)
++{
++	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
++
++	if (!dw_wdt->rst) {
++		set_bit(WDOG_HW_RUNNING, &wdd->status);
++		return 0;
++	}
++
++	reset_control_assert(dw_wdt->rst);
++	reset_control_deassert(dw_wdt->rst);
++
++	return 0;
++}
++
+ static int dw_wdt_restart(struct watchdog_device *wdd,
+ 			  unsigned long action, void *data)
+ {
+@@ -173,6 +186,7 @@ static const struct watchdog_info dw_wdt_ident = {
+ static const struct watchdog_ops dw_wdt_ops = {
+ 	.owner		= THIS_MODULE,
+ 	.start		= dw_wdt_start,
++	.stop		= dw_wdt_stop,
+ 	.ping		= dw_wdt_ping,
+ 	.set_timeout	= dw_wdt_set_timeout,
+ 	.get_timeleft	= dw_wdt_get_timeleft,
+diff --git a/fs/dcache.c b/fs/dcache.c
+index eb2c297a87d0..485d9d158429 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -468,9 +468,11 @@ static void dentry_lru_add(struct dentry *dentry)
+  * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
+  * reason (NFS timeouts or autofs deletes).
+  *
+- * __d_drop requires dentry->d_lock.
++ * __d_drop requires dentry->d_lock
++ * ___d_drop doesn't mark dentry as "unhashed"
++ *   (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
+  */
+-void __d_drop(struct dentry *dentry)
++static void ___d_drop(struct dentry *dentry)
+ {
+ 	if (!d_unhashed(dentry)) {
+ 		struct hlist_bl_head *b;
+@@ -486,12 +488,17 @@ void __d_drop(struct dentry *dentry)
+ 
+ 		hlist_bl_lock(b);
+ 		__hlist_bl_del(&dentry->d_hash);
+-		dentry->d_hash.pprev = NULL;
+ 		hlist_bl_unlock(b);
+ 		/* After this call, in-progress rcu-walk path lookup will fail. */
+ 		write_seqcount_invalidate(&dentry->d_seq);
+ 	}
+ }
++
++void __d_drop(struct dentry *dentry)
++{
++	___d_drop(dentry);
++	dentry->d_hash.pprev = NULL;
++}
+ EXPORT_SYMBOL(__d_drop);
+ 
+ void d_drop(struct dentry *dentry)
+@@ -2386,7 +2393,7 @@ EXPORT_SYMBOL(d_delete);
+ static void __d_rehash(struct dentry *entry)
+ {
+ 	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
+-	BUG_ON(!d_unhashed(entry));
++
+ 	hlist_bl_lock(b);
+ 	hlist_bl_add_head_rcu(&entry->d_hash, b);
+ 	hlist_bl_unlock(b);
+@@ -2821,9 +2828,9 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
+ 	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
+ 
+ 	/* unhash both */
+-	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
+-	__d_drop(dentry);
+-	__d_drop(target);
++	/* ___d_drop does write_seqcount_barrier, but they're OK to nest. */
++	___d_drop(dentry);
++	___d_drop(target);
+ 
+ 	/* Switch the names.. */
+ 	if (exchange)
+@@ -2835,6 +2842,8 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
+ 	__d_rehash(dentry);
+ 	if (exchange)
+ 		__d_rehash(target);
++	else
++		target->d_hash.pprev = NULL;
+ 
+ 	/* ... and switch them in the tree */
+ 	if (IS_ROOT(dentry)) {
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 7874bbd7311d..84a011a522a1 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1186,14 +1186,14 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+ 	pg_start = offset >> PAGE_SHIFT;
+ 	pg_end = (offset + len) >> PAGE_SHIFT;
+ 
++	/* avoid gc operation during block exchange */
++	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
++
+ 	down_write(&F2FS_I(inode)->i_mmap_sem);
+ 	/* write out all dirty pages from offset */
+ 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+ 	if (ret)
+-		goto out;
+-
+-	/* avoid gc operation during block exchange */
+-	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
++		goto out_unlock;
+ 
+ 	truncate_pagecache(inode, offset);
+ 
+@@ -1212,9 +1212,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+ 	if (!ret)
+ 		f2fs_i_size_write(inode, new_size);
+ out_unlock:
+-	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+-out:
+ 	up_write(&F2FS_I(inode)->i_mmap_sem);
++	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+ 	return ret;
+ }
+ 
+@@ -1385,6 +1384,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
+ 
+ 	f2fs_balance_fs(sbi, true);
+ 
++	/* avoid gc operation during block exchange */
++	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
++
+ 	down_write(&F2FS_I(inode)->i_mmap_sem);
+ 	ret = truncate_blocks(inode, i_size_read(inode), true);
+ 	if (ret)
+@@ -1395,9 +1397,6 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
+ 	if (ret)
+ 		goto out;
+ 
+-	/* avoid gc operation during block exchange */
+-	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+-
+ 	truncate_pagecache(inode, offset);
+ 
+ 	pg_start = offset >> PAGE_SHIFT;
+@@ -1425,10 +1424,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
+ 
+ 	if (!ret)
+ 		f2fs_i_size_write(inode, new_size);
+-
+-	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+ out:
+ 	up_write(&F2FS_I(inode)->i_mmap_sem);
++	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+ 	return ret;
+ }
+ 
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index 7c925e6211f1..48171b349b88 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -412,7 +412,7 @@ extern const struct clk_ops clk_divider_ro_ops;
+ 
+ unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+ 		unsigned int val, const struct clk_div_table *table,
+-		unsigned long flags);
++		unsigned long flags, unsigned long width);
+ long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
+ 			       unsigned long rate, unsigned long *prate,
+ 			       const struct clk_div_table *table,
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index c8198ed8b180..907adbf99a9c 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -826,7 +826,7 @@ struct mlx5_core_dev {
+ 	struct mlx5e_resources  mlx5e_res;
+ 	struct {
+ 		struct mlx5_rsvd_gids	reserved_gids;
+-		atomic_t                roce_en;
++		u32			roce_en;
+ 	} roce;
+ #ifdef CONFIG_MLX5_FPGA
+ 	struct mlx5_fpga_device *fpga;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index ef789e1d679e..ddb1fc7bd938 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -4402,8 +4402,8 @@ do {								\
+ 	WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),	\
+ 	     netdev_reg_state(dev), ##args)
+ 
+-#define netdev_WARN_ONCE(dev, condition, format, arg...)		\
+-	WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev)	\
++#define netdev_WARN_ONCE(dev, format, args...)				\
++	WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev),	\
+ 		  netdev_reg_state(dev), ##args)
+ 
+ /* netif printk helpers, similar to netdev_printk */
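The netdev_WARN_ONCE() fix is two tokens: a comma after netdev_name(dev) and the parameter spelled args... so that ##args matches it. A compilable sketch of the same GNU named-variadic-macro shape (names made up, not the kernel's):

#include <stdio.h>

#define my_warn_once(dev, format, args...) \
    fprintf(stderr, "netdevice: %s\n" format, (dev), ##args)

int main(void)
{
    my_warn_once("eth0", "queue %d stalled\n", 3);
    my_warn_once("eth0", "reset\n");  /* ##args swallows the trailing comma */
    return 0;
}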
+diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
+index 6df6fe0c2198..61c84d536a7e 100644
+--- a/include/scsi/libsas.h
++++ b/include/scsi/libsas.h
+@@ -292,6 +292,7 @@ struct asd_sas_port {
+ struct asd_sas_event {
+ 	struct sas_work work;
+ 	struct asd_sas_phy *phy;
++	int event;
+ };
+ 
+ static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
+@@ -301,17 +302,21 @@ static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
+ 	return ev;
+ }
+ 
++static inline void INIT_SAS_EVENT(struct asd_sas_event *ev,
++		void (*fn)(struct work_struct *),
++		struct asd_sas_phy *phy, int event)
++{
++	INIT_SAS_WORK(&ev->work, fn);
++	ev->phy = phy;
++	ev->event = event;
++}
++
++
+ /* The phy pretty much is controlled by the LLDD.
+  * The class only reads those fields.
+  */
+ struct asd_sas_phy {
+ /* private: */
+-	struct asd_sas_event   port_events[PORT_NUM_EVENTS];
+-	struct asd_sas_event   phy_events[PHY_NUM_EVENTS];
+-
+-	unsigned long port_events_pending;
+-	unsigned long phy_events_pending;
+-
+ 	int error;
+ 	int suspended;
+ 
+diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
+index a33e0517d3fd..062d14f07b61 100644
+--- a/include/uapi/rdma/mlx5-abi.h
++++ b/include/uapi/rdma/mlx5-abi.h
+@@ -307,7 +307,7 @@ enum mlx5_rx_hash_fields {
+ 	MLX5_RX_HASH_SRC_PORT_UDP	= 1 << 6,
+ 	MLX5_RX_HASH_DST_PORT_UDP	= 1 << 7,
+ 	/* Save bits for future fields */
+-	MLX5_RX_HASH_INNER		= 1 << 31
++	MLX5_RX_HASH_INNER		= (1UL << 31),
+ };
+ 
+ struct mlx5_ib_create_qp_rss {
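The mlx5 enumerator change avoids a signed shift into the sign bit: with a 32-bit int, 1 << 31 is undefined behaviour in C. A two-line illustration of the safe spelling:

#include <stdio.h>

int main(void)
{
    unsigned long inner = 1UL << 31;  /* well-defined: 2147483648 */
    /* int bad = 1 << 31;                undefined: overflows signed int */
    printf("%lu\n", inner);
    return 0;
}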
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index f7e83f6d2e64..236452ebbd9e 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -29,6 +29,7 @@
+ #include <linux/net_tstamp.h>
+ #include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
++#include <linux/phy.h>
+ #include <net/arp.h>
+ #include <net/switchdev.h>
+ 
+@@ -665,8 +666,11 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev,
+ {
+ 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ 	const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
++	struct phy_device *phydev = vlan->real_dev->phydev;
+ 
+-	if (ops->get_ts_info) {
++	if (phydev && phydev->drv && phydev->drv->ts_info) {
++		 return phydev->drv->ts_info(phydev, info);
++	} else if (ops->get_ts_info) {
+ 		return ops->get_ts_info(vlan->real_dev, info);
+ 	} else {
+ 		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+diff --git a/net/core/dev.c b/net/core/dev.c
+index f3fbd10a0632..af4d670f5619 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1027,7 +1027,7 @@ bool dev_valid_name(const char *name)
+ {
+ 	if (*name == '\0')
+ 		return false;
+-	if (strlen(name) >= IFNAMSIZ)
++	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
+ 		return false;
+ 	if (!strcmp(name, ".") || !strcmp(name, ".."))
+ 		return false;
+@@ -2719,7 +2719,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
+ 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
+ 			return 0;
+ 
+-		eth = (struct ethhdr *)skb_mac_header(skb);
++		eth = (struct ethhdr *)skb->data;
+ 		type = eth->h_proto;
+ 	}
+ 
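The dev_valid_name() hunk above swaps strlen() for a bounded strnlen(): if a caller-supplied name lacks a NUL terminator inside IFNAMSIZ bytes, strnlen() rejects it without ever reading past the buffer. A standalone sketch of the check (the real function also rejects certain characters, omitted here):

#include <stdbool.h>
#include <string.h>

#define IFNAMSIZ 16

static bool valid_name_sketch(const char *name)
{
    if (*name == '\0')
        return false;
    if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)  /* too long or unterminated */
        return false;
    if (!strcmp(name, ".") || !strcmp(name, ".."))
        return false;
    return true;
}

int main(void)
{
    char raw[IFNAMSIZ];
    memset(raw, 'x', sizeof(raw));          /* no NUL terminator */
    return valid_name_sketch(raw) ? 1 : 0;  /* rejected, no overread */
}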
+diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
+index 7d036696e8c4..65292b9fa01a 100644
+--- a/net/dsa/dsa_priv.h
++++ b/net/dsa/dsa_priv.h
+@@ -117,6 +117,7 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
+ 	struct dsa_port *cpu_dp = dev->dsa_ptr;
+ 	struct dsa_switch_tree *dst = cpu_dp->dst;
+ 	struct dsa_switch *ds;
++	struct dsa_port *slave_port;
+ 
+ 	if (device < 0 || device >= DSA_MAX_SWITCHES)
+ 		return NULL;
+@@ -128,7 +129,12 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
+ 	if (port < 0 || port >= ds->num_ports)
+ 		return NULL;
+ 
+-	return ds->ports[port].slave;
++	slave_port = &ds->ports[port];
++
++	if (unlikely(slave_port->type != DSA_PORT_TYPE_USER))
++		return NULL;
++
++	return slave_port->slave;
+ }
+ 
+ /* port.c */
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 6c231b43974d..e981e05594c5 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -437,7 +437,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
+ 	/*unsigned long now; */
+ 	struct net *net = dev_net(dev);
+ 
+-	rt = ip_route_output(net, sip, tip, 0, 0);
++	rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
+ 	if (IS_ERR(rt))
+ 		return 1;
+ 	if (rt->dst.dev != dev) {
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 7d36a950d961..9d512922243f 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1746,18 +1746,20 @@ void fib_select_multipath(struct fib_result *res, int hash)
+ 	bool first = false;
+ 
+ 	for_nexthops(fi) {
++		if (net->ipv4.sysctl_fib_multipath_use_neigh) {
++			if (!fib_good_nh(nh))
++				continue;
++			if (!first) {
++				res->nh_sel = nhsel;
++				first = true;
++			}
++		}
++
+ 		if (hash > atomic_read(&nh->nh_upper_bound))
+ 			continue;
+ 
+-		if (!net->ipv4.sysctl_fib_multipath_use_neigh ||
+-		    fib_good_nh(nh)) {
+-			res->nh_sel = nhsel;
+-			return;
+-		}
+-		if (!first) {
+-			res->nh_sel = nhsel;
+-			first = true;
+-		}
++		res->nh_sel = nhsel;
++		return;
+ 	} endfor_nexthops(fi);
+ }
+ #endif
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 6d21068f9b55..a70a1d6db157 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -253,13 +253,14 @@ static struct net_device *__ip_tunnel_create(struct net *net,
+ 	struct net_device *dev;
+ 	char name[IFNAMSIZ];
+ 
+-	if (parms->name[0])
++	err = -E2BIG;
++	if (parms->name[0]) {
++		if (!dev_valid_name(parms->name))
++			goto failed;
+ 		strlcpy(name, parms->name, IFNAMSIZ);
+-	else {
+-		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
+-			err = -E2BIG;
++	} else {
++		if (strlen(ops->kind) > (IFNAMSIZ - 3))
+ 			goto failed;
+-		}
+ 		strlcpy(name, ops->kind, IFNAMSIZ);
+ 		strncat(name, "%d", 2);
+ 	}
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 873549228ccb..9f9f38dd6775 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -319,11 +319,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+ 	if (t || !create)
+ 		return t;
+ 
+-	if (parms->name[0])
++	if (parms->name[0]) {
++		if (!dev_valid_name(parms->name))
++			return NULL;
+ 		strlcpy(name, parms->name, IFNAMSIZ);
+-	else
++	} else {
+ 		strcpy(name, "ip6gre%d");
+-
++	}
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+ 			   ip6gre_tunnel_setup);
+ 	if (!dev)
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 3763dc01e374..ffbb81609016 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -138,6 +138,14 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
+ 		return ret;
+ 	}
+ 
++#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
++	/* Policy lookup after SNAT yielded a new policy */
++	if (skb_dst(skb)->xfrm) {
++		IPCB(skb)->flags |= IPSKB_REROUTED;
++		return dst_output(net, sk, skb);
++	}
++#endif
++
+ 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+ 	    dst_allfrag(skb_dst(skb)) ||
+ 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
+@@ -367,6 +375,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
+ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
+ 				     struct sk_buff *skb)
+ {
++	struct dst_entry *dst = skb_dst(skb);
++
++	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
++	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
++
+ 	return dst_output(net, sk, skb);
+ }
+ 
+@@ -560,8 +573,6 @@ int ip6_forward(struct sk_buff *skb)
+ 
+ 	hdr->hop_limit--;
+ 
+-	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+-	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+ 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
+ 		       net, NULL, skb, skb->dev, dst->dev,
+ 		       ip6_forward_finish);
+@@ -1237,7 +1248,7 @@ static int __ip6_append_data(struct sock *sk,
+ 			     const struct sockcm_cookie *sockc)
+ {
+ 	struct sk_buff *skb, *skb_prev = NULL;
+-	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
++	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
+ 	int exthdrlen = 0;
+ 	int dst_exthdrlen = 0;
+ 	int hh_len;
+@@ -1273,6 +1284,12 @@ static int __ip6_append_data(struct sock *sk,
+ 		      sizeof(struct frag_hdr) : 0) +
+ 		     rt->rt6i_nfheader_len;
+ 
++	/* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
++	 * the first fragment
++	 */
++	if (headersize + transhdrlen > mtu)
++		goto emsgsize;
++
+ 	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
+ 	    (sk->sk_protocol == IPPROTO_UDP ||
+ 	     sk->sk_protocol == IPPROTO_RAW)) {
+@@ -1288,9 +1305,8 @@ static int __ip6_append_data(struct sock *sk,
+ 
+ 	if (cork->length + length > maxnonfragsize - headersize) {
+ emsgsize:
+-		ipv6_local_error(sk, EMSGSIZE, fl6,
+-				 mtu - headersize +
+-				 sizeof(struct ipv6hdr));
++		pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
++		ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
+ 		return -EMSGSIZE;
+ 	}
+ 
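Two related __ip6_append_data() changes: the new headersize + transhdrlen > mtu test enforces RFC 7112's rule that the whole header chain fit in the first fragment, and the max_t() clamp keeps the EMSGSIZE report sane; once headers alone exceed the MTU, mtu - headersize goes negative and the old unsigned expression could report a huge bogus MTU. The clamp in isolation (signed math for clarity; names illustrative):

#include <stdio.h>

static int clamp_pmtu(long mtu, long headersize, long ipv6hdr_len)
{
    long pmtu = mtu - headersize + ipv6hdr_len;
    return pmtu > 0 ? (int)pmtu : 0;      /* kernel: max_t(int, ..., 0) */
}

int main(void)
{
    printf("%d\n", clamp_pmtu(1280, 1400, 40));  /* clamps to 0 */
    return 0;
}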
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 1ee5584c3555..38e0952e2396 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -297,13 +297,16 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
+ 	struct net_device *dev;
+ 	struct ip6_tnl *t;
+ 	char name[IFNAMSIZ];
+-	int err = -ENOMEM;
++	int err = -E2BIG;
+ 
+-	if (p->name[0])
++	if (p->name[0]) {
++		if (!dev_valid_name(p->name))
++			goto failed;
+ 		strlcpy(name, p->name, IFNAMSIZ);
+-	else
++	} else {
+ 		sprintf(name, "ip6tnl%%d");
+-
++	}
++	err = -ENOMEM;
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+ 			   ip6_tnl_dev_setup);
+ 	if (!dev)
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 8c184f84f353..15c51686e076 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -212,10 +212,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
+ 	char name[IFNAMSIZ];
+ 	int err;
+ 
+-	if (p->name[0])
++	if (p->name[0]) {
++		if (!dev_valid_name(p->name))
++			goto failed;
+ 		strlcpy(name, p->name, IFNAMSIZ);
+-	else
++	} else {
+ 		sprintf(name, "ip6_vti%%d");
++	}
+ 
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
+ 	if (!dev)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 1f0d94439c77..065518620dc2 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -922,6 +922,9 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
+ 	struct rt6_info *rt, *rt_cache;
+ 	struct fib6_node *fn;
+ 
++	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
++		flags &= ~RT6_LOOKUP_F_IFACE;
++
+ 	rcu_read_lock();
+ 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
+ restart:
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 7a78dcfda68a..f343e6f0fc95 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -16,6 +16,7 @@
+ #include <linux/net.h>
+ #include <linux/module.h>
+ #include <net/ip.h>
++#include <net/ip_tunnels.h>
+ #include <net/lwtunnel.h>
+ #include <net/netevent.h>
+ #include <net/netns/generic.h>
+@@ -211,11 +212,6 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 
+ 	tinfo = seg6_encap_lwtunnel(dst->lwtstate);
+ 
+-	if (likely(!skb->encapsulation)) {
+-		skb_reset_inner_headers(skb);
+-		skb->encapsulation = 1;
+-	}
+-
+ 	switch (tinfo->mode) {
+ 	case SEG6_IPTUN_MODE_INLINE:
+ 		if (skb->protocol != htons(ETH_P_IPV6))
+@@ -224,10 +220,12 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 		err = seg6_do_srh_inline(skb, tinfo->srh);
+ 		if (err)
+ 			return err;
+-
+-		skb_reset_inner_headers(skb);
+ 		break;
+ 	case SEG6_IPTUN_MODE_ENCAP:
++		err = iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6);
++		if (err)
++			return err;
++
+ 		if (skb->protocol == htons(ETH_P_IPV6))
+ 			proto = IPPROTO_IPV6;
+ 		else if (skb->protocol == htons(ETH_P_IP))
+@@ -239,6 +237,8 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 		if (err)
+ 			return err;
+ 
++		skb_set_inner_transport_header(skb, skb_transport_offset(skb));
++		skb_set_inner_protocol(skb, skb->protocol);
+ 		skb->protocol = htons(ETH_P_IPV6);
+ 		break;
+ 	case SEG6_IPTUN_MODE_L2ENCAP:
+@@ -262,8 +262,6 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+ 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ 
+-	skb_set_inner_protocol(skb, skb->protocol);
+-
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 3a1775a62973..5a0725d7aabc 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -250,11 +250,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
+ 	if (!create)
+ 		goto failed;
+ 
+-	if (parms->name[0])
++	if (parms->name[0]) {
++		if (!dev_valid_name(parms->name))
++			goto failed;
+ 		strlcpy(name, parms->name, IFNAMSIZ);
+-	else
++	} else {
+ 		strcpy(name, "sit%d");
+-
++	}
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+ 			   ipip6_tunnel_setup);
+ 	if (!dev)
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index a1f24fb2be98..7e9c50125556 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -761,6 +761,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
+ 
+ 	if ((session->ifname[0] &&
+ 	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
++	    (session->offset &&
++	     nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
+ 	    (session->cookie_len &&
+ 	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
+ 		     &session->cookie[0])) ||
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 84f757c5d91a..288640471c2f 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -2373,10 +2373,17 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
+ 	struct ieee80211_sub_if_data *sdata;
+ 	enum nl80211_tx_power_setting txp_type = type;
+ 	bool update_txp_type = false;
++	bool has_monitor = false;
+ 
+ 	if (wdev) {
+ 		sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ 
++		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
++			sdata = rtnl_dereference(local->monitor_sdata);
++			if (!sdata)
++				return -EOPNOTSUPP;
++		}
++
+ 		switch (type) {
+ 		case NL80211_TX_POWER_AUTOMATIC:
+ 			sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
+@@ -2415,15 +2422,34 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
+ 
+ 	mutex_lock(&local->iflist_mtx);
+ 	list_for_each_entry(sdata, &local->interfaces, list) {
++		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
++			has_monitor = true;
++			continue;
++		}
+ 		sdata->user_power_level = local->user_power_level;
+ 		if (txp_type != sdata->vif.bss_conf.txpower_type)
+ 			update_txp_type = true;
+ 		sdata->vif.bss_conf.txpower_type = txp_type;
+ 	}
+-	list_for_each_entry(sdata, &local->interfaces, list)
++	list_for_each_entry(sdata, &local->interfaces, list) {
++		if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
++			continue;
+ 		ieee80211_recalc_txpower(sdata, update_txp_type);
++	}
+ 	mutex_unlock(&local->iflist_mtx);
+ 
++	if (has_monitor) {
++		sdata = rtnl_dereference(local->monitor_sdata);
++		if (sdata) {
++			sdata->user_power_level = local->user_power_level;
++			if (txp_type != sdata->vif.bss_conf.txpower_type)
++				update_txp_type = true;
++			sdata->vif.bss_conf.txpower_type = txp_type;
++
++			ieee80211_recalc_txpower(sdata, update_txp_type);
++		}
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index c7f93fd9ca7a..4d82fe7d627c 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -165,7 +165,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
+ 	if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
+ 			 sdata->vif.type == NL80211_IFTYPE_NAN ||
+ 			 (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+-			  !sdata->vif.mu_mimo_owner)))
++			  !sdata->vif.mu_mimo_owner &&
++			  !(changed & BSS_CHANGED_TXPOWER))))
+ 		return;
+ 
+ 	if (!check_sdata_in_driver(sdata))
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index ca9c0544c856..1245aa1d6e1c 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1052,6 +1052,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ 	if (addr->sa_family != AF_NETLINK)
+ 		return -EINVAL;
+ 
++	if (alen < sizeof(struct sockaddr_nl))
++		return -EINVAL;
++
+ 	if ((nladdr->nl_groups || nladdr->nl_pid) &&
+ 	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
+ 		return -EPERM;
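The added netlink_connect() length check closes an out-of-bounds read: the nl_groups/nl_pid accesses just below it assumed the caller passed a full struct sockaddr_nl, but alen comes straight from userspace. The guard in stand-in form (struct layout simplified, not the real sockaddr_nl):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct nl_addr_sketch {          /* stand-in for struct sockaddr_nl */
    unsigned short family;
    unsigned short pad;
    unsigned int   pid;
    unsigned int   groups;
};

static bool addr_len_ok(size_t alen)
{
    return alen >= sizeof(struct nl_addr_sketch);  /* the added test */
}

int main(void)
{
    printf("%d %d\n", addr_len_ok(2),
           addr_len_ok(sizeof(struct nl_addr_sketch)));
    return 0;
}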
+diff --git a/net/rds/bind.c b/net/rds/bind.c
+index 75d43dc8e96b..5aa3a64aa4f0 100644
+--- a/net/rds/bind.c
++++ b/net/rds/bind.c
+@@ -114,6 +114,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
+ 			  rs, &addr, (int)ntohs(*port));
+ 			break;
+ 		} else {
++			rs->rs_bound_addr = 0;
+ 			rds_sock_put(rs);
+ 			ret = -ENOMEM;
+ 			break;
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 4d33a50a8a6d..e3386f1f485c 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -135,8 +135,10 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
+ 			continue;
+ 
+ 		nest = nla_nest_start(skb, n_i);
+-		if (!nest)
++		if (!nest) {
++			index--;
+ 			goto nla_put_failure;
++		}
+ 		err = tcf_action_dump_1(skb, p, 0, 0);
+ 		if (err < 0) {
+ 			index--;
+diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
+index 5ef8ce8c83d4..502159bdded3 100644
+--- a/net/sched/act_bpf.c
++++ b/net/sched/act_bpf.c
+@@ -248,10 +248,14 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
+ 
+ static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
+ {
+-	if (cfg->is_ebpf)
+-		bpf_prog_put(cfg->filter);
+-	else
+-		bpf_prog_destroy(cfg->filter);
++	struct bpf_prog *filter = cfg->filter;
++
++	if (filter) {
++		if (cfg->is_ebpf)
++			bpf_prog_put(filter);
++		else
++			bpf_prog_destroy(filter);
++	}
+ 
+ 	kfree(cfg->bpf_ops);
+ 	kfree(cfg->bpf_name);
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index 9438969290a6..2298d91c4c83 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -103,7 +103,8 @@ static void tcf_sample_cleanup(struct tc_action *a, int bind)
+ 
+ 	psample_group = rtnl_dereference(s->psample_group);
+ 	RCU_INIT_POINTER(s->psample_group, NULL);
+-	psample_group_put(psample_group);
++	if (psample_group)
++		psample_group_put(psample_group);
+ }
+ 
+ static bool tcf_sample_dev_ok_push(struct net_device *dev)
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
+index b642ad3d39dd..6d10b3af479b 100644
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -190,7 +190,8 @@ static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
+ 	struct tcf_skbmod_params  *p;
+ 
+ 	p = rcu_dereference_protected(d->skbmod_p, 1);
+-	kfree_rcu(p, rcu);
++	if (p)
++		kfree_rcu(p, rcu);
+ }
+ 
+ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index 22bf1a376b91..7cb63616805d 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -208,11 +208,12 @@ static void tunnel_key_release(struct tc_action *a, int bind)
+ 	struct tcf_tunnel_key_params *params;
+ 
+ 	params = rcu_dereference_protected(t->params, 1);
++	if (params) {
++		if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
++			dst_release(&params->tcft_enc_metadata->dst);
+ 
+-	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+-		dst_release(&params->tcft_enc_metadata->dst);
+-
+-	kfree_rcu(params, rcu);
++		kfree_rcu(params, rcu);
++	}
+ }
+ 
+ static int tunnel_key_dump_addresses(struct sk_buff *skb,
+diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
+index 97f717a13ad5..788a8daf9230 100644
+--- a/net/sched/act_vlan.c
++++ b/net/sched/act_vlan.c
+@@ -225,7 +225,8 @@ static void tcf_vlan_cleanup(struct tc_action *a, int bind)
+ 	struct tcf_vlan_params *p;
+ 
+ 	p = rcu_dereference_protected(v->vlan_p, 1);
+-	kfree_rcu(p, rcu);
++	if (p)
++		kfree_rcu(p, rcu);
+ }
+ 
+ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
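The act_bpf, act_sample, act_skbmod, act_tunnel_key and act_vlan hunks above all add the same guard: action init can fail before the parameter block is installed, so cleanup must tolerate a NULL pointer before touching it. The shared shape, sketched in plain C (free() standing in for kfree_rcu()/dst_release(); names illustrative):

#include <stdlib.h>

struct params_sketch {
    int set_action;   /* e.g. TCA_TUNNEL_KEY_ACT_SET took a dst reference */
};

static void release_sketch(struct params_sketch *p)
{
    if (!p)
        return;            /* init failed early: nothing was installed */
    if (p->set_action) {
        /* ... drop the reference the set path took ... */
    }
    free(p);
}

int main(void)
{
    release_sketch(NULL);  /* must be safe */
    release_sketch(calloc(1, sizeof(struct params_sketch)));
    return 0;
}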
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index 425cc341fd41..8d25f38cc1ad 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -478,6 +478,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
+ 				RCU_INIT_POINTER(*kp, key->next);
+ 
+ 				tcf_unbind_filter(tp, &key->res);
++				idr_remove(&ht->handle_idr, key->handle);
+ 				tcf_exts_get_net(&key->exts);
+ 				call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
+ 				return 0;
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index f0747eb87dc4..cca57e93a810 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -157,7 +157,6 @@ static int red_offload(struct Qdisc *sch, bool enable)
+ 		.handle = sch->handle,
+ 		.parent = sch->parent,
+ 	};
+-	int err;
+ 
+ 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ 		return -EOPNOTSUPP;
+@@ -172,14 +171,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
+ 		opt.command = TC_RED_DESTROY;
+ 	}
+ 
+-	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
+-
+-	if (!err && enable)
+-		sch->flags |= TCQ_F_OFFLOADED;
+-	else
+-		sch->flags &= ~TCQ_F_OFFLOADED;
+-
+-	return err;
++	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
+ }
+ 
+ static void red_destroy(struct Qdisc *sch)
+@@ -294,12 +286,22 @@ static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
+ 			.stats.qstats = &sch->qstats,
+ 		},
+ 	};
++	int err;
++
++	sch->flags &= ~TCQ_F_OFFLOADED;
+ 
+-	if (!(sch->flags & TCQ_F_OFFLOADED))
++	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
++		return 0;
++
++	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
++					    &hw_stats);
++	if (err == -EOPNOTSUPP)
+ 		return 0;
+ 
+-	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
+-					     &hw_stats);
++	if (!err)
++		sch->flags |= TCQ_F_OFFLOADED;
++
++	return err;
+ }
+ 
+ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index e35d4f73d2df..f6d3d0c1e133 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -728,8 +728,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
+ 			sctp_v6_map_v4(addr);
+ 	}
+ 
+-	if (addr->sa.sa_family == AF_INET)
++	if (addr->sa.sa_family == AF_INET) {
++		memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+ 		return sizeof(struct sockaddr_in);
++	}
+ 	return sizeof(struct sockaddr_in6);
+ }
+ 
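The sctp memset closes a small infoleak: after the in-place v6-to-v4 mapping above it, the sin_zero padding of the smaller sockaddr_in still held bytes of the original sockaddr_in6, and those bytes travel back to userspace with the returned length. A simplified stand-in (layout illustrative, not the real sockaddr_in):

#include <stdio.h>
#include <string.h>

struct v4_sketch {
    unsigned short family;
    unsigned short port;
    unsigned int   addr;
    char           sin_zero[8];   /* padding copied out to userspace */
};

int main(void)
{
    union { struct v4_sketch v4; char raw[28]; } u;  /* 28 = v6 size */

    memset(u.raw, 0xAA, sizeof(u.raw));              /* stale v6 bytes */
    memset(u.v4.sin_zero, 0, sizeof(u.v4.sin_zero)); /* the added line */

    printf("%d\n", u.v4.sin_zero[0]);                /* 0, not stale 0xAA */
    return 0;
}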
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 039fcb618c34..5e6ff7ac07d1 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -338,11 +338,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
+ 	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
+ 		return NULL;
+ 
+-	/* V4 mapped address are really of AF_INET family */
+-	if (addr->sa.sa_family == AF_INET6 &&
+-	    ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+-	    !opt->pf->af_supported(AF_INET, opt))
+-		return NULL;
++	if (addr->sa.sa_family == AF_INET6) {
++		if (len < SIN6_LEN_RFC2133)
++			return NULL;
++		/* V4 mapped address are really of AF_INET family */
++		if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
++		    !opt->pf->af_supported(AF_INET, opt))
++			return NULL;
++	}
+ 
+ 	/* If we get this far, af is valid. */
+ 	af = sctp_get_af_specific(addr->sa.sa_family);
+diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
+index 1fdab5c4eda8..b9283ce5cd85 100644
+--- a/net/strparser/strparser.c
++++ b/net/strparser/strparser.c
+@@ -60,7 +60,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
+ 		struct sock *sk = strp->sk;
+ 
+ 		/* Report an error on the lower socket */
+-		sk->sk_err = err;
++		sk->sk_err = -err;
+ 		sk->sk_error_report(sk);
+ 	}
+ }
+@@ -458,7 +458,7 @@ static void strp_msg_timeout(struct work_struct *w)
+ 	/* Message assembly timed out */
+ 	STRP_STATS_INCR(strp->stats.msg_timeouts);
+ 	strp->cb.lock(strp);
+-	strp->cb.abort_parser(strp, ETIMEDOUT);
++	strp->cb.abort_parser(strp, -ETIMEDOUT);
+ 	strp->cb.unlock(strp);
+ }
+ 
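Both strparser hunks enforce one sign convention: internal error paths hand around negative errno values, while a socket's sk_err field holds the positive errno that userspace later reads, so the boundary negates. In miniature:

#include <errno.h>
#include <stdio.h>

static int sk_err;                 /* sockets expose positive errno here */

static void abort_parser(int err)  /* callers pass negative errno */
{
    sk_err = -err;                 /* the sign flip the first hunk adds */
}

int main(void)
{
    abort_parser(-ETIMEDOUT);      /* the second hunk's caller convention */
    printf("sk_err=%d ETIMEDOUT=%d\n", sk_err, ETIMEDOUT);
    return 0;
}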
+diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
+index 65e257b17a7e..20f5066fefb9 100644
+--- a/sound/soc/intel/atom/sst/sst_stream.c
++++ b/sound/soc/intel/atom/sst/sst_stream.c
+@@ -220,7 +220,7 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx,
+ 		sst_free_block(sst_drv_ctx, block);
+ out:
+ 	test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
+index 18d129caa974..f898ee140cdc 100644
+--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
++++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
+@@ -118,6 +118,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
+ 	SND_SOC_DAPM_HP("Headphone", NULL),
+ 	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ 	SND_SOC_DAPM_MIC("Int Mic", NULL),
++	SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
+ 	SND_SOC_DAPM_SPK("Ext Spk", NULL),
+ 	SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ 			platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+@@ -128,6 +129,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
+ 	{"IN1N", NULL, "Headset Mic"},
+ 	{"DMIC L1", NULL, "Int Mic"},
+ 	{"DMIC R1", NULL, "Int Mic"},
++	{"IN2P", NULL, "Int Analog Mic"},
++	{"IN2N", NULL, "Int Analog Mic"},
+ 	{"Headphone", NULL, "HPOL"},
+ 	{"Headphone", NULL, "HPOR"},
+ 	{"Ext Spk", NULL, "SPOL"},
+@@ -135,6 +138,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
+ 	{"Headphone", NULL, "Platform Clock"},
+ 	{"Headset Mic", NULL, "Platform Clock"},
+ 	{"Int Mic", NULL, "Platform Clock"},
++	{"Int Analog Mic", NULL, "Platform Clock"},
++	{"Int Analog Mic", NULL, "micbias1"},
++	{"Int Analog Mic", NULL, "micbias2"},
+ 	{"Ext Spk", NULL, "Platform Clock"},
+ };
+ 
+@@ -189,6 +195,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = {
+ 	SOC_DAPM_PIN_SWITCH("Headphone"),
+ 	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ 	SOC_DAPM_PIN_SWITCH("Int Mic"),
++	SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
+ 	SOC_DAPM_PIN_SWITCH("Ext Spk"),
+ };
+ 
+diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
+index 61b5bfa79d13..97572fb38387 100644
+--- a/sound/soc/intel/skylake/skl-messages.c
++++ b/sound/soc/intel/skylake/skl-messages.c
+@@ -404,7 +404,11 @@ int skl_resume_dsp(struct skl *skl)
+ 	if (skl->skl_sst->is_first_boot == true)
+ 		return 0;
+ 
++	/* disable dynamic clock gating during fw and lib download */
++	ctx->enable_miscbdcge(ctx->dev, false);
++
+ 	ret = skl_dsp_wake(ctx->dsp);
++	ctx->enable_miscbdcge(ctx->dev, true);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
+index 1dd97479e0c0..32b30f99d2c8 100644
+--- a/sound/soc/intel/skylake/skl-pcm.c
++++ b/sound/soc/intel/skylake/skl-pcm.c
+@@ -1343,7 +1343,11 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
+ 			return -EIO;
+ 		}
+ 
++		/* disable dynamic clock gating during fw and lib download */
++		skl->skl_sst->enable_miscbdcge(platform->dev, false);
++
+ 		ret = ops->init_fw(platform->dev, skl->skl_sst);
++		skl->skl_sst->enable_miscbdcge(platform->dev, true);
+ 		if (ret < 0) {
+ 			dev_err(platform->dev, "Failed to boot first fw: %d\n", ret);
+ 			return ret;
+diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
+index 9c4e23d8c8ce..53d83d7e6a09 100644
+--- a/tools/perf/arch/powerpc/util/sym-handling.c
++++ b/tools/perf/arch/powerpc/util/sym-handling.c
+@@ -64,6 +64,14 @@ int arch__compare_symbol_names_n(const char *namea, const char *nameb,
+ 
+ 	return strncmp(namea, nameb, n);
+ }
++
++const char *arch__normalize_symbol_name(const char *name)
++{
++	/* Skip over initial dot */
++	if (name && *name == '.')
++		name++;
++	return name;
++}
+ #endif
+ 
+ #if defined(_CALL_ELF) && _CALL_ELF == 2
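arch__normalize_symbol_name() follows perf's usual weak-symbol pattern: the powerpc file above supplies a strong definition (stripping the leading dot of function descriptors), while tools/perf/util/symbol.c further down adds the __weak generic default. A compilable sketch of how the two resolve (names made up):

#include <stdio.h>

/* Generic default: a strong definition elsewhere wins at link time. */
__attribute__((weak)) const char *normalize_sym(const char *name)
{
    return name;
}

/* A powerpc-style override, if linked in, would be a plain non-weak
 * definition:
 *   const char *normalize_sym(const char *name)
 *   { return (name && *name == '.') ? name + 1 : name; }
 */

int main(void)
{
    printf("%s\n", normalize_sym(".main"));  /* ".main" under the default */
    return 0;
}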
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index 003255910c05..36b6213884b5 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -1781,8 +1781,8 @@ int cmd_record(int argc, const char **argv)
+ 		goto out;
+ 	}
+ 
+-	/* Enable ignoring missing threads when -u option is defined. */
+-	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
++	/* Enable ignoring missing threads when -u/-p option is defined. */
++	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
+ 
+ 	err = -ENOMEM;
+ 	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index af5dd038195e..0551a69bd4a5 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -162,12 +162,28 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
+ 	struct hist_entry *he = iter->he;
+ 	struct report *rep = arg;
+ 	struct branch_info *bi;
++	struct perf_sample *sample = iter->sample;
++	struct perf_evsel *evsel = iter->evsel;
++	int err;
++
++	if (!ui__has_annotation())
++		return 0;
++
++	hist__account_cycles(sample->branch_stack, al, sample,
++			     rep->nonany_branch_mode);
+ 
+ 	bi = he->branch_info;
++	err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx);
++	if (err)
++		goto out;
++
++	err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx);
++
+ 	branch_type_count(&rep->brtype_stat, &bi->flags,
+ 			  bi->from.addr, bi->to.addr);
+ 
+-	return 0;
++out:
++	return err;
+ }
+ 
+ static int process_sample_event(struct perf_tool *tool,
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index d5fbcf8c7aa7..242d345beda4 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -36,6 +36,7 @@
+ #include "debug.h"
+ #include "trace-event.h"
+ #include "stat.h"
++#include "memswap.h"
+ #include "util/parse-branch-options.h"
+ 
+ #include "sane_ctype.h"
+@@ -1596,10 +1597,46 @@ static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
+ 	return fprintf(fp, "  %-32s %s\n", name, val);
+ }
+ 
++static void perf_evsel__remove_fd(struct perf_evsel *pos,
++				  int nr_cpus, int nr_threads,
++				  int thread_idx)
++{
++	for (int cpu = 0; cpu < nr_cpus; cpu++)
++		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
++			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
++}
++
++static int update_fds(struct perf_evsel *evsel,
++		      int nr_cpus, int cpu_idx,
++		      int nr_threads, int thread_idx)
++{
++	struct perf_evsel *pos;
++
++	if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
++		return -EINVAL;
++
++	evlist__for_each_entry(evsel->evlist, pos) {
++		nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
++
++		perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
++
++		/*
++		 * Since fds for next evsel has not been created,
++		 * there is no need to iterate whole event list.
++		 */
++		if (pos == evsel)
++			break;
++	}
++	return 0;
++}
++
+ static bool ignore_missing_thread(struct perf_evsel *evsel,
++				  int nr_cpus, int cpu,
+ 				  struct thread_map *threads,
+ 				  int thread, int err)
+ {
++	pid_t ignore_pid = thread_map__pid(threads, thread);
++
+ 	if (!evsel->ignore_missing_thread)
+ 		return false;
+ 
+@@ -1615,11 +1652,18 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
+ 	if (threads->nr == 1)
+ 		return false;
+ 
++	/*
++	 * We should remove fd for missing_thread first
++	 * because thread_map__remove() will decrease threads->nr.
++	 */
++	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
++		return false;
++
+ 	if (thread_map__remove(threads, thread))
+ 		return false;
+ 
+ 	pr_warning("WARNING: Ignored open failure for pid %d\n",
+-		   thread_map__pid(threads, thread));
++		   ignore_pid);
+ 	return true;
+ }
+ 
+@@ -1724,7 +1768,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ 			if (fd < 0) {
+ 				err = -errno;
+ 
+-				if (ignore_missing_thread(evsel, threads, thread, err)) {
++				if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
+ 					/*
+ 					 * We just removed 1 thread, so take a step
+ 					 * back on thread index and lower the upper
+@@ -2120,14 +2164,27 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
+ 	if (type & PERF_SAMPLE_RAW) {
+ 		OVERFLOW_CHECK_u64(array);
+ 		u.val64 = *array;
+-		if (WARN_ONCE(swapped,
+-			      "Endianness of raw data not corrected!\n")) {
+-			/* undo swap of u64, then swap on individual u32s */
++
++		/*
++		 * Undo swap of u64, then swap on individual u32s,
++		 * get the size of the raw area and undo all of the
++		 * swap. The pevent interface handles endianity by
++		 * itself.
++		 */
++		if (swapped) {
+ 			u.val64 = bswap_64(u.val64);
+ 			u.val32[0] = bswap_32(u.val32[0]);
+ 			u.val32[1] = bswap_32(u.val32[1]);
+ 		}
+ 		data->raw_size = u.val32[0];
++
++		/*
++		 * The raw data is aligned on 64bits including the
++		 * u32 size, so it's safe to use mem_bswap_64.
++		 */
++		if (swapped)
++			mem_bswap_64((void *) array, data->raw_size);
++
+ 		array = (void *)array + sizeof(u32);
+ 
+ 		OVERFLOW_CHECK(array, data->raw_size, max_size);
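In the evsel.c change above, perf_evsel__remove_fd() keeps the per-event fd matrix packed when a vanished thread is dropped: every fd column to the right of the dead thread shifts one slot left, per CPU row, before thread_map__remove() shrinks the map. The compaction on its own:

#include <stdio.h>

#define NCPU 2
#define NTHR 4

static void remove_thread_col(int fd[NCPU][NTHR], int t)
{
    for (int cpu = 0; cpu < NCPU; cpu++)
        for (int thr = t; thr < NTHR - 1; thr++)
            fd[cpu][thr] = fd[cpu][thr + 1];   /* shift left over column t */
}

int main(void)
{
    int fd[NCPU][NTHR] = { {10, 11, 12, 13}, {20, 21, 22, 23} };

    remove_thread_col(fd, 1);                            /* thread 1 died */
    printf("%d %d %d\n", fd[0][0], fd[0][1], fd[0][2]);  /* 10 12 13 */
    return 0;
}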
+diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
+index b7aaf9b2294d..68786bb7790e 100644
+--- a/tools/perf/util/probe-event.c
++++ b/tools/perf/util/probe-event.c
+@@ -2625,6 +2625,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
+ 
+ out:
+ 	free(nbase);
++
++	/* Final validation */
++	if (ret >= 0 && !is_c_func_name(buf)) {
++		pr_warning("Internal error: \"%s\" is an invalid event name.\n",
++			   buf);
++		ret = -EINVAL;
++	}
++
+ 	return ret;
+ }
+ 
+@@ -2792,16 +2800,32 @@ static int find_probe_functions(struct map *map, char *name,
+ 	int found = 0;
+ 	struct symbol *sym;
+ 	struct rb_node *tmp;
++	const char *norm, *ver;
++	char *buf = NULL;
+ 
+ 	if (map__load(map) < 0)
+ 		return 0;
+ 
+ 	map__for_each_symbol(map, sym, tmp) {
+-		if (strglobmatch(sym->name, name)) {
++		norm = arch__normalize_symbol_name(sym->name);
++		if (!norm)
++			continue;
++
++		/* We don't care about default symbol or not */
++		ver = strchr(norm, '@');
++		if (ver) {
++			buf = strndup(norm, ver - norm);
++			if (!buf)
++				return -ENOMEM;
++			norm = buf;
++		}
++		if (strglobmatch(norm, name)) {
+ 			found++;
+ 			if (syms && found < probe_conf.max_probes)
+ 				syms[found - 1] = sym;
+ 		}
++		if (buf)
++			zfree(&buf);
+ 	}
+ 
+ 	return found;
+@@ -2847,7 +2871,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
+ 	 * same name but different addresses, this lists all the symbols.
+ 	 */
+ 	num_matched_functions = find_probe_functions(map, pp->function, syms);
+-	if (num_matched_functions == 0) {
++	if (num_matched_functions <= 0) {
+ 		pr_err("Failed to find symbol %s in %s\n", pp->function,
+ 			pev->target ? : "kernel");
+ 		ret = -ENOENT;
+diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
+index b4f2f06722a7..7aa0ea64544e 100644
+--- a/tools/perf/util/python-ext-sources
++++ b/tools/perf/util/python-ext-sources
+@@ -10,6 +10,7 @@ util/ctype.c
+ util/evlist.c
+ util/evsel.c
+ util/cpumap.c
++util/memswap.c
+ util/mmap.c
+ util/namespaces.c
+ ../lib/bitmap.c
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 1b67a8639dfe..cc065d4bfafc 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -94,6 +94,11 @@ static int prefix_underscores_count(const char *str)
+ 	return tail - str;
+ }
+ 
++const char * __weak arch__normalize_symbol_name(const char *name)
++{
++	return name;
++}
++
+ int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
+ {
+ 	return strcmp(namea, nameb);
+diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
+index a4f0075b4e5c..0563f33c1eb3 100644
+--- a/tools/perf/util/symbol.h
++++ b/tools/perf/util/symbol.h
+@@ -349,6 +349,7 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
+ void arch__sym_update(struct symbol *s, GElf_Sym *sym);
+ #endif
+ 
++const char *arch__normalize_symbol_name(const char *name);
+ #define SYMBOL_A 0
+ #define SYMBOL_B 1
+ 
+diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
+index a789f952b3e9..443892dabedb 100644
+--- a/tools/perf/util/util.c
++++ b/tools/perf/util/util.c
+@@ -210,7 +210,7 @@ static int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64
+ 
+ 		size -= ret;
+ 		off_in += ret;
+-		off_out -= ret;
++		off_out += ret;
+ 	}
+ 	munmap(ptr, off_in + size);
+ 
+diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
+index 3ab6ec403905..e11fe84de0fd 100644
+--- a/tools/testing/selftests/net/msg_zerocopy.c
++++ b/tools/testing/selftests/net/msg_zerocopy.c
+@@ -259,22 +259,28 @@ static int setup_ip6h(struct ipv6hdr *ip6h, uint16_t payload_len)
+ 	return sizeof(*ip6h);
+ }
+ 
+-static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr)
++
++static void setup_sockaddr(int domain, const char *str_addr,
++			   struct sockaddr_storage *sockaddr)
+ {
+ 	struct sockaddr_in6 *addr6 = (void *) sockaddr;
+ 	struct sockaddr_in *addr4 = (void *) sockaddr;
+ 
+ 	switch (domain) {
+ 	case PF_INET:
++		memset(addr4, 0, sizeof(*addr4));
+ 		addr4->sin_family = AF_INET;
+ 		addr4->sin_port = htons(cfg_port);
+-		if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
++		if (str_addr &&
++		    inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
+ 			error(1, 0, "ipv4 parse error: %s", str_addr);
+ 		break;
+ 	case PF_INET6:
++		memset(addr6, 0, sizeof(*addr6));
+ 		addr6->sin6_family = AF_INET6;
+ 		addr6->sin6_port = htons(cfg_port);
+-		if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
++		if (str_addr &&
++		    inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
+ 			error(1, 0, "ipv6 parse error: %s", str_addr);
+ 		break;
+ 	default:
+@@ -603,6 +609,7 @@ static void parse_opts(int argc, char **argv)
+ 				    sizeof(struct tcphdr) -
+ 				    40 /* max tcp options */;
+ 	int c;
++	char *daddr = NULL, *saddr = NULL;
+ 
+ 	cfg_payload_len = max_payload_len;
+ 
+@@ -627,7 +634,7 @@ static void parse_opts(int argc, char **argv)
+ 			cfg_cpu = strtol(optarg, NULL, 0);
+ 			break;
+ 		case 'D':
+-			setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
++			daddr = optarg;
+ 			break;
+ 		case 'i':
+ 			cfg_ifindex = if_nametoindex(optarg);
+@@ -638,7 +645,7 @@ static void parse_opts(int argc, char **argv)
+ 			cfg_cork_mixed = true;
+ 			break;
+ 		case 'p':
+-			cfg_port = htons(strtoul(optarg, NULL, 0));
++			cfg_port = strtoul(optarg, NULL, 0);
+ 			break;
+ 		case 'r':
+ 			cfg_rx = true;
+@@ -647,7 +654,7 @@ static void parse_opts(int argc, char **argv)
+ 			cfg_payload_len = strtoul(optarg, NULL, 0);
+ 			break;
+ 		case 'S':
+-			setup_sockaddr(cfg_family, optarg, &cfg_src_addr);
++			saddr = optarg;
+ 			break;
+ 		case 't':
+ 			cfg_runtime_ms = 200 + strtoul(optarg, NULL, 10) * 1000;
+@@ -660,6 +667,8 @@ static void parse_opts(int argc, char **argv)
+ 			break;
+ 		}
+ 	}
++	setup_sockaddr(cfg_family, daddr, &cfg_dst_addr);
++	setup_sockaddr(cfg_family, saddr, &cfg_src_addr);
+ 
+ 	if (cfg_payload_len > max_payload_len)
+ 		error(1, 0, "-s: payload exceeds max (%d)", max_payload_len);
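Two of the msg_zerocopy fixes interact: '-D'/'-S' parsing is deferred until after the option loop so cfg_family is final, and the '-p' handler drops its htons() because setup_sockaddr() already converts when filling sin_port; the old code byte-swapped the port twice on little-endian hosts. The double swap, demonstrated:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
    unsigned short port  = 4000;
    unsigned short once  = htons(port);         /* correct wire order */
    unsigned short twice = htons(htons(port));  /* back to host order */

    printf("once=0x%04x twice=0x%04x\n", once, twice);
    return 0;
}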



Thread overview: 27+ messages
2018-04-12 12:20 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2018-04-19 10:44 [gentoo-commits] proj/linux-patches:4.15 commit in: / Mike Pagano
2018-04-08 14:31 Mike Pagano
2018-03-31 22:20 Mike Pagano
2018-03-28 17:03 Mike Pagano
2018-03-25 13:37 Mike Pagano
2018-03-21 14:42 Mike Pagano
2018-03-19 12:02 Mike Pagano
2018-03-15 10:26 Mike Pagano
2018-03-11 17:39 Mike Pagano
2018-03-09 22:51 Mike Pagano
2018-03-09 16:38 Alice Ferrazzi
2018-03-01 13:51 Alice Ferrazzi
2018-02-28 20:07 Alice Ferrazzi
2018-02-28 20:07 Alice Ferrazzi
2018-02-28 19:53 Alice Ferrazzi
2018-02-28 15:16 Alice Ferrazzi
2018-02-28 14:57 Alice Ferrazzi
2018-02-26 14:18 Alice Ferrazzi
2018-02-26 14:18 Alice Ferrazzi
2018-02-25 13:46 Alice Ferrazzi
2018-02-22 23:24 Mike Pagano
2018-02-17 14:02 Alice Ferrazzi
2018-02-12  9:01 Alice Ferrazzi
2018-02-08  0:38 Mike Pagano
2018-02-03 21:20 Mike Pagano
2018-01-16 13:35 Mike Pagano
