public inbox for gentoo-commits@lists.gentoo.org
From: "Alice Ferrazzi" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sun,  4 Nov 2018 17:31:15 +0000 (UTC)
Message-ID: <1541352643.45625e29ac9ee0956e90c0063b880573bbbca0f9.alicef@gentoo>

commit:     45625e29ac9ee0956e90c0063b880573bbbca0f9
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sun Nov  4 17:30:43 2018 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sun Nov  4 17:30:43 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=45625e29

Linux kernel 4.14.79

 0000_README              |    4 +
 1078_linux-4.14.79.patch | 4661 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4665 insertions(+)
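
As a rough, illustrative sketch (an assumed workflow, not part of the commit itself): an
incremental genpatches diff such as 1078_linux-4.14.79.patch is applied on top of a source
tree that already carries the previous patch, and the result can be sanity-checked against
the kernel Makefile. Paths below are hypothetical:

  # run from the root of a source tree already patched to 4.14.78
  patch -p1 --dry-run < 1078_linux-4.14.79.patch   # verify it applies cleanly first
  patch -p1 < 1078_linux-4.14.79.patch
  grep -E '^(VERSION|PATCHLEVEL|SUBLEVEL)' Makefile  # should now report 4 / 14 / 79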

diff --git a/0000_README b/0000_README
index 509ffd2..319ee36 100644
--- a/0000_README
+++ b/0000_README
@@ -355,6 +355,10 @@ Patch:  1077_linux-4.14.78.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.78
 
+Patch:  1078_linux-4.14.79.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.79
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1078_linux-4.14.79.patch b/1078_linux-4.14.79.patch
new file mode 100644
index 0000000..e13487a
--- /dev/null
+++ b/1078_linux-4.14.79.patch
@@ -0,0 +1,4661 @@
+diff --git a/Makefile b/Makefile
+index 89574ee68d6b..57a007bf1181 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 78
++SUBLEVEL = 79
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+@@ -487,6 +487,8 @@ CLANG_GCC_TC	:= --gcc-toolchain=$(GCC_TOOLCHAIN)
+ endif
+ KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+ KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
++KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
++KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+ endif
+ 
+ RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+@@ -721,8 +723,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+ # See modpost pattern 2
+ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+ KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+ else
+ 
+ # These warnings generated too much noise in a regular build.
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 40dc31fea90c..77b3e21c4844 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -965,4 +965,12 @@ config REFCOUNT_FULL
+ 	  against various use-after-free conditions that can be used in
+ 	  security flaw exploits.
+ 
++config HAVE_ARCH_COMPILER_H
++	bool
++	help
++	  An architecture can select this if it provides an
++	  asm/compiler.h header that should be included after
++	  linux/compiler-*.h in order to override macro definitions that those
++	  headers generally provide.
++
+ source "kernel/gcov/Kconfig"
+diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
+index 43ee992ccdcf..6df61518776f 100644
+--- a/arch/arm/boot/dts/bcm63138.dtsi
++++ b/arch/arm/boot/dts/bcm63138.dtsi
+@@ -106,21 +106,23 @@
+ 		global_timer: timer@1e200 {
+ 			compatible = "arm,cortex-a9-global-timer";
+ 			reg = <0x1e200 0x20>;
+-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
+ 			clocks = <&axi_clk>;
+ 		};
+ 
+ 		local_timer: local-timer@1e600 {
+ 			compatible = "arm,cortex-a9-twd-timer";
+ 			reg = <0x1e600 0x20>;
+-			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
++						  IRQ_TYPE_EDGE_RISING)>;
+ 			clocks = <&axi_clk>;
+ 		};
+ 
+ 		twd_watchdog: watchdog@1e620 {
+ 			compatible = "arm,cortex-a9-twd-wdt";
+ 			reg = <0x1e620 0x20>;
+-			interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
++						  IRQ_TYPE_LEVEL_HIGH)>;
+ 		};
+ 
+ 		armpll: armpll {
+@@ -158,7 +160,7 @@
+ 		serial0: serial@600 {
+ 			compatible = "brcm,bcm6345-uart";
+ 			reg = <0x600 0x1b>;
+-			interrupts = <GIC_SPI 32 0>;
++			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&periph_clk>;
+ 			clock-names = "periph";
+ 			status = "disabled";
+@@ -167,7 +169,7 @@
+ 		serial1: serial@620 {
+ 			compatible = "brcm,bcm6345-uart";
+ 			reg = <0x620 0x1b>;
+-			interrupts = <GIC_SPI 33 0>;
++			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&periph_clk>;
+ 			clock-names = "periph";
+ 			status = "disabled";
+@@ -180,7 +182,7 @@
+ 			reg = <0x2000 0x600>, <0xf0 0x10>;
+ 			reg-names = "nand", "nand-int-base";
+ 			status = "disabled";
+-			interrupts = <GIC_SPI 38 0>;
++			interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "nand";
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
+index 683dcbe27cbd..8c11190c5218 100644
+--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
++++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
+@@ -130,6 +130,17 @@
+ 	};
+ };
+ 
++&cpu0 {
++	/* CPU rated to 1GHz, not 1.2GHz as per the default settings */
++	operating-points = <
++		/* kHz   uV */
++		166666  850000
++		400000  900000
++		800000  1050000
++		1000000 1200000
++	>;
++};
++
+ &esdhc1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_esdhc1>;
+diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
+index 16358bf8d1db..97e8b9b0b750 100644
+--- a/arch/arm/boot/dts/r8a7790.dtsi
++++ b/arch/arm/boot/dts/r8a7790.dtsi
+@@ -153,7 +153,7 @@
+ 
+ 			trips {
+ 				cpu-crit {
+-					temperature	= <115000>;
++					temperature	= <95000>;
+ 					hysteresis	= <0>;
+ 					type		= "critical";
+ 				};
+diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
+index 914f59166a99..2780e68a853b 100644
+--- a/arch/arm/boot/dts/tegra20.dtsi
++++ b/arch/arm/boot/dts/tegra20.dtsi
+@@ -706,7 +706,7 @@
+ 		phy_type = "ulpi";
+ 		clocks = <&tegra_car TEGRA20_CLK_USB2>,
+ 			 <&tegra_car TEGRA20_CLK_PLL_U>,
+-			 <&tegra_car TEGRA20_CLK_CDEV2>;
++			 <&tegra_car TEGRA20_CLK_PLL_P_OUT4>;
+ 		clock-names = "reg", "pll_u", "ulpi-link";
+ 		resets = <&tegra_car 58>, <&tegra_car 22>;
+ 		reset-names = "usb", "utmi-pads";
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index fc91205ff46c..5bf9443cfbaa 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
+ 
+ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+ {
+-	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
++	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
+ 
+ 	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
+ 				  PCI_IO_VIRT_BASE + offset + SZ_64K,
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 6cb0fa92a651..9f6ae9686dac 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -118,11 +118,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
+ 
+ 		/*
+ 		 * If HW_AFDBM is enabled, then the HW could turn on
+-		 * the dirty bit for any page in the set, so check
+-		 * them all.  All hugetlb entries are already young.
++		 * the dirty or accessed bit for any page in the set,
++		 * so check them all.
+ 		 */
+ 		if (pte_dirty(pte))
+ 			orig_pte = pte_mkdirty(orig_pte);
++
++		if (pte_young(pte))
++			orig_pte = pte_mkyoung(orig_pte);
+ 	}
+ 
+ 	if (valid)
+@@ -347,10 +350,13 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ 	if (!pte_same(orig_pte, pte))
+ 		changed = 1;
+ 
+-	/* Make sure we don't lose the dirty state */
++	/* Make sure we don't lose the dirty or young state */
+ 	if (pte_dirty(orig_pte))
+ 		pte = pte_mkdirty(pte);
+ 
++	if (pte_young(orig_pte))
++		pte = pte_mkyoung(pte);
++
+ 	hugeprot = pte_pgprot(pte);
+ 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
+ 		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index c82457b0e733..23e3d3e0ee5b 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -29,6 +29,7 @@ config MIPS
+ 	select GENERIC_SMP_IDLE_THREAD
+ 	select GENERIC_TIME_VSYSCALL
+ 	select HANDLE_DOMAIN_IRQ
++	select HAVE_ARCH_COMPILER_H
+ 	select HAVE_ARCH_JUMP_LABEL
+ 	select HAVE_ARCH_KGDB
+ 	select HAVE_ARCH_MMAP_RND_BITS if MMU
+diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
+index e081a265f422..cc2eb1b06050 100644
+--- a/arch/mips/include/asm/compiler.h
++++ b/arch/mips/include/asm/compiler.h
+@@ -8,6 +8,41 @@
+ #ifndef _ASM_COMPILER_H
+ #define _ASM_COMPILER_H
+ 
++/*
++ * With GCC 4.5 onwards we can use __builtin_unreachable to indicate to the
++ * compiler that a particular code path will never be hit. This allows it to be
++ * optimised out of the generated binary.
++ *
++ * Unfortunately at least GCC 4.6.3 through 7.3.0 inclusive suffer from a bug
++ * that can lead to instructions from beyond an unreachable statement being
++ * incorrectly reordered into earlier delay slots if the unreachable statement
++ * is the only content of a case in a switch statement. This can lead to
++ * seemingly random behaviour, such as invalid memory accesses from incorrectly
++ * reordered loads or stores. See this potential GCC fix for details:
++ *
++ *   https://gcc.gnu.org/ml/gcc-patches/2015-09/msg00360.html
++ *
++ * It is unclear whether GCC 8 onwards suffer from the same issue - nothing
++ * relevant is mentioned in GCC 8 release notes and nothing obviously relevant
++ * stands out in GCC commit logs, but these newer GCC versions generate very
++ * different code for the testcase which doesn't exhibit the bug.
++ *
++ * GCC also handles stack allocation suboptimally when calling noreturn
++ * functions or calling __builtin_unreachable():
++ *
++ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
++ *
++ * We work around both of these issues by placing a volatile asm statement,
++ * which GCC is prevented from reordering past, prior to __builtin_unreachable
++ * calls.
++ *
++ * The .insn statement is required to ensure that any branches to the
++ * statement, which sadly must be kept due to the asm statement, are known to
++ * be branches to code and satisfy linker requirements for microMIPS kernels.
++ */
++#undef barrier_before_unreachable
++#define barrier_before_unreachable() asm volatile(".insn")
++
+ #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+ #define GCC_IMM_ASM() "n"
+ #define GCC_REG_ACCUM "$0"
+diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
+index d5f2ee882f74..66c72b356ac0 100644
+--- a/arch/powerpc/include/asm/topology.h
++++ b/arch/powerpc/include/asm/topology.h
+@@ -81,6 +81,9 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
+ {
+ 	return 0;
+ }
++
++static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
++
+ #endif /* CONFIG_NUMA */
+ 
+ #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index 847ddffbf38a..b5cfab711651 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -163,13 +163,10 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+ 	pte_unmap(pte);
+ }
+ 
+-void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+-		pmd_t *pmdp, pmd_t pmd)
+-{
+-	pmd_t orig = *pmdp;
+-
+-	*pmdp = pmd;
+ 
++static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
++			   pmd_t orig, pmd_t pmd)
++{
+ 	if (mm == &init_mm)
+ 		return;
+ 
+@@ -219,6 +216,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ 	}
+ }
+ 
++void set_pmd_at(struct mm_struct *mm, unsigned long addr,
++		pmd_t *pmdp, pmd_t pmd)
++{
++	pmd_t orig = *pmdp;
++
++	*pmdp = pmd;
++	__set_pmd_acct(mm, addr, orig, pmd);
++}
++
+ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+ {
+@@ -227,6 +233,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ 	do {
+ 		old = *pmdp;
+ 	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
++	__set_pmd_acct(vma->vm_mm, address, old, pmd);
+ 
+ 	return old;
+ }
+diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
+index f5cbbba99283..4e1d7483b78c 100644
+--- a/arch/x86/events/amd/uncore.c
++++ b/arch/x86/events/amd/uncore.c
+@@ -35,6 +35,7 @@
+ 
+ static int num_counters_llc;
+ static int num_counters_nb;
++static bool l3_mask;
+ 
+ static HLIST_HEAD(uncore_unused_list);
+ 
+@@ -208,6 +209,13 @@ static int amd_uncore_event_init(struct perf_event *event)
+ 	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
+ 	hwc->idx = -1;
+ 
++	/*
++	 * SliceMask and ThreadMask need to be set for certain L3 events in
++	 * Family 17h. For other events, the two fields do not affect the count.
++	 */
++	if (l3_mask)
++		hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
++
+ 	if (event->cpu < 0)
+ 		return -EINVAL;
+ 
+@@ -542,6 +550,7 @@ static int __init amd_uncore_init(void)
+ 		amd_llc_pmu.name	  = "amd_l3";
+ 		format_attr_event_df.show = &event_show_df;
+ 		format_attr_event_l3.show = &event_show_l3;
++		l3_mask			  = true;
+ 	} else {
+ 		num_counters_nb		  = NUM_COUNTERS_NB;
+ 		num_counters_llc	  = NUM_COUNTERS_L2;
+@@ -549,6 +558,7 @@ static int __init amd_uncore_init(void)
+ 		amd_llc_pmu.name	  = "amd_l2";
+ 		format_attr_event_df	  = format_attr_event;
+ 		format_attr_event_l3	  = format_attr_event;
++		l3_mask			  = false;
+ 	}
+ 
+ 	amd_nb_pmu.attr_groups	= amd_uncore_attr_groups_df;
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 2dae3f585c01..a68aba8a482f 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -3807,16 +3807,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
+ 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
+ 	},
+ 	{ /* M3UPI0 Link 0 */
+-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
+-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
++		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
+ 	},
+ 	{ /* M3UPI0 Link 1 */
+-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
++		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
+ 	},
+ 	{ /* M3UPI1 Link 2 */
+-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
+-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
++		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
+ 	},
+ 	{ /* end: all zeroes */ }
+ };
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index 12f54082f4c8..78241b736f2a 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -46,6 +46,14 @@
+ #define INTEL_ARCH_EVENT_MASK	\
+ 	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+ 
++#define AMD64_L3_SLICE_SHIFT				48
++#define AMD64_L3_SLICE_MASK				\
++	((0xFULL) << AMD64_L3_SLICE_SHIFT)
++
++#define AMD64_L3_THREAD_SHIFT				56
++#define AMD64_L3_THREAD_MASK				\
++	((0xFFULL) << AMD64_L3_THREAD_SHIFT)
++
+ #define X86_RAW_EVENT_MASK		\
+ 	(ARCH_PERFMON_EVENTSEL_EVENT |	\
+ 	 ARCH_PERFMON_EVENTSEL_UMASK |	\
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index f3559b84cd75..04da826381c9 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -90,7 +90,7 @@ unsigned paravirt_patch_call(void *insnbuf,
+ 
+ 	if (len < 5) {
+ #ifdef CONFIG_RETPOLINE
+-		WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
++		WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
+ #endif
+ 		return len;	/* call too long for patch site */
+ 	}
+@@ -110,7 +110,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
+ 
+ 	if (len < 5) {
+ #ifdef CONFIG_RETPOLINE
+-		WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
++		WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
+ #endif
+ 		return len;	/* call too long for patch site */
+ 	}
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 5abae72266b7..6288e9d7068e 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -452,14 +452,21 @@ error:
+ 	 * done by is_rsvd_bits_set() above.
+ 	 *
+ 	 * We set up the value of exit_qualification to inject:
+-	 * [2:0] - Derive from [2:0] of real exit_qualification at EPT violation
++	 * [2:0] - Derive from the access bits. The exit_qualification might be
++	 *         out of date if it is serving an EPT misconfiguration.
+ 	 * [5:3] - Calculated by the page walk of the guest EPT page tables
+ 	 * [7:8] - Derived from [7:8] of real exit_qualification
+ 	 *
+ 	 * The other bits are set to 0.
+ 	 */
+ 	if (!(errcode & PFERR_RSVD_MASK)) {
+-		vcpu->arch.exit_qualification &= 0x187;
++		vcpu->arch.exit_qualification &= 0x180;
++		if (write_fault)
++			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
++		if (user_fault)
++			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
++		if (fetch_fault)
++			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
+ 		vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
+ 	}
+ #endif
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3856828ee1dc..8d688b213504 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7393,13 +7393,12 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
+ 
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ {
+-	struct fpu *fpu = &current->thread.fpu;
+ 	int r;
+ 
+-	fpu__initialize(fpu);
+-
+ 	kvm_sigset_activate(vcpu);
+ 
++	kvm_load_guest_fpu(vcpu);
++
+ 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
+ 		if (kvm_run->immediate_exit) {
+ 			r = -EINTR;
+@@ -7440,6 +7439,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ 		r = vcpu_run(vcpu);
+ 
+ out:
++	kvm_put_guest_fpu(vcpu);
+ 	post_kvm_run_save(vcpu);
+ 	kvm_sigset_deactivate(vcpu);
+ 
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index a2070ab86c82..89d5915b1a3f 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -611,12 +611,13 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ 		rc = be32_to_cpu(header->return_code);
+ 		if (rc != TPM2_RC_RETRY)
+ 			break;
+-		delay_msec *= 2;
++
+ 		if (delay_msec > TPM2_DURATION_LONG) {
+ 			dev_err(&chip->dev, "TPM is in retry loop\n");
+ 			break;
+ 		}
+ 		tpm_msleep(delay_msec);
++		delay_msec *= 2;
+ 		memcpy(buf, save, save_size);
+ 	}
+ 	return ret;
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 5c7ce5aaaf6f..b4ad169836e9 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -520,8 +520,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 
+ 	priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
+ 				   sizeof(struct crb_regs_tail));
+-	if (IS_ERR(priv->regs_t))
+-		return PTR_ERR(priv->regs_t);
++	if (IS_ERR(priv->regs_t)) {
++		ret = PTR_ERR(priv->regs_t);
++		goto out_relinquish_locality;
++	}
+ 
+ 	/*
+ 	 * PTT HW bug w/a: wake up the device to access
+@@ -529,7 +531,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 	 */
+ 	ret = __crb_cmd_ready(dev, priv);
+ 	if (ret)
+-		return ret;
++		goto out_relinquish_locality;
+ 
+ 	pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
+ 	pa_low  = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
+@@ -574,6 +576,8 @@ out:
+ 
+ 	__crb_go_idle(dev, priv);
+ 
++out_relinquish_locality:
++
+ 	__crb_relinquish_locality(dev, priv, 0);
+ 
+ 	return ret;
+diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
+index d4726a3358a4..d6993c2707d1 100644
+--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
++++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
+@@ -2802,6 +2802,7 @@ static int init_cmd_table(struct intel_gvt *gvt)
+ 		if (info) {
+ 			gvt_err("%s %s duplicated\n", e->info->name,
+ 					info->name);
++			kfree(e);
+ 			return -EEXIST;
+ 		}
+ 
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index ab70194a73db..c3a4f5d92391 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -3911,7 +3911,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ 
+-	return mlx5_get_vector_affinity(dev->mdev, comp_vector);
++	return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
+ }
+ 
+ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index ef9ee6c328a1..dfc190055167 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -1527,6 +1527,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
+ 	struct mlx5_ib_create_qp ucmd;
+ 	struct mlx5_ib_qp_base *base;
++	int mlx5_st;
+ 	void *qpc;
+ 	u32 *in;
+ 	int err;
+@@ -1535,6 +1536,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ 	spin_lock_init(&qp->sq.lock);
+ 	spin_lock_init(&qp->rq.lock);
+ 
++	mlx5_st = to_mlx5_st(init_attr->qp_type);
++	if (mlx5_st < 0)
++		return -EINVAL;
++
+ 	if (init_attr->rwq_ind_tbl) {
+ 		if (!udata)
+ 			return -ENOSYS;
+@@ -1688,7 +1693,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ 
+ 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ 
+-	MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
++	MLX5_SET(qpc, qpc, st, mlx5_st);
+ 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ 
+ 	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
+diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+index e4113ef09315..3c3453d213dc 100644
+--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+@@ -642,7 +642,7 @@ int usnic_ib_dereg_mr(struct ib_mr *ibmr)
+ 
+ 	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
+ 
+-	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
++	usnic_uiom_reg_release(mr->umem, ibmr->uobject->context);
+ 	kfree(mr);
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
+index 4381c0a9a873..9dd39daa602b 100644
+--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
++++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
+@@ -41,6 +41,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/list.h>
+ #include <linux/pci.h>
++#include <rdma/ib_verbs.h>
+ 
+ #include "usnic_log.h"
+ #include "usnic_uiom.h"
+@@ -88,7 +89,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
+ 		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
+ 			page = sg_page(sg);
+ 			pa = sg_phys(sg);
+-			if (dirty)
++			if (!PageDirty(page) && dirty)
+ 				set_page_dirty_lock(page);
+ 			put_page(page);
+ 			usnic_dbg("pa: %pa\n", &pa);
+@@ -114,6 +115,16 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
+ 	dma_addr_t pa;
+ 	unsigned int gup_flags;
+ 
++	/*
++	 * If the combination of the addr and size requested for this memory
++	 * region causes an integer overflow, return error.
++	 */
++	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
++		return -EINVAL;
++
++	if (!size)
++		return -EINVAL;
++
+ 	if (!can_do_mlock())
+ 		return -EPERM;
+ 
+@@ -127,7 +138,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
+ 
+ 	down_write(&current->mm->mmap_sem);
+ 
+-	locked = npages + current->mm->locked_vm;
++	locked = npages + current->mm->pinned_vm;
+ 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ 
+ 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+@@ -143,7 +154,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
+ 	ret = 0;
+ 
+ 	while (npages) {
+-		ret = get_user_pages(cur_base,
++		ret = get_user_pages_longterm(cur_base,
+ 					min_t(unsigned long, npages,
+ 					PAGE_SIZE / sizeof(struct page *)),
+ 					gup_flags, page_list, NULL);
+@@ -186,7 +197,7 @@ out:
+ 	if (ret < 0)
+ 		usnic_uiom_put_pages(chunk_list, 0);
+ 	else
+-		current->mm->locked_vm = locked;
++		current->mm->pinned_vm = locked;
+ 
+ 	up_write(&current->mm->mmap_sem);
+ 	free_page((unsigned long) page_list);
+@@ -420,18 +431,22 @@ out_free_uiomr:
+ 	return ERR_PTR(err);
+ }
+ 
+-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
++void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
++			    struct ib_ucontext *ucontext)
+ {
++	struct task_struct *task;
+ 	struct mm_struct *mm;
+ 	unsigned long diff;
+ 
+ 	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
+ 
+-	mm = get_task_mm(current);
+-	if (!mm) {
+-		kfree(uiomr);
+-		return;
+-	}
++	task = get_pid_task(ucontext->tgid, PIDTYPE_PID);
++	if (!task)
++		goto out;
++	mm = get_task_mm(task);
++	put_task_struct(task);
++	if (!mm)
++		goto out;
+ 
+ 	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+ 
+@@ -443,7 +458,7 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
+ 	 * up here and not be able to take the mmap_sem.  In that case
+ 	 * we defer the vm_locked accounting to the system workqueue.
+ 	 */
+-	if (closing) {
++	if (ucontext->closing) {
+ 		if (!down_write_trylock(&mm->mmap_sem)) {
+ 			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
+ 			uiomr->mm = mm;
+@@ -455,9 +470,10 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
+ 	} else
+ 		down_write(&mm->mmap_sem);
+ 
+-	current->mm->locked_vm -= diff;
++	mm->pinned_vm -= diff;
+ 	up_write(&mm->mmap_sem);
+ 	mmput(mm);
++out:
+ 	kfree(uiomr);
+ }
+ 
+diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
+index 431efe4143f4..8c096acff123 100644
+--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
++++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
+@@ -39,6 +39,8 @@
+ 
+ #include "usnic_uiom_interval_tree.h"
+ 
++struct ib_ucontext;
++
+ #define USNIC_UIOM_READ			(1)
+ #define USNIC_UIOM_WRITE		(2)
+ 
+@@ -89,7 +91,8 @@ void usnic_uiom_free_dev_list(struct device **devs);
+ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
+ 						unsigned long addr, size_t size,
+ 						int access, int dmasync);
+-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing);
++void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
++			    struct ib_ucontext *ucontext);
+ int usnic_uiom_init(char *drv_name);
+ void usnic_uiom_fini(void);
+ #endif /* USNIC_UIOM_H_ */
+diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
+index 3b4916680018..b4a8acc7bb7d 100644
+--- a/drivers/infiniband/sw/rxe/rxe_pool.c
++++ b/drivers/infiniband/sw/rxe/rxe_pool.c
+@@ -394,23 +394,25 @@ void *rxe_alloc(struct rxe_pool *pool)
+ 
+ 	kref_get(&pool->rxe->ref_cnt);
+ 
+-	if (atomic_inc_return(&pool->num_elem) > pool->max_elem) {
+-		atomic_dec(&pool->num_elem);
+-		rxe_dev_put(pool->rxe);
+-		rxe_pool_put(pool);
+-		return NULL;
+-	}
++	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
++		goto out_put_pool;
+ 
+ 	elem = kmem_cache_zalloc(pool_cache(pool),
+ 				 (pool->flags & RXE_POOL_ATOMIC) ?
+ 				 GFP_ATOMIC : GFP_KERNEL);
+ 	if (!elem)
+-		return NULL;
++		goto out_put_pool;
+ 
+ 	elem->pool = pool;
+ 	kref_init(&elem->ref_cnt);
+ 
+ 	return elem;
++
++out_put_pool:
++	atomic_dec(&pool->num_elem);
++	rxe_dev_put(pool->rxe);
++	rxe_pool_put(pool);
++	return NULL;
+ }
+ 
+ void rxe_elem_release(struct kref *kref)
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index c97384c914a4..d77e8e2ae05f 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -1203,13 +1203,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
+ 		ipoib_ib_dev_down(dev);
+ 
+ 	if (level == IPOIB_FLUSH_HEAVY) {
+-		rtnl_lock();
+ 		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ 			ipoib_ib_dev_stop(dev);
+ 
+-		result = ipoib_ib_dev_open(dev);
+-		rtnl_unlock();
+-		if (result)
++		if (ipoib_ib_dev_open(dev))
+ 			return;
+ 
+ 		if (netif_queue_stopped(dev))
+@@ -1249,7 +1246,9 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
+ 	struct ipoib_dev_priv *priv =
+ 		container_of(work, struct ipoib_dev_priv, flush_heavy);
+ 
++	rtnl_lock();
+ 	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
++	rtnl_unlock();
+ }
+ 
+ void ipoib_ib_dev_cleanup(struct net_device *dev)
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 898286ed47a1..b10e4c5641ea 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2547,6 +2547,9 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
+ 				*error = error_key;
+ 				return r;
+ 			}
++		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
++			*error = error_key;
++			return -ENOKEY;
+ 		}
+ 	}
+ 
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 6d22b22cb35b..064d88299adc 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1865,13 +1865,6 @@ static void uvc_unregister_video(struct uvc_device *dev)
+ {
+ 	struct uvc_streaming *stream;
+ 
+-	/* Unregistering all video devices might result in uvc_delete() being
+-	 * called from inside the loop if there's no open file handle. To avoid
+-	 * that, increment the refcount before iterating over the streams and
+-	 * decrement it when done.
+-	 */
+-	kref_get(&dev->ref);
+-
+ 	list_for_each_entry(stream, &dev->streams, list) {
+ 		if (!video_is_registered(&stream->vdev))
+ 			continue;
+@@ -1880,8 +1873,6 @@ static void uvc_unregister_video(struct uvc_device *dev)
+ 
+ 		uvc_debugfs_cleanup_stream(stream);
+ 	}
+-
+-	kref_put(&dev->ref, uvc_delete);
+ }
+ 
+ static int uvc_register_video(struct uvc_device *dev,
+@@ -2129,6 +2120,7 @@ static int uvc_probe(struct usb_interface *intf,
+ 
+ error:
+ 	uvc_unregister_video(dev);
++	kref_put(&dev->ref, uvc_delete);
+ 	return -ENODEV;
+ }
+ 
+@@ -2146,6 +2138,7 @@ static void uvc_disconnect(struct usb_interface *intf)
+ 		return;
+ 
+ 	uvc_unregister_video(dev);
++	kref_put(&dev->ref, uvc_delete);
+ }
+ 
+ static int uvc_suspend(struct usb_interface *intf, pm_message_t message)
+diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
+index 339295212935..40d7de2eea12 100644
+--- a/drivers/mmc/host/dw_mmc-rockchip.c
++++ b/drivers/mmc/host/dw_mmc-rockchip.c
+@@ -282,11 +282,11 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
+ 
+ 	priv->drv_clk = devm_clk_get(host->dev, "ciu-drive");
+ 	if (IS_ERR(priv->drv_clk))
+-		dev_dbg(host->dev, "ciu_drv not available\n");
++		dev_dbg(host->dev, "ciu-drive not available\n");
+ 
+ 	priv->sample_clk = devm_clk_get(host->dev, "ciu-sample");
+ 	if (IS_ERR(priv->sample_clk))
+-		dev_dbg(host->dev, "ciu_sample not available\n");
++		dev_dbg(host->dev, "ciu-sample not available\n");
+ 
+ 	host->priv = priv;
+ 
+diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
+index 19c000722cbc..34ecc12ee3d9 100644
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1005,6 +1005,12 @@ static const struct flash_info spi_nor_ids[] = {
+ 
+ 	/* ISSI */
+ 	{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024,   2, SECT_4K) },
++	{ "is25wp032", INFO(0x9d7016, 0, 64 * 1024,  64,
++			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++	{ "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
++			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++	{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
++			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ 
+ 	/* Macronix */
+ 	{ "mx25l512e",   INFO(0xc22010, 0, 64 * 1024,   1, SECT_4K) },
+diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
+index a1b33aa6054a..77babf1417a7 100644
+--- a/drivers/net/bonding/bond_netlink.c
++++ b/drivers/net/bonding/bond_netlink.c
+@@ -638,8 +638,7 @@ static int bond_fill_info(struct sk_buff *skb,
+ 				goto nla_put_failure;
+ 
+ 			if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
+-				    sizeof(bond->params.ad_actor_system),
+-				    &bond->params.ad_actor_system))
++				    ETH_ALEN, &bond->params.ad_actor_system))
+ 				goto nla_put_failure;
+ 		}
+ 		if (!bond_3ad_get_active_agg_info(bond, &info)) {
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 60b3ee29d82c..3c7813f04962 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1571,8 +1571,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
+ 	if (rc)
+ 		return rc;
+ 
+-	ena_init_napi(adapter);
+-
+ 	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
+ 
+ 	ena_refill_all_rx_bufs(adapter);
+@@ -1726,6 +1724,13 @@ static int ena_up(struct ena_adapter *adapter)
+ 
+ 	ena_setup_io_intr(adapter);
+ 
++	/* napi poll functions should be initialized before running
++	 * request_irq(), to handle a rare condition where there is a pending
++	 * interrupt, causing the ISR to fire immediately while the poll
++	 * function wasn't set yet, causing a null dereference
++	 */
++	ena_init_napi(adapter);
++
+ 	rc = ena_request_io_irq(adapter);
+ 	if (rc)
+ 		goto err_req_irq;
+@@ -3059,15 +3064,8 @@ err_rss_init:
+ 
+ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
+ {
+-	int release_bars;
+-
+-	if (ena_dev->mem_bar)
+-		devm_iounmap(&pdev->dev, ena_dev->mem_bar);
+-
+-	if (ena_dev->reg_bar)
+-		devm_iounmap(&pdev->dev, ena_dev->reg_bar);
++	int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
+ 
+-	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
+ 	pci_release_selected_regions(pdev, release_bars);
+ }
+ 
+diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
+index 82cc81385033..c7cde58feaf7 100644
+--- a/drivers/net/ethernet/amd/declance.c
++++ b/drivers/net/ethernet/amd/declance.c
+@@ -1029,6 +1029,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
+ 	int i, ret;
+ 	unsigned long esar_base;
+ 	unsigned char *esar;
++	const char *desc;
+ 
+ 	if (dec_lance_debug && version_printed++ == 0)
+ 		printk(version);
+@@ -1214,19 +1215,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
+ 	 */
+ 	switch (type) {
+ 	case ASIC_LANCE:
+-		printk("%s: IOASIC onboard LANCE", name);
++		desc = "IOASIC onboard LANCE";
+ 		break;
+ 	case PMAD_LANCE:
+-		printk("%s: PMAD-AA", name);
++		desc = "PMAD-AA";
+ 		break;
+ 	case PMAX_LANCE:
+-		printk("%s: PMAX onboard LANCE", name);
++		desc = "PMAX onboard LANCE";
+ 		break;
+ 	}
+ 	for (i = 0; i < 6; i++)
+ 		dev->dev_addr[i] = esar[i * 4];
+ 
+-	printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
++	printk("%s: %s, addr = %pM, irq = %d\n",
++	       name, desc, dev->dev_addr, dev->irq);
+ 
+ 	dev->netdev_ops = &lance_netdev_ops;
+ 	dev->watchdog_timeo = 5*HZ;
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 6ad0ca7ed3e9..abbd2894f870 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -339,9 +339,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
+ 	phydev->advertising = phydev->supported;
+ 
+ 	/* The internal PHY has its link interrupts routed to the
+-	 * Ethernet MAC ISRs
+-	 */
+-	if (priv->internal_phy)
++	 * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
++	 * that prevents the signaling of link UP interrupts when
++	 * the link operates at 10Mbps, so fallback to polling for
++	 * those versions of GENET.
++ 	 */
++	if (priv->internal_phy && !GENET_IS_V5(priv))
+ 		priv->phydev->irq = PHY_IGNORE_INTERRUPT;
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index b4f92de1efbd..d6f8d6c8b0f1 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -2000,6 +2000,7 @@ static void macb_configure_dma(struct macb *bp)
+ 		else
+ 			dmacfg &= ~GEM_BIT(TXCOEN);
+ 
++		dmacfg &= ~GEM_BIT(ADDR64);
+ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ 			dmacfg |= GEM_BIT(ADDR64);
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+index bf291e90cdb0..79053d2ce7a3 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+@@ -2159,6 +2159,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+ 			return -EPERM;
+ 		if (copy_from_user(&t, useraddr, sizeof(t)))
+ 			return -EFAULT;
++		if (t.cmd != CHELSIO_SET_QSET_PARAMS)
++			return -EINVAL;
+ 		if (t.qset_idx >= SGE_QSETS)
+ 			return -EINVAL;
+ 		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
+@@ -2258,6 +2260,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+ 		if (copy_from_user(&t, useraddr, sizeof(t)))
+ 			return -EFAULT;
+ 
++		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
++			return -EINVAL;
++
+ 		/* Display qsets for all ports when offload enabled */
+ 		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+ 			q1 = 0;
+@@ -2303,6 +2308,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+ 			return -EBUSY;
+ 		if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ 			return -EFAULT;
++		if (edata.cmd != CHELSIO_SET_QSET_NUM)
++			return -EINVAL;
+ 		if (edata.val < 1 ||
+ 			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
+ 			return -EINVAL;
+@@ -2343,6 +2350,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+ 			return -EPERM;
+ 		if (copy_from_user(&t, useraddr, sizeof(t)))
+ 			return -EFAULT;
++		if (t.cmd != CHELSIO_LOAD_FW)
++			return -EINVAL;
+ 		/* Check t.len sanity ? */
+ 		fw_data = memdup_user(useraddr + sizeof(t), t.len);
+ 		if (IS_ERR(fw_data))
+@@ -2366,6 +2375,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+ 			return -EBUSY;
+ 		if (copy_from_user(&m, useraddr, sizeof(m)))
+ 			return -EFAULT;
++		if (m.cmd != CHELSIO_SETMTUTAB)
++			return -EINVAL;
+ 		if (m.nmtus != NMTUS)
+ 			return -EINVAL;
+ 		if (m.mtus[0] < 81)	/* accommodate SACK */
+@@ -2407,6 +2418,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+ 			return -EBUSY;
+ 		if (copy_from_user(&m, useraddr, sizeof(m)))
+ 			return -EFAULT;
++		if (m.cmd != CHELSIO_SET_PM)
++			return -EINVAL;
+ 		if (!is_power_of_2(m.rx_pg_sz) ||
+ 			!is_power_of_2(m.tx_pg_sz))
+ 			return -EINVAL;	/* not power of 2 */
+@@ -2440,6 +2453,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+ 			return -EIO;	/* need the memory controllers */
+ 		if (copy_from_user(&t, useraddr, sizeof(t)))
+ 			return -EFAULT;
++		if (t.cmd != CHELSIO_GET_MEM)
++			return -EINVAL;
+ 		if ((t.addr & 7) || (t.len & 7))
+ 			return -EINVAL;
+ 		if (t.mem_id == MEM_CM)
+@@ -2492,6 +2507,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+ 			return -EAGAIN;
+ 		if (copy_from_user(&t, useraddr, sizeof(t)))
+ 			return -EFAULT;
++		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
++			return -EINVAL;
+ 
+ 		tp = (const struct trace_params *)&t.sip;
+ 		if (t.config_tx)
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index 2bfaf3e118b1..03f4fee1bbc9 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1879,7 +1879,7 @@ static int enic_open(struct net_device *netdev)
+ {
+ 	struct enic *enic = netdev_priv(netdev);
+ 	unsigned int i;
+-	int err;
++	int err, ret;
+ 
+ 	err = enic_request_intr(enic);
+ 	if (err) {
+@@ -1936,10 +1936,9 @@ static int enic_open(struct net_device *netdev)
+ 
+ err_out_free_rq:
+ 	for (i = 0; i < enic->rq_count; i++) {
+-		err = vnic_rq_disable(&enic->rq[i]);
+-		if (err)
+-			return err;
+-		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
++		ret = vnic_rq_disable(&enic->rq[i]);
++		if (!ret)
++			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ 	}
+ 	enic_dev_notify_unset(enic);
+ err_out_free_intr:
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 7e2b70c2bba3..39f399741647 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -3900,8 +3900,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
+ 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ 				   NETIF_F_TSO | NETIF_F_TSO6 |
+ 				   NETIF_F_GSO_UDP_TUNNEL;
+-	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+-	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ 
+ 	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
+ 		 be16_to_cpu(port));
+@@ -3923,8 +3921,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
+ 	adapter->vxlan_port = 0;
+ 
+ 	netdev->hw_enc_features = 0;
+-	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
+-	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
+ }
+ 
+ static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
+@@ -5215,6 +5211,7 @@ static void be_netdev_init(struct net_device *netdev)
+ 	struct be_adapter *adapter = netdev_priv(netdev);
+ 
+ 	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
++		NETIF_F_GSO_UDP_TUNNEL |
+ 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ 		NETIF_F_HW_VLAN_CTAG_TX;
+ 	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
+index 44720f83af27..4d4f16ad88c3 100644
+--- a/drivers/net/ethernet/freescale/fec.h
++++ b/drivers/net/ethernet/freescale/fec.h
+@@ -451,6 +451,10 @@ struct bufdesc_ex {
+  * initialisation.
+  */
+ #define FEC_QUIRK_MIB_CLEAR		(1 << 15)
++/* Only i.MX25/i.MX27/i.MX28 controller supports FRBR,FRSR registers,
++ * those FIFO receive registers are resolved in other platforms.
++ */
++#define FEC_QUIRK_HAS_FRREG		(1 << 16)
+ 
+ struct bufdesc_prop {
+ 	int qid;
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index eb2ea231c7ca..ce55c8f7f33a 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -89,14 +89,16 @@ static struct platform_device_id fec_devtype[] = {
+ 		.driver_data = 0,
+ 	}, {
+ 		.name = "imx25-fec",
+-		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
++		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
++			       FEC_QUIRK_HAS_FRREG,
+ 	}, {
+ 		.name = "imx27-fec",
+-		.driver_data = FEC_QUIRK_MIB_CLEAR,
++		.driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+ 	}, {
+ 		.name = "imx28-fec",
+ 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+-				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
++				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
++				FEC_QUIRK_HAS_FRREG,
+ 	}, {
+ 		.name = "imx6q-fec",
+ 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+@@ -1155,7 +1157,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
+ 		napi_disable(&fep->napi);
+ 		netif_tx_lock_bh(ndev);
+ 		fec_restart(ndev);
+-		netif_wake_queue(ndev);
++		netif_tx_wake_all_queues(ndev);
+ 		netif_tx_unlock_bh(ndev);
+ 		napi_enable(&fep->napi);
+ 	}
+@@ -1270,7 +1272,7 @@ skb_done:
+ 
+ 		/* Since we have freed up a buffer, the ring is no longer full
+ 		 */
+-		if (netif_queue_stopped(ndev)) {
++		if (netif_tx_queue_stopped(nq)) {
+ 			entries_free = fec_enet_get_free_txdesc_num(txq);
+ 			if (entries_free >= txq->tx_wake_threshold)
+ 				netif_tx_wake_queue(nq);
+@@ -1747,7 +1749,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
+ 			napi_disable(&fep->napi);
+ 			netif_tx_lock_bh(ndev);
+ 			fec_restart(ndev);
+-			netif_wake_queue(ndev);
++			netif_tx_wake_all_queues(ndev);
+ 			netif_tx_unlock_bh(ndev);
+ 			napi_enable(&fep->napi);
+ 		}
+@@ -2166,7 +2168,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
+ 	memset(buf, 0, regs->len);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
+-		off = fec_enet_register_offset[i] / 4;
++		off = fec_enet_register_offset[i];
++
++		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
++		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
++			continue;
++
++		off >>= 2;
+ 		buf[off] = readl(&theregs[off]);
+ 	}
+ }
+@@ -2249,7 +2257,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
+ 		napi_disable(&fep->napi);
+ 		netif_tx_lock_bh(ndev);
+ 		fec_restart(ndev);
+-		netif_wake_queue(ndev);
++		netif_tx_wake_all_queues(ndev);
+ 		netif_tx_unlock_bh(ndev);
+ 		napi_enable(&fep->napi);
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+index 9d64d0759ee9..a5dd99aaf321 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+@@ -257,7 +257,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
+ }
+ 
+ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+-				    struct ieee_ets *ets)
++				    struct ieee_ets *ets,
++				    bool zero_sum_allowed)
+ {
+ 	bool have_ets_tc = false;
+ 	int bw_sum = 0;
+@@ -282,8 +283,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+ 	}
+ 
+ 	if (have_ets_tc && bw_sum != 100) {
+-		netdev_err(netdev,
+-			   "Failed to validate ETS: BW sum is illegal\n");
++		if (bw_sum || (!bw_sum && !zero_sum_allowed))
++			netdev_err(netdev,
++				   "Failed to validate ETS: BW sum is illegal\n");
+ 		return -EINVAL;
+ 	}
+ 	return 0;
+@@ -298,7 +300,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
+ 	if (!MLX5_CAP_GEN(priv->mdev, ets))
+ 		return -EOPNOTSUPP;
+ 
+-	err = mlx5e_dbcnl_validate_ets(netdev, ets);
++	err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
+ 	if (err)
+ 		return err;
+ 
+@@ -477,12 +479,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
+ 		ets.prio_tc[i]  = cee_cfg->prio_to_pg_map[i];
+ 	}
+ 
+-	err = mlx5e_dbcnl_validate_ets(netdev, &ets);
+-	if (err) {
+-		netdev_err(netdev,
+-			   "%s, Failed to validate ETS: %d\n", __func__, err);
++	err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
++	if (err)
+ 		goto out;
+-	}
+ 
+ 	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+ 	if (err) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 8285e6d24f30..3d3fd03fa450 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -635,43 +635,15 @@ static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+ 	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+ }
+ 
+-static __be32 mlx5e_get_fcs(struct sk_buff *skb)
++static u32 mlx5e_get_fcs(const struct sk_buff *skb)
+ {
+-	int last_frag_sz, bytes_in_prev, nr_frags;
+-	u8 *fcs_p1, *fcs_p2;
+-	skb_frag_t *last_frag;
+-	__be32 fcs_bytes;
++	const void *fcs_bytes;
++	u32 _fcs_bytes;
+ 
+-	if (!skb_is_nonlinear(skb))
+-		return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
++	fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
++				       ETH_FCS_LEN, &_fcs_bytes);
+ 
+-	nr_frags = skb_shinfo(skb)->nr_frags;
+-	last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
+-	last_frag_sz = skb_frag_size(last_frag);
+-
+-	/* If all FCS data is in last frag */
+-	if (last_frag_sz >= ETH_FCS_LEN)
+-		return *(__be32 *)(skb_frag_address(last_frag) +
+-				   last_frag_sz - ETH_FCS_LEN);
+-
+-	fcs_p2 = (u8 *)skb_frag_address(last_frag);
+-	bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
+-
+-	/* Find where the other part of the FCS is - Linear or another frag */
+-	if (nr_frags == 1) {
+-		fcs_p1 = skb_tail_pointer(skb);
+-	} else {
+-		skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
+-
+-		fcs_p1 = skb_frag_address(prev_frag) +
+-			    skb_frag_size(prev_frag);
+-	}
+-	fcs_p1 -= bytes_in_prev;
+-
+-	memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
+-	memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
+-
+-	return fcs_bytes;
++	return __get_unaligned_cpu32(fcs_bytes);
+ }
+ 
+ static inline void mlx5e_handle_csum(struct net_device *netdev,
+@@ -693,8 +665,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ 		skb->ip_summed = CHECKSUM_COMPLETE;
+ 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ 		if (unlikely(netdev->features & NETIF_F_RXFCS))
+-			skb->csum = csum_add(skb->csum,
+-					     (__force __wsum)mlx5e_get_fcs(skb));
++			skb->csum = csum_block_add(skb->csum,
++						   (__force __wsum)mlx5e_get_fcs(skb),
++						   skb->len - ETH_FCS_LEN);
+ 		rq->stats.csum_complete++;
+ 		return;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index eb91de86202b..5da0b6e11530 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -262,7 +262,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
+ 		case MLX5_PFAULT_SUBTYPE_WQE:
+ 			/* WQE based event */
+ 			pfault->type =
+-				be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
++				(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
+ 			pfault->token =
+ 				be32_to_cpu(pf_eqe->wqe.token);
+ 			pfault->wqe.wq_num =
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+index e41f28602535..eb666877d1aa 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+@@ -1672,7 +1672,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
+ 
+ 		cm_info->local_ip[0] = ntohl(iph->daddr);
+ 		cm_info->remote_ip[0] = ntohl(iph->saddr);
+-		cm_info->ip_version = TCP_IPV4;
++		cm_info->ip_version = QED_TCP_IPV4;
+ 
+ 		ip_hlen = (iph->ihl) * sizeof(u32);
+ 		*payload_len = ntohs(iph->tot_len) - ip_hlen;
+@@ -1692,7 +1692,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
+ 			cm_info->remote_ip[i] =
+ 			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
+ 		}
+-		cm_info->ip_version = TCP_IPV6;
++		cm_info->ip_version = QED_TCP_IPV6;
+ 
+ 		ip_hlen = sizeof(*ip6h);
+ 		*payload_len = ntohs(ip6h->payload_len);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+index fb7c2d1562ae..bedbf840fd7d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+@@ -129,23 +129,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
+ 
+ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
+ {
+-	enum roce_flavor flavor;
+-
+ 	switch (roce_mode) {
+ 	case ROCE_V1:
+-		flavor = PLAIN_ROCE;
+-		break;
++		return PLAIN_ROCE;
+ 	case ROCE_V2_IPV4:
+-		flavor = RROCE_IPV4;
+-		break;
++		return RROCE_IPV4;
+ 	case ROCE_V2_IPV6:
+-		flavor = ROCE_V2_IPV6;
+-		break;
++		return RROCE_IPV6;
+ 	default:
+-		flavor = MAX_ROCE_MODE;
+-		break;
++		return MAX_ROCE_FLAVOR;
+ 	}
+-	return flavor;
+ }
+ 
+ void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+index 46d0c3cb83a5..d7c5965328be 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
+ static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
+ 				  struct qed_tunnel_info *p_src)
+ {
+-	enum tunnel_clss type;
++	int type;
+ 
+ 	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+ 	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
+index 6eb85db69f9a..dd8ebf6d380f 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
+@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+ 	}
+ 
+ 	if (!p_iov->b_pre_fp_hsi &&
+-	    ETH_HSI_VER_MINOR &&
+ 	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
+ 		DP_INFO(p_hwfn,
+ 			"PF is using older fastpath HSI; %02x.%02x is configured\n",
+@@ -572,7 +571,7 @@ free_p_iov:
+ static void
+ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ 			   struct qed_tunn_update_type *p_src,
+-			   enum qed_tunn_clss mask, u8 *p_cls)
++			   enum qed_tunn_mode mask, u8 *p_cls)
+ {
+ 	if (p_src->b_update_mode) {
+ 		p_req->tun_mode_update_mask |= BIT(mask);
+@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ static void
+ qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ 			 struct qed_tunn_update_type *p_src,
+-			 enum qed_tunn_clss mask,
++			 enum qed_tunn_mode mask,
+ 			 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
+ 			 u8 *p_update_port, u16 *p_udp_port)
+ {
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+index 929fb8d96ec0..8d979fef5fc7 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+@@ -205,6 +205,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
+ 	if (!skb)
+ 		return RX_HANDLER_CONSUMED;
+ 
++	if (skb->pkt_type == PACKET_LOOPBACK)
++		return RX_HANDLER_PASS;
++
+ 	dev = skb->dev;
+ 	port = rmnet_get_port(dev);
+ 
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index f7e540eeb877..1b61ce310132 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -7579,17 +7579,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
+ 	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
+ 	struct net_device *dev = tp->dev;
+ 	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
+-	int work_done= 0;
++	int work_done;
+ 	u16 status;
+ 
+ 	status = rtl_get_events(tp);
+ 	rtl_ack_events(tp, status & ~tp->event_slow);
+ 
+-	if (status & RTL_EVENT_NAPI_RX)
+-		work_done = rtl_rx(dev, tp, (u32) budget);
++	work_done = rtl_rx(dev, tp, (u32) budget);
+ 
+-	if (status & RTL_EVENT_NAPI_TX)
+-		rtl_tx(dev, tp);
++	rtl_tx(dev, tp);
+ 
+ 	if (status & tp->event_slow) {
+ 		enable_mask &= ~tp->event_slow;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+index f5f37bfa1d58..ff2eeddf588e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+@@ -133,7 +133,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+  */
+ int stmmac_mdio_reset(struct mii_bus *bus)
+ {
+-#if defined(CONFIG_STMMAC_PLATFORM)
++#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
+ 	struct net_device *ndev = bus->priv;
+ 	struct stmmac_priv *priv = netdev_priv(ndev);
+ 	unsigned int mii_address = priv->hw->mii.addr;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+index 6a393b16a1fc..c54a50dbd5ac 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+@@ -303,7 +303,7 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
+ 	pci_disable_device(pdev);
+ }
+ 
+-static int stmmac_pci_suspend(struct device *dev)
++static int __maybe_unused stmmac_pci_suspend(struct device *dev)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+ 	int ret;
+@@ -321,7 +321,7 @@ static int stmmac_pci_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
+-static int stmmac_pci_resume(struct device *dev)
++static int __maybe_unused stmmac_pci_resume(struct device *dev)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+ 	int ret;
+diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
+index 7a7c5224a336..16a6e1193912 100644
+--- a/drivers/net/hamradio/yam.c
++++ b/drivers/net/hamradio/yam.c
+@@ -980,6 +980,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ 				 sizeof(struct yamdrv_ioctl_mcs));
+ 		if (IS_ERR(ym))
+ 			return PTR_ERR(ym);
++		if (ym->cmd != SIOCYAMSMCS)
++			return -EINVAL;
+ 		if (ym->bitrate > YAM_MAXBITRATE) {
+ 			kfree(ym);
+ 			return -EINVAL;
+@@ -995,6 +997,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ 		if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
+ 			 return -EFAULT;
+ 
++		if (yi.cmd != SIOCYAMSCFG)
++			return -EINVAL;
+ 		if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
+ 			return -EINVAL;		/* Cannot change this parameter when up */
+ 		if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index a174d05a9752..fe76e2c4022a 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1641,6 +1641,23 @@ int genphy_config_init(struct phy_device *phydev)
+ }
+ EXPORT_SYMBOL(genphy_config_init);
+ 
++/* This is used for the phy device which doesn't support the MMD extended
++ * register access, but it does have side effect when we are trying to access
++ * the MMD register via indirect method.
++ */
++int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum)
++{
++	return -EOPNOTSUPP;
++}
++EXPORT_SYMBOL(genphy_read_mmd_unsupported);
++
++int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
++				 u16 regnum, u16 val)
++{
++	return -EOPNOTSUPP;
++}
++EXPORT_SYMBOL(genphy_write_mmd_unsupported);
++
+ int genphy_suspend(struct phy_device *phydev)
+ {
+ 	int value;
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index e4a6ed88b9cf..79f28b9186c6 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -561,7 +561,7 @@ void phylink_destroy(struct phylink *pl)
+ {
+ 	if (pl->sfp_bus)
+ 		sfp_unregister_upstream(pl->sfp_bus);
+-	if (!IS_ERR(pl->link_gpio))
++	if (!IS_ERR_OR_NULL(pl->link_gpio))
+ 		gpiod_put(pl->link_gpio);
+ 
+ 	cancel_work_sync(&pl->resolve);
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index 9cbe645e3d89..7d38af5ed4b5 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -138,6 +138,8 @@ static struct phy_driver realtek_drvs[] = {
+ 		.read_status	= &genphy_read_status,
+ 		.ack_interrupt	= &rtl821x_ack_interrupt,
+ 		.config_intr	= &rtl8211b_config_intr,
++		.read_mmd	= &genphy_read_mmd_unsupported,
++		.write_mmd	= &genphy_write_mmd_unsupported,
+ 	}, {
+ 		.phy_id		= 0x001cc914,
+ 		.name		= "RTL8211DN Gigabit Ethernet",
+diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
+index 522d2900cd1d..e9fcf6ef716a 100644
+--- a/drivers/net/usb/asix_common.c
++++ b/drivers/net/usb/asix_common.c
+@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+ 	struct usbnet *dev = netdev_priv(net);
+ 	u8 opt = 0;
+ 
++	if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
++		return -EINVAL;
++
+ 	if (wolinfo->wolopts & WAKE_PHY)
+ 		opt |= AX_MONITOR_LINK;
+ 	if (wolinfo->wolopts & WAKE_MAGIC)
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index f32261ecd215..0f69b77e8502 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+ 	struct usbnet *dev = netdev_priv(net);
+ 	u8 opt = 0;
+ 
++	if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
++		return -EINVAL;
++
+ 	if (wolinfo->wolopts & WAKE_PHY)
+ 		opt |= AX_MONITOR_MODE_RWLC;
+ 	if (wolinfo->wolopts & WAKE_MAGIC)
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 9e3f632e22f1..50e2e10a9050 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -1375,19 +1375,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	pdata->wol = 0;
+-	if (wol->wolopts & WAKE_UCAST)
+-		pdata->wol |= WAKE_UCAST;
+-	if (wol->wolopts & WAKE_MCAST)
+-		pdata->wol |= WAKE_MCAST;
+-	if (wol->wolopts & WAKE_BCAST)
+-		pdata->wol |= WAKE_BCAST;
+-	if (wol->wolopts & WAKE_MAGIC)
+-		pdata->wol |= WAKE_MAGIC;
+-	if (wol->wolopts & WAKE_PHY)
+-		pdata->wol |= WAKE_PHY;
+-	if (wol->wolopts & WAKE_ARP)
+-		pdata->wol |= WAKE_ARP;
++	if (wol->wolopts & ~WAKE_ALL)
++		return -EINVAL;
++
++	pdata->wol = wol->wolopts;
+ 
+ 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+ 
+@@ -2517,10 +2508,6 @@ static int lan78xx_open(struct net_device *net)
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = lan78xx_reset(dev);
+-	if (ret < 0)
+-		goto done;
+-
+ 	phy_start(net->phydev);
+ 
+ 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 0fa64cc1a011..66beff4d7646 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -4497,6 +4497,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ 	if (!rtl_can_wakeup(tp))
+ 		return -EOPNOTSUPP;
+ 
++	if (wol->wolopts & ~WAKE_ANY)
++		return -EINVAL;
++
+ 	ret = usb_autopm_get_interface(tp->intf);
+ 	if (ret < 0)
+ 		goto out_set_wol;
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index b64b1ee56d2d..ec287c9741e8 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
+ 	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ 	int ret;
+ 
++	if (wolinfo->wolopts & ~SUPPORTED_WAKE)
++		return -EINVAL;
++
+ 	pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
+ 
+ 	ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 309b88acd3d0..99e684e39d35 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
+ 	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ 	int ret;
+ 
++	if (wolinfo->wolopts & ~SUPPORTED_WAKE)
++		return -EINVAL;
++
+ 	pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
+ 
+ 	ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
+diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
+index 9277a0f228df..35f39f23d881 100644
+--- a/drivers/net/usb/sr9800.c
++++ b/drivers/net/usb/sr9800.c
+@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+ 	struct usbnet *dev = netdev_priv(net);
+ 	u8 opt = 0;
+ 
++	if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
++		return -EINVAL;
++
+ 	if (wolinfo->wolopts & WAKE_PHY)
+ 		opt |= SR_MONITOR_LINK;
+ 	if (wolinfo->wolopts & WAKE_MAGIC)
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 910c46b47769..f528e9ac3413 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1872,8 +1872,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
+ 	/* Make sure no work handler is accessing the device */
+ 	flush_work(&vi->config_work);
+ 
++	netif_tx_lock_bh(vi->dev);
+ 	netif_device_detach(vi->dev);
+-	netif_tx_disable(vi->dev);
++	netif_tx_unlock_bh(vi->dev);
+ 	cancel_delayed_work_sync(&vi->refill);
+ 
+ 	if (netif_running(vi->dev)) {
+@@ -1909,7 +1910,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
+ 		}
+ 	}
+ 
++	netif_tx_lock_bh(vi->dev);
+ 	netif_device_attach(vi->dev);
++	netif_tx_unlock_bh(vi->dev);
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+index 3684a3e180e5..007bfe7656a4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+@@ -95,8 +95,8 @@ enum {
+ #define IWL_ALIVE_FLG_RFKILL	BIT(0)
+ 
+ struct iwl_lmac_alive {
+-	__le32 ucode_minor;
+ 	__le32 ucode_major;
++	__le32 ucode_minor;
+ 	u8 ver_subtype;
+ 	u8 ver_type;
+ 	u8 mac;
+@@ -113,8 +113,8 @@ struct iwl_lmac_alive {
+ } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+ 
+ struct iwl_umac_alive {
+-	__le32 umac_minor;		/* UMAC version: minor */
+ 	__le32 umac_major;		/* UMAC version: major */
++	__le32 umac_minor;		/* UMAC version: minor */
+ 	__le32 error_info_addr;		/* SRAM address for UMAC error log */
+ 	__le32 dbg_print_buff_addr;
+ } __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index 2fa7ec466275..839010417241 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -950,7 +950,20 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
+ 	if (trigger)
+ 		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+ 
+-	if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW,
++	/*
++	 * If the loading of the FW completed successfully, the next step is to
++	 * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
++	 * zero, the FW was already loaded successully. If the state is "NO_FW"
++	 * in such a case - WARN and exit, since FW may be dead. Otherwise, we
++	 * can try to collect the data, since FW might just not be fully
++	 * loaded (no "ALIVE" yet), and the debug data is accessible.
++	 *
++	 * Corner case: got the FW alive but crashed before getting the SMEM
++	 *	config. In such a case, due to HW access problems, we might
++	 *	collect garbage.
++	 */
++	if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) &&
++		 fwrt->smem_cfg.num_lmacs,
+ 		 "Can't collect dbg data when FW isn't alive\n"))
+ 		return -EIO;
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+index 2d14a58cbdd7..c73e4be9bde3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+@@ -439,7 +439,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ 		rx_status->bw = RATE_INFO_BW_160;
+ 		break;
+ 	}
+-	if (rate_n_flags & RATE_MCS_SGI_MSK)
++	if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
++	    rate_n_flags & RATE_MCS_SGI_MSK)
+ 		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ 	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ 		rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index e2196dc35dc6..8ba8c70571fb 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -981,7 +981,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+ 		rx_status->bw = RATE_INFO_BW_160;
+ 		break;
+ 	}
+-	if (rate_n_flags & RATE_MCS_SGI_MSK)
++
++	if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
++	    rate_n_flags & RATE_MCS_SGI_MSK)
+ 		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ 	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ 		rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index aafa7aa18fbd..477f9f2f6626 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2730,8 +2730,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
+ 	list_add_tail(&data->list, &hwsim_radios);
+ 	spin_unlock_bh(&hwsim_radio_lock);
+ 
+-	if (idx > 0)
+-		hwsim_mcast_new_radio(idx, info, param);
++	hwsim_mcast_new_radio(idx, info, param);
+ 
+ 	return idx;
+ 
+diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
+index 43743c26c071..39bf85d0ade0 100644
+--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
++++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
+@@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev)
+ 	if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+ 		dev_info(dev, "Suspend without wake params -- powering down card\n");
+ 		if (priv->fw_ready) {
++			ret = lbs_suspend(priv);
++			if (ret)
++				return ret;
++
+ 			priv->power_up_on_resume = true;
+ 			if_sdio_power_off(card);
+ 		}
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 1a40fc3517a8..6ea95b316256 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1824,7 +1824,7 @@ static int talk_to_netback(struct xenbus_device *dev,
+ 	err = xen_net_read_mac(dev, info->netdev->dev_addr);
+ 	if (err) {
+ 		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
+-		goto out;
++		goto out_unlocked;
+ 	}
+ 
+ 	rtnl_lock();
+@@ -1939,6 +1939,7 @@ abort_transaction_no_dev_fatal:
+ 	xennet_destroy_queues(info);
+  out:
+ 	rtnl_unlock();
++out_unlocked:
+ 	device_unregister(&dev->dev);
+ 	return err;
+ }
+@@ -1964,10 +1965,6 @@ static int xennet_connect(struct net_device *dev)
+ 	/* talk_to_netback() sets the correct number of queues */
+ 	num_queues = dev->real_num_tx_queues;
+ 
+-	rtnl_lock();
+-	netdev_update_features(dev);
+-	rtnl_unlock();
+-
+ 	if (dev->reg_state == NETREG_UNINITIALIZED) {
+ 		err = register_netdev(dev);
+ 		if (err) {
+@@ -1977,6 +1974,10 @@ static int xennet_connect(struct net_device *dev)
+ 		}
+ 	}
+ 
++	rtnl_lock();
++	netdev_update_features(dev);
++	rtnl_unlock();
++
+ 	/*
+ 	 * All public and private state should now be sane.  Get
+ 	 * ready to start sending and receiving packets and give the driver
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 4f2747cd15a6..169dd7127f9e 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -3001,28 +3001,23 @@ static int qeth_send_startlan(struct qeth_card *card)
+ 	return rc;
+ }
+ 
+-static int qeth_default_setadapterparms_cb(struct qeth_card *card,
+-		struct qeth_reply *reply, unsigned long data)
++static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+ {
+-	struct qeth_ipa_cmd *cmd;
+-
+-	QETH_CARD_TEXT(card, 4, "defadpcb");
+-
+-	cmd = (struct qeth_ipa_cmd *) data;
+-	if (cmd->hdr.return_code == 0)
++	if (!cmd->hdr.return_code)
+ 		cmd->hdr.return_code =
+ 			cmd->data.setadapterparms.hdr.return_code;
+-	return 0;
++	return cmd->hdr.return_code;
+ }
+ 
+ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ 
+ 	QETH_CARD_TEXT(card, 3, "quyadpcb");
++	if (qeth_setadpparms_inspect_rc(cmd))
++		return 0;
+ 
+-	cmd = (struct qeth_ipa_cmd *) data;
+ 	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
+ 		card->info.link_type =
+ 		      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
+@@ -3030,7 +3025,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
+ 	}
+ 	card->options.adp.supported_funcs =
+ 		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
+-	return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
++	return 0;
+ }
+ 
+ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
+@@ -3122,22 +3117,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
+ static int qeth_query_switch_attributes_cb(struct qeth_card *card,
+ 				struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
+-	struct qeth_switch_info *sw_info;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ 	struct qeth_query_switch_attributes *attrs;
++	struct qeth_switch_info *sw_info;
+ 
+ 	QETH_CARD_TEXT(card, 2, "qswiatcb");
+-	cmd = (struct qeth_ipa_cmd *) data;
+-	sw_info = (struct qeth_switch_info *)reply->param;
+-	if (cmd->data.setadapterparms.hdr.return_code == 0) {
+-		attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
+-		sw_info->capabilities = attrs->capabilities;
+-		sw_info->settings = attrs->settings;
+-		QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
+-							sw_info->settings);
+-	}
+-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
++	if (qeth_setadpparms_inspect_rc(cmd))
++		return 0;
+ 
++	sw_info = (struct qeth_switch_info *)reply->param;
++	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
++	sw_info->capabilities = attrs->capabilities;
++	sw_info->settings = attrs->settings;
++	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
++			sw_info->settings);
+ 	return 0;
+ }
+ 
+@@ -4188,16 +4181,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
+ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ 	struct qeth_ipacmd_setadpparms *setparms;
+ 
+ 	QETH_CARD_TEXT(card, 4, "prmadpcb");
+ 
+-	cmd = (struct qeth_ipa_cmd *) data;
+ 	setparms = &(cmd->data.setadapterparms);
+-
+-	qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
+-	if (cmd->hdr.return_code) {
++	if (qeth_setadpparms_inspect_rc(cmd)) {
+ 		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
+ 		setparms->data.mode = SET_PROMISC_MODE_OFF;
+ 	}
+@@ -4267,11 +4257,12 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
+ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ 
+ 	QETH_CARD_TEXT(card, 4, "chgmaccb");
++	if (qeth_setadpparms_inspect_rc(cmd))
++		return 0;
+ 
+-	cmd = (struct qeth_ipa_cmd *) data;
+ 	if (!card->options.layer2 ||
+ 	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
+ 		memcpy(card->dev->dev_addr,
+@@ -4279,7 +4270,6 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
+ 		       OSA_ADDR_LEN);
+ 		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
+ 	}
+-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ 	return 0;
+ }
+ 
+@@ -4310,13 +4300,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
+ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ 	struct qeth_set_access_ctrl *access_ctrl_req;
+ 	int fallback = *(int *)reply->param;
+ 
+ 	QETH_CARD_TEXT(card, 4, "setaccb");
++	if (cmd->hdr.return_code)
++		return 0;
++	qeth_setadpparms_inspect_rc(cmd);
+ 
+-	cmd = (struct qeth_ipa_cmd *) data;
+ 	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
+ 	QETH_DBF_TEXT_(SETUP, 2, "setaccb");
+ 	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
+@@ -4389,7 +4381,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
+ 			card->options.isolation = card->options.prev_isolation;
+ 		break;
+ 	}
+-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ 	return 0;
+ }
+ 
+@@ -4677,14 +4668,15 @@ out:
+ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+ 	struct qeth_qoat_priv *priv;
+ 	char *resdata;
+ 	int resdatalen;
+ 
+ 	QETH_CARD_TEXT(card, 3, "qoatcb");
++	if (qeth_setadpparms_inspect_rc(cmd))
++		return 0;
+ 
+-	cmd = (struct qeth_ipa_cmd *)data;
+ 	priv = (struct qeth_qoat_priv *)reply->param;
+ 	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
+ 	resdata = (char *)data + 28;
+@@ -4778,21 +4770,18 @@ out:
+ static int qeth_query_card_info_cb(struct qeth_card *card,
+ 				   struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+ 	struct qeth_query_card_info *card_info;
+-	struct carrier_info *carrier_info;
+ 
+ 	QETH_CARD_TEXT(card, 2, "qcrdincb");
+-	carrier_info = (struct carrier_info *)reply->param;
+-	cmd = (struct qeth_ipa_cmd *)data;
+-	card_info = &cmd->data.setadapterparms.data.card_info;
+-	if (cmd->data.setadapterparms.hdr.return_code == 0) {
+-		carrier_info->card_type = card_info->card_type;
+-		carrier_info->port_mode = card_info->port_mode;
+-		carrier_info->port_speed = card_info->port_speed;
+-	}
++	if (qeth_setadpparms_inspect_rc(cmd))
++		return 0;
+ 
+-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
++	card_info = &cmd->data.setadapterparms.data.card_info;
++	carrier_info->card_type = card_info->card_type;
++	carrier_info->port_mode = card_info->port_mode;
++	carrier_info->port_speed = card_info->port_speed;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 998788a967be..3e38bae6ecde 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -2506,8 +2506,8 @@ int aac_command_thread(void *data)
+ 			/* Synchronize our watches */
+ 			if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
+ 			 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
+-				difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ)
+-				  + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
++				difference = HZ + HZ / 2 -
++					     now.tv_nsec / (NSEC_PER_SEC / HZ);
+ 			else {
+ 				if (now.tv_nsec > NSEC_PER_SEC / 2)
+ 					++now.tv_sec;
+@@ -2531,6 +2531,10 @@ int aac_command_thread(void *data)
+ 		if (kthread_should_stop())
+ 			break;
+ 
++		/*
++		 * we probably want usleep_range() here instead of the
++		 * jiffies computation
++		 */
+ 		schedule_timeout(difference);
+ 
+ 		if (kthread_should_stop())
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index b491af31a5f8..a06b24a61622 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -3580,11 +3580,9 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+ static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
+ 				    struct ibmvfc_target *tgt)
+ {
+-	if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
+-		   sizeof(tgt->ids.port_name)))
++	if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
+ 		return 1;
+-	if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
+-		   sizeof(tgt->ids.node_name)))
++	if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
+ 		return 1;
+ 	if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
+ 		return 1;
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 1d42d38f5a45..0e19f6bc24ff 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1365,8 +1365,8 @@ qla24xx_abort_sp_done(void *ptr, int res)
+ 	srb_t *sp = ptr;
+ 	struct srb_iocb *abt = &sp->u.iocb_cmd;
+ 
+-	del_timer(&sp->u.iocb_cmd.timer);
+-	complete(&abt->u.abt.comp);
++	if (del_timer(&sp->u.iocb_cmd.timer))
++		complete(&abt->u.abt.comp);
+ }
+ 
+ int
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6d3091ff9b92..c7b284587365 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2498,6 +2498,8 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
+ 				sector_size = old_sector_size;
+ 				goto got_data;
+ 			}
++			/* Remember that READ CAPACITY(16) succeeded */
++			sdp->try_rc_10_first = 0;
+ 		}
+ 	}
+ 
+diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
+index 0c6065dba48a..4f27e95efcdd 100644
+--- a/drivers/soc/fsl/qbman/qman.c
++++ b/drivers/soc/fsl/qbman/qman.c
+@@ -2699,6 +2699,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+ {
+ 	unsigned long addr;
+ 
++	if (!p)
++		return -ENODEV;
++
+ 	addr = gen_pool_alloc(p, cnt);
+ 	if (!addr)
+ 		return -ENOMEM;
+diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
+index c646d8713861..681f7d4b7724 100644
+--- a/drivers/soc/fsl/qe/ucc.c
++++ b/drivers/soc/fsl/qe/ucc.c
+@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
+ {
+ 	u32 shift;
+ 
+-	shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
++	shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
+ 	shift -= tdm_num * 2;
+ 
+ 	return shift;
+diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
+index 4a22a9f06d96..eb7898353457 100644
+--- a/drivers/usb/usbip/vhci_sysfs.c
++++ b/drivers/usb/usbip/vhci_sysfs.c
+@@ -34,10 +34,10 @@
+ 
+ /*
+  * output example:
+- * hub port sta spd dev       sockfd    local_busid
+- * hs  0000 004 000 00000000  3         1-2.3
++ * hub port sta spd dev       sockfd local_busid
++ * hs  0000 004 000 00000000  000003 1-2.3
+  * ................................................
+- * ss  0008 004 000 00000000  4         2-3.4
++ * ss  0008 004 000 00000000  000004 2-3.4
+  * ................................................
+  *
+  * Output includes socket fd instead of socket pointer address to avoid
+@@ -61,13 +61,13 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd
+ 	if (vdev->ud.status == VDEV_ST_USED) {
+ 		*out += sprintf(*out, "%03u %08x ",
+ 				      vdev->speed, vdev->devid);
+-		*out += sprintf(*out, "%u %s",
++		*out += sprintf(*out, "%06u %s",
+ 				      vdev->ud.sockfd,
+ 				      dev_name(&vdev->udev->dev));
+ 
+ 	} else {
+ 		*out += sprintf(*out, "000 00000000 ");
+-		*out += sprintf(*out, "0000000000000000 0-0");
++		*out += sprintf(*out, "000000 0-0");
+ 	}
+ 
+ 	*out += sprintf(*out, "\n");
+@@ -165,7 +165,7 @@ static ssize_t status_show(struct device *dev,
+ 	int pdev_nr;
+ 
+ 	out += sprintf(out,
+-		       "hub port sta spd dev      socket           local_busid\n");
++		       "hub port sta spd dev      sockfd local_busid\n");
+ 
+ 	pdev_nr = status_name_to_id(attr->attr.name);
+ 	if (pdev_nr < 0)
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 7ee3167bc083..ffdd4e937d1d 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -30,6 +30,7 @@
+ #include <linux/sched/mm.h>
+ #include <linux/sched/signal.h>
+ #include <linux/interval_tree_generic.h>
++#include <linux/nospec.h>
+ 
+ #include "vhost.h"
+ 
+@@ -1366,6 +1367,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
+ 	if (idx >= d->nvqs)
+ 		return -ENOBUFS;
+ 
++	idx = array_index_nospec(idx, d->nvqs);
+ 	vq = d->vqs[idx];
+ 
+ 	mutex_lock(&vq->mutex);
+diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
+index def3a501acd6..d059d04c63ac 100644
+--- a/drivers/video/fbdev/pxa168fb.c
++++ b/drivers/video/fbdev/pxa168fb.c
+@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
+ 	/*
+ 	 * enable controller clock
+ 	 */
+-	clk_enable(fbi->clk);
++	clk_prepare_enable(fbi->clk);
+ 
+ 	pxa168fb_set_par(info);
+ 
+@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
+ failed_free_cmap:
+ 	fb_dealloc_cmap(&info->cmap);
+ failed_free_clk:
+-	clk_disable(fbi->clk);
++	clk_disable_unprepare(fbi->clk);
+ failed_free_fbmem:
+ 	dma_free_coherent(fbi->dev, info->fix.smem_len,
+ 			info->screen_base, fbi->fb_start_dma);
+@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
+ 	dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
+ 		    info->screen_base, info->fix.smem_start);
+ 
+-	clk_disable(fbi->clk);
++	clk_disable_unprepare(fbi->clk);
+ 
+ 	framebuffer_release(info);
+ 
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 473ad5985aa3..47dec283628d 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2603,8 +2603,10 @@ out:
+ 	}
+ 	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+ 
+-	if (done && !ret)
++	if (done && !ret) {
+ 		ret = 1;
++		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
++	}
+ 	return ret;
+ }
+ 
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 2cd0b3053439..d01cbca84701 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -712,7 +712,7 @@ cgfi_exit:
+ /* Simple function to return a 64 bit hash of string.  Rarely called */
+ static __u64 simple_hashstr(const char *str)
+ {
+-	const __u64 hash_mult =  1125899906842597L; /* a big enough prime */
++	const __u64 hash_mult =  1125899906842597ULL; /* a big enough prime */
+ 	__u64 hash = 0;
+ 
+ 	while (*str)
+diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
+index a40f36b1b292..9635df94db7d 100644
+--- a/fs/fat/fatent.c
++++ b/fs/fat/fatent.c
+@@ -681,6 +681,7 @@ int fat_count_free_clusters(struct super_block *sb)
+ 			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
+ 				free++;
+ 		} while (fat_ent_next(sbi, &fatent));
++		cond_resched();
+ 	}
+ 	sbi->free_clusters = free;
+ 	sbi->free_clus_valid = 1;
+diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
+index 1b1283f07941..824f407df1db 100644
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -2946,6 +2946,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
+ 		if (map_end & (PAGE_SIZE - 1))
+ 			to = map_end & (PAGE_SIZE - 1);
+ 
++retry:
+ 		page = find_or_create_page(mapping, page_index, GFP_NOFS);
+ 		if (!page) {
+ 			ret = -ENOMEM;
+@@ -2954,11 +2955,18 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
+ 		}
+ 
+ 		/*
+-		 * In case PAGE_SIZE <= CLUSTER_SIZE, This page
+-		 * can't be dirtied before we CoW it out.
++		 * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
++		 * page, so write it back.
+ 		 */
+-		if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+-			BUG_ON(PageDirty(page));
++		if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
++			if (PageDirty(page)) {
++				/*
++				 * write_on_page will unlock the page on return
++				 */
++				ret = write_one_page(page);
++				goto retry;
++			}
++		}
+ 
+ 		if (!PageUptodate(page)) {
+ 			ret = block_read_full_page(page, ocfs2_get_block);
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 5c5be80ce802..c9d2a1a3ef11 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -27,6 +27,7 @@ struct bpf_map_ops {
+ 	void (*map_release)(struct bpf_map *map, struct file *map_file);
+ 	void (*map_free)(struct bpf_map *map);
+ 	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
++	void (*map_release_uref)(struct bpf_map *map);
+ 
+ 	/* funcs callable from userspace and from eBPF programs */
+ 	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
+@@ -300,7 +301,6 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+ 				 void *key, void *value, u64 map_flags);
+ int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
+-void bpf_fd_array_map_clear(struct bpf_map *map);
+ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
+ 				void *key, void *value, u64 map_flags);
+ int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
+diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
+index 6b79a9bba9a7..4be464a07612 100644
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -78,6 +78,18 @@ extern void __chk_io_ptr(const volatile void __iomem *);
+ #include <linux/compiler-clang.h>
+ #endif
+ 
++/*
++ * Some architectures need to provide custom definitions of macros provided
++ * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
++ * conditionally rather than using an asm-generic wrapper in order to avoid
++ * build failures if any C compilation, which will include this file via an
++ * -include argument in c_flags, occurs prior to the asm-generic wrappers being
++ * generated.
++ */
++#ifdef CONFIG_HAVE_ARCH_COMPILER_H
++#include <asm/compiler.h>
++#endif
++
+ /*
+  * Generic compiler-dependent macros required for kernel
+  * build go below this comment. Actual compiler/compiler version
+diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
+index 767467d886de..67c75372b691 100644
+--- a/include/linux/iio/buffer-dma.h
++++ b/include/linux/iio/buffer-dma.h
+@@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
+ 	char __user *user_buffer);
+ size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
+ int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
+-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
++int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
+ int iio_dma_buffer_request_update(struct iio_buffer *buffer);
+ 
+ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index c4d19e77fea8..fb677e4f902d 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1193,25 +1193,9 @@ enum {
+ };
+ 
+ static inline const struct cpumask *
+-mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
++mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
+ {
+-	const struct cpumask *mask;
+-	struct irq_desc *desc;
+-	unsigned int irq;
+-	int eqn;
+-	int err;
+-
+-	err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
+-	if (err)
+-		return NULL;
+-
+-	desc = irq_to_desc(irq);
+-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+-	mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
+-#else
+-	mask = desc->irq_common_data.affinity;
+-#endif
+-	return mask;
++	return dev->priv.irq_info[vector].mask;
+ }
+ 
+ #endif /* MLX5_DRIVER_H */
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index dca9e926b88f..efc04c2d92c9 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -879,6 +879,10 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
+ {
+ 	return 0;
+ }
++int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
++				u16 regnum);
++int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
++				 u16 regnum, u16 val);
+ 
+ /* Clause 45 PHY */
+ int genphy_c45_restart_aneg(struct phy_device *phydev);
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index f57d0bdf3c9e..a8f55ea4146b 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -467,7 +467,7 @@ static u32 prog_fd_array_sys_lookup_elem(void *ptr)
+ }
+ 
+ /* decrement refcnt of all bpf_progs that are stored in this map */
+-void bpf_fd_array_map_clear(struct bpf_map *map)
++static void bpf_fd_array_map_clear(struct bpf_map *map)
+ {
+ 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+ 	int i;
+@@ -485,6 +485,7 @@ const struct bpf_map_ops prog_array_map_ops = {
+ 	.map_fd_get_ptr = prog_fd_array_get_ptr,
+ 	.map_fd_put_ptr = prog_fd_array_put_ptr,
+ 	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
++	.map_release_uref = bpf_fd_array_map_clear,
+ };
+ 
+ static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
+diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
+index 20eaddfa691c..22991e19c01c 100644
+--- a/kernel/bpf/sockmap.c
++++ b/kernel/bpf/sockmap.c
+@@ -875,7 +875,7 @@ static int sock_map_update_elem(struct bpf_map *map,
+ 	return err;
+ }
+ 
+-static void sock_map_release(struct bpf_map *map, struct file *map_file)
++static void sock_map_release(struct bpf_map *map)
+ {
+ 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ 	struct bpf_prog *orig;
+@@ -895,7 +895,7 @@ const struct bpf_map_ops sock_map_ops = {
+ 	.map_get_next_key = sock_map_get_next_key,
+ 	.map_update_elem = sock_map_update_elem,
+ 	.map_delete_elem = sock_map_delete_elem,
+-	.map_release = sock_map_release,
++	.map_release_uref = sock_map_release,
+ };
+ 
+ BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 4e933219fec6..ea22d0b6a9f0 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -214,8 +214,8 @@ static void bpf_map_free_deferred(struct work_struct *work)
+ static void bpf_map_put_uref(struct bpf_map *map)
+ {
+ 	if (atomic_dec_and_test(&map->usercnt)) {
+-		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
+-			bpf_fd_array_map_clear(map);
++		if (map->ops->map_release_uref)
++			map->ops->map_release_uref(map);
+ 	}
+ }
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 4dbce29a9313..991af683ef9e 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -8058,6 +8058,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ 			goto unlock;
+ 
+ 		list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
++			if (event->cpu != smp_processor_id())
++				continue;
+ 			if (event->attr.type != PERF_TYPE_TRACEPOINT)
+ 				continue;
+ 			if (event->attr.config != entry->type)
+@@ -9020,9 +9022,7 @@ static void free_pmu_context(struct pmu *pmu)
+ 	if (pmu->task_ctx_nr > perf_invalid_context)
+ 		return;
+ 
+-	mutex_lock(&pmus_lock);
+ 	free_percpu(pmu->pmu_cpu_context);
+-	mutex_unlock(&pmus_lock);
+ }
+ 
+ /*
+@@ -9278,12 +9278,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
+ 
+ void perf_pmu_unregister(struct pmu *pmu)
+ {
+-	int remove_device;
+-
+ 	mutex_lock(&pmus_lock);
+-	remove_device = pmu_bus_running;
+ 	list_del_rcu(&pmu->entry);
+-	mutex_unlock(&pmus_lock);
+ 
+ 	/*
+ 	 * We dereference the pmu list under both SRCU and regular RCU, so
+@@ -9295,13 +9291,14 @@ void perf_pmu_unregister(struct pmu *pmu)
+ 	free_percpu(pmu->pmu_disable_count);
+ 	if (pmu->type >= PERF_TYPE_MAX)
+ 		idr_remove(&pmu_idr, pmu->type);
+-	if (remove_device) {
++	if (pmu_bus_running) {
+ 		if (pmu->nr_addr_filters)
+ 			device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
+ 		device_del(pmu->dev);
+ 		put_device(pmu->dev);
+ 	}
+ 	free_pmu_context(pmu);
++	mutex_unlock(&pmus_lock);
+ }
+ EXPORT_SYMBOL_GPL(perf_pmu_unregister);
+ 
+diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
+index 0e4cd64ad2c0..654977862b06 100644
+--- a/kernel/locking/test-ww_mutex.c
++++ b/kernel/locking/test-ww_mutex.c
+@@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work)
+ {
+ 	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
+ 	struct ww_acquire_ctx ctx;
+-	int err;
++	int err, erra = 0;
+ 
+ 	ww_acquire_init(&ctx, &ww_class);
+ 	ww_mutex_lock(&cycle->a_mutex, &ctx);
+@@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work)
+ 
+ 	err = ww_mutex_lock(cycle->b_mutex, &ctx);
+ 	if (err == -EDEADLK) {
++		err = 0;
+ 		ww_mutex_unlock(&cycle->a_mutex);
+ 		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
+-		err = ww_mutex_lock(&cycle->a_mutex, &ctx);
++		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
+ 	}
+ 
+ 	if (!err)
+ 		ww_mutex_unlock(cycle->b_mutex);
+-	ww_mutex_unlock(&cycle->a_mutex);
++	if (!erra)
++		ww_mutex_unlock(&cycle->a_mutex);
+ 	ww_acquire_fini(&ctx);
+ 
+-	cycle->result = err;
++	cycle->result = err ?: erra;
+ }
+ 
+ static int __test_cycle(unsigned int nthreads)
+diff --git a/lib/test_bpf.c b/lib/test_bpf.c
+index 64701b4c9900..75ebf2bbc2ee 100644
+--- a/lib/test_bpf.c
++++ b/lib/test_bpf.c
+@@ -5427,7 +5427,7 @@ static struct bpf_test tests[] = {
+ 	{
+ 		"BPF_MAXINSNS: Jump, gap, jump, ...",
+ 		{ },
+-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
++#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
+ 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ #else
+ 		CLASSIC | FLAG_NO_DATA,
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 1fba2a03f8ae..ba24f613c0fc 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2298,9 +2298,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 	/* LE address type */
+ 	addr_type = le_addr_type(cp->addr.type);
+ 
+-	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
+-
+-	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
++	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
++	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
+ 	if (err < 0) {
+ 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ 					MGMT_STATUS_NOT_PAIRED, &rp,
+@@ -2314,8 +2313,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 		goto done;
+ 	}
+ 
+-	/* Abort any ongoing SMP pairing */
+-	smp_cancel_pairing(conn);
+ 
+ 	/* Defer clearing up the connection parameters until closing to
+ 	 * give a chance of keeping them if a repairing happens.
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index a27704ff13a9..dbcc439fc78b 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -2410,30 +2410,51 @@ unlock:
+ 	return ret;
+ }
+ 
+-void smp_cancel_pairing(struct hci_conn *hcon)
++int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
++				  u8 addr_type)
+ {
+-	struct l2cap_conn *conn = hcon->l2cap_data;
++	struct hci_conn *hcon;
++	struct l2cap_conn *conn;
+ 	struct l2cap_chan *chan;
+ 	struct smp_chan *smp;
++	int err;
++
++	err = hci_remove_ltk(hdev, bdaddr, addr_type);
++	hci_remove_irk(hdev, bdaddr, addr_type);
++
++	hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
++	if (!hcon)
++		goto done;
+ 
++	conn = hcon->l2cap_data;
+ 	if (!conn)
+-		return;
++		goto done;
+ 
+ 	chan = conn->smp;
+ 	if (!chan)
+-		return;
++		goto done;
+ 
+ 	l2cap_chan_lock(chan);
+ 
+ 	smp = chan->data;
+ 	if (smp) {
++		/* Set keys to NULL to make sure smp_failure() does not try to
++		 * remove and free already invalidated rcu list entries. */
++		smp->ltk = NULL;
++		smp->slave_ltk = NULL;
++		smp->remote_irk = NULL;
++
+ 		if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
+ 			smp_failure(conn, 0);
+ 		else
+ 			smp_failure(conn, SMP_UNSPECIFIED);
++		err = 0;
+ 	}
+ 
+ 	l2cap_chan_unlock(chan);
++
++done:
++	return err;
+ }
+ 
+ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
+diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
+index 0ff6247eaa6c..121edadd5f8d 100644
+--- a/net/bluetooth/smp.h
++++ b/net/bluetooth/smp.h
+@@ -181,7 +181,8 @@ enum smp_key_pref {
+ };
+ 
+ /* SMP Commands */
+-void smp_cancel_pairing(struct hci_conn *hcon);
++int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
++				  u8 addr_type);
+ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
+ 			     enum smp_key_pref key_pref);
+ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 8dc5c8d69bcd..a813dfe2dc2c 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1390,7 +1390,14 @@ static void br_multicast_query_received(struct net_bridge *br,
+ 		return;
+ 
+ 	br_multicast_update_query_timer(br, query, max_delay);
+-	br_multicast_mark_router(br, port);
++
++	/* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
++	 * the arrival port for IGMP Queries where the source address
++	 * is 0.0.0.0 should not be added to router port list.
++	 */
++	if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
++	    saddr->proto == htons(ETH_P_IPV6))
++		br_multicast_mark_router(br, port);
+ }
+ 
+ static int br_ip4_multicast_query(struct net_bridge *br,
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index c2eea1b8737a..7582f28ab306 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -832,7 +832,8 @@ static unsigned int ip_sabotage_in(void *priv,
+ 				   struct sk_buff *skb,
+ 				   const struct nf_hook_state *state)
+ {
+-	if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
++	if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
++	    !netif_is_l3_master(skb->dev)) {
+ 		state->okfn(state->net, state->sk, skb);
+ 		return NF_STOLEN;
+ 	}
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 3964c108b169..d8a0774f7608 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -810,8 +810,9 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
+ 			return -EINVAL;
+ 		}
+ 
+-		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
+-			netdev_rx_csum_fault(skb->dev);
++		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
++		    !skb->csum_complete_sw)
++			netdev_rx_csum_fault(NULL);
+ 	}
+ 	return 0;
+ fault:
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 0ae5ac5e090f..3469f5053c79 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -2410,13 +2410,17 @@ roll_back:
+ 	return ret;
+ }
+ 
+-static int ethtool_set_per_queue(struct net_device *dev, void __user *useraddr)
++static int ethtool_set_per_queue(struct net_device *dev,
++				 void __user *useraddr, u32 sub_cmd)
+ {
+ 	struct ethtool_per_queue_op per_queue_opt;
+ 
+ 	if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
+ 		return -EFAULT;
+ 
++	if (per_queue_opt.sub_command != sub_cmd)
++		return -EINVAL;
++
+ 	switch (per_queue_opt.sub_command) {
+ 	case ETHTOOL_GCOALESCE:
+ 		return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
+@@ -2787,7 +2791,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
+ 		rc = ethtool_get_phy_stats(dev, useraddr);
+ 		break;
+ 	case ETHTOOL_PERQUEUE:
+-		rc = ethtool_set_per_queue(dev, useraddr);
++		rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
+ 		break;
+ 	case ETHTOOL_GLINKSETTINGS:
+ 		rc = ethtool_get_link_ksettings(dev, useraddr);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 760364526dc1..c392a77ff788 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3080,6 +3080,11 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 		return -EINVAL;
+ 	}
+ 
++	if (dev->type != ARPHRD_ETHER) {
++		NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
++		return -EINVAL;
++	}
++
+ 	addr = nla_data(tb[NDA_LLADDR]);
+ 
+ 	err = fdb_vid_parse(tb[NDA_VLAN], &vid);
+@@ -3184,6 +3189,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 		return -EINVAL;
+ 	}
+ 
++	if (dev->type != ARPHRD_ETHER) {
++		NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
++		return -EINVAL;
++	}
++
+ 	addr = nla_data(tb[NDA_LLADDR]);
+ 
+ 	err = fdb_vid_parse(tb[NDA_VLAN], &vid);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 9f80b947f53b..c19a118f9f82 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1843,8 +1843,9 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
+ 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ 		int delta = skb->len - len;
+ 
+-		skb->csum = csum_sub(skb->csum,
+-				     skb_checksum(skb, len, delta, 0));
++		skb->csum = csum_block_sub(skb->csum,
++					   skb_checksum(skb, len, delta, 0),
++					   len);
+ 	}
+ 	return __pskb_trim(skb, len);
+ }
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index e7227128df2c..cb8fa5d7afe1 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -720,10 +720,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
+ 	if (ip_is_fragment(&iph)) {
+ 		skb = skb_share_check(skb, GFP_ATOMIC);
+ 		if (skb) {
+-			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
+-				return skb;
+-			if (pskb_trim_rcsum(skb, netoff + len))
+-				return skb;
++			if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
++				kfree_skb(skb);
++				return NULL;
++			}
++			if (pskb_trim_rcsum(skb, netoff + len)) {
++				kfree_skb(skb);
++				return NULL;
++			}
+ 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ 			if (ip_defrag(net, skb, user))
+ 				return NULL;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index cbd9c0d8a788..9f314a5e9f27 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -2499,8 +2499,6 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+ next_entry:
+ 			e++;
+ 		}
+-		e = 0;
+-		s_e = 0;
+ 
+ 		spin_lock_bh(&mfc_unres_lock);
+ 		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index dc0ec227b9d2..b89920c0f226 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2045,8 +2045,24 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
+ 	/* Note, we are only interested in != 0 or == 0, thus the
+ 	 * force to int.
+ 	 */
+-	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+-							 inet_compute_pseudo);
++	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
++							inet_compute_pseudo);
++	if (err)
++		return err;
++
++	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
++		/* If SW calculated the value, we know it's bad */
++		if (skb->csum_complete_sw)
++			return 1;
++
++		/* HW says the value is bad. Let's validate that.
++		 * skb->csum is no longer the full packet checksum,
++		 * so don't treat it as such.
++		 */
++		skb_checksum_complete_unset(skb);
++	}
++
++	return 0;
+ }
+ 
+ /* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
+diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
+index bcfc00e88756..f8de2482a529 100644
+--- a/net/ipv4/xfrm4_input.c
++++ b/net/ipv4/xfrm4_input.c
+@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
+ 
+ 	if (xo && (xo->flags & XFRM_GRO)) {
+ 		skb_mac_header_rebuild(skb);
++		skb_reset_transport_header(skb);
+ 		return 0;
+ 	}
+ 
+diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c
+index 3d36644890bb..1ad2c2c4e250 100644
+--- a/net/ipv4/xfrm4_mode_transport.c
++++ b/net/ipv4/xfrm4_mode_transport.c
+@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
+ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ 	int ihl = skb->data - skb_transport_header(skb);
+-	struct xfrm_offload *xo = xfrm_offload(skb);
+ 
+ 	if (skb->transport_header != skb->network_header) {
+ 		memmove(skb_transport_header(skb),
+@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ 		skb->network_header = skb->transport_header;
+ 	}
+ 	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
+-	if (!xo || !(xo->flags & XFRM_GRO))
+-		skb_reset_transport_header(skb);
++	skb_reset_transport_header(skb);
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 569f7c3f6b95..9ac6f6232294 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4793,8 +4793,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
+ 
+ 		/* unicast address incl. temp addr */
+ 		list_for_each_entry(ifa, &idev->addr_list, if_list) {
+-			if (++ip_idx < s_ip_idx)
+-				continue;
++			if (ip_idx < s_ip_idx)
++				goto next;
+ 			err = inet6_fill_ifaddr(skb, ifa,
+ 						NETLINK_CB(cb->skb).portid,
+ 						cb->nlh->nlmsg_seq,
+@@ -4803,6 +4803,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
+ 			if (err < 0)
+ 				break;
+ 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
++next:
++			ip_idx++;
+ 		}
+ 		break;
+ 	}
+diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
+index 547515e8450a..377717045f8f 100644
+--- a/net/ipv6/ip6_checksum.c
++++ b/net/ipv6/ip6_checksum.c
+@@ -88,8 +88,24 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
+ 	 * Note, we are only interested in != 0 or == 0, thus the
+ 	 * force to int.
+ 	 */
+-	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+-							 ip6_compute_pseudo);
++	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
++							ip6_compute_pseudo);
++	if (err)
++		return err;
++
++	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
++		/* If SW calculated the value, we know it's bad */
++		if (skb->csum_complete_sw)
++			return 1;
++
++		/* HW says the value is bad. Let's validate that.
++		 * skb->csum is no longer the full packet checksum,
++		 * so don't treat is as such.
++		 */
++		skb_checksum_complete_unset(skb);
++	}
++
++	return 0;
+ }
+ EXPORT_SYMBOL(udp6_csum_init);
+ 
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 0e9296f44ee4..948f304db0a3 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1185,10 +1185,6 @@ route_lookup:
+ 	}
+ 	skb_dst_set(skb, dst);
+ 
+-	if (encap_limit >= 0) {
+-		init_tel_txopt(&opt, encap_limit);
+-		ipv6_push_frag_opts(skb, &opt.ops, &proto);
+-	}
+ 	hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
+ 
+ 	/* Calculate max headroom for all the headers and adjust
+@@ -1203,6 +1199,11 @@ route_lookup:
+ 	if (err)
+ 		return err;
+ 
++	if (encap_limit >= 0) {
++		init_tel_txopt(&opt, encap_limit);
++		ipv6_push_frag_opts(skb, &opt.ops, &proto);
++	}
++
+ 	skb_push(skb, sizeof(struct ipv6hdr));
+ 	skb_reset_network_header(skb);
+ 	ipv6h = ipv6_hdr(skb);
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index d112762b4cb8..bd269e78272a 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -2412,17 +2412,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
+ {
+ 	int err;
+ 
+-	/* callers have the socket lock and rtnl lock
+-	 * so no other readers or writers of iml or its sflist
+-	 */
++	write_lock_bh(&iml->sflock);
+ 	if (!iml->sflist) {
+ 		/* any-source empty exclude case */
+-		return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
++		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
++	} else {
++		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
++				iml->sflist->sl_count, iml->sflist->sl_addr, 0);
++		sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
++		iml->sflist = NULL;
+ 	}
+-	err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
+-		iml->sflist->sl_count, iml->sflist->sl_addr, 0);
+-	sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
+-	iml->sflist = NULL;
++	write_unlock_bh(&iml->sflock);
+ 	return err;
+ }
+ 
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 528218460bc5..5f80e57e93ed 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1722,10 +1722,9 @@ int ndisc_rcv(struct sk_buff *skb)
+ 		return 0;
+ 	}
+ 
+-	memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
+-
+ 	switch (msg->icmph.icmp6_type) {
+ 	case NDISC_NEIGHBOUR_SOLICITATION:
++		memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
+ 		ndisc_recv_ns(skb);
+ 		break;
+ 
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 2ed8536e10b6..611d406c4656 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -598,8 +598,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
+ 	    fq->q.meat == fq->q.len &&
+ 	    nf_ct_frag6_reasm(fq, skb, dev))
+ 		ret = 0;
+-	else
+-		skb_dst_drop(skb);
+ 
+ out_unlock:
+ 	spin_unlock_bh(&fq->q.lock);
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 5cee941ab0a9..8d185a0fc5af 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -794,11 +794,9 @@ static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+ 
+ 	ret = udpv6_queue_rcv_skb(sk, skb);
+ 
+-	/* a return value > 0 means to resubmit the input, but
+-	 * it wants the return to be -protocol, or 0
+-	 */
++	/* a return value > 0 means to resubmit the input */
+ 	if (ret > 0)
+-		return -ret;
++		return ret;
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
+index 841f4a07438e..9ef490dddcea 100644
+--- a/net/ipv6/xfrm6_input.c
++++ b/net/ipv6/xfrm6_input.c
+@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
+ 
+ 	if (xo && (xo->flags & XFRM_GRO)) {
+ 		skb_mac_header_rebuild(skb);
++		skb_reset_transport_header(skb);
+ 		return -1;
+ 	}
+ 
+diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
+index 9ad07a91708e..3c29da5defe6 100644
+--- a/net/ipv6/xfrm6_mode_transport.c
++++ b/net/ipv6/xfrm6_mode_transport.c
+@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
+ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ 	int ihl = skb->data - skb_transport_header(skb);
+-	struct xfrm_offload *xo = xfrm_offload(skb);
+ 
+ 	if (skb->transport_header != skb->network_header) {
+ 		memmove(skb_transport_header(skb),
+@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ 	}
+ 	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
+ 					   sizeof(struct ipv6hdr));
+-	if (!xo || !(xo->flags & XFRM_GRO))
+-		skb_reset_transport_header(skb);
++	skb_reset_transport_header(skb);
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
+index 8ae87d4ec5ff..29dae7f2ff14 100644
+--- a/net/ipv6/xfrm6_output.c
++++ b/net/ipv6/xfrm6_output.c
+@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 
+ 	if (toobig && xfrm6_local_dontfrag(skb)) {
+ 		xfrm6_local_rxpmtu(skb, mtu);
++		kfree_skb(skb);
+ 		return -EMSGSIZE;
+ 	} else if (!skb->ignore_df && toobig && skb->sk) {
+ 		xfrm_local_error(skb, mtu);
++		kfree_skb(skb);
+ 		return -EMSGSIZE;
+ 	}
+ 
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 5c87f1d3e525..33ea389ee015 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -808,10 +808,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ 		}
+ 	}
+ 
+-	/* Session data offset is handled differently for L2TPv2 and
+-	 * L2TPv3. For L2TPv2, there is an optional 16-bit value in
+-	 * the header. For L2TPv3, the offset is negotiated using AVPs
+-	 * in the session setup control protocol.
++	/* Session data offset is defined only for L2TPv2 and is
++	 * indicated by an optional 16-bit value in the header.
+ 	 */
+ 	if (tunnel->version == L2TP_HDR_VER_2) {
+ 		/* If offset bit set, skip it. */
+@@ -819,8 +817,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ 			offset = ntohs(*(__be16 *)ptr);
+ 			ptr += 2 + offset;
+ 		}
+-	} else
+-		ptr += session->offset;
++	}
+ 
+ 	offset = ptr - optr;
+ 	if (!pskb_may_pull(skb, offset))
+@@ -1104,8 +1101,6 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
+ 		}
+ 		bufp += session->l2specific_len;
+ 	}
+-	if (session->offset)
+-		bufp += session->offset;
+ 
+ 	return bufp - optr;
+ }
+@@ -1779,7 +1774,7 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+ 		if (session->send_seq)
+ 			session->hdr_len += 4;
+ 	} else {
+-		session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
++		session->hdr_len = 4 + session->cookie_len + session->l2specific_len;
+ 		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
+ 			session->hdr_len += 4;
+ 	}
+@@ -1830,7 +1825,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
+ 			session->recv_seq = cfg->recv_seq;
+ 			session->lns_mode = cfg->lns_mode;
+ 			session->reorder_timeout = cfg->reorder_timeout;
+-			session->offset = cfg->offset;
+ 			session->l2specific_type = cfg->l2specific_type;
+ 			session->l2specific_len = cfg->l2specific_len;
+ 			session->cookie_len = cfg->cookie_len;
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 9e2f1fda1b03..0a58c0754526 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -59,7 +59,6 @@ struct l2tp_session_cfg {
+ 	int			debug;		/* bitmask of debug message
+ 						 * categories */
+ 	u16			vlan_id;	/* VLAN pseudowire only */
+-	u16			offset;		/* offset to payload */
+ 	u16			l2specific_len;	/* Layer 2 specific length */
+ 	u16			l2specific_type; /* Layer 2 specific type */
+ 	u8			cookie[8];	/* optional cookie */
+@@ -86,8 +85,6 @@ struct l2tp_session {
+ 	int			cookie_len;
+ 	u8			peer_cookie[8];
+ 	int			peer_cookie_len;
+-	u16			offset;		/* offset from end of L2TP header
+-						   to beginning of data */
+ 	u16			l2specific_len;
+ 	u16			l2specific_type;
+ 	u16			hdr_len;
+diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
+index 53bae54c4d6e..534cad03b9e9 100644
+--- a/net/l2tp/l2tp_debugfs.c
++++ b/net/l2tp/l2tp_debugfs.c
+@@ -180,8 +180,8 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
+ 		   session->lns_mode ? "LNS" : "LAC",
+ 		   session->debug,
+ 		   jiffies_to_msecs(session->reorder_timeout));
+-	seq_printf(m, "   offset %hu l2specific %hu/%hu\n",
+-		   session->offset, session->l2specific_type, session->l2specific_len);
++	seq_printf(m, "   offset 0 l2specific %hu/%hu\n",
++		   session->l2specific_type, session->l2specific_len);
+ 	if (session->cookie_len) {
+ 		seq_printf(m, "   cookie %02x%02x%02x%02x",
+ 			   session->cookie[0], session->cookie[1],
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index c28223d8092b..001797ce4084 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -549,9 +549,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
+ 	}
+ 
+ 	if (tunnel->version > 2) {
+-		if (info->attrs[L2TP_ATTR_OFFSET])
+-			cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
+-
+ 		if (info->attrs[L2TP_ATTR_DATA_SEQ])
+ 			cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
+ 
+diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
+index b084fd19ad32..56c3fb5cc805 100644
+--- a/net/llc/llc_conn.c
++++ b/net/llc/llc_conn.c
+@@ -734,6 +734,7 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
+ 	llc_sk(sk)->sap = sap;
+ 
+ 	spin_lock_bh(&sap->sk_lock);
++	sock_set_flag(sk, SOCK_RCU_FREE);
+ 	sap->sk_count++;
+ 	sk_nulls_add_node_rcu(sk, laddr_hb);
+ 	hlist_add_head(&llc->dev_hash_node, dev_hb);
+diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
+index 7e5f271e3c30..4f1c61637ce3 100644
+--- a/net/mac80211/mesh.h
++++ b/net/mac80211/mesh.h
+@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
+ int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
+ void ieee80211s_init(void);
+ void ieee80211s_update_metric(struct ieee80211_local *local,
+-			      struct sta_info *sta, struct sk_buff *skb);
++			      struct sta_info *sta,
++			      struct ieee80211_tx_status *st);
+ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
+ void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
+ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
+index 055ea36ff27b..fab0764c315f 100644
+--- a/net/mac80211/mesh_hwmp.c
++++ b/net/mac80211/mesh_hwmp.c
+@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
+ }
+ 
+ void ieee80211s_update_metric(struct ieee80211_local *local,
+-		struct sta_info *sta, struct sk_buff *skb)
++			      struct sta_info *sta,
++			      struct ieee80211_tx_status *st)
+ {
+-	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
++	struct ieee80211_tx_info *txinfo = st->info;
+ 	int failed;
+ 
+-	if (!ieee80211_is_data(hdr->frame_control))
+-		return;
+-
+ 	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
+ 
+ 	/* moving average, scaled to 100.
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index da7427a41529..bdf131ed5ce8 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -470,11 +470,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
+ 	if (!skb)
+ 		return;
+ 
+-	if (dropped) {
+-		dev_kfree_skb_any(skb);
+-		return;
+-	}
+-
+ 	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
+ 		u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
+ 		struct ieee80211_sub_if_data *sdata;
+@@ -495,6 +490,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
+ 		}
+ 		rcu_read_unlock();
+ 
++		dev_kfree_skb_any(skb);
++	} else if (dropped) {
+ 		dev_kfree_skb_any(skb);
+ 	} else {
+ 		/* consumes skb */
+@@ -800,7 +797,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
+ 
+ 		rate_control_tx_status(local, sband, status);
+ 		if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+-			ieee80211s_update_metric(local, sta, skb);
++			ieee80211s_update_metric(local, sta, status);
+ 
+ 		if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
+ 			ieee80211_frame_acked(sta, skb);
+@@ -961,6 +958,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
+ 		}
+ 
+ 		rate_control_tx_status(local, sband, status);
++		if (ieee80211_vif_is_mesh(&sta->sdata->vif))
++			ieee80211s_update_metric(local, sta, status);
+ 	}
+ 
+ 	if (acked || noack_success) {
+diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
+index 91093d4a2f84..6e7aa65cf345 100644
+--- a/net/mac80211/tdls.c
++++ b/net/mac80211/tdls.c
+@@ -16,6 +16,7 @@
+ #include "ieee80211_i.h"
+ #include "driver-ops.h"
+ #include "rate.h"
++#include "wme.h"
+ 
+ /* give usermode some time for retries in setting up the TDLS session */
+ #define TDLS_PEER_SETUP_TIMEOUT	(15 * HZ)
+@@ -1006,14 +1007,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
+ 	switch (action_code) {
+ 	case WLAN_TDLS_SETUP_REQUEST:
+ 	case WLAN_TDLS_SETUP_RESPONSE:
+-		skb_set_queue_mapping(skb, IEEE80211_AC_BK);
+-		skb->priority = 2;
++		skb->priority = 256 + 2;
+ 		break;
+ 	default:
+-		skb_set_queue_mapping(skb, IEEE80211_AC_VI);
+-		skb->priority = 5;
++		skb->priority = 256 + 5;
+ 		break;
+ 	}
++	skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
+ 
+ 	/*
+ 	 * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index d8fddd88bf46..a17a56032a21 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1837,7 +1837,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+ 			sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
+ 
+ 	if (invoke_tx_handlers_early(&tx))
+-		return false;
++		return true;
+ 
+ 	if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
+ 		return true;
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 4c9c9458374a..f70e9cbf33d5 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2622,7 +2622,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ 			 * is already present */
+ 			if (mac_proto != MAC_PROTO_NONE)
+ 				return -EINVAL;
+-			mac_proto = MAC_PROTO_NONE;
++			mac_proto = MAC_PROTO_ETHERNET;
+ 			break;
+ 
+ 		case OVS_ACTION_ATTR_POP_ETH:
+@@ -2630,7 +2630,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ 				return -EINVAL;
+ 			if (vlan_tci & htons(VLAN_TAG_PRESENT))
+ 				return -EINVAL;
+-			mac_proto = MAC_PROTO_ETHERNET;
++			mac_proto = MAC_PROTO_NONE;
+ 			break;
+ 
+ 		default:
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index e6c2c4f56fb1..71c7f1dd4599 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -424,8 +424,7 @@ struct rxrpc_connection {
+ 	spinlock_t		state_lock;	/* state-change lock */
+ 	enum rxrpc_conn_cache_state cache_state;
+ 	enum rxrpc_conn_proto_state state;	/* current state of connection */
+-	u32			local_abort;	/* local abort code */
+-	u32			remote_abort;	/* remote abort code */
++	u32			abort_code;	/* Abort code of connection abort */
+ 	int			debug_id;	/* debug ID for printks */
+ 	atomic_t		serial;		/* packet serial number counter */
+ 	unsigned int		hi_serial;	/* highest serial number received */
+@@ -435,6 +434,7 @@ struct rxrpc_connection {
+ 	u8			security_size;	/* security header size */
+ 	u8			security_ix;	/* security type */
+ 	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
++	short			error;		/* Local error code */
+ };
+ 
+ /*
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index 62b1581d44a5..2dd13f5c47c8 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -418,11 +418,11 @@ found_service:
+ 
+ 	case RXRPC_CONN_REMOTELY_ABORTED:
+ 		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+-					  conn->remote_abort, -ECONNABORTED);
++					  conn->abort_code, conn->error);
+ 		break;
+ 	case RXRPC_CONN_LOCALLY_ABORTED:
+ 		rxrpc_abort_call("CON", call, sp->hdr.seq,
+-				 conn->local_abort, -ECONNABORTED);
++				 conn->abort_code, conn->error);
+ 		break;
+ 	default:
+ 		BUG();
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index 0435c4167a1a..75ec1ad595b7 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -117,7 +117,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+ 
+ 	switch (chan->last_type) {
+ 	case RXRPC_PACKET_TYPE_ABORT:
+-		_proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
++		_proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
+ 		break;
+ 	case RXRPC_PACKET_TYPE_ACK:
+ 		trace_rxrpc_tx_ack(NULL, serial, chan->last_seq, 0,
+@@ -135,13 +135,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+  * pass a connection-level abort onto all calls on that connection
+  */
+ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
+-			      enum rxrpc_call_completion compl,
+-			      u32 abort_code, int error)
++			      enum rxrpc_call_completion compl)
+ {
+ 	struct rxrpc_call *call;
+ 	int i;
+ 
+-	_enter("{%d},%x", conn->debug_id, abort_code);
++	_enter("{%d},%x", conn->debug_id, conn->abort_code);
+ 
+ 	spin_lock(&conn->channel_lock);
+ 
+@@ -153,9 +152,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
+ 			if (compl == RXRPC_CALL_LOCALLY_ABORTED)
+ 				trace_rxrpc_abort("CON", call->cid,
+ 						  call->call_id, 0,
+-						  abort_code, error);
++						  conn->abort_code,
++						  conn->error);
+ 			if (rxrpc_set_call_completion(call, compl,
+-						      abort_code, error))
++						      conn->abort_code,
++						      conn->error))
+ 				rxrpc_notify_socket(call);
+ 		}
+ 	}
+@@ -188,10 +189,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
+ 		return 0;
+ 	}
+ 
++	conn->error = error;
++	conn->abort_code = abort_code;
+ 	conn->state = RXRPC_CONN_LOCALLY_ABORTED;
+ 	spin_unlock_bh(&conn->state_lock);
+ 
+-	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error);
++	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
+ 
+ 	msg.msg_name	= &conn->params.peer->srx.transport;
+ 	msg.msg_namelen	= conn->params.peer->srx.transport_len;
+@@ -210,7 +213,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
+ 	whdr._rsvd	= 0;
+ 	whdr.serviceId	= htons(conn->service_id);
+ 
+-	word		= htonl(conn->local_abort);
++	word		= htonl(conn->abort_code);
+ 
+ 	iov[0].iov_base	= &whdr;
+ 	iov[0].iov_len	= sizeof(whdr);
+@@ -221,7 +224,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
+ 
+ 	serial = atomic_inc_return(&conn->serial);
+ 	whdr.serial = htonl(serial);
+-	_proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
++	_proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
+ 
+ 	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
+ 	if (ret < 0) {
+@@ -289,9 +292,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
+ 		abort_code = ntohl(wtmp);
+ 		_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
+ 
++		conn->error = -ECONNABORTED;
++		conn->abort_code = abort_code;
+ 		conn->state = RXRPC_CONN_REMOTELY_ABORTED;
+-		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
+-				  abort_code, -ECONNABORTED);
++		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
+ 		return -ECONNABORTED;
+ 
+ 	case RXRPC_PACKET_TYPE_CHALLENGE:
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 5edb636dbc4d..ea506a77f3c8 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
+ /*
+  * Apply a hard ACK by advancing the Tx window.
+  */
+-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
++static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+ 				   struct rxrpc_ack_summary *summary)
+ {
+ 	struct sk_buff *skb, *list = NULL;
++	bool rot_last = false;
+ 	int ix;
+ 	u8 annotation;
+ 
+@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+ 		skb->next = list;
+ 		list = skb;
+ 
+-		if (annotation & RXRPC_TX_ANNO_LAST)
++		if (annotation & RXRPC_TX_ANNO_LAST) {
+ 			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
++			rot_last = true;
++		}
+ 		if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
+ 			summary->nr_rot_new_acks++;
+ 	}
+ 
+ 	spin_unlock(&call->lock);
+ 
+-	trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
++	trace_rxrpc_transmit(call, (rot_last ?
+ 				    rxrpc_transmit_rotate_last :
+ 				    rxrpc_transmit_rotate));
+ 	wake_up(&call->waitq);
+@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+ 		skb->next = NULL;
+ 		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ 	}
++
++	return rot_last;
+ }
+ 
+ /*
+@@ -332,11 +337,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
+ 				ktime_get_real());
+ 	}
+ 
+-	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+-		rxrpc_rotate_tx_window(call, top, &summary);
+ 	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
+-		rxrpc_proto_abort("TXL", call, top);
+-		return false;
++		if (!rxrpc_rotate_tx_window(call, top, &summary)) {
++			rxrpc_proto_abort("TXL", call, top);
++			return false;
++		}
+ 	}
+ 	if (!rxrpc_end_tx_phase(call, true, "ETD"))
+ 		return false;
+@@ -801,6 +806,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
+ 				  rxrpc_propose_ack_respond_to_ack);
+ 	}
+ 
++	/* Discard any out-of-order or duplicate ACKs. */
++	if (before_eq(sp->hdr.serial, call->acks_latest)) {
++		_debug("discard ACK %d <= %d",
++		       sp->hdr.serial, call->acks_latest);
++		return;
++	}
++	call->acks_latest_ts = skb->tstamp;
++	call->acks_latest = sp->hdr.serial;
++
++	/* Parse rwind and mtu sizes if provided. */
+ 	ioffset = offset + nr_acks + 3;
+ 	if (skb->len >= ioffset + sizeof(buf.info)) {
+ 		if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
+@@ -822,23 +837,18 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
+ 		return;
+ 	}
+ 
+-	/* Discard any out-of-order or duplicate ACKs. */
+-	if (before_eq(sp->hdr.serial, call->acks_latest)) {
+-		_debug("discard ACK %d <= %d",
+-		       sp->hdr.serial, call->acks_latest);
+-		return;
+-	}
+-	call->acks_latest_ts = skb->tstamp;
+-	call->acks_latest = sp->hdr.serial;
+-
+ 	if (before(hard_ack, call->tx_hard_ack) ||
+ 	    after(hard_ack, call->tx_top))
+ 		return rxrpc_proto_abort("AKW", call, 0);
+ 	if (nr_acks > call->tx_top - hard_ack)
+ 		return rxrpc_proto_abort("AKN", call, 0);
+ 
+-	if (after(hard_ack, call->tx_hard_ack))
+-		rxrpc_rotate_tx_window(call, hard_ack, &summary);
++	if (after(hard_ack, call->tx_hard_ack)) {
++		if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
++			rxrpc_end_tx_phase(call, false, "ETA");
++			return;
++		}
++	}
+ 
+ 	if (nr_acks > 0) {
+ 		if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
+@@ -847,11 +857,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
+ 				      &summary);
+ 	}
+ 
+-	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
+-		rxrpc_end_tx_phase(call, false, "ETA");
+-		return;
+-	}
+-
+ 	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
+ 	    RXRPC_TX_ANNO_LAST &&
+ 	    summary.nr_acks == call->tx_top - hard_ack &&
+@@ -873,8 +878,7 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
+ 
+ 	_proto("Rx ACKALL %%%u", sp->hdr.serial);
+ 
+-	rxrpc_rotate_tx_window(call, call->tx_top, &summary);
+-	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
++	if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
+ 		rxrpc_end_tx_phase(call, false, "ETL");
+ }
+ 
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 2f4e1483aced..04a70793c1fe 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -31,6 +31,8 @@
+ #include <net/pkt_sched.h>
+ #include <net/pkt_cls.h>
+ 
++extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
++
+ /* The list of all installed classifier types */
+ static LIST_HEAD(tcf_proto_base);
+ 
+@@ -559,7 +561,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ replay:
+ 	tp_created = 0;
+ 
+-	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
++	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -836,7 +838,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
+ 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
+ 		return skb->len;
+ 
+-	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
++	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
++			  NULL);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index cd69aa067543..691ca96f7460 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1917,7 +1917,8 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
+ 
+ 	if (tcm->tcm_parent) {
+ 		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
+-		if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
++		if (q && q != root &&
++		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
+ 			return -1;
+ 		return 0;
+ 	}
+diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
+index bc30f9186ac6..d3105ee8decf 100644
+--- a/net/sched/sch_gred.c
++++ b/net/sched/sch_gred.c
+@@ -411,7 +411,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
+ 	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
+ 		if (tb[TCA_GRED_LIMIT] != NULL)
+ 			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
+-		return gred_change_table_def(sch, opt);
++		return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
+ 	}
+ 
+ 	if (tb[TCA_GRED_PARMS] == NULL ||
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 2a2ab6bfe5d8..3d325b840802 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -624,6 +624,10 @@ deliver:
+ 			skb->next = NULL;
+ 			skb->prev = NULL;
+ 			skb->tstamp = netem_skb_cb(skb)->tstamp_save;
++			/* skb->dev shares skb->rbnode area,
++			 * we need to restore its value.
++			 */
++			skb->dev = qdisc_dev(sch);
+ 
+ #ifdef CONFIG_NET_CLS_ACT
+ 			/*
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 790094311143..d87d56978b4c 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -250,11 +250,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
+ 
+ 	spin_lock_bh(&sctp_assocs_id_lock);
+ 	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
++	if (asoc && (asoc->base.sk != sk || asoc->base.dead))
++		asoc = NULL;
+ 	spin_unlock_bh(&sctp_assocs_id_lock);
+ 
+-	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
+-		return NULL;
+-
+ 	return asoc;
+ }
+ 
+diff --git a/net/socket.c b/net/socket.c
+index d27922639a20..a401578f3f28 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2879,9 +2879,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
+ 		    copy_in_user(&rxnfc->fs.ring_cookie,
+ 				 &compat_rxnfc->fs.ring_cookie,
+ 				 (void __user *)(&rxnfc->fs.location + 1) -
+-				 (void __user *)&rxnfc->fs.ring_cookie) ||
+-		    copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
+-				 sizeof(rxnfc->rule_cnt)))
++				 (void __user *)&rxnfc->fs.ring_cookie))
++			return -EFAULT;
++		if (ethcmd == ETHTOOL_GRXCLSRLALL) {
++			if (put_user(rule_cnt, &rxnfc->rule_cnt))
++				return -EFAULT;
++		} else if (copy_in_user(&rxnfc->rule_cnt,
++					&compat_rxnfc->rule_cnt,
++					sizeof(rxnfc->rule_cnt)))
+ 			return -EFAULT;
+ 	}
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 3de415bca391..46e9812d13c0 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3480,6 +3480,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
+ 			return false;
+ 
+ 		/* check availability */
++		ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
+ 		if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
+ 			mcs[ridx] |= rbit;
+ 		else
+@@ -9719,7 +9720,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+ 	s32 last, low, high;
+ 	u32 hyst;
+-	int i, n;
++	int i, n, low_index;
+ 	int err;
+ 
+ 	/* RSSI reporting disabled? */
+@@ -9756,10 +9757,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 		if (last < wdev->cqm_config->rssi_thresholds[i])
+ 			break;
+ 
+-	low = i > 0 ?
+-		(wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
+-	high = i < n ?
+-		(wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
++	low_index = i - 1;
++	if (low_index >= 0) {
++		low_index = array_index_nospec(low_index, n);
++		low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
++	} else {
++		low = S32_MIN;
++	}
++	if (i < n) {
++		i = array_index_nospec(i, n);
++		high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
++	} else {
++		high = S32_MAX;
++	}
+ 
+ 	return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
+ }
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 6e94f6934a0e..bd91de416035 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -2170,11 +2170,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
+ {
+ 	struct wiphy *wiphy = NULL;
+ 	enum reg_request_treatment treatment;
++	enum nl80211_reg_initiator initiator = reg_request->initiator;
+ 
+ 	if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
+ 		wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
+ 
+-	switch (reg_request->initiator) {
++	switch (initiator) {
+ 	case NL80211_REGDOM_SET_BY_CORE:
+ 		treatment = reg_process_hint_core(reg_request);
+ 		break;
+@@ -2192,7 +2193,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
+ 		treatment = reg_process_hint_country_ie(wiphy, reg_request);
+ 		break;
+ 	default:
+-		WARN(1, "invalid initiator %d\n", reg_request->initiator);
++		WARN(1, "invalid initiator %d\n", initiator);
+ 		goto out_free;
+ 	}
+ 
+@@ -2207,7 +2208,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
+ 	 */
+ 	if (treatment == REG_REQ_ALREADY_SET && wiphy &&
+ 	    wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
+-		wiphy_update_regulatory(wiphy, reg_request->initiator);
++		wiphy_update_regulatory(wiphy, initiator);
+ 		wiphy_all_share_dfs_chan_state(wiphy);
+ 		reg_check_channels();
+ 	}
+@@ -2384,6 +2385,7 @@ static int regulatory_hint_core(const char *alpha2)
+ 	request->alpha2[0] = alpha2[0];
+ 	request->alpha2[1] = alpha2[1];
+ 	request->initiator = NL80211_REGDOM_SET_BY_CORE;
++	request->wiphy_idx = WIPHY_IDX_INVALID;
+ 
+ 	queue_regulatory_request(request);
+ 
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index f6c5fe482506..5ed0ed0559dc 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1055,13 +1055,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ 	return NULL;
+ }
+ 
++/*
++ * Update RX channel information based on the available frame payload
++ * information. This is mainly for the 2.4 GHz band where frames can be received
++ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
++ * element to indicate the current (transmitting) channel, but this might also
++ * be needed on other bands if RX frequency does not match with the actual
++ * operating channel of a BSS.
++ */
+ static struct ieee80211_channel *
+ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
+-			 struct ieee80211_channel *channel)
++			 struct ieee80211_channel *channel,
++			 enum nl80211_bss_scan_width scan_width)
+ {
+ 	const u8 *tmp;
+ 	u32 freq;
+ 	int channel_number = -1;
++	struct ieee80211_channel *alt_channel;
+ 
+ 	tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
+ 	if (tmp && tmp[1] == 1) {
+@@ -1075,16 +1085,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
+ 		}
+ 	}
+ 
+-	if (channel_number < 0)
++	if (channel_number < 0) {
++		/* No channel information in frame payload */
+ 		return channel;
++	}
+ 
+ 	freq = ieee80211_channel_to_frequency(channel_number, channel->band);
+-	channel = ieee80211_get_channel(wiphy, freq);
+-	if (!channel)
+-		return NULL;
+-	if (channel->flags & IEEE80211_CHAN_DISABLED)
++	alt_channel = ieee80211_get_channel(wiphy, freq);
++	if (!alt_channel) {
++		if (channel->band == NL80211_BAND_2GHZ) {
++			/*
++			 * Better not allow unexpected channels when that could
++			 * be going beyond the 1-11 range (e.g., discovering
++			 * BSS on channel 12 when radio is configured for
++			 * channel 11.
++			 */
++			return NULL;
++		}
++
++		/* No match for the payload channel number - ignore it */
++		return channel;
++	}
++
++	if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
++	    scan_width == NL80211_BSS_CHAN_WIDTH_5) {
++		/*
++		 * Ignore channel number in 5 and 10 MHz channels where there
++		 * may not be an n:1 or 1:n mapping between frequencies and
++		 * channel numbers.
++		 */
++		return channel;
++	}
++
++	/*
++	 * Use the channel determined through the payload channel number
++	 * instead of the RX channel reported by the driver.
++	 */
++	if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
+ 		return NULL;
+-	return channel;
++	return alt_channel;
+ }
+ 
+ /* Returned bss is reference counted and must be cleaned up appropriately. */
+@@ -1109,7 +1148,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
+ 		    (data->signal < 0 || data->signal > 100)))
+ 		return NULL;
+ 
+-	channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
++	channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
++					   data->scan_width);
+ 	if (!channel)
+ 		return NULL;
+ 
+@@ -1207,7 +1247,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ 		return NULL;
+ 
+ 	channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
+-					   ielen, data->chan);
++					   ielen, data->chan, data->scan_width);
+ 	if (!channel)
+ 		return NULL;
+ 
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 9f492dc417d5..8e75319dd9c0 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -453,6 +453,7 @@ resume:
+ 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
+ 			goto drop;
+ 		}
++		crypto_done = false;
+ 	} while (!err);
+ 
+ 	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index 35610cc881a9..c47660fba498 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -101,6 +101,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
+ 		spin_unlock_bh(&x->lock);
+ 
+ 		skb_dst_force(skb);
++		if (!skb_dst(skb)) {
++			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
++			goto error_nolock;
++		}
+ 
+ 		if (xfrm_offload(skb)) {
+ 			x->type_offload->encap(x, skb);
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 2fb7a78308e1..37c32e73aaef 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -2550,6 +2550,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
+ 	}
+ 
+ 	skb_dst_force(skb);
++	if (!skb_dst(skb)) {
++		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
++		return 0;
++	}
+ 
+ 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
+ 	if (IS_ERR(dst)) {
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 5554d28a32eb..4e8319766f2b 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
+ 	err = -EINVAL;
+ 	switch (p->family) {
+ 	case AF_INET:
++		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
++			goto out;
++
+ 		break;
+ 
+ 	case AF_INET6:
+ #if IS_ENABLED(CONFIG_IPV6)
++		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
++			goto out;
++
+ 		break;
+ #else
+ 		err = -EAFNOSUPPORT;
+@@ -1353,10 +1359,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
+ 
+ 	switch (p->sel.family) {
+ 	case AF_INET:
++		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
++			return -EINVAL;
++
+ 		break;
+ 
+ 	case AF_INET6:
+ #if IS_ENABLED(CONFIG_IPV6)
++		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
++			return -EINVAL;
++
+ 		break;
+ #else
+ 		return  -EAFNOSUPPORT;
+@@ -1437,6 +1449,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
+ 		    (ut[i].family != prev_family))
+ 			return -EINVAL;
+ 
++		if (ut[i].mode >= XFRM_MODE_MAX)
++			return -EINVAL;
++
+ 		prev_family = ut[i].family;
+ 
+ 		switch (ut[i].family) {
+diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
+index 126e3f2e1ed7..2b0adeb5fc42 100644
+--- a/scripts/kconfig/zconf.y
++++ b/scripts/kconfig/zconf.y
+@@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE];
+ static struct menu *current_menu, *current_entry;
+ 
+ %}
+-%expect 31
++%expect 30
+ 
+ %union
+ {
+@@ -112,7 +112,7 @@ start: mainmenu_stmt stmt_list | no_mainmenu_stmt stmt_list;
+ 
+ /* mainmenu entry */
+ 
+-mainmenu_stmt: T_MAINMENU prompt nl
++mainmenu_stmt: T_MAINMENU prompt T_EOL
+ {
+ 	menu_add_prompt(P_MENU, $2, NULL);
+ };
+diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
+index e229abd21652..b0f8979ff2d2 100644
+--- a/sound/usb/usx2y/usb_stream.c
++++ b/sound/usb/usx2y/usb_stream.c
+@@ -56,7 +56,7 @@ check:
+ 		    lb, s->period_size);
+ }
+ 
+-static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
++static int init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
+ 			   struct urb **urbs, char *transfer,
+ 			   struct usb_device *dev, int pipe)
+ {
+@@ -77,6 +77,8 @@ static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
+ 		urb->interval = 1;
+ 		if (usb_pipeout(pipe))
+ 			continue;
++		if (usb_urb_ep_type_check(urb))
++			return -EINVAL;
+ 
+ 		urb->transfer_buffer_length = transfer_length;
+ 		desc = urb->iso_frame_desc;
+@@ -87,9 +89,11 @@ static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
+ 			desc[p].length = maxpacket;
+ 		}
+ 	}
++
++	return 0;
+ }
+ 
+-static void init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
++static int init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
+ 		      struct usb_device *dev, int in_pipe, int out_pipe)
+ {
+ 	struct usb_stream	*s = sk->s;
+@@ -103,9 +107,12 @@ static void init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
+ 		sk->outurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL);
+ 	}
+ 
+-	init_pipe_urbs(sk, use_packsize, sk->inurb, indata, dev, in_pipe);
+-	init_pipe_urbs(sk, use_packsize, sk->outurb, sk->write_page, dev,
+-		       out_pipe);
++	if (init_pipe_urbs(sk, use_packsize, sk->inurb, indata, dev, in_pipe) ||
++	    init_pipe_urbs(sk, use_packsize, sk->outurb, sk->write_page, dev,
++			   out_pipe))
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+ 
+@@ -226,7 +233,11 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
+ 	else
+ 		sk->freqn = get_usb_high_speed_rate(sample_rate);
+ 
+-	init_urbs(sk, use_packsize, dev, in_pipe, out_pipe);
++	if (init_urbs(sk, use_packsize, dev, in_pipe, out_pipe) < 0) {
++		usb_stream_free(sk);
++		return NULL;
++	}
++
+ 	sk->s->state = usb_stream_stopped;
+ out:
+ 	return sk->s;
+diff --git a/tools/perf/Makefile b/tools/perf/Makefile
+index 225454416ed5..7902a5681fc8 100644
+--- a/tools/perf/Makefile
++++ b/tools/perf/Makefile
+@@ -84,10 +84,10 @@ endif # has_clean
+ endif # MAKECMDGOALS
+ 
+ #
+-# The clean target is not really parallel, don't print the jobs info:
++# Explicitly disable parallelism for the clean target.
+ #
+ clean:
+-	$(make)
++	$(make) -j1
+ 
+ #
+ # The build-test target is not really parallel, don't print the jobs info,
+diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
+index 5966f1f9b160..1c9bc3516f8b 100644
+--- a/tools/perf/tests/builtin-test.c
++++ b/tools/perf/tests/builtin-test.c
+@@ -375,7 +375,7 @@ static int test_and_print(struct test *t, bool force_skip, int subtest)
+ 	if (!t->subtest.get_nr)
+ 		pr_debug("%s:", t->desc);
+ 	else
+-		pr_debug("%s subtest %d:", t->desc, subtest);
++		pr_debug("%s subtest %d:", t->desc, subtest + 1);
+ 
+ 	switch (err) {
+ 	case TEST_OK:
+@@ -589,7 +589,7 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
+ 			for (subi = 0; subi < subn; subi++) {
+ 				pr_info("%2d.%1d: %-*s:", i, subi + 1, subw,
+ 					t->subtest.get_desc(subi));
+-				err = test_and_print(t, skip, subi + 1);
++				err = test_and_print(t, skip, subi);
+ 				if (err != TEST_OK && t->subtest.skip_if_fail)
+ 					skip = true;
+ 			}
+diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
+index da4df7fd43a2..23f1bf175179 100644
+--- a/tools/perf/util/setup.py
++++ b/tools/perf/util/setup.py
+@@ -27,7 +27,7 @@ class install_lib(_install_lib):
+ 
+ cflags = getenv('CFLAGS', '').split()
+ # switch off several checks (need to be at the end of cflags list)
+-cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
++cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
+ if cc != "clang":
+     cflags += ['-Wno-cast-function-type' ]
+ 
+diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
+index bef419d4266d..3ad0b3a3317b 100644
+--- a/tools/testing/nvdimm/test/nfit.c
++++ b/tools/testing/nvdimm/test/nfit.c
+@@ -1589,6 +1589,7 @@ static int nfit_ctl_test(struct device *dev)
+ 	unsigned long mask, cmd_size, offset;
+ 	union {
+ 		struct nd_cmd_get_config_size cfg_size;
++		struct nd_cmd_clear_error clear_err;
+ 		struct nd_cmd_ars_status ars_stat;
+ 		struct nd_cmd_ars_cap ars_cap;
+ 		char buf[sizeof(struct nd_cmd_ars_status)
+@@ -1767,6 +1768,23 @@ static int nfit_ctl_test(struct device *dev)
+ 		return -EIO;
+ 	}
+ 
++	/* test clear error */
++	cmd_size = sizeof(cmds.clear_err);
++	cmds.clear_err = (struct nd_cmd_clear_error) {
++		.length = 512,
++		.cleared = 512,
++	};
++	rc = setup_result(cmds.buf, cmd_size);
++	if (rc)
++		return rc;
++	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
++			cmds.buf, cmd_size, &cmd_rc);
++	if (rc < 0 || cmd_rc) {
++		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
++				__func__, __LINE__, rc, cmd_rc);
++		return -EIO;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
+index 57b5ff576240..891130daac7c 100755
+--- a/tools/testing/selftests/net/rtnetlink.sh
++++ b/tools/testing/selftests/net/rtnetlink.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ #
+ # This test is for checking rtnetlink callpaths, and get as much coverage as possible.
+ #
+diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore
+index 349acfafc95b..9dcc16ea8179 100644
+--- a/tools/testing/selftests/powerpc/ptrace/.gitignore
++++ b/tools/testing/selftests/powerpc/ptrace/.gitignore
+@@ -8,3 +8,4 @@ ptrace-vsx
+ ptrace-tm-vsx
+ ptrace-tm-spd-vsx
+ ptrace-tm-spr
++ptrace-hwbreak
+diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
+index 480305266504..0e2f4601d1a8 100644
+--- a/tools/testing/selftests/powerpc/ptrace/Makefile
++++ b/tools/testing/selftests/powerpc/ptrace/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
+               ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
+-              ptrace-tm-spd-vsx ptrace-tm-spr
++              ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak
+ 
+ include ../../lib.mk
+ 
+diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
+new file mode 100644
+index 000000000000..3066d310f32b
+--- /dev/null
++++ b/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
+@@ -0,0 +1,342 @@
++// SPDX-License-Identifier: GPL-2.0+
++
++/*
++ * Ptrace test for hw breakpoints
++ *
++ * Based on tools/testing/selftests/breakpoints/breakpoint_test.c
++ *
++ * This test forks and the parent then traces the child doing various
++ * types of ptrace enabled breakpoints
++ *
++ * Copyright (C) 2018 Michael Neuling, IBM Corporation.
++ */
++
++#include <sys/ptrace.h>
++#include <unistd.h>
++#include <stddef.h>
++#include <sys/user.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <signal.h>
++#include <sys/types.h>
++#include <sys/wait.h>
++#include "ptrace.h"
++
++/* Breakpoint access modes */
++enum {
++	BP_X = 1,
++	BP_RW = 2,
++	BP_W = 4,
++};
++
++static pid_t child_pid;
++static struct ppc_debug_info dbginfo;
++
++static void get_dbginfo(void)
++{
++	int ret;
++
++	ret = ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, &dbginfo);
++	if (ret) {
++		perror("Can't get breakpoint info\n");
++		exit(-1);
++	}
++}
++
++static bool hwbreak_present(void)
++{
++	return (dbginfo.num_data_bps != 0);
++}
++
++static bool dawr_present(void)
++{
++	return !!(dbginfo.features & PPC_DEBUG_FEATURE_DATA_BP_DAWR);
++}
++
++static void set_breakpoint_addr(void *addr)
++{
++	int ret;
++
++	ret = ptrace(PTRACE_SET_DEBUGREG, child_pid, 0, addr);
++	if (ret) {
++		perror("Can't set breakpoint addr\n");
++		exit(-1);
++	}
++}
++
++static int set_hwbreakpoint_addr(void *addr, int range)
++{
++	int ret;
++
++	struct ppc_hw_breakpoint info;
++
++	info.version = 1;
++	info.trigger_type = PPC_BREAKPOINT_TRIGGER_RW;
++	info.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
++	if (range > 0)
++		info.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
++	info.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
++	info.addr = (__u64)addr;
++	info.addr2 = (__u64)addr + range;
++	info.condition_value = 0;
++
++	ret = ptrace(PPC_PTRACE_SETHWDEBUG, child_pid, 0, &info);
++	if (ret < 0) {
++		perror("Can't set breakpoint\n");
++		exit(-1);
++	}
++	return ret;
++}
++
++static int del_hwbreakpoint_addr(int watchpoint_handle)
++{
++	int ret;
++
++	ret = ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, watchpoint_handle);
++	if (ret < 0) {
++		perror("Can't delete hw breakpoint\n");
++		exit(-1);
++	}
++	return ret;
++}
++
++#define DAWR_LENGTH_MAX 512
++
++/* Dummy variables to test read/write accesses */
++static unsigned long long
++	dummy_array[DAWR_LENGTH_MAX / sizeof(unsigned long long)]
++	__attribute__((aligned(512)));
++static unsigned long long *dummy_var = dummy_array;
++
++static void write_var(int len)
++{
++	long long *plval;
++	char *pcval;
++	short *psval;
++	int *pival;
++
++	switch (len) {
++	case 1:
++		pcval = (char *)dummy_var;
++		*pcval = 0xff;
++		break;
++	case 2:
++		psval = (short *)dummy_var;
++		*psval = 0xffff;
++		break;
++	case 4:
++		pival = (int *)dummy_var;
++		*pival = 0xffffffff;
++		break;
++	case 8:
++		plval = (long long *)dummy_var;
++		*plval = 0xffffffffffffffffLL;
++		break;
++	}
++}
++
++static void read_var(int len)
++{
++	char cval __attribute__((unused));
++	short sval __attribute__((unused));
++	int ival __attribute__((unused));
++	long long lval __attribute__((unused));
++
++	switch (len) {
++	case 1:
++		cval = *(char *)dummy_var;
++		break;
++	case 2:
++		sval = *(short *)dummy_var;
++		break;
++	case 4:
++		ival = *(int *)dummy_var;
++		break;
++	case 8:
++		lval = *(long long *)dummy_var;
++		break;
++	}
++}
++
++/*
++ * Do the r/w accesses to trigger the breakpoints. And run
++ * the usual traps.
++ */
++static void trigger_tests(void)
++{
++	int len, ret;
++
++	ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
++	if (ret) {
++		perror("Can't be traced?\n");
++		return;
++	}
++
++	/* Wake up father so that it sets up the first test */
++	kill(getpid(), SIGUSR1);
++
++	/* Test write watchpoints */
++	for (len = 1; len <= sizeof(long); len <<= 1)
++		write_var(len);
++
++	/* Test read/write watchpoints (on read accesses) */
++	for (len = 1; len <= sizeof(long); len <<= 1)
++		read_var(len);
++
++	/* Test when breakpoint is unset */
++
++	/* Test write watchpoints */
++	for (len = 1; len <= sizeof(long); len <<= 1)
++		write_var(len);
++
++	/* Test read/write watchpoints (on read accesses) */
++	for (len = 1; len <= sizeof(long); len <<= 1)
++		read_var(len);
++}
++
++static void check_success(const char *msg)
++{
++	const char *msg2;
++	int status;
++
++	/* Wait for the child to SIGTRAP */
++	wait(&status);
++
++	msg2 = "Failed";
++
++	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) {
++		msg2 = "Child process hit the breakpoint";
++	}
++
++	printf("%s Result: [%s]\n", msg, msg2);
++}
++
++static void launch_watchpoints(char *buf, int mode, int len,
++			       struct ppc_debug_info *dbginfo, bool dawr)
++{
++	const char *mode_str;
++	unsigned long data = (unsigned long)(dummy_var);
++	int wh, range;
++
++	data &= ~0x7UL;
++
++	if (mode == BP_W) {
++		data |= (1UL << 1);
++		mode_str = "write";
++	} else {
++		data |= (1UL << 0);
++		data |= (1UL << 1);
++		mode_str = "read";
++	}
++
++	/* Set DABR_TRANSLATION bit */
++	data |= (1UL << 2);
++
++	/* use PTRACE_SET_DEBUGREG breakpoints */
++	set_breakpoint_addr((void *)data);
++	ptrace(PTRACE_CONT, child_pid, NULL, 0);
++	sprintf(buf, "Test %s watchpoint with len: %d ", mode_str, len);
++	check_success(buf);
++	/* Unregister hw brkpoint */
++	set_breakpoint_addr(NULL);
++
++	data = (data & ~7); /* remove dabr control bits */
++
++	/* use PPC_PTRACE_SETHWDEBUG breakpoint */
++	if (!(dbginfo->features & PPC_DEBUG_FEATURE_DATA_BP_RANGE))
++		return; /* not supported */
++	wh = set_hwbreakpoint_addr((void *)data, 0);
++	ptrace(PTRACE_CONT, child_pid, NULL, 0);
++	sprintf(buf, "Test %s watchpoint with len: %d ", mode_str, len);
++	check_success(buf);
++	/* Unregister hw brkpoint */
++	del_hwbreakpoint_addr(wh);
++
++	/* try a wider range */
++	range = 8;
++	if (dawr)
++		range = 512 - ((int)data & (DAWR_LENGTH_MAX - 1));
++	wh = set_hwbreakpoint_addr((void *)data, range);
++	ptrace(PTRACE_CONT, child_pid, NULL, 0);
++	sprintf(buf, "Test %s watchpoint with len: %d ", mode_str, len);
++	check_success(buf);
++	/* Unregister hw brkpoint */
++	del_hwbreakpoint_addr(wh);
++}
++
++/* Set the breakpoints and check the child successfully trigger them */
++static int launch_tests(bool dawr)
++{
++	char buf[1024];
++	int len, i, status;
++
++	struct ppc_debug_info dbginfo;
++
++	i = ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, &dbginfo);
++	if (i) {
++		perror("Can't set breakpoint info\n");
++		exit(-1);
++	}
++	if (!(dbginfo.features & PPC_DEBUG_FEATURE_DATA_BP_RANGE))
++		printf("WARNING: Kernel doesn't support PPC_PTRACE_SETHWDEBUG\n");
++
++	/* Write watchpoint */
++	for (len = 1; len <= sizeof(long); len <<= 1)
++		launch_watchpoints(buf, BP_W, len, &dbginfo, dawr);
++
++	/* Read-Write watchpoint */
++	for (len = 1; len <= sizeof(long); len <<= 1)
++		launch_watchpoints(buf, BP_RW, len, &dbginfo, dawr);
++
++	ptrace(PTRACE_CONT, child_pid, NULL, 0);
++
++	/*
++	 * Now we have unregistered the breakpoint, access by child
++	 * should not cause SIGTRAP.
++	 */
++
++	wait(&status);
++
++	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) {
++		printf("FAIL: Child process hit the breakpoint, which is not expected\n");
++		ptrace(PTRACE_CONT, child_pid, NULL, 0);
++		return TEST_FAIL;
++	}
++
++	if (WIFEXITED(status))
++		printf("Child exited normally\n");
++
++	return TEST_PASS;
++}
++
++static int ptrace_hwbreak(void)
++{
++	pid_t pid;
++	int ret;
++	bool dawr;
++
++	pid = fork();
++	if (!pid) {
++		trigger_tests();
++		return 0;
++	}
++
++	wait(NULL);
++
++	child_pid = pid;
++
++	get_dbginfo();
++	SKIP_IF(!hwbreak_present());
++	dawr = dawr_present();
++
++	ret = launch_tests(dawr);
++
++	wait(NULL);
++
++	return ret;
++}
++
++int main(int argc, char **argv, char **envp)
++{
++	return test_harness(ptrace_hwbreak, "ptrace-hwbreak");
++}


2017-11-30 12:15 Alice Ferrazzi
2017-11-24  9:18 Alice Ferrazzi
2017-11-24  9:15 Alice Ferrazzi
2017-11-21 11:34 Mike Pagano
2017-11-21 11:24 Mike Pagano
2017-11-16 19:08 Mike Pagano
2017-10-23 16:31 Mike Pagano

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
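
  For example, a client such as mutt can open the saved mbox file
  directly (the file name below is only a placeholder, and mutt is just
  one of many clients that can read a local mbox):

    # open the saved mbox, select the message, then press 'g' to
    # group-reply (reply-to-all) with interleaved quoting
    mutt -f /path/to/saved.mbox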

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1541352643.45625e29ac9ee0956e90c0063b880573bbbca0f9.alicef@gentoo \
    --to=alicef@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
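
  For example, a reply URI for this message could take roughly the
  following form (shown only as a sketch of the mailto: syntax, with the
  header values percent-encoded; it is not a link copied from the
  archive page):

  mailto:alicef@gentoo.org?In-Reply-To=%3C1541352643.45625e29ac9ee0956e90c0063b880573bbbca0f9.alicef%40gentoo%3E&Cc=gentoo-commits%40lists.gentoo.org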

Be sure your reply has a Subject: header at the top and a blank line
before the message body.

This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox.
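
As a rough sketch, a mirror of an inbox like this one is usually
fetched with plain git (the URL below is hypothetical; the real one is
given in the mirroring instructions linked above):

  git clone --mirror https://example.org/gentoo-commits gentoo-commits.git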