From: "Alice Ferrazzi" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 31 Jan 2018 13:36:06 +0000 (UTC)
Message-ID: <1517405726.e87c7949f55854a73016f66184bfc84bc3830824.alicef@gentoo>

commit:     e87c7949f55854a73016f66184bfc84bc3830824
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 31 13:35:26 2018 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Jan 31 13:35:26 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e87c7949

Linux kernel 4.4.114

 0000_README              |    4 +
 1113_linux-4.4.114.patch | 3501 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3505 insertions(+)

diff --git a/0000_README b/0000_README
index 47159cb..918bb76 100644
--- a/0000_README
+++ b/0000_README
@@ -495,6 +495,10 @@ Patch:  1112_linux-4.4.113.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.113
 
+Patch:  1113_linux-4.4.114.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.114
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1113_linux-4.4.114.patch b/1113_linux-4.4.114.patch
new file mode 100644
index 0000000..836d94a
--- /dev/null
+++ b/1113_linux-4.4.114.patch
@@ -0,0 +1,3501 @@
+diff --git a/Makefile b/Makefile
+index 39019c9d205c..153440b1bbb0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 113
++SUBLEVEL = 114
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index e3abe6f3156d..9ccf462131c4 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -117,7 +117,7 @@ archheaders:
+ archprepare: include/generated/user_constants.h
+ 
+ LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
+-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib
++LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
+ 
+ CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
+ 	$(call cc-option, -fno-stack-protector,) \
+diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
+index 112178b401a1..2d359991a273 100644
+--- a/arch/x86/entry/vsyscall/vsyscall_64.c
++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
+@@ -46,6 +46,7 @@ static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
+ #else
+ 	EMULATE;
+ #endif
++unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;
+ 
+ static int __init vsyscall_setup(char *str)
+ {
+@@ -336,11 +337,11 @@ void __init map_vsyscall(void)
+ 	extern char __vsyscall_page;
+ 	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
+ 
++	if (vsyscall_mode != NATIVE)
++		vsyscall_pgprot = __PAGE_KERNEL_VVAR;
+ 	if (vsyscall_mode != NONE)
+ 		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
+-			     vsyscall_mode == NATIVE
+-			     ? PAGE_KERNEL_VSYSCALL
+-			     : PAGE_KERNEL_VVAR);
++			     __pgprot(vsyscall_pgprot));
+ 
+ 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
+ 		     (unsigned long)VSYSCALL_ADDR);
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 0fbc98568018..641f0f2c2982 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -199,6 +199,7 @@
+ #define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
+ #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
+ #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
++#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* Fill RSB on context switches */
+ 
+ #define X86_FEATURE_RETPOLINE	( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+new file mode 100644
+index 000000000000..6999f7d01a0d
+--- /dev/null
++++ b/arch/x86/include/asm/intel-family.h
+@@ -0,0 +1,68 @@
++#ifndef _ASM_X86_INTEL_FAMILY_H
++#define _ASM_X86_INTEL_FAMILY_H
++
++/*
++ * "Big Core" Processors (Branded as Core, Xeon, etc...)
++ *
++ * The "_X" parts are generally the EP and EX Xeons, or the
++ * "Extreme" ones, like Broadwell-E.
++ *
++ * Things ending in "2" are usually because we have no better
++ * name for them.  There's no processor called "WESTMERE2".
++ */
++
++#define INTEL_FAM6_CORE_YONAH		0x0E
++#define INTEL_FAM6_CORE2_MEROM		0x0F
++#define INTEL_FAM6_CORE2_MEROM_L	0x16
++#define INTEL_FAM6_CORE2_PENRYN		0x17
++#define INTEL_FAM6_CORE2_DUNNINGTON	0x1D
++
++#define INTEL_FAM6_NEHALEM		0x1E
++#define INTEL_FAM6_NEHALEM_EP		0x1A
++#define INTEL_FAM6_NEHALEM_EX		0x2E
++#define INTEL_FAM6_WESTMERE		0x25
++#define INTEL_FAM6_WESTMERE2		0x1F
++#define INTEL_FAM6_WESTMERE_EP		0x2C
++#define INTEL_FAM6_WESTMERE_EX		0x2F
++
++#define INTEL_FAM6_SANDYBRIDGE		0x2A
++#define INTEL_FAM6_SANDYBRIDGE_X	0x2D
++#define INTEL_FAM6_IVYBRIDGE		0x3A
++#define INTEL_FAM6_IVYBRIDGE_X		0x3E
++
++#define INTEL_FAM6_HASWELL_CORE		0x3C
++#define INTEL_FAM6_HASWELL_X		0x3F
++#define INTEL_FAM6_HASWELL_ULT		0x45
++#define INTEL_FAM6_HASWELL_GT3E		0x46
++
++#define INTEL_FAM6_BROADWELL_CORE	0x3D
++#define INTEL_FAM6_BROADWELL_XEON_D	0x56
++#define INTEL_FAM6_BROADWELL_GT3E	0x47
++#define INTEL_FAM6_BROADWELL_X		0x4F
++
++#define INTEL_FAM6_SKYLAKE_MOBILE	0x4E
++#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E
++#define INTEL_FAM6_SKYLAKE_X		0x55
++#define INTEL_FAM6_KABYLAKE_MOBILE	0x8E
++#define INTEL_FAM6_KABYLAKE_DESKTOP	0x9E
++
++/* "Small Core" Processors (Atom) */
++
++#define INTEL_FAM6_ATOM_PINEVIEW	0x1C
++#define INTEL_FAM6_ATOM_LINCROFT	0x26
++#define INTEL_FAM6_ATOM_PENWELL		0x27
++#define INTEL_FAM6_ATOM_CLOVERVIEW	0x35
++#define INTEL_FAM6_ATOM_CEDARVIEW	0x36
++#define INTEL_FAM6_ATOM_SILVERMONT1	0x37 /* BayTrail/BYT / Valleyview */
++#define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
++#define INTEL_FAM6_ATOM_AIRMONT		0x4C /* CherryTrail / Braswell */
++#define INTEL_FAM6_ATOM_MERRIFIELD1	0x4A /* Tangier */
++#define INTEL_FAM6_ATOM_MERRIFIELD2	0x5A /* Annidale */
++#define INTEL_FAM6_ATOM_GOLDMONT	0x5C
++#define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
++
++/* Xeon Phi */
++
++#define INTEL_FAM6_XEON_PHI_KNL		0x57 /* Knights Landing */
++
++#endif /* _ASM_X86_INTEL_FAMILY_H */
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index c124d6ab4bf9..86bccb4bd4dc 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -574,7 +574,7 @@ static inline void sync_core(void)
+ {
+ 	int tmp;
+ 
+-#ifdef CONFIG_M486
++#ifdef CONFIG_X86_32
+ 	/*
+ 	 * Do a CPUID if available, otherwise do a jump.  The jump
+ 	 * can conveniently enough be the jump around CPUID.
+diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
+index 751bf4b7bf11..025ecfaba9c9 100644
+--- a/arch/x86/include/asm/switch_to.h
++++ b/arch/x86/include/asm/switch_to.h
+@@ -1,6 +1,8 @@
+ #ifndef _ASM_X86_SWITCH_TO_H
+ #define _ASM_X86_SWITCH_TO_H
+ 
++#include <asm/nospec-branch.h>
++
+ struct task_struct; /* one of the stranger aspects of C forward declarations */
+ __visible struct task_struct *__switch_to(struct task_struct *prev,
+ 					   struct task_struct *next);
+@@ -24,6 +26,23 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ #define __switch_canary_iparam
+ #endif	/* CC_STACKPROTECTOR */
+ 
++#ifdef CONFIG_RETPOLINE
++	/*
++	 * When switching from a shallower to a deeper call stack
++	 * the RSB may either underflow or use entries populated
++	 * with userspace addresses. On CPUs where those concerns
++	 * exist, overwrite the RSB with entries which capture
++	 * speculative execution to prevent attack.
++	 */
++#define __retpoline_fill_return_buffer					\
++	ALTERNATIVE("jmp 910f",						\
++		__stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\
++		X86_FEATURE_RSB_CTXSW)					\
++	"910:\n\t"
++#else
++#define __retpoline_fill_return_buffer
++#endif
++
+ /*
+  * Saving eflags is important. It switches not only IOPL between tasks,
+  * it also protects other tasks from NT leaking through sysenter etc.
+@@ -46,6 +65,7 @@ do {									\
+ 		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
+ 		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
+ 		     __switch_canary					\
++		     __retpoline_fill_return_buffer			\
+ 		     "jmp __switch_to\n"	/* regparm call  */	\
+ 		     "1:\t"						\
+ 		     "popl %%ebp\n\t"		/* restore EBP   */	\
+@@ -100,6 +120,23 @@ do {									\
+ #define __switch_canary_iparam
+ #endif	/* CC_STACKPROTECTOR */
+ 
++#ifdef CONFIG_RETPOLINE
++	/*
++	 * When switching from a shallower to a deeper call stack
++	 * the RSB may either underflow or use entries populated
++	 * with userspace addresses. On CPUs where those concerns
++	 * exist, overwrite the RSB with entries which capture
++	 * speculative execution to prevent attack.
++	 */
++#define __retpoline_fill_return_buffer					\
++	ALTERNATIVE("jmp 910f",						\
++		__stringify(__FILL_RETURN_BUFFER(%%r12, RSB_CLEAR_LOOPS, %%rsp)),\
++		X86_FEATURE_RSB_CTXSW)					\
++	"910:\n\t"
++#else
++#define __retpoline_fill_return_buffer
++#endif
++
+ /*
+  * There is no need to save or restore flags, because flags are always
+  * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
+@@ -112,6 +149,7 @@ do {									\
+ 	     "call __switch_to\n\t"					  \
+ 	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
+ 	     __switch_canary						  \
++	     __retpoline_fill_return_buffer				  \
+ 	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
+ 	     "movq %%rax,%%rdi\n\t" 					  \
+ 	     "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
+diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
+index 4865e10dbb55..9ee85066f407 100644
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -13,6 +13,7 @@ extern void map_vsyscall(void);
+  */
+ extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
+ extern bool vsyscall_enabled(void);
++extern unsigned long vsyscall_pgprot;
+ #else
+ static inline void map_vsyscall(void) {}
+ static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index fc91c98bee01..fd945099fc95 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2592,8 +2592,8 @@ static struct resource * __init ioapic_setup_resources(void)
+ 		res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ 		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
+ 		mem += IOAPIC_RESOURCE_NAME_SIZE;
++		ioapics[i].iomem_res = &res[num];
+ 		num++;
+-		ioapics[i].iomem_res = res;
+ 	}
+ 
+ 	ioapic_resources = res;
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 49d25ddf0e9f..8cacf62ec458 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -22,6 +22,7 @@
+ #include <asm/alternative.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
++#include <asm/intel-family.h>
+ 
+ static void __init spectre_v2_select_mitigation(void);
+ 
+@@ -154,6 +155,23 @@ disable:
+ 	return SPECTRE_V2_CMD_NONE;
+ }
+ 
++/* Check for Skylake-like CPUs (for RSB handling) */
++static bool __init is_skylake_era(void)
++{
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
++	    boot_cpu_data.x86 == 6) {
++		switch (boot_cpu_data.x86_model) {
++		case INTEL_FAM6_SKYLAKE_MOBILE:
++		case INTEL_FAM6_SKYLAKE_DESKTOP:
++		case INTEL_FAM6_SKYLAKE_X:
++		case INTEL_FAM6_KABYLAKE_MOBILE:
++		case INTEL_FAM6_KABYLAKE_DESKTOP:
++			return true;
++		}
++	}
++	return false;
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+ 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -212,6 +230,24 @@ retpoline_auto:
+ 
+ 	spectre_v2_enabled = mode;
+ 	pr_info("%s\n", spectre_v2_strings[mode]);
++
++	/*
++	 * If neither SMEP or KPTI are available, there is a risk of
++	 * hitting userspace addresses in the RSB after a context switch
++	 * from a shallow call stack to a deeper one. To prevent this fill
++	 * the entire RSB, even when using IBRS.
++	 *
++	 * Skylake era CPUs have a separate issue with *underflow* of the
++	 * RSB, when they will predict 'ret' targets from the generic BTB.
++	 * The proper mitigation for this is IBRS. If IBRS is not supported
++	 * or deactivated in favour of retpolines the RSB fill on context
++	 * switch is required.
++	 */
++	if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
++	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
++		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
++		pr_info("Filling RSB on context switch\n");
++	}
+ }
+ 
+ #undef pr_fmt
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index e38d338a6447..b4ca91cf55b0 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
+ 		ci_leaf_init(this_leaf++, &id4_regs);
+ 		__cache_cpumap_setup(cpu, idx, &id4_regs);
+ 	}
++	this_cpu_ci->cpu_map_populated = true;
++
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index b428a8174be1..2c76a1801393 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -39,6 +39,9 @@
+ #include <asm/setup.h>
+ #include <asm/msr.h>
+ 
++/* last level cache size per core */
++static int llc_size_per_core;
++
+ static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
+ static struct mc_saved_data {
+ 	unsigned int mc_saved_count;
+@@ -996,15 +999,18 @@ static bool is_blacklisted(unsigned int cpu)
+ 
+ 	/*
+ 	 * Late loading on model 79 with microcode revision less than 0x0b000021
+-	 * may result in a system hang. This behavior is documented in item
+-	 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
++	 * and LLC size per core bigger than 2.5MB may result in a system hang.
++	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
++	 * Processor E7-8800/4800 v4 Product Family).
+ 	 */
+ 	if (c->x86 == 6 &&
+ 	    c->x86_model == 79 &&
+ 	    c->x86_mask == 0x01 &&
++	    llc_size_per_core > 2621440 &&
+ 	    c->microcode < 0x0b000021) {
+ 		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+ 		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
++		return true;
+ 	}
+ 
+ 	return false;
+@@ -1067,6 +1073,15 @@ static struct microcode_ops microcode_intel_ops = {
+ 	.microcode_fini_cpu               = microcode_fini_cpu,
+ };
+ 
++static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
++{
++	u64 llc_size = c->x86_cache_size * 1024;
++
++	do_div(llc_size, c->x86_max_cores);
++
++	return (int)llc_size;
++}
++
+ struct microcode_ops * __init init_intel_microcode(void)
+ {
+ 	struct cpuinfo_x86 *c = &boot_cpu_data;
+@@ -1077,6 +1092,8 @@ struct microcode_ops * __init init_intel_microcode(void)
+ 		return NULL;
+ 	}
+ 
++	llc_size_per_core = calc_llc_size_per_core(c);
++
+ 	return &microcode_intel_ops;
+ }
+ 
+diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
+index e912b2f6d36e..45772560aceb 100644
+--- a/arch/x86/lib/delay.c
++++ b/arch/x86/lib/delay.c
+@@ -93,6 +93,13 @@ static void delay_mwaitx(unsigned long __loops)
+ {
+ 	u64 start, end, delay, loops = __loops;
+ 
++	/*
++	 * Timer value of 0 causes MWAITX to wait indefinitely, unless there
++	 * is a store on the memory monitored by MONITORX.
++	 */
++	if (loops == 0)
++		return;
++
+ 	start = rdtsc_ordered();
+ 
+ 	for (;;) {
+diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
+index 8af98513d36c..2298434f7bdb 100644
+--- a/arch/x86/mm/kaiser.c
++++ b/arch/x86/mm/kaiser.c
+@@ -345,7 +345,7 @@ void __init kaiser_init(void)
+ 	if (vsyscall_enabled())
+ 		kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
+ 					  PAGE_SIZE,
+-					   __PAGE_KERNEL_VSYSCALL);
++					  vsyscall_pgprot);
+ 
+ 	for_each_possible_cpu(cpu) {
+ 		void *percpu_vaddr = __per_cpu_user_mapped_start +
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 9f77943653fb..b63a173786d5 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
+ 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
+ 
+ 		pr->pblk = object.processor.pblk_address;
+-
+-		/*
+-		 * We don't care about error returns - we just try to mark
+-		 * these reserved so that nobody else is confused into thinking
+-		 * that this region might be unused..
+-		 *
+-		 * (In particular, allocating the IO range for Cardbus)
+-		 */
+-		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+ 	}
+ 
+ 	/*
+diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
+index de325ae04ce1..3b3c5b90bd20 100644
+--- a/drivers/acpi/acpica/nsutils.c
++++ b/drivers/acpi/acpica/nsutils.c
+@@ -593,25 +593,20 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
+ void acpi_ns_terminate(void)
+ {
+ 	acpi_status status;
++	union acpi_operand_object *prev;
++	union acpi_operand_object *next;
+ 
+ 	ACPI_FUNCTION_TRACE(ns_terminate);
+ 
+-#ifdef ACPI_EXEC_APP
+-	{
+-		union acpi_operand_object *prev;
+-		union acpi_operand_object *next;
++	/* Delete any module-level code blocks */
+ 
+-		/* Delete any module-level code blocks */
+-
+-		next = acpi_gbl_module_code_list;
+-		while (next) {
+-			prev = next;
+-			next = next->method.mutex;
+-			prev->method.mutex = NULL;	/* Clear the Mutex (cheated) field */
+-			acpi_ut_remove_reference(prev);
+-		}
++	next = acpi_gbl_module_code_list;
++	while (next) {
++		prev = next;
++		next = next->method.mutex;
++		prev->method.mutex = NULL;	/* Clear the Mutex (cheated) field */
++		acpi_ut_remove_reference(prev);
+ 	}
+-#endif
+ 
+ 	/*
+ 	 * Free the entire namespace -- all nodes and all objects
+diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
+index 73c9c7fa9001..f06317d6fc38 100644
+--- a/drivers/acpi/glue.c
++++ b/drivers/acpi/glue.c
+@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
+ 		return -ENODEV;
+ 
+ 	/*
+-	 * If the device has a _HID (or _CID) returning a valid ACPI/PNP
+-	 * device ID, it is better to make it look less attractive here, so that
+-	 * the other device with the same _ADR value (that may not have a valid
+-	 * device ID) can be matched going forward.  [This means a second spec
+-	 * violation in a row, so whatever we do here is best effort anyway.]
++	 * If the device has a _HID returning a valid ACPI/PNP device ID, it is
++	 * better to make it look less attractive here, so that the other device
++	 * with the same _ADR value (that may not have a valid device ID) can be
++	 * matched going forward.  [This means a second spec violation in a row,
++	 * so whatever we do here is best effort anyway.]
+ 	 */
+-	return sta_present && list_empty(&adev->pnp.ids) ?
++	return sta_present && !adev->pnp.type.platform_id ?
+ 			FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+ }
+ 
+diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
+index f170d746336d..c72e64893d03 100644
+--- a/drivers/acpi/processor_throttling.c
++++ b/drivers/acpi/processor_throttling.c
+@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
+ 	if (!pr->flags.throttling)
+ 		return -ENODEV;
+ 
++	/*
++	 * We don't care about error returns - we just try to mark
++	 * these reserved so that nobody else is confused into thinking
++	 * that this region might be unused..
++	 *
++	 * (In particular, allocating the IO range for Cardbus)
++	 */
++	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
++
+ 	pr->throttling.state = 0;
+ 
+ 	duty_mask = pr->throttling.state_count - 1;
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index e9fd32e91668..70e13cf06ed0 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -16,6 +16,7 @@
+  * You should have received a copy of the GNU General Public License
+  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
++#include <linux/acpi.h>
+ #include <linux/bitops.h>
+ #include <linux/cacheinfo.h>
+ #include <linux/compiler.h>
+@@ -104,9 +105,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
+ 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ 	struct cacheinfo *this_leaf, *sib_leaf;
+ 	unsigned int index;
+-	int ret;
++	int ret = 0;
++
++	if (this_cpu_ci->cpu_map_populated)
++		return 0;
+ 
+-	ret = cache_setup_of_node(cpu);
++	if (of_have_populated_dt())
++		ret = cache_setup_of_node(cpu);
++	else if (!acpi_disabled)
++		/* No cache property/hierarchy support yet in ACPI */
++		ret = -ENOTSUPP;
+ 	if (ret)
+ 		return ret;
+ 
+@@ -203,8 +211,7 @@ static int detect_cache_attributes(unsigned int cpu)
+ 	 */
+ 	ret = cache_shared_cpu_map_setup(cpu);
+ 	if (ret) {
+-		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
+-			cpu);
++		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
+ 		goto free_ci;
+ 	}
+ 	return 0;
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index a311cfa4c5bd..a6975795e7f3 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -166,14 +166,14 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
+ }
+ EXPORT_SYMBOL(generate_pm_trace);
+ 
+-extern char __tracedata_start, __tracedata_end;
++extern char __tracedata_start[], __tracedata_end[];
+ static int show_file_hash(unsigned int value)
+ {
+ 	int match;
+ 	char *tracedata;
+ 
+ 	match = 0;
+-	for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
++	for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
+ 			tracedata += 2 + sizeof(unsigned long)) {
+ 		unsigned short lineno = *(unsigned short *)tracedata;
+ 		const char *file = *(const char **)(tracedata + 2);
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index 7e2dc5e56632..0b49f29bf0da 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -383,6 +383,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+ 	if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
+ 		psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+ 		button_info = 0x33;
++	} else if (!button_info) {
++		psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n");
++		button_info = 0x33;
+ 	}
+ 
+ 	psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 90e94a028a49..83b1226471c1 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -584,6 +584,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
+ {
+ 	struct sdhci_host *host;
+ 	struct device_node *np;
++	struct sdhci_pltfm_host *pltfm_host;
++	struct sdhci_esdhc *esdhc;
+ 	int ret;
+ 
+ 	np = pdev->dev.of_node;
+@@ -600,6 +602,14 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
+ 
+ 	sdhci_get_of_property(pdev);
+ 
++	pltfm_host = sdhci_priv(host);
++	esdhc = pltfm_host->priv;
++	if (esdhc->vendor_ver == VENDOR_V_22)
++		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
++
++	if (esdhc->vendor_ver > VENDOR_V_22)
++		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
++
+ 	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
+ 	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
+ 	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index c5ea1018cb47..24155380e43c 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -2205,19 +2205,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
+ 	void __iomem *ioaddr = tp->mmio_addr;
+ 	dma_addr_t paddr = tp->counters_phys_addr;
+ 	u32 cmd;
+-	bool ret;
+ 
+ 	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
++	RTL_R32(CounterAddrHigh);
+ 	cmd = (u64)paddr & DMA_BIT_MASK(32);
+ 	RTL_W32(CounterAddrLow, cmd);
+ 	RTL_W32(CounterAddrLow, cmd | counter_cmd);
+ 
+-	ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
+-
+-	RTL_W32(CounterAddrLow, 0);
+-	RTL_W32(CounterAddrHigh, 0);
+-
+-	return ret;
++	return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
+ }
+ 
+ static bool rtl8169_reset_counters(struct net_device *dev)
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 4e0068e775f9..b7b859c3a0c7 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -860,6 +860,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
+ 	struct pppoe_hdr *ph;
+ 	struct net_device *dev;
+ 	char *start;
++	int hlen;
+ 
+ 	lock_sock(sk);
+ 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
+@@ -878,16 +879,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
+ 	if (total_len > (dev->mtu + dev->hard_header_len))
+ 		goto end;
+ 
+-
+-	skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
+-			   0, GFP_KERNEL);
++	hlen = LL_RESERVED_SPACE(dev);
++	skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
++			   dev->needed_tailroom, 0, GFP_KERNEL);
+ 	if (!skb) {
+ 		error = -ENOMEM;
+ 		goto end;
+ 	}
+ 
+ 	/* Reserve space for headers. */
+-	skb_reserve(skb, dev->hard_header_len);
++	skb_reserve(skb, hlen);
+ 	skb_reset_network_header(skb);
+ 
+ 	skb->dev = dev;
+@@ -948,7 +949,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
+ 	/* Copy the data if there is no space for the header or if it's
+ 	 * read-only.
+ 	 */
+-	if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
++	if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
+ 		goto abort;
+ 
+ 	__skb_push(skb, sizeof(*ph));
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 41e9ebd7d0a6..ebdee8f01f65 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -1859,6 +1859,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
+ 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+ 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+ 		dev->rx_qlen = 4;
++		dev->tx_qlen = 4;
+ 	}
+ 
+ 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 0cbf520cea77..82bf85ae5d08 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1563,7 +1563,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+ 					  rq->rx_ring[i].basePA);
+ 			rq->rx_ring[i].base = NULL;
+ 		}
+-		rq->buf_info[i] = NULL;
+ 	}
+ 
+ 	if (rq->comp_ring.base) {
+@@ -1578,6 +1577,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+ 			(rq->rx_ring[0].size + rq->rx_ring[1].size);
+ 		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
+ 				  rq->buf_info_pa);
++		rq->buf_info[0] = rq->buf_info[1] = NULL;
+ 	}
+ }
+ 
+diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
+index 3923bed93c7e..a21e229d95e0 100644
+--- a/drivers/pci/host/pci-layerscape.c
++++ b/drivers/pci/host/pci-layerscape.c
+@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie)
+ 	iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
+ }
+ 
++/* Drop MSG TLP except for Vendor MSG */
++static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
++{
++	u32 val;
++
++	val = ioread32(pcie->dbi + PCIE_STRFMR1);
++	val &= 0xDFFFFFFF;
++	iowrite32(val, pcie->dbi + PCIE_STRFMR1);
++}
++
+ static int ls1021_pcie_link_up(struct pcie_port *pp)
+ {
+ 	u32 state;
+@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
+ static void ls1021_pcie_host_init(struct pcie_port *pp)
+ {
+ 	struct ls_pcie *pcie = to_ls_pcie(pp);
+-	u32 val, index[2];
++	u32 index[2];
+ 
+ 	pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
+ 						     "fsl,pcie-scfg");
+@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
+ 
+ 	dw_pcie_setup_rc(pp);
+ 
+-	/*
+-	 * LS1021A Workaround for internal TKT228622
+-	 * to fix the INTx hang issue
+-	 */
+-	val = ioread32(pcie->dbi + PCIE_STRFMR1);
+-	val &= 0xffff;
+-	iowrite32(val, pcie->dbi + PCIE_STRFMR1);
++	ls_pcie_drop_msg_tlp(pcie);
+ }
+ 
+ static int ls_pcie_link_up(struct pcie_port *pp)
+@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
+ 	iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
+ 	ls_pcie_fix_class(pcie);
+ 	ls_pcie_clear_multifunction(pcie);
++	ls_pcie_drop_msg_tlp(pcie);
+ 	iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
+ }
+ 
+@@ -203,6 +208,7 @@ static const struct of_device_id ls_pcie_of_match[] = {
+ 	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+ 	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+ 	{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
++	{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index c1ccf1ee99ea..efce04df2109 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1727,7 +1727,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+ 
+ 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ 		reason = FAILURE_SESSION_IN_RECOVERY;
+-		sc->result = DID_REQUEUE;
++		sc->result = DID_REQUEUE << 16;
+ 		goto fault;
+ 	}
+ 
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index a3ec49bdc1e6..ec38370ffcab 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -163,8 +163,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
+ 	 * step 1?
+ 	 */
+ 	if (ud->tcp_socket) {
+-		dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
+-			ud->tcp_socket);
++		dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
+ 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
+ 	}
+ 
+diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
+index 7de54a66044f..56cacb68040c 100644
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -338,23 +338,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
+ 	return priv;
+ }
+ 
+-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
++static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
+ {
+ 	struct usb_device *udev = sdev->udev;
+ 	struct usb_host_endpoint *ep;
+ 	struct usb_endpoint_descriptor *epd = NULL;
++	int epnum = pdu->base.ep;
++	int dir = pdu->base.direction;
++
++	if (epnum < 0 || epnum > 15)
++		goto err_ret;
+ 
+ 	if (dir == USBIP_DIR_IN)
+ 		ep = udev->ep_in[epnum & 0x7f];
+ 	else
+ 		ep = udev->ep_out[epnum & 0x7f];
+-	if (!ep) {
+-		dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
+-			epnum);
+-		BUG();
+-	}
++	if (!ep)
++		goto err_ret;
+ 
+ 	epd = &ep->desc;
++
+ 	if (usb_endpoint_xfer_control(epd)) {
+ 		if (dir == USBIP_DIR_OUT)
+ 			return usb_sndctrlpipe(udev, epnum);
+@@ -377,15 +380,37 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+ 	}
+ 
+ 	if (usb_endpoint_xfer_isoc(epd)) {
++		/* validate packet size and number of packets */
++		unsigned int maxp, packets, bytes;
++
++#define USB_EP_MAXP_MULT_SHIFT  11
++#define USB_EP_MAXP_MULT_MASK   (3 << USB_EP_MAXP_MULT_SHIFT)
++#define USB_EP_MAXP_MULT(m) \
++	(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
++
++		maxp = usb_endpoint_maxp(epd);
++		maxp *= (USB_EP_MAXP_MULT(
++				__le16_to_cpu(epd->wMaxPacketSize)) + 1);
++		bytes = pdu->u.cmd_submit.transfer_buffer_length;
++		packets = DIV_ROUND_UP(bytes, maxp);
++
++		if (pdu->u.cmd_submit.number_of_packets < 0 ||
++		    pdu->u.cmd_submit.number_of_packets > packets) {
++			dev_err(&sdev->udev->dev,
++				"CMD_SUBMIT: isoc invalid num packets %d\n",
++				pdu->u.cmd_submit.number_of_packets);
++			return -1;
++		}
+ 		if (dir == USBIP_DIR_OUT)
+ 			return usb_sndisocpipe(udev, epnum);
+ 		else
+ 			return usb_rcvisocpipe(udev, epnum);
+ 	}
+ 
++err_ret:
+ 	/* NOT REACHED */
+-	dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum);
+-	return 0;
++	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
++	return -1;
+ }
+ 
+ static void masking_bogus_flags(struct urb *urb)
+@@ -449,7 +474,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 	struct stub_priv *priv;
+ 	struct usbip_device *ud = &sdev->ud;
+ 	struct usb_device *udev = sdev->udev;
+-	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
++	int pipe = get_pipe(sdev, pdu);
++
++	if (pipe == -1)
++		return;
+ 
+ 	priv = stub_priv_alloc(sdev, pdu);
+ 	if (!priv)
+diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
+index 9752b93f754e..1838f1b2c2fa 100644
+--- a/drivers/usb/usbip/usbip_common.c
++++ b/drivers/usb/usbip/usbip_common.c
+@@ -317,18 +317,14 @@ int usbip_recv(struct socket *sock, void *buf, int size)
+ 	struct msghdr msg;
+ 	struct kvec iov;
+ 	int total = 0;
+-
+ 	/* for blocks of if (usbip_dbg_flag_xmit) */
+ 	char *bp = buf;
+ 	int osize = size;
+ 
+-	usbip_dbg_xmit("enter\n");
+-
+-	if (!sock || !buf || !size) {
+-		pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
+-		       size);
++	if (!sock || !buf || !size)
+ 		return -EINVAL;
+-	}
++
++	usbip_dbg_xmit("enter\n");
+ 
+ 	do {
+ 		sock->sk->sk_allocation = GFP_NOIO;
+@@ -341,11 +337,8 @@ int usbip_recv(struct socket *sock, void *buf, int size)
+ 		msg.msg_flags      = MSG_NOSIGNAL;
+ 
+ 		result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
+-		if (result <= 0) {
+-			pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
+-				 sock, buf, size, result, total);
++		if (result <= 0)
+ 			goto err;
+-		}
+ 
+ 		size -= result;
+ 		buf += result;
+diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
+index 86b08475c254..f875ccaa55f9 100644
+--- a/drivers/usb/usbip/usbip_common.h
++++ b/drivers/usb/usbip/usbip_common.h
+@@ -261,6 +261,7 @@ struct usbip_device {
+ 	/* lock for status */
+ 	spinlock_t lock;
+ 
++	int sockfd;
+ 	struct socket *tcp_socket;
+ 
+ 	struct task_struct *tcp_rx;
+diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
+index 64933b993d7a..2580a32bcdff 100644
+--- a/drivers/usb/usbip/usbip_event.c
++++ b/drivers/usb/usbip/usbip_event.c
+@@ -117,11 +117,12 @@ EXPORT_SYMBOL_GPL(usbip_event_add);
+ int usbip_event_happened(struct usbip_device *ud)
+ {
+ 	int happened = 0;
++	unsigned long flags;
+ 
+-	spin_lock(&ud->lock);
++	spin_lock_irqsave(&ud->lock, flags);
+ 	if (ud->event != 0)
+ 		happened = 1;
+-	spin_unlock(&ud->lock);
++	spin_unlock_irqrestore(&ud->lock, flags);
+ 
+ 	return happened;
+ }
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index f9af04d7f02f..00d68945548e 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -121,9 +121,11 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
+ 
+ void rh_port_connect(int rhport, enum usb_device_speed speed)
+ {
++	unsigned long	flags;
++
+ 	usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport);
+ 
+-	spin_lock(&the_controller->lock);
++	spin_lock_irqsave(&the_controller->lock, flags);
+ 
+ 	the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION
+ 		| (1 << USB_PORT_FEAT_C_CONNECTION);
+@@ -139,22 +141,24 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
+ 		break;
+ 	}
+ 
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 	usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
+ }
+ 
+ static void rh_port_disconnect(int rhport)
+ {
++	unsigned long	flags;
++
+ 	usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport);
+ 
+-	spin_lock(&the_controller->lock);
++	spin_lock_irqsave(&the_controller->lock, flags);
+ 
+ 	the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION;
+ 	the_controller->port_status[rhport] |=
+ 					(1 << USB_PORT_FEAT_C_CONNECTION);
+ 
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ 	usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
+ }
+ 
+@@ -182,13 +186,14 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
+ 	int		retval;
+ 	int		rhport;
+ 	int		changed = 0;
++	unsigned long	flags;
+ 
+ 	retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8);
+ 	memset(buf, 0, retval);
+ 
+ 	vhci = hcd_to_vhci(hcd);
+ 
+-	spin_lock(&vhci->lock);
++	spin_lock_irqsave(&vhci->lock, flags);
+ 	if (!HCD_HW_ACCESSIBLE(hcd)) {
+ 		usbip_dbg_vhci_rh("hw accessible flag not on?\n");
+ 		goto done;
+@@ -209,7 +214,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
+ 		usb_hcd_resume_root_hub(hcd);
+ 
+ done:
+-	spin_unlock(&vhci->lock);
++	spin_unlock_irqrestore(&vhci->lock, flags);
+ 	return changed ? retval : 0;
+ }
+ 
+@@ -236,6 +241,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 	struct vhci_hcd	*dum;
+ 	int             retval = 0;
+ 	int		rhport;
++	unsigned long	flags;
+ 
+ 	u32 prev_port_status[VHCI_NPORTS];
+ 
+@@ -254,7 +260,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 
+ 	dum = hcd_to_vhci(hcd);
+ 
+-	spin_lock(&dum->lock);
++	spin_lock_irqsave(&dum->lock, flags);
+ 
+ 	/* store old status and compare now and old later */
+ 	if (usbip_dbg_flag_vhci_rh) {
+@@ -408,7 +414,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 	}
+ 	usbip_dbg_vhci_rh(" bye\n");
+ 
+-	spin_unlock(&dum->lock);
++	spin_unlock_irqrestore(&dum->lock, flags);
+ 
+ 	return retval;
+ }
+@@ -431,6 +437,7 @@ static void vhci_tx_urb(struct urb *urb)
+ {
+ 	struct vhci_device *vdev = get_vdev(urb->dev);
+ 	struct vhci_priv *priv;
++	unsigned long flags;
+ 
+ 	if (!vdev) {
+ 		pr_err("could not get virtual device");
+@@ -443,7 +450,7 @@ static void vhci_tx_urb(struct urb *urb)
+ 		return;
+ 	}
+ 
+-	spin_lock(&vdev->priv_lock);
++	spin_lock_irqsave(&vdev->priv_lock, flags);
+ 
+ 	priv->seqnum = atomic_inc_return(&the_controller->seqnum);
+ 	if (priv->seqnum == 0xffff)
+@@ -457,7 +464,7 @@ static void vhci_tx_urb(struct urb *urb)
+ 	list_add_tail(&priv->list, &vdev->priv_tx);
+ 
+ 	wake_up(&vdev->waitq_tx);
+-	spin_unlock(&vdev->priv_lock);
++	spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ }
+ 
+ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+@@ -466,15 +473,16 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ 	struct device *dev = &urb->dev->dev;
+ 	int ret = 0;
+ 	struct vhci_device *vdev;
++	unsigned long flags;
+ 
+ 	/* patch to usb_sg_init() is in 2.5.60 */
+ 	BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
+ 
+-	spin_lock(&the_controller->lock);
++	spin_lock_irqsave(&the_controller->lock, flags);
+ 
+ 	if (urb->status != -EINPROGRESS) {
+ 		dev_err(dev, "URB already unlinked!, status %d\n", urb->status);
+-		spin_unlock(&the_controller->lock);
++		spin_unlock_irqrestore(&the_controller->lock, flags);
+ 		return urb->status;
+ 	}
+ 
+@@ -486,7 +494,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ 	    vdev->ud.status == VDEV_ST_ERROR) {
+ 		dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport);
+ 		spin_unlock(&vdev->ud.lock);
+-		spin_unlock(&the_controller->lock);
++		spin_unlock_irqrestore(&the_controller->lock, flags);
+ 		return -ENODEV;
+ 	}
+ 	spin_unlock(&vdev->ud.lock);
+@@ -559,14 +567,14 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ 
+ out:
+ 	vhci_tx_urb(urb);
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 	return 0;
+ 
+ no_need_xmit:
+ 	usb_hcd_unlink_urb_from_ep(hcd, urb);
+ no_need_unlink:
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ 	if (!ret)
+ 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller),
+ 				     urb, urb->status);
+@@ -623,14 +631,15 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ {
+ 	struct vhci_priv *priv;
+ 	struct vhci_device *vdev;
++	unsigned long flags;
+ 
+-	spin_lock(&the_controller->lock);
++	spin_lock_irqsave(&the_controller->lock, flags);
+ 
+ 	priv = urb->hcpriv;
+ 	if (!priv) {
+ 		/* URB was never linked! or will be soon given back by
+ 		 * vhci_rx. */
+-		spin_unlock(&the_controller->lock);
++		spin_unlock_irqrestore(&the_controller->lock, flags);
+ 		return -EIDRM;
+ 	}
+ 
+@@ -639,7 +648,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 
+ 		ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ 		if (ret) {
+-			spin_unlock(&the_controller->lock);
++			spin_unlock_irqrestore(&the_controller->lock, flags);
+ 			return ret;
+ 		}
+ 	}
+@@ -664,10 +673,10 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		 */
+ 		usb_hcd_unlink_urb_from_ep(hcd, urb);
+ 
+-		spin_unlock(&the_controller->lock);
++		spin_unlock_irqrestore(&the_controller->lock, flags);
+ 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
+ 				     urb->status);
+-		spin_lock(&the_controller->lock);
++		spin_lock_irqsave(&the_controller->lock, flags);
+ 
+ 	} else {
+ 		/* tcp connection is alive */
+@@ -679,7 +688,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC);
+ 		if (!unlink) {
+ 			spin_unlock(&vdev->priv_lock);
+-			spin_unlock(&the_controller->lock);
++			spin_unlock_irqrestore(&the_controller->lock, flags);
+ 			usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
+ 			return -ENOMEM;
+ 		}
+@@ -698,7 +707,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		spin_unlock(&vdev->priv_lock);
+ 	}
+ 
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 	usbip_dbg_vhci_hc("leave\n");
+ 	return 0;
+@@ -707,8 +716,9 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
+ {
+ 	struct vhci_unlink *unlink, *tmp;
++	unsigned long flags;
+ 
+-	spin_lock(&the_controller->lock);
++	spin_lock_irqsave(&the_controller->lock, flags);
+ 	spin_lock(&vdev->priv_lock);
+ 
+ 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
+@@ -742,19 +752,19 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
+ 		list_del(&unlink->list);
+ 
+ 		spin_unlock(&vdev->priv_lock);
+-		spin_unlock(&the_controller->lock);
++		spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
+ 				     urb->status);
+ 
+-		spin_lock(&the_controller->lock);
++		spin_lock_irqsave(&the_controller->lock, flags);
+ 		spin_lock(&vdev->priv_lock);
+ 
+ 		kfree(unlink);
+ 	}
+ 
+ 	spin_unlock(&vdev->priv_lock);
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ }
+ 
+ /*
+@@ -768,7 +778,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
+ 
+ 	/* need this? see stub_dev.c */
+ 	if (ud->tcp_socket) {
+-		pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
++		pr_debug("shutdown sockfd %d\n", ud->sockfd);
+ 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
+ 	}
+ 
+@@ -821,8 +831,9 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
+ static void vhci_device_reset(struct usbip_device *ud)
+ {
+ 	struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
++	unsigned long flags;
+ 
+-	spin_lock(&ud->lock);
++	spin_lock_irqsave(&ud->lock, flags);
+ 
+ 	vdev->speed  = 0;
+ 	vdev->devid  = 0;
+@@ -836,14 +847,16 @@ static void vhci_device_reset(struct usbip_device *ud)
+ 	}
+ 	ud->status = VDEV_ST_NULL;
+ 
+-	spin_unlock(&ud->lock);
++	spin_unlock_irqrestore(&ud->lock, flags);
+ }
+ 
+ static void vhci_device_unusable(struct usbip_device *ud)
+ {
+-	spin_lock(&ud->lock);
++	unsigned long flags;
++
++	spin_lock_irqsave(&ud->lock, flags);
+ 	ud->status = VDEV_ST_ERROR;
+-	spin_unlock(&ud->lock);
++	spin_unlock_irqrestore(&ud->lock, flags);
+ }
+ 
+ static void vhci_device_init(struct vhci_device *vdev)
+@@ -933,12 +946,13 @@ static int vhci_get_frame_number(struct usb_hcd *hcd)
+ static int vhci_bus_suspend(struct usb_hcd *hcd)
+ {
+ 	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
++	unsigned long flags;
+ 
+ 	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
+ 
+-	spin_lock(&vhci->lock);
++	spin_lock_irqsave(&vhci->lock, flags);
+ 	hcd->state = HC_STATE_SUSPENDED;
+-	spin_unlock(&vhci->lock);
++	spin_unlock_irqrestore(&vhci->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -947,15 +961,16 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
+ {
+ 	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+ 	int rc = 0;
++	unsigned long flags;
+ 
+ 	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
+ 
+-	spin_lock(&vhci->lock);
++	spin_lock_irqsave(&vhci->lock, flags);
+ 	if (!HCD_HW_ACCESSIBLE(hcd))
+ 		rc = -ESHUTDOWN;
+ 	else
+ 		hcd->state = HC_STATE_RUNNING;
+-	spin_unlock(&vhci->lock);
++	spin_unlock_irqrestore(&vhci->lock, flags);
+ 
+ 	return rc;
+ }
+@@ -1053,17 +1068,18 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
+ 	int rhport = 0;
+ 	int connected = 0;
+ 	int ret = 0;
++	unsigned long flags;
+ 
+ 	hcd = platform_get_drvdata(pdev);
+ 
+-	spin_lock(&the_controller->lock);
++	spin_lock_irqsave(&the_controller->lock, flags);
+ 
+ 	for (rhport = 0; rhport < VHCI_NPORTS; rhport++)
+ 		if (the_controller->port_status[rhport] &
+ 		    USB_PORT_STAT_CONNECTION)
+ 			connected += 1;
+ 
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 	if (connected > 0) {
+ 		dev_info(&pdev->dev,
+diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
+index bc4eb0855314..323aa7789989 100644
+--- a/drivers/usb/usbip/vhci_rx.c
++++ b/drivers/usb/usbip/vhci_rx.c
+@@ -71,10 +71,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+ {
+ 	struct usbip_device *ud = &vdev->ud;
+ 	struct urb *urb;
++	unsigned long flags;
+ 
+-	spin_lock(&vdev->priv_lock);
++	spin_lock_irqsave(&vdev->priv_lock, flags);
+ 	urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
+-	spin_unlock(&vdev->priv_lock);
++	spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ 
+ 	if (!urb) {
+ 		pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
+@@ -103,9 +104,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+ 
+ 	usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
+ 
+-	spin_lock(&the_controller->lock);
++	spin_lock_irqsave(&the_controller->lock, flags);
+ 	usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 	usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
+ 
+@@ -116,8 +117,9 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
+ 						  struct usbip_header *pdu)
+ {
+ 	struct vhci_unlink *unlink, *tmp;
++	unsigned long flags;
+ 
+-	spin_lock(&vdev->priv_lock);
++	spin_lock_irqsave(&vdev->priv_lock, flags);
+ 
+ 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
+ 		pr_info("unlink->seqnum %lu\n", unlink->seqnum);
+@@ -126,12 +128,12 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
+ 					  unlink->seqnum);
+ 			list_del(&unlink->list);
+ 
+-			spin_unlock(&vdev->priv_lock);
++			spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ 			return unlink;
+ 		}
+ 	}
+ 
+-	spin_unlock(&vdev->priv_lock);
++	spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ 
+ 	return NULL;
+ }
+@@ -141,6 +143,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
+ {
+ 	struct vhci_unlink *unlink;
+ 	struct urb *urb;
++	unsigned long flags;
+ 
+ 	usbip_dump_header(pdu);
+ 
+@@ -151,9 +154,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
+ 		return;
+ 	}
+ 
+-	spin_lock(&vdev->priv_lock);
++	spin_lock_irqsave(&vdev->priv_lock, flags);
+ 	urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+-	spin_unlock(&vdev->priv_lock);
++	spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ 
+ 	if (!urb) {
+ 		/*
+@@ -170,9 +173,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
+ 		urb->status = pdu->u.ret_unlink.status;
+ 		pr_info("urb->status %d\n", urb->status);
+ 
+-		spin_lock(&the_controller->lock);
++		spin_lock_irqsave(&the_controller->lock, flags);
+ 		usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
+-		spin_unlock(&the_controller->lock);
++		spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
+ 				     urb->status);
+@@ -184,10 +187,11 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
+ static int vhci_priv_tx_empty(struct vhci_device *vdev)
+ {
+ 	int empty = 0;
++	unsigned long flags;
+ 
+-	spin_lock(&vdev->priv_lock);
++	spin_lock_irqsave(&vdev->priv_lock, flags);
+ 	empty = list_empty(&vdev->priv_rx);
+-	spin_unlock(&vdev->priv_lock);
++	spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ 
+ 	return empty;
+ }
+diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
+index 211f43f67ea2..1c7f41a65565 100644
+--- a/drivers/usb/usbip/vhci_sysfs.c
++++ b/drivers/usb/usbip/vhci_sysfs.c
+@@ -32,23 +32,28 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ {
+ 	char *s = out;
+ 	int i = 0;
++	unsigned long flags;
+ 
+ 	BUG_ON(!the_controller || !out);
+ 
+-	spin_lock(&the_controller->lock);
++	spin_lock_irqsave(&the_controller->lock, flags);
+ 
+ 	/*
+ 	 * output example:
+-	 * prt sta spd dev socket           local_busid
+-	 * 000 004 000 000         c5a7bb80 1-2.3
+-	 * 001 004 000 000         d8cee980 2-3.4
++	 * port sta spd dev      sockfd local_busid
++	 * 0000 004 000 00000000 000003 1-2.3
++	 * 0001 004 000 00000000 000004 2-3.4
+ 	 *
+-	 * IP address can be retrieved from a socket pointer address by looking
+-	 * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
+-	 * port number and its peer IP address.
++	 * Output includes socket fd instead of socket pointer address to
++	 * avoid leaking kernel memory address in:
++	 *	/sys/devices/platform/vhci_hcd.0/status and in debug output.
++	 * The socket pointer address is not used at the moment and it was
++	 * made visible as a convenient way to find IP address from socket
++	 * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens
++	 * a security hole, the change is made to use sockfd instead.
+ 	 */
+ 	out += sprintf(out,
+-		       "prt sta spd bus dev socket           local_busid\n");
++		       "prt sta spd bus dev sockfd local_busid\n");
+ 
+ 	for (i = 0; i < VHCI_NPORTS; i++) {
+ 		struct vhci_device *vdev = port_to_vdev(i);
+@@ -60,17 +65,17 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ 			out += sprintf(out, "%03u %08x ",
+ 				       vdev->speed, vdev->devid);
+ 			out += sprintf(out, "%16p ", vdev->ud.tcp_socket);
++			out += sprintf(out, "%06u", vdev->ud.sockfd);
+ 			out += sprintf(out, "%s", dev_name(&vdev->udev->dev));
+ 
+-		} else {
+-			out += sprintf(out, "000 000 000 0000000000000000 0-0");
+-		}
++		} else
++			out += sprintf(out, "000 000 000 000000 0-0");
+ 
+ 		out += sprintf(out, "\n");
+ 		spin_unlock(&vdev->ud.lock);
+ 	}
+ 
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 	return out - s;
+ }
+@@ -80,11 +85,12 @@ static DEVICE_ATTR_RO(status);
+ static int vhci_port_disconnect(__u32 rhport)
+ {
+ 	struct vhci_device *vdev;
++	unsigned long flags;
+ 
+ 	usbip_dbg_vhci_sysfs("enter\n");
+ 
+ 	/* lock */
+-	spin_lock(&the_controller->lock);
++	spin_lock_irqsave(&the_controller->lock, flags);
+ 
+ 	vdev = port_to_vdev(rhport);
+ 
+@@ -94,14 +100,14 @@ static int vhci_port_disconnect(__u32 rhport)
+ 
+ 		/* unlock */
+ 		spin_unlock(&vdev->ud.lock);
+-		spin_unlock(&the_controller->lock);
++		spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* unlock */
+ 	spin_unlock(&vdev->ud.lock);
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 	usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
+ 
+@@ -177,6 +183,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
+ 	int sockfd = 0;
+ 	__u32 rhport = 0, devid = 0, speed = 0;
+ 	int err;
++	unsigned long flags;
+ 
+ 	/*
+ 	 * @rhport: port number of vhci_hcd
+@@ -202,14 +209,14 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
+ 	/* now need lock until setting vdev status as used */
+ 
+ 	/* begin a lock */
+-	spin_lock(&the_controller->lock);
++	spin_lock_irqsave(&the_controller->lock, flags);
+ 	vdev = port_to_vdev(rhport);
+ 	spin_lock(&vdev->ud.lock);
+ 
+ 	if (vdev->ud.status != VDEV_ST_NULL) {
+ 		/* end of the lock */
+ 		spin_unlock(&vdev->ud.lock);
+-		spin_unlock(&the_controller->lock);
++		spin_unlock_irqrestore(&the_controller->lock, flags);
+ 
+ 		sockfd_put(socket);
+ 
+@@ -223,11 +230,12 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
+ 
+ 	vdev->devid         = devid;
+ 	vdev->speed         = speed;
++	vdev->ud.sockfd     = sockfd;
+ 	vdev->ud.tcp_socket = socket;
+ 	vdev->ud.status     = VDEV_ST_NOTASSIGNED;
+ 
+ 	spin_unlock(&vdev->ud.lock);
+-	spin_unlock(&the_controller->lock);
++	spin_unlock_irqrestore(&the_controller->lock, flags);
+ 	/* end the lock */
+ 
+ 	vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
+diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
+index 3c5796c8633a..a9a663a578b6 100644
+--- a/drivers/usb/usbip/vhci_tx.c
++++ b/drivers/usb/usbip/vhci_tx.c
+@@ -47,16 +47,17 @@ static void setup_cmd_submit_pdu(struct usbip_header *pdup,  struct urb *urb)
+ static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
+ {
+ 	struct vhci_priv *priv, *tmp;
++	unsigned long flags;
+ 
+-	spin_lock(&vdev->priv_lock);
++	spin_lock_irqsave(&vdev->priv_lock, flags);
+ 
+ 	list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) {
+ 		list_move_tail(&priv->list, &vdev->priv_rx);
+-		spin_unlock(&vdev->priv_lock);
++		spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ 		return priv;
+ 	}
+ 
+-	spin_unlock(&vdev->priv_lock);
++	spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ 
+ 	return NULL;
+ }
+@@ -137,16 +138,17 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
+ static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
+ {
+ 	struct vhci_unlink *unlink, *tmp;
++	unsigned long flags;
+ 
+-	spin_lock(&vdev->priv_lock);
++	spin_lock_irqsave(&vdev->priv_lock, flags);
+ 
+ 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
+ 		list_move_tail(&unlink->list, &vdev->unlink_rx);
+-		spin_unlock(&vdev->priv_lock);
++		spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ 		return unlink;
+ 	}
+ 
+-	spin_unlock(&vdev->priv_lock);
++	spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ 
+ 	return NULL;
+ }
+diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
+index d6aeb84e90b6..d882d873c5a3 100644
+--- a/fs/ext2/acl.c
++++ b/fs/ext2/acl.c
+@@ -178,11 +178,8 @@ ext2_get_acl(struct inode *inode, int type)
+ 	return acl;
+ }
+ 
+-/*
+- * inode->i_mutex: down
+- */
+-int
+-ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
++static int
++__ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ {
+ 	int name_index;
+ 	void *value = NULL;
+@@ -192,13 +189,6 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	switch(type) {
+ 		case ACL_TYPE_ACCESS:
+ 			name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
+-			if (acl) {
+-				error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+-				if (error)
+-					return error;
+-				inode->i_ctime = CURRENT_TIME_SEC;
+-				mark_inode_dirty(inode);
+-			}
+ 			break;
+ 
+ 		case ACL_TYPE_DEFAULT:
+@@ -224,6 +214,24 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	return error;
+ }
+ 
++/*
++ * inode->i_mutex: down
++ */
++int
++ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
++{
++	int error;
++
++	if (type == ACL_TYPE_ACCESS && acl) {
++		error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++		if (error)
++			return error;
++		inode->i_ctime = CURRENT_TIME_SEC;
++		mark_inode_dirty(inode);
++	}
++	return __ext2_set_acl(inode, acl, type);
++}
++
+ /*
+  * Initialize the ACLs of a new inode. Called from ext2_new_inode.
+  *
+@@ -241,12 +249,12 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
+ 		return error;
+ 
+ 	if (default_acl) {
+-		error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
++		error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ 		posix_acl_release(default_acl);
+ 	}
+ 	if (acl) {
+ 		if (!error)
+-			error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
++			error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ 		posix_acl_release(acl);
+ 	}
+ 	return error;
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index 62376451bbce..5df914943d96 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -113,6 +113,10 @@ void f_setown(struct file *filp, unsigned long arg, int force)
+ 	int who = arg;
+ 	type = PIDTYPE_PID;
+ 	if (who < 0) {
++		/* avoid overflow below */
++		if (who == INT_MIN)
++			return;
++
+ 		type = PIDTYPE_PGID;
+ 		who = -who;
+ 	}
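
A note on the f_setown() guard above: in C, negating INT_MIN is undefined
behaviour because the result does not fit in an int, so the value has to be
rejected rather than negated. A minimal user-space sketch of the idea
(normalize_who is an invented name, not kernel code):

	#include <limits.h>
	#include <stdio.h>

	/* Mirrors the guard added to f_setown(): refuse INT_MIN
	 * instead of negating it. */
	static int normalize_who(int who)
	{
		if (who < 0) {
			if (who == INT_MIN)	/* -INT_MIN does not fit in an int */
				return -1;
			who = -who;
		}
		return who;
	}

	int main(void)
	{
		printf("%d\n", normalize_who(-42));	/* 42 */
		printf("%d\n", normalize_who(INT_MIN));	/* -1: rejected */
		return 0;
	}
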
+diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
+index a260060042ad..67eb154af881 100644
+--- a/fs/nfsd/auth.c
++++ b/fs/nfsd/auth.c
+@@ -60,9 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
+ 			else
+ 				GROUP_AT(gi, i) = GROUP_AT(rqgi, i);
+ 
+-			/* Each thread allocates its own gi, no race */
+-			groups_sort(gi);
+ 		}
++
++		/* Each thread allocates its own gi, no race */
++		groups_sort(gi);
+ 	} else {
+ 		gi = get_group_info(rqgi);
+ 	}
+diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
+index dc198bc64c61..edc8ef78b63f 100644
+--- a/fs/reiserfs/bitmap.c
++++ b/fs/reiserfs/bitmap.c
+@@ -513,9 +513,17 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
+ 			       "inode has negative prealloc blocks count.");
+ #endif
+ 	while (ei->i_prealloc_count > 0) {
+-		reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
+-		ei->i_prealloc_block++;
++		b_blocknr_t block_to_free;
++
++		/*
++		 * reiserfs_free_prealloc_block can drop the write lock,
++		 * which could allow another caller to free the same block.
++		 * We can protect against it by modifying the prealloc
++		 * state before calling it.
++		 */
++		block_to_free = ei->i_prealloc_block++;
+ 		ei->i_prealloc_count--;
++		reiserfs_free_prealloc_block(th, inode, block_to_free);
+ 		dirty = 1;
+ 	}
+ 	if (dirty)
+@@ -1128,7 +1136,7 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
+ 	hint->prealloc_size = 0;
+ 
+ 	if (!hint->formatted_node && hint->preallocate) {
+-		if (S_ISREG(hint->inode->i_mode)
++		if (S_ISREG(hint->inode->i_mode) && !IS_PRIVATE(hint->inode)
+ 		    && hint->inode->i_size >=
+ 		    REISERFS_SB(hint->th->t_super)->s_alloc_options.
+ 		    preallocmin * hint->inode->i_sb->s_blocksize)
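
The __discard_prealloc() comment above describes a general locking pattern:
when a callee may drop a lock, commit the shared-state update before calling
it. A stand-alone sketch of that ordering, with invented names standing in
for the reiserfs helpers:

	#include <stdio.h>

	/* free_block() is an invented stand-in for
	 * reiserfs_free_prealloc_block(), which may drop and retake
	 * the write lock while it runs. */
	static void free_block(unsigned long blk)
	{
		printf("freeing block %lu\n", blk);
	}

	int main(void)
	{
		unsigned long prealloc_block = 100;
		int prealloc_count = 3;

		while (prealloc_count > 0) {
			/* snapshot and advance the shared state before the
			 * call that may drop the lock; a racing caller then
			 * sees the updated cursor and cannot free the same
			 * block twice */
			unsigned long block_to_free = prealloc_block++;

			prealloc_count--;
			free_block(block_to_free);
		}
		return 0;
	}
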
+diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
+index 9b1824f35501..91b036902a17 100644
+--- a/fs/reiserfs/xattr_acl.c
++++ b/fs/reiserfs/xattr_acl.c
+@@ -37,7 +37,14 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	error = journal_begin(&th, inode->i_sb, jcreate_blocks);
+ 	reiserfs_write_unlock(inode->i_sb);
+ 	if (error == 0) {
++		if (type == ACL_TYPE_ACCESS && acl) {
++			error = posix_acl_update_mode(inode, &inode->i_mode,
++						      &acl);
++			if (error)
++				goto unlock;
++		}
+ 		error = __reiserfs_set_acl(&th, inode, type, acl);
++unlock:
+ 		reiserfs_write_lock(inode->i_sb);
+ 		error2 = journal_end(&th);
+ 		reiserfs_write_unlock(inode->i_sb);
+@@ -245,11 +252,6 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
+ 	switch (type) {
+ 	case ACL_TYPE_ACCESS:
+ 		name = POSIX_ACL_XATTR_ACCESS;
+-		if (acl) {
+-			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+-			if (error)
+-				return error;
+-		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+ 		name = POSIX_ACL_XATTR_DEFAULT;
+diff --git a/fs/select.c b/fs/select.c
+index 015547330e88..f4dd55fc638c 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -29,6 +29,7 @@
+ #include <linux/sched/rt.h>
+ #include <linux/freezer.h>
+ #include <net/busy_poll.h>
++#include <linux/vmalloc.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -550,7 +551,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+ 	fd_set_bits fds;
+ 	void *bits;
+ 	int ret, max_fds;
+-	unsigned int size;
++	size_t size, alloc_size;
+ 	struct fdtable *fdt;
+ 	/* Allocate small arguments on the stack to save memory and be faster */
+ 	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
+@@ -577,7 +578,14 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+ 	if (size > sizeof(stack_fds) / 6) {
+ 		/* Not enough space in on-stack array; must use kmalloc */
+ 		ret = -ENOMEM;
+-		bits = kmalloc(6 * size, GFP_KERNEL);
++		if (size > (SIZE_MAX / 6))
++			goto out_nofds;
++
++		alloc_size = 6 * size;
++		bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
++		if (!bits && alloc_size > PAGE_SIZE)
++			bits = vmalloc(alloc_size);
++
+ 		if (!bits)
+ 			goto out_nofds;
+ 	}
+@@ -614,7 +622,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+ 
+ out:
+ 	if (bits != stack_fds)
+-		kfree(bits);
++		kvfree(bits);
+ out_nofds:
+ 	return ret;
+ }
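
Two defenses appear in the core_sys_select() hunk: a guard against 6 * size
wrapping, and a kmalloc-with-__GFP_NOWARN then vmalloc fallback released via
kvfree(). A user-space sketch of the overflow guard alone (alloc_six is an
invented name; malloc stands in for the kernel allocators):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Return a buffer of 6*size bytes, or NULL if the
	 * multiplication would wrap around. */
	static void *alloc_six(size_t size)
	{
		if (size > SIZE_MAX / 6)	/* 6 * size would overflow */
			return NULL;
		return malloc(6 * size);
	}

	int main(void)
	{
		void *p = alloc_six(SIZE_MAX);

		printf("%s\n", p ? "allocated" : "refused: would overflow");
		free(p);
		return 0;
	}
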
+diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
+index 2189935075b4..a951fd10aaaa 100644
+--- a/include/linux/cacheinfo.h
++++ b/include/linux/cacheinfo.h
+@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
+ 	struct cacheinfo *info_list;
+ 	unsigned int num_levels;
+ 	unsigned int num_leaves;
++	bool cpu_map_populated;
+ };
+ 
+ /*
+diff --git a/include/linux/ktime.h b/include/linux/ktime.h
+index 2b6a204bd8d4..3ffc69ebe967 100644
+--- a/include/linux/ktime.h
++++ b/include/linux/ktime.h
+@@ -63,6 +63,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
+ #define ktime_add(lhs, rhs) \
+ 		({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
+ 
++/*
++ * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
++ * this means that you must check the result for overflow yourself.
++ */
++#define ktime_add_unsafe(lhs, rhs) \
++		({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
++
+ /*
+  * Add a ktime_t variable and a scalar nanosecond value.
+  * res = kt + nsval:
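
ktime_add_unsafe() works because the cast to u64 turns a possibly
overflowing signed addition into well-defined unsigned wraparound, which
ktime_add_safe() then detects and clamps. A sketch of the same idea,
assuming non-negative operands as is typical for timer expiries (the clamp
value is a stand-in for the demo):

	#include <stdint.h>
	#include <stdio.h>

	#define KTIME_MAX INT64_MAX	/* stand-in clamp value */

	/* The cast to unsigned makes the wraparound well defined. */
	static int64_t add_unsafe(int64_t a, int64_t b)
	{
		return (int64_t)((uint64_t)a + (uint64_t)b);
	}

	/* Clamp a wrapped result, as ktime_add_safe() does. */
	static int64_t add_safe(int64_t a, int64_t b)
	{
		int64_t res = add_unsafe(a, b);

		if (res < 0 || res < a || res < b)
			res = KTIME_MAX;
		return res;
	}

	int main(void)
	{
		printf("%lld\n", (long long)add_safe(INT64_MAX, 1));	/* clamped */
		return 0;
	}
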
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index 04078e8a4803..d6c53fce006b 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -243,6 +243,10 @@ int xt_check_entry_offsets(const void *base, const char *elems,
+ 			   unsigned int target_offset,
+ 			   unsigned int next_offset);
+ 
++unsigned int *xt_alloc_entry_offsets(unsigned int size);
++bool xt_find_jump_offset(const unsigned int *offsets,
++			 unsigned int target, unsigned int size);
++
+ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+ 		   bool inv_proto);
+ int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+@@ -377,16 +381,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
+  * allows us to return 0 for single core systems without forcing
+  * callers to deal with SMP vs. NONSMP issues.
+  */
+-static inline u64 xt_percpu_counter_alloc(void)
++static inline unsigned long xt_percpu_counter_alloc(void)
+ {
+ 	if (nr_cpu_ids > 1) {
+ 		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
+ 						    sizeof(struct xt_counters));
+ 
+ 		if (res == NULL)
+-			return (u64) -ENOMEM;
++			return -ENOMEM;
+ 
+-		return (u64) (__force unsigned long) res;
++		return (__force unsigned long) res;
+ 	}
+ 
+ 	return 0;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index e887c8d6f395..90bea398e5e0 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1313,6 +1313,7 @@ struct sched_dl_entity {
+ 	u64 dl_deadline;	/* relative deadline of each instance	*/
+ 	u64 dl_period;		/* separation of two instances (period) */
+ 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
++	u64 dl_density;		/* dl_runtime / dl_deadline		*/
+ 
+ 	/*
+ 	 * Actual scheduling parameters. Initialized with the values above,
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 318c24612458..2260f92f1492 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+ 	return (struct tcphdr *)skb_transport_header(skb);
+ }
+ 
++static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
++{
++	return th->doff * 4;
++}
++
+ static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
+ {
+-	return tcp_hdr(skb)->doff * 4;
++	return __tcp_hdrlen(tcp_hdr(skb));
+ }
+ 
+ static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
+diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
+index a3d04934aa96..6f8fbcf10dfb 100644
+--- a/include/linux/vermagic.h
++++ b/include/linux/vermagic.h
+@@ -24,16 +24,10 @@
+ #ifndef MODULE_ARCH_VERMAGIC
+ #define MODULE_ARCH_VERMAGIC ""
+ #endif
+-#ifdef RETPOLINE
+-#define MODULE_VERMAGIC_RETPOLINE "retpoline "
+-#else
+-#define MODULE_VERMAGIC_RETPOLINE ""
+-#endif
+ 
+ #define VERMAGIC_STRING 						\
+ 	UTS_RELEASE " "							\
+ 	MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT 			\
+ 	MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS	\
+-	MODULE_ARCH_VERMAGIC						\
+-	MODULE_VERMAGIC_RETPOLINE
++	MODULE_ARCH_VERMAGIC
+ 
+diff --git a/include/net/arp.h b/include/net/arp.h
+index 5e0f891d476c..1b3f86981757 100644
+--- a/include/net/arp.h
++++ b/include/net/arp.h
+@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
+ 
+ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+ {
++	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
++		key = INADDR_ANY;
++
+ 	return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
+ }
+ 
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 7a8066b90289..84f0d0602433 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -281,6 +281,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
+ 			   int flags);
+ int ip6_flowlabel_init(void);
+ void ip6_flowlabel_cleanup(void);
++bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
+ 
+ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
+ {
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 2dcea635ecce..93328c61934a 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -209,6 +209,11 @@ int net_eq(const struct net *net1, const struct net *net2)
+ 	return net1 == net2;
+ }
+ 
++static inline int check_net(const struct net *net)
++{
++	return atomic_read(&net->count) != 0;
++}
++
+ void net_drop_ns(void *);
+ 
+ #else
+@@ -233,6 +238,11 @@ int net_eq(const struct net *net1, const struct net *net2)
+ 	return 1;
+ }
+ 
++static inline int check_net(const struct net *net)
++{
++	return 1;
++}
++
+ #define net_drop_ns NULL
+ #endif
+ 
+diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
+index bc81fb2e1f0e..6f04cb419115 100644
+--- a/include/uapi/linux/eventpoll.h
++++ b/include/uapi/linux/eventpoll.h
+@@ -26,6 +26,19 @@
+ #define EPOLL_CTL_DEL 2
+ #define EPOLL_CTL_MOD 3
+ 
++/* Epoll event masks */
++#define EPOLLIN		0x00000001
++#define EPOLLPRI	0x00000002
++#define EPOLLOUT	0x00000004
++#define EPOLLERR	0x00000008
++#define EPOLLHUP	0x00000010
++#define EPOLLRDNORM	0x00000040
++#define EPOLLRDBAND	0x00000080
++#define EPOLLWRNORM	0x00000100
++#define EPOLLWRBAND	0x00000200
++#define EPOLLMSG	0x00000400
++#define EPOLLRDHUP	0x00002000
++
+ /*
+  * Request the handling of system wakeup events so as to prevent system suspends
+  * from happening while those events are being processed.
+diff --git a/ipc/msg.c b/ipc/msg.c
+index c6521c205cb4..f993f441f852 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -742,7 +742,10 @@ static inline int convert_mode(long *msgtyp, int msgflg)
+ 	if (*msgtyp == 0)
+ 		return SEARCH_ANY;
+ 	if (*msgtyp < 0) {
+-		*msgtyp = -*msgtyp;
++		if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */
++			*msgtyp = LONG_MAX;
++		else
++			*msgtyp = -*msgtyp;
+ 		return SEARCH_LESSEQUAL;
+ 	}
+ 	if (msgflg & MSG_EXCEPT)
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 9d6b3d869592..e6d1173a2046 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2109,6 +2109,7 @@ void __dl_clear_params(struct task_struct *p)
+ 	dl_se->dl_period = 0;
+ 	dl_se->flags = 0;
+ 	dl_se->dl_bw = 0;
++	dl_se->dl_density = 0;
+ 
+ 	dl_se->dl_throttled = 0;
+ 	dl_se->dl_new = 1;
+@@ -3647,6 +3648,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
+ 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
+ 	dl_se->flags = attr->sched_flags;
+ 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
++	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
+ 
+ 	/*
+ 	 * Changing the parameters of a task is 'tricky' and we're not doing
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 6be2afd9bfd6..e12b0a4df891 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -480,13 +480,84 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
+ }
+ 
+ /*
+- * When a -deadline entity is queued back on the runqueue, its runtime and
+- * deadline might need updating.
++ * Revised wakeup rule [1]: For self-suspending tasks, rather than
++ * re-initializing the task's runtime and deadline, the revised wakeup
++ * rule adjusts the task's runtime to prevent the task from overrunning
++ * its density.
+  *
+- * The policy here is that we update the deadline of the entity only if:
+- *  - the current deadline is in the past,
+- *  - using the remaining runtime with the current deadline would make
+- *    the entity exceed its bandwidth.
++ * Reasoning: a task may overrun the density if:
++ *    runtime / (deadline - t) > dl_runtime / dl_deadline
++ *
++ * Therefore, runtime can be adjusted to:
++ *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
++ *
++ * In this way, the runtime is the maximum the task can use without
++ * breaking the density rule.
++ *
++ * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
++ * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
++ */
++static void
++update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
++{
++	u64 laxity = dl_se->deadline - rq_clock(rq);
++
++	/*
++	 * If the task has deadline < period, and the deadline is in the past,
++	 * it should already be throttled before this check.
++	 *
++	 * See update_dl_entity() comments for further details.
++	 */
++	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
++
++	dl_se->runtime = (dl_se->dl_density * laxity) >> 20;
++}
++
++/*
++ * Regarding the deadline, a task with implicit deadline has a relative
++ * deadline == relative period. A task with constrained deadline has a
++ * relative deadline <= relative period.
++ *
++ * We support constrained deadline tasks. However, there are some restrictions
++ * applied only to tasks which do not have an implicit deadline. See
++ * update_dl_entity() to know more about such restrictions.
++ *
++ * dl_is_implicit() returns true if the task has an implicit deadline.
++ */
++static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
++{
++	return dl_se->dl_deadline == dl_se->dl_period;
++}
++
++/*
++ * When a deadline entity is placed in the runqueue, its runtime and deadline
++ * might need to be updated. This is done by a CBS wake up rule. There are two
++ * different rules: 1) the original CBS; and 2) the Revisited CBS.
++ *
++ * When the task is starting a new period, the Original CBS is used. In this
++ * case, the runtime is replenished and a new absolute deadline is set.
++ *
++ * When a task is queued before the beginning of the next period, using
++ * the remaining runtime and deadline could make the entity overflow; see
++ * dl_entity_overflow() for more about runtime overflow. When such a case
++ * is detected, the runtime and deadline need to be updated.
++ *
++ * If the task has an implicit deadline, i.e., deadline == period, the Original
++ * CBS is applied. The runtime is replenished and a new absolute deadline is
++ * set, as in the previous cases.
++ *
++ * However, the Original CBS does not work properly for tasks with
++ * deadline < period, which are said to have a constrained deadline. By
++ * applying the Original CBS, a constrained deadline task would be able to run
++ * runtime/deadline in a period. With deadline < period, the task would
++ * overrun the runtime/period allowed bandwidth, breaking the admission test.
++ *
++ * In order to prevent this misbehavior, the Revisited CBS is used for
++ * constrained deadline tasks when a runtime overflow is detected. In the
++ * Revisited CBS, rather than replenishing & setting a new absolute deadline,
++ * the remaining runtime of the task is reduced to avoid runtime overflow.
++ * Please refer to the comments of the update_dl_revised_wakeup() function
++ * for more about the Revised CBS rule.
+  */
+ static void update_dl_entity(struct sched_dl_entity *dl_se,
+ 			     struct sched_dl_entity *pi_se)
+@@ -505,6 +576,14 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
+ 
+ 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
+ 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
++
++		if (unlikely(!dl_is_implicit(dl_se) &&
++			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
++			     !dl_se->dl_boosted)){
++			update_dl_revised_wakeup(dl_se, rq);
++			return;
++		}
++
+ 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
+ 		dl_se->runtime = pi_se->dl_runtime;
+ 	}
+@@ -991,11 +1070,6 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
+ 	__dequeue_dl_entity(dl_se);
+ }
+ 
+-static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
+-{
+-	return dl_se->dl_deadline < dl_se->dl_period;
+-}
+-
+ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+ {
+ 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
+@@ -1027,7 +1101,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+ 	 * If that is the case, the task will be throttled and
+ 	 * the replenishment timer will be set to the next period.
+ 	 */
+-	if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
++	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
+ 		dl_check_constrained_dl(&p->dl);
+ 
+ 	/*
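
The >> 20 in update_dl_revised_wakeup() is fixed-point arithmetic:
dl_density is set in __setparam_dl() via to_ratio(dl_deadline, dl_runtime),
which (assuming the usual 20-bit shift) computes
(dl_runtime << 20) / dl_deadline. A worked stand-alone example with invented
parameters:

	#include <stdint.h>
	#include <stdio.h>

	#define BW_SHIFT 20	/* the same 20-bit fixed point as to_ratio() */

	int main(void)
	{
		uint64_t dl_runtime  = 10ULL * 1000 * 1000;	/* 10 ms */
		uint64_t dl_deadline = 40ULL * 1000 * 1000;	/* 40 ms */
		uint64_t laxity      = 20ULL * 1000 * 1000;	/* deadline - t */

		/* dl_density = dl_runtime / dl_deadline, in fixed point */
		uint64_t density = (dl_runtime << BW_SHIFT) / dl_deadline;

		/* runtime = (dl_runtime / dl_deadline) * (deadline - t) */
		uint64_t runtime = (density * laxity) >> BW_SHIFT;

		printf("adjusted runtime: %llu ns\n",
		       (unsigned long long)runtime);	/* 5000000 ns = 5 ms */
		return 0;
	}
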
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 17f7bcff1e02..323282e63865 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(__ktime_divns);
+  */
+ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
+ {
+-	ktime_t res = ktime_add(lhs, rhs);
++	ktime_t res = ktime_add_unsafe(lhs, rhs);
+ 
+ 	/*
+ 	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
+@@ -669,7 +669,9 @@ static void hrtimer_reprogram(struct hrtimer *timer,
+ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+ {
+ 	base->expires_next.tv64 = KTIME_MAX;
++	base->hang_detected = 0;
+ 	base->hres_active = 0;
++	base->next_timer = NULL;
+ }
+ 
+ /*
+@@ -1615,6 +1617,7 @@ static void init_hrtimers_cpu(int cpu)
+ 		timerqueue_init_head(&cpu_base->clock_base[i].active);
+ 	}
+ 
++	cpu_base->active_bases = 0;
+ 	cpu_base->cpu = cpu;
+ 	hrtimer_init_hres(cpu_base);
+ }
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 125407144c01..3d7588a2e97c 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -764,8 +764,15 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
+ 	__acquires(timer->base->lock)
+ {
+ 	for (;;) {
+-		u32 tf = timer->flags;
+ 		struct tvec_base *base;
++		u32 tf;
++
++		/*
++		 * We need to use READ_ONCE() here, otherwise the compiler
++		 * might re-read @tf between the check for TIMER_MIGRATING
++		 * and spin_lock().
++		 */
++		tf = READ_ONCE(timer->flags);
+ 
+ 		if (!(tf & TIMER_MIGRATING)) {
+ 			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
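
The READ_ONCE() comment above describes a pure compiler hazard: without it,
the two uses of tf could be compiled as two loads of timer->flags and
observe different values. A user-space sketch of the idiom, reduced to its
GCC-style volatile-cast core (the flag value is invented for the demo):

	#include <stdio.h>

	#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

	#define TIMER_MIGRATING 0x1u	/* flag value invented for the demo */

	static unsigned int timer_flags;	/* concurrently written in real code */

	int main(void)
	{
		/* one load, then only the local copy is used; two direct
		 * reads of timer_flags could legally observe different
		 * values */
		unsigned int tf = READ_ONCE(timer_flags);

		if (!(tf & TIMER_MIGRATING))
			printf("stable snapshot: %#x\n", tf);
		return 0;
	}
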
+diff --git a/mm/cma.c b/mm/cma.c
+index bd0e1412475e..43f4a122e969 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -54,7 +54,7 @@ unsigned long cma_get_size(const struct cma *cma)
+ }
+ 
+ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
+-					     int align_order)
++					     unsigned int align_order)
+ {
+ 	if (align_order <= cma->order_per_bit)
+ 		return 0;
+@@ -62,17 +62,14 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
+ }
+ 
+ /*
+- * Find a PFN aligned to the specified order and return an offset represented in
+- * order_per_bits.
++ * Find the offset of the base PFN from the specified align_order.
++ * The value returned is represented in order_per_bits.
+  */
+ static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
+-					       int align_order)
++					       unsigned int align_order)
+ {
+-	if (align_order <= cma->order_per_bit)
+-		return 0;
+-
+-	return (ALIGN(cma->base_pfn, (1UL << align_order))
+-		- cma->base_pfn) >> cma->order_per_bit;
++	return (cma->base_pfn & ((1UL << align_order) - 1))
++		>> cma->order_per_bit;
+ }
+ 
+ static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index e25b93a4267d..55a9facb8e8d 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5576,7 +5576,7 @@ static void uncharge_list(struct list_head *page_list)
+ 		next = page->lru.next;
+ 
+ 		VM_BUG_ON_PAGE(PageLRU(page), page);
+-		VM_BUG_ON_PAGE(page_count(page), page);
++		VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
+ 
+ 		if (!page->mem_cgroup)
+ 			continue;
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 091fe9b06663..92a647957f91 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -539,6 +539,13 @@ static int delete_from_lru_cache(struct page *p)
+ 		 */
+ 		ClearPageActive(p);
+ 		ClearPageUnevictable(p);
++
++		/*
++		 * Poisoned page might never drop its ref count to 0 so we have
++		 * to uncharge it manually from its memcg.
++		 */
++		mem_cgroup_uncharge(p);
++
+ 		/*
+ 		 * drop the page count elevated by isolate_lru_page()
+ 		 */
+diff --git a/mm/mmap.c b/mm/mmap.c
+index eaa460ddcaf9..cc84b97ca250 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2188,7 +2188,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ 		gap_addr = TASK_SIZE;
+ 
+ 	next = vma->vm_next;
+-	if (next && next->vm_start < gap_addr) {
++	if (next && next->vm_start < gap_addr &&
++			(next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+ 		if (!(next->vm_flags & VM_GROWSUP))
+ 			return -ENOMEM;
+ 		/* Check that both stack segments have the same anon_vma? */
+@@ -2273,7 +2274,8 @@ int expand_downwards(struct vm_area_struct *vma,
+ 	if (gap_addr > address)
+ 		return -ENOMEM;
+ 	prev = vma->vm_prev;
+-	if (prev && prev->vm_end > gap_addr) {
++	if (prev && prev->vm_end > gap_addr &&
++			(prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+ 		if (!(prev->vm_flags & VM_GROWSDOWN))
+ 			return -ENOMEM;
+ 		/* Check that both stack segments have the same anon_vma? */
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 3c70f03d91ec..a4c9cd80c7b6 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2468,9 +2468,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
+ 		if (!area->nr_free)
+ 			continue;
+ 
+-		if (alloc_harder)
+-			return true;
+-
+ 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
+ 			if (!list_empty(&area->free_list[mt]))
+ 				return true;
+@@ -2482,6 +2479,9 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
+ 			return true;
+ 		}
+ #endif
++		if (alloc_harder &&
++			!list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
++			return true;
+ 	}
+ 	return false;
+ }
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 928f58064098..c866e761651a 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -722,13 +722,12 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	if (unlikely(!net_eq(dev_net(dev), &init_net)))
+ 		goto drop;
+ 
+-	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+-		      skb->len != CAN_MTU ||
+-		      cfd->len > CAN_MAX_DLEN,
+-		      "PF_CAN: dropped non conform CAN skbuf: "
+-		      "dev type %d, len %d, datalen %d\n",
+-		      dev->type, skb->len, cfd->len))
++	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
++		     cfd->len > CAN_MAX_DLEN)) {
++		pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
++			     dev->type, skb->len, cfd->len);
+ 		goto drop;
++	}
+ 
+ 	can_receive(skb, dev);
+ 	return NET_RX_SUCCESS;
+@@ -746,13 +745,12 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	if (unlikely(!net_eq(dev_net(dev), &init_net)))
+ 		goto drop;
+ 
+-	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+-		      skb->len != CANFD_MTU ||
+-		      cfd->len > CANFD_MAX_DLEN,
+-		      "PF_CAN: dropped non conform CAN FD skbuf: "
+-		      "dev type %d, len %d, datalen %d\n",
+-		      dev->type, skb->len, cfd->len))
++	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
++		     cfd->len > CANFD_MAX_DLEN)) {
++		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
++			     dev->type, skb->len, cfd->len);
+ 		goto drop;
++	}
+ 
+ 	can_receive(skb, dev);
+ 	return NET_RX_SUCCESS;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3b67c1e5756f..cb58ba15d51e 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2889,10 +2889,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
+ 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+ 
+ 		/* + transport layer */
+-		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+-			hdr_len += tcp_hdrlen(skb);
+-		else
+-			hdr_len += sizeof(struct udphdr);
++		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
++			const struct tcphdr *th;
++			struct tcphdr _tcphdr;
++
++			th = skb_header_pointer(skb, skb_transport_offset(skb),
++						sizeof(_tcphdr), &_tcphdr);
++			if (likely(th))
++				hdr_len += __tcp_hdrlen(th);
++		} else {
++			struct udphdr _udphdr;
++
++			if (skb_header_pointer(skb, skb_transport_offset(skb),
++					       sizeof(_udphdr), &_udphdr))
++				hdr_len += sizeof(struct udphdr);
++		}
+ 
+ 		if (shinfo->gso_type & SKB_GSO_DODGY)
+ 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
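
The qdisc_pkt_len_init() change relies on skb_header_pointer(), which yields
the transport header only if the packet actually contains it. A simplified
user-space model of that bounds check (the real helper avoids the copy when
the data is linear; header_pointer is an invented name):

	#include <stdio.h>
	#include <string.h>

	/* Return buf filled with len bytes at offset, or NULL when the
	 * packet is too short to contain them. */
	static const void *header_pointer(const unsigned char *pkt,
					  size_t pkt_len, size_t offset,
					  size_t len, void *buf)
	{
		if (offset > pkt_len || len > pkt_len - offset)
			return NULL;
		memcpy(buf, pkt + offset, len);
		return buf;
	}

	int main(void)
	{
		unsigned char pkt[40] = { 0 };
		unsigned char th[20];

		printf("%s\n",
		       header_pointer(pkt, sizeof(pkt), 34, sizeof(th), th)
		       ? "header present" : "truncated: fall back gracefully");
		return 0;
	}
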
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index ee9082792530..4d14908afaec 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -492,8 +492,8 @@ ip_proto_again:
+ out_good:
+ 	ret = true;
+ 
+-	key_control->thoff = (u16)nhoff;
+ out:
++	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
+ 	key_basic->n_proto = proto;
+ 	key_basic->ip_proto = ip_proto;
+ 
+@@ -501,7 +501,6 @@ out:
+ 
+ out_bad:
+ 	ret = false;
+-	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
+ 	goto out;
+ }
+ EXPORT_SYMBOL(__skb_flow_dissect);
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index ae92131c4f89..253c86b78ff0 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -496,7 +496,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
+ 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
+ 
+-	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
++	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ 
+ 	if (n->parms->dead) {
+ 		rc = ERR_PTR(-EINVAL);
+@@ -508,7 +508,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ 	     n1 != NULL;
+ 	     n1 = rcu_dereference_protected(n1->next,
+ 			lockdep_is_held(&tbl->lock))) {
+-		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
++		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
+ 			if (want_ref)
+ 				neigh_hold(n1);
+ 			rc = n1;
+diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
+index 5e3a7302f774..7753681195c1 100644
+--- a/net/dccp/ccids/ccid2.c
++++ b/net/dccp/ccids/ccid2.c
+@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
+ 
+ 	ccid2_pr_debug("RTO_EXPIRE\n");
+ 
++	if (sk->sk_state == DCCP_CLOSED)
++		goto out;
++
+ 	/* back-off timer */
+ 	hc->tx_rto <<= 1;
+ 	if (hc->tx_rto > DCCP_RTO_MAX)
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 711b4dfa17c3..cb5eb649ad5f 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
+ 
+ static int arp_constructor(struct neighbour *neigh)
+ {
+-	__be32 addr = *(__be32 *)neigh->primary_key;
++	__be32 addr;
+ 	struct net_device *dev = neigh->dev;
+ 	struct in_device *in_dev;
+ 	struct neigh_parms *parms;
++	u32 inaddr_any = INADDR_ANY;
+ 
++	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
++		memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
++
++	addr = *(__be32 *)neigh->primary_key;
+ 	rcu_read_lock();
+ 	in_dev = __in_dev_get_rcu(dev);
+ 	if (!in_dev) {
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index b60106d34346..8212ed80da48 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -338,7 +338,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
+ 		return htonl(INADDR_ANY);
+ 
+ 	for_ifa(in_dev) {
+-		if (inet_ifa_match(fl4->saddr, ifa))
++		if (fl4->saddr == ifa->ifa_local)
+ 			return fl4->saddr;
+ 	} endfor_ifa(in_dev);
+ 
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 6e3e0e8b1ce3..4cfcc22f7430 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -367,23 +367,12 @@ static inline bool unconditional(const struct arpt_entry *e)
+ 	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
+ }
+ 
+-static bool find_jump_target(const struct xt_table_info *t,
+-			     const struct arpt_entry *target)
+-{
+-	struct arpt_entry *iter;
+-
+-	xt_entry_foreach(iter, t->entries, t->size) {
+-		 if (iter == target)
+-			return true;
+-	}
+-	return false;
+-}
+-
+ /* Figures out from what hook each rule can be called: returns 0 if
+  * there are loops.  Puts hook bitmask in comefrom.
+  */
+ static int mark_source_chains(const struct xt_table_info *newinfo,
+-			      unsigned int valid_hooks, void *entry0)
++			      unsigned int valid_hooks, void *entry0,
++			      unsigned int *offsets)
+ {
+ 	unsigned int hook;
+ 
+@@ -472,10 +461,11 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					if (!xt_find_jump_offset(offsets, newpos,
++								 newinfo->number))
++						return 0;
+ 					e = (struct arpt_entry *)
+ 						(entry0 + newpos);
+-					if (!find_jump_target(newinfo, e))
+-						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
+@@ -521,11 +511,13 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
+ {
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
++	unsigned long pcnt;
+ 	int ret;
+ 
+-	e->counters.pcnt = xt_percpu_counter_alloc();
+-	if (IS_ERR_VALUE(e->counters.pcnt))
++	pcnt = xt_percpu_counter_alloc();
++	if (IS_ERR_VALUE(pcnt))
+ 		return -ENOMEM;
++	e->counters.pcnt = pcnt;
+ 
+ 	t = arpt_get_target(e);
+ 	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
+@@ -642,6 +634,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 			   const struct arpt_replace *repl)
+ {
+ 	struct arpt_entry *iter;
++	unsigned int *offsets;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+@@ -655,6 +648,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	duprintf("translate_table: size %u\n", newinfo->size);
++	offsets = xt_alloc_entry_offsets(newinfo->number);
++	if (!offsets)
++		return -ENOMEM;
+ 	i = 0;
+ 
+ 	/* Walk through entries, checking offsets. */
+@@ -665,7 +661,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 						 repl->underflow,
+ 						 repl->valid_hooks);
+ 		if (ret != 0)
+-			break;
++			goto out_free;
++		if (i < repl->num_entries)
++			offsets[i] = (void *)iter - entry0;
+ 		++i;
+ 		if (strcmp(arpt_get_target(iter)->u.user.name,
+ 		    XT_ERROR_TARGET) == 0)
+@@ -673,12 +671,13 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
+ 	if (ret != 0)
+-		return ret;
++		goto out_free;
+ 
++	ret = -EINVAL;
+ 	if (i != repl->num_entries) {
+ 		duprintf("translate_table: %u not %u entries\n",
+ 			 i, repl->num_entries);
+-		return -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	/* Check hooks all assigned */
+@@ -689,17 +688,20 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid hook entry %u %u\n",
+ 				 i, repl->hook_entry[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid underflow %u %u\n",
+ 				 i, repl->underflow[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+-		return -ELOOP;
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
++		ret = -ELOOP;
++		goto out_free;
++	}
++	kvfree(offsets);
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -719,6 +721,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 		return ret;
+ 	}
+ 
++	return ret;
++ out_free:
++	kvfree(offsets);
+ 	return ret;
+ }
+ 
+@@ -1336,8 +1341,8 @@ static int translate_compat_table(struct xt_table_info **pinfo,
+ 
+ 	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+-		newinfo->hook_entry[i] = info->hook_entry[i];
+-		newinfo->underflow[i] = info->underflow[i];
++		newinfo->hook_entry[i] = compatr->hook_entry[i];
++		newinfo->underflow[i] = compatr->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries;
+ 	pos = entry1;
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index a399c5419622..a98173d1ea97 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -443,23 +443,12 @@ ipt_do_table(struct sk_buff *skb,
+ #endif
+ }
+ 
+-static bool find_jump_target(const struct xt_table_info *t,
+-			     const struct ipt_entry *target)
+-{
+-	struct ipt_entry *iter;
+-
+-	xt_entry_foreach(iter, t->entries, t->size) {
+-		 if (iter == target)
+-			return true;
+-	}
+-	return false;
+-}
+-
+ /* Figures out from what hook each rule can be called: returns 0 if
+    there are loops.  Puts hook bitmask in comefrom. */
+ static int
+ mark_source_chains(const struct xt_table_info *newinfo,
+-		   unsigned int valid_hooks, void *entry0)
++		   unsigned int valid_hooks, void *entry0,
++		   unsigned int *offsets)
+ {
+ 	unsigned int hook;
+ 
+@@ -552,10 +541,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					if (!xt_find_jump_offset(offsets, newpos,
++								 newinfo->number))
++						return 0;
+ 					e = (struct ipt_entry *)
+ 						(entry0 + newpos);
+-					if (!find_jump_target(newinfo, e))
+-						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
+@@ -663,10 +653,12 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
+ 	unsigned int j;
+ 	struct xt_mtchk_param mtpar;
+ 	struct xt_entry_match *ematch;
++	unsigned long pcnt;
+ 
+-	e->counters.pcnt = xt_percpu_counter_alloc();
+-	if (IS_ERR_VALUE(e->counters.pcnt))
++	pcnt = xt_percpu_counter_alloc();
++	if (IS_ERR_VALUE(pcnt))
+ 		return -ENOMEM;
++	e->counters.pcnt = pcnt;
+ 
+ 	j = 0;
+ 	mtpar.net	= net;
+@@ -811,6 +803,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 		const struct ipt_replace *repl)
+ {
+ 	struct ipt_entry *iter;
++	unsigned int *offsets;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+@@ -824,6 +817,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	duprintf("translate_table: size %u\n", newinfo->size);
++	offsets = xt_alloc_entry_offsets(newinfo->number);
++	if (!offsets)
++		return -ENOMEM;
+ 	i = 0;
+ 	/* Walk through entries, checking offsets. */
+ 	xt_entry_foreach(iter, entry0, newinfo->size) {
+@@ -833,17 +829,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 						 repl->underflow,
+ 						 repl->valid_hooks);
+ 		if (ret != 0)
+-			return ret;
++			goto out_free;
++		if (i < repl->num_entries)
++			offsets[i] = (void *)iter - entry0;
+ 		++i;
+ 		if (strcmp(ipt_get_target(iter)->u.user.name,
+ 		    XT_ERROR_TARGET) == 0)
+ 			++newinfo->stacksize;
+ 	}
+ 
++	ret = -EINVAL;
+ 	if (i != repl->num_entries) {
+ 		duprintf("translate_table: %u not %u entries\n",
+ 			 i, repl->num_entries);
+-		return -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	/* Check hooks all assigned */
+@@ -854,17 +853,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid hook entry %u %u\n",
+ 				 i, repl->hook_entry[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid underflow %u %u\n",
+ 				 i, repl->underflow[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+-		return -ELOOP;
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
++		ret = -ELOOP;
++		goto out_free;
++	}
++	kvfree(offsets);
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -884,6 +886,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 		return ret;
+ 	}
+ 
++	return ret;
++ out_free:
++	kvfree(offsets);
+ 	return ret;
+ }
+ 
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index c747b2d9eb77..d4acf38b60fd 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -124,6 +124,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
+ 	/* ip_route_me_harder expects skb->dst to be set */
+ 	skb_dst_set_noref(nskb, skb_dst(oldskb));
+ 
++	nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
++
+ 	skb_reserve(nskb, LL_MAX_HEADER);
+ 	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+ 				   ip4_dst_hoplimit(skb_dst(nskb)));
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 5597120c8ffd..37e8966a457b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2176,6 +2176,9 @@ adjudge_to_death:
+ 			tcp_send_active_reset(sk, GFP_ATOMIC);
+ 			NET_INC_STATS_BH(sock_net(sk),
+ 					LINUX_MIB_TCPABORTONMEMORY);
++		} else if (!check_net(sock_net(sk))) {
++			/* Not possible to send reset; just close */
++			tcp_set_state(sk, TCP_CLOSE);
+ 		}
+ 	}
+ 
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 1ec12a4f327e..35f638cfc675 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -46,11 +46,19 @@ static void tcp_write_err(struct sock *sk)
+  * to prevent DoS attacks. It is called when a retransmission timeout
+  * or zero probe timeout occurs on orphaned socket.
+  *
++ * Also close if our net namespace is exiting; in that case there is no
++ * hope of ever communicating again since all netns interfaces are already
++ * down (or about to be down), and we need to release our dst references,
++ * which have been moved to the netns loopback interface, so the namespace
++ * can finish exiting.  This condition is only possible if we are a kernel
++ * socket, as those do not hold references to the namespace.
++ *
+  * Criteria is still not confirmed experimentally and may change.
+  * We kill the socket, if:
+  * 1. If number of orphaned sockets exceeds an administratively configured
+  *    limit.
+  * 2. If we have strong memory pressure.
++ * 3. If our net namespace is exiting.
+  */
+ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
+ {
+@@ -79,6 +87,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
+ 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
+ 		return 1;
+ 	}
++
++	if (!check_net(sock_net(sk))) {
++		/* Not possible to send reset; just close */
++		tcp_done(sk);
++		return 1;
++	}
++
+ 	return 0;
+ }
+ 
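
The comment block above lists three independent kill criteria. A compressed
sketch of the decision, with all names invented (the real
tcp_out_of_resources() also considers do_reset, sends the reset, and bumps
MIB counters):

	#include <stdbool.h>
	#include <stdio.h>

	static bool out_of_resources(int orphans, int orphan_limit,
				     bool memory_pressure, bool netns_alive)
	{
		if (orphans > orphan_limit)	/* criterion 1 */
			return true;
		if (memory_pressure)		/* criterion 2 */
			return true;
		if (!netns_alive)		/* criterion 3: check_net() == 0 */
			return true;
		return false;
	}

	int main(void)
	{
		printf("%d\n", out_of_resources(10, 100, false, false)); /* 1 */
		return 0;
	}
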
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index b809958f7388..3ef81c387923 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -148,7 +148,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+ }
+ 
+-static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
++bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+ {
+ 	if (!np->autoflowlabel_set)
+ 		return ip6_default_np_autolabel(net);
+@@ -1246,14 +1246,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
+ 	v6_cork->tclass = tclass;
+ 	if (rt->dst.flags & DST_XFRM_TUNNEL)
+ 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+-		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
++		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
+ 	else
+ 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+-		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
++		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
+ 	if (np->frag_size < mtu) {
+ 		if (np->frag_size)
+ 			mtu = np->frag_size;
+ 	}
++	if (mtu < IPV6_MIN_MTU)
++		return -EINVAL;
+ 	cork->base.fragsize = mtu;
+ 	if (dst_allfrag(rt->dst.path))
+ 		cork->base.flags |= IPCORK_ALLFRAG;
+@@ -1783,6 +1785,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
+ 	cork.base.flags = 0;
+ 	cork.base.addr = 0;
+ 	cork.base.opt = NULL;
++	cork.base.dst = NULL;
+ 	v6_cork.opt = NULL;
+ 	err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
+ 	if (err) {
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 435e26210587..9011176c8387 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -1313,7 +1313,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case IPV6_AUTOFLOWLABEL:
+-		val = np->autoflowlabel;
++		val = ip6_autoflowlabel(sock_net(sk), np);
+ 		break;
+ 
+ 	default:
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 22f39e00bef3..bb1b5453a7a1 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -455,23 +455,12 @@ ip6t_do_table(struct sk_buff *skb,
+ #endif
+ }
+ 
+-static bool find_jump_target(const struct xt_table_info *t,
+-			     const struct ip6t_entry *target)
+-{
+-	struct ip6t_entry *iter;
+-
+-	xt_entry_foreach(iter, t->entries, t->size) {
+-		 if (iter == target)
+-			return true;
+-	}
+-	return false;
+-}
+-
+ /* Figures out from what hook each rule can be called: returns 0 if
+    there are loops.  Puts hook bitmask in comefrom. */
+ static int
+ mark_source_chains(const struct xt_table_info *newinfo,
+-		   unsigned int valid_hooks, void *entry0)
++		   unsigned int valid_hooks, void *entry0,
++		   unsigned int *offsets)
+ {
+ 	unsigned int hook;
+ 
+@@ -564,10 +553,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					if (!xt_find_jump_offset(offsets, newpos,
++								 newinfo->number))
++						return 0;
+ 					e = (struct ip6t_entry *)
+ 						(entry0 + newpos);
+-					if (!find_jump_target(newinfo, e))
+-						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
+@@ -676,10 +666,12 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
+ 	unsigned int j;
+ 	struct xt_mtchk_param mtpar;
+ 	struct xt_entry_match *ematch;
++	unsigned long pcnt;
+ 
+-	e->counters.pcnt = xt_percpu_counter_alloc();
+-	if (IS_ERR_VALUE(e->counters.pcnt))
++	pcnt = xt_percpu_counter_alloc();
++	if (IS_ERR_VALUE(pcnt))
+ 		return -ENOMEM;
++	e->counters.pcnt = pcnt;
+ 
+ 	j = 0;
+ 	mtpar.net	= net;
+@@ -823,6 +815,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 		const struct ip6t_replace *repl)
+ {
+ 	struct ip6t_entry *iter;
++	unsigned int *offsets;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+@@ -836,6 +829,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	duprintf("translate_table: size %u\n", newinfo->size);
++	offsets = xt_alloc_entry_offsets(newinfo->number);
++	if (!offsets)
++		return -ENOMEM;
+ 	i = 0;
+ 	/* Walk through entries, checking offsets. */
+ 	xt_entry_foreach(iter, entry0, newinfo->size) {
+@@ -845,17 +841,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 						 repl->underflow,
+ 						 repl->valid_hooks);
+ 		if (ret != 0)
+-			return ret;
++			goto out_free;
++		if (i < repl->num_entries)
++			offsets[i] = (void *)iter - entry0;
+ 		++i;
+ 		if (strcmp(ip6t_get_target(iter)->u.user.name,
+ 		    XT_ERROR_TARGET) == 0)
+ 			++newinfo->stacksize;
+ 	}
+ 
++	ret = -EINVAL;
+ 	if (i != repl->num_entries) {
+ 		duprintf("translate_table: %u not %u entries\n",
+ 			 i, repl->num_entries);
+-		return -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	/* Check hooks all assigned */
+@@ -866,17 +865,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid hook entry %u %u\n",
+ 				 i, repl->hook_entry[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid underflow %u %u\n",
+ 				 i, repl->underflow[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+-		return -ELOOP;
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
++		ret = -ELOOP;
++		goto out_free;
++	}
++	kvfree(offsets);
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -896,6 +898,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 		return ret;
+ 	}
+ 
++	return ret;
++ out_free:
++	kvfree(offsets);
+ 	return ret;
+ }
+ 
+diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
+index 6989c70ae29f..4a84b5ad9ecb 100644
+--- a/net/ipv6/netfilter/nf_dup_ipv6.c
++++ b/net/ipv6/netfilter/nf_dup_ipv6.c
+@@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
+ 	fl6.daddr = *gw;
+ 	fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
+ 			(iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
++	fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
+ 	dst = ip6_route_output(net, NULL, &fl6);
+ 	if (dst->error) {
+ 		dst_release(dst);
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index e0f922b777e3..7117e5bef412 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+ 	fl6.daddr = oip6h->saddr;
+ 	fl6.fl6_sport = otcph->dest;
+ 	fl6.fl6_dport = otcph->source;
++	fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
+ 	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+ 	dst = ip6_route_output(net, NULL, &fl6);
+ 	if (dst == NULL || dst->error) {
+@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+ 
+ 	skb_dst_set(nskb, dst);
+ 
++	nskb->mark = fl6.flowi6_mark;
++
+ 	skb_reserve(nskb, hh_len + dst->header_len);
+ 	ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+ 				    ip6_dst_hoplimit(dst));
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 86a3c6f0c871..5f747089024f 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -719,6 +719,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ 	 * least once for the stats anyway.
+ 	 */
+ 	rcu_read_lock_bh();
++ begin:
+ 	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
+ 		ct = nf_ct_tuplehash_to_ctrack(h);
+ 		if (ct != ignored_conntrack &&
+@@ -730,6 +731,12 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ 		}
+ 		NF_CT_STAT_INC(net, searched);
+ 	}
++
++	if (get_nulls_value(n) != hash) {
++		NF_CT_STAT_INC(net, search_restart);
++		goto begin;
++	}
++
+ 	rcu_read_unlock_bh();
+ 
+ 	return 0;
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index 7f16d19d6198..a91f8bd51d05 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -560,7 +560,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
+ 	helper = rcu_dereference(nfct_help(expect->master)->helper);
+ 	if (helper) {
+ 		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
+-		if (helper->expect_policy[expect->class].name)
++		if (helper->expect_policy[expect->class].name[0])
+ 			seq_printf(s, "/%s",
+ 				   helper->expect_policy[expect->class].name);
+ 	}
+diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
+index 885b4aba3695..1665c2159e4b 100644
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -1434,9 +1434,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
+ 		handler = &sip_handlers[i];
+ 		if (handler->request == NULL)
+ 			continue;
+-		if (*datalen < handler->len ||
++		if (*datalen < handler->len + 2 ||
+ 		    strncasecmp(*dptr, handler->method, handler->len))
+ 			continue;
++		if ((*dptr)[handler->len] != ' ' ||
++		    !isalpha((*dptr)[handler->len+1]))
++			continue;
+ 
+ 		if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
+ 				      &matchoff, &matchlen) <= 0) {
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 8d34a488efc0..ac143ae4f7b6 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -17,6 +17,7 @@
+ #include <linux/types.h>
+ #include <linux/list.h>
+ #include <linux/errno.h>
++#include <linux/capability.h>
+ #include <net/netlink.h>
+ #include <net/sock.h>
+ 
+@@ -392,6 +393,9 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
+ 	struct nfnl_cthelper *nlcth;
+ 	int ret = 0;
+ 
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++
+ 	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
+ 		return -EINVAL;
+ 
+@@ -595,6 +599,9 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
+ 	struct nfnl_cthelper *nlcth;
+ 	bool tuple_set = false;
+ 
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ 		struct netlink_dump_control c = {
+ 			.dump = nfnl_cthelper_dump_table,
+@@ -661,6 +668,9 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
+ 	struct nfnl_cthelper *nlcth, *n;
+ 	int j = 0, ret;
+ 
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++
+ 	if (tb[NFCTH_NAME])
+ 		helper_name = nla_data(tb[NFCTH_NAME]);
+ 
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index f6837f9b6d6c..c14d2e8eaec3 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -1053,10 +1053,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
+ 	struct net *net = sock_net(ctnl);
+ 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
+ 
+-	queue = instance_lookup(q, queue_num);
+-	if (!queue)
+-		queue = verdict_instance_lookup(q, queue_num,
+-						NETLINK_CB(skb).portid);
++	queue = verdict_instance_lookup(q, queue_num,
++					NETLINK_CB(skb).portid);
+ 	if (IS_ERR(queue))
+ 		return PTR_ERR(queue);
+ 
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index 2fc6ca9d1286..7b42b0ad3f9b 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -701,6 +701,56 @@ int xt_check_entry_offsets(const void *base,
+ }
+ EXPORT_SYMBOL(xt_check_entry_offsets);
+ 
++/**
++ * xt_alloc_entry_offsets - allocate array to store rule head offsets
++ *
++ * @size: number of entries
++ *
++ * Return: NULL or kmalloc'd or vmalloc'd array
++ */
++unsigned int *xt_alloc_entry_offsets(unsigned int size)
++{
++	unsigned int *off;
++
++	off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);
++
++	if (off)
++		return off;
++
++	if (size < (SIZE_MAX / sizeof(unsigned int)))
++		off = vmalloc(size * sizeof(unsigned int));
++
++	return off;
++}
++EXPORT_SYMBOL(xt_alloc_entry_offsets);
++
++/**
++ * xt_find_jump_offset - check if target is a valid jump offset
++ *
++ * @offsets: array containing all valid rule start offsets of a rule blob
++ * @target: the jump target to search for
++ * @size: entries in @offset
++ */
++bool xt_find_jump_offset(const unsigned int *offsets,
++			 unsigned int target, unsigned int size)
++{
++	int m, low = 0, hi = size;
++
++	while (hi > low) {
++		m = (low + hi) / 2u;
++
++		if (offsets[m] > target)
++			hi = m;
++		else if (offsets[m] < target)
++			low = m + 1;
++		else
++			return true;
++	}
++
++	return false;
++}
++EXPORT_SYMBOL(xt_find_jump_offset);
++
+ int xt_check_target(struct xt_tgchk_param *par,
+ 		    unsigned int size, u_int8_t proto, bool inv_proto)
+ {
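
xt_find_jump_offset() is a plain binary search; it is sound because
translate_table() records offsets in the order entries appear in the blob,
so the array is ascending. A user-space rendition (sample offsets are
invented):

	#include <stdbool.h>
	#include <stdio.h>

	static bool find_jump_offset(const unsigned int *offsets,
				     unsigned int target, unsigned int size)
	{
		unsigned int low = 0, hi = size;

		while (hi > low) {
			unsigned int m = (low + hi) / 2;

			if (offsets[m] > target)
				hi = m;
			else if (offsets[m] < target)
				low = m + 1;
			else
				return true;
		}
		return false;
	}

	int main(void)
	{
		/* rule head offsets, ascending by construction */
		unsigned int offs[] = { 0, 112, 224, 360 };

		printf("%d %d\n", find_jump_offset(offs, 224, 4),
		       find_jump_offset(offs, 100, 4));	/* 1 0 */
		return 0;
	}
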
+diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
+index df8801e02a32..7eae0d0af89a 100644
+--- a/net/netfilter/xt_osf.c
++++ b/net/netfilter/xt_osf.c
+@@ -19,6 +19,7 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ 
++#include <linux/capability.h>
+ #include <linux/if.h>
+ #include <linux/inetdevice.h>
+ #include <linux/ip.h>
+@@ -69,6 +70,9 @@ static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb,
+ 	struct xt_osf_finger *kf = NULL, *sf;
+ 	int err = 0;
+ 
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++
+ 	if (!osf_attrs[OSF_ATTR_FINGER])
+ 		return -EINVAL;
+ 
+@@ -112,6 +116,9 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
+ 	struct xt_osf_finger *sf;
+ 	int err = -ENOENT;
+ 
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++
+ 	if (!osf_attrs[OSF_ATTR_FINGER])
+ 		return -EINVAL;
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index a870d27ca778..e9851198a850 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -83,7 +83,7 @@
+ static int sctp_writeable(struct sock *sk);
+ static void sctp_wfree(struct sk_buff *skb);
+ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+-				size_t msg_len, struct sock **orig_sk);
++				size_t msg_len);
+ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
+ static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
+ static int sctp_wait_for_accept(struct sock *sk, long timeo);
+@@ -332,16 +332,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
+ 	if (len < sizeof (struct sockaddr))
+ 		return NULL;
+ 
++	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
++		return NULL;
++
+ 	/* V4 mapped address are really of AF_INET family */
+ 	if (addr->sa.sa_family == AF_INET6 &&
+-	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
+-		if (!opt->pf->af_supported(AF_INET, opt))
+-			return NULL;
+-	} else {
+-		/* Does this PF support this AF? */
+-		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
+-			return NULL;
+-	}
++	    ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
++	    !opt->pf->af_supported(AF_INET, opt))
++		return NULL;
+ 
+ 	/* If we get this far, af is valid. */
+ 	af = sctp_get_af_specific(addr->sa.sa_family);
+@@ -1954,7 +1952,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
+ 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ 	if (!sctp_wspace(asoc)) {
+ 		/* sk can be changed by peel off when waiting for buf. */
+-		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
++		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
+ 		if (err) {
+ 			if (err == -ESRCH) {
+ 				/* asoc is already dead. */
+@@ -6976,12 +6974,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
+ 
+ /* Helper function to wait for space in the sndbuf.  */
+ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+-				size_t msg_len, struct sock **orig_sk)
++				size_t msg_len)
+ {
+ 	struct sock *sk = asoc->base.sk;
+-	int err = 0;
+ 	long current_timeo = *timeo_p;
+ 	DEFINE_WAIT(wait);
++	int err = 0;
+ 
+ 	pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
+ 		 *timeo_p, msg_len);
+@@ -7010,17 +7008,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 		release_sock(sk);
+ 		current_timeo = schedule_timeout(current_timeo);
+ 		lock_sock(sk);
+-		if (sk != asoc->base.sk) {
+-			release_sock(sk);
+-			sk = asoc->base.sk;
+-			lock_sock(sk);
+-		}
++		if (sk != asoc->base.sk)
++			goto do_error;
+ 
+ 		*timeo_p = current_timeo;
+ 	}
+ 
+ out:
+-	*orig_sk = sk;
+ 	finish_wait(&asoc->wait, &wait);
+ 
+ 	/* Release the association's refcnt.  */
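
The sctp_wait_for_sndbuf() rework closes a peel-off race: previously, if a
peel-off moved the association to a new socket while the sender slept, the
waiter silently dropped the old lock and took the new socket's lock; now it
detects the ownership change after re-locking and bails out through the
function's error path, so the send fails instead of proceeding on a socket the
caller never locked. A hypothetical userspace analogue of the
"sleep, re-lock, re-check owner" pattern, with a condition variable standing
in for the socket lock:

  #include <errno.h>
  #include <pthread.h>

  struct assoc {
          void *owner_sk;         /* socket currently owning the association */
  };

  /*
   * Hypothetical analogue; call with *lk held.  After every sleep the
   * owner is re-checked, and the wait fails rather than migrating to
   * whatever socket now owns the association.
   */
  static int wait_for_space(struct assoc *asoc, void *my_sk,
                            int (*has_space)(const struct assoc *),
                            pthread_cond_t *cond, pthread_mutex_t *lk)
  {
          while (!has_space(asoc)) {
                  pthread_cond_wait(cond, lk);    /* drops and retakes *lk */
                  if (asoc->owner_sk != my_sk)
                          return -EPIPE;          /* peeled off: fail the send */
          }
          return 0;
  }
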
+diff --git a/tools/usb/usbip/libsrc/usbip_common.c b/tools/usb/usbip/libsrc/usbip_common.c
+index ac73710473de..8000445ff884 100644
+--- a/tools/usb/usbip/libsrc/usbip_common.c
++++ b/tools/usb/usbip/libsrc/usbip_common.c
+@@ -215,9 +215,16 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
+ 		       struct usbip_usb_interface *uinf)
+ {
+ 	char busid[SYSFS_BUS_ID_SIZE];
++	int size;
+ 	struct udev_device *sif;
+ 
+-	sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i);
++	size = snprintf(busid, sizeof(busid), "%s:%d.%d",
++			udev->busid, udev->bConfigurationValue, i);
++	if (size < 0 || (unsigned int)size >= sizeof(busid)) {
++		err("busid length %i >= %lu or < 0", size,
++		    (unsigned long)sizeof(busid));
++		return -1;
++	}
+ 
+ 	sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
+ 	if (!sif) {
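
The usbip changes in this patch all follow one pattern: replace an unchecked
sprintf()/snprintf() into a fixed buffer with a call whose result is tested
for both error and truncation. Since the test repeats verbatim for each
buffer, it could be wrapped once; snprintf_checked() below is a hypothetical
helper, not part of the patch.

  #include <stdarg.h>
  #include <stdio.h>

  /* Returns 0 on success, -1 if formatting failed or the output was
   * truncated -- the same condition each hunk above checks inline.
   * Usage, e.g.:
   *   if (snprintf_checked(busid, sizeof(busid), "%s:%d.%d",
   *                        udev->busid, udev->bConfigurationValue, i) < 0)
   *           return -1;
   */
  static int snprintf_checked(char *buf, size_t len, const char *fmt, ...)
  {
          va_list ap;
          int n;

          va_start(ap, fmt);
          n = vsnprintf(buf, len, fmt, ap);
          va_end(ap);

          return (n < 0 || (size_t)n >= len) ? -1 : 0;
  }
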
+diff --git a/tools/usb/usbip/libsrc/usbip_host_driver.c b/tools/usb/usbip/libsrc/usbip_host_driver.c
+index bef08d5c44e8..071b9ce99420 100644
+--- a/tools/usb/usbip/libsrc/usbip_host_driver.c
++++ b/tools/usb/usbip/libsrc/usbip_host_driver.c
+@@ -39,13 +39,19 @@ struct udev *udev_context;
+ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
+ {
+ 	char status_attr_path[SYSFS_PATH_MAX];
++	int size;
+ 	int fd;
+ 	int length;
+ 	char status;
+ 	int value = 0;
+ 
+-	snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
+-		 udev->path);
++	size = snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
++			udev->path);
++	if (size < 0 || (unsigned int)size >= sizeof(status_attr_path)) {
++		err("usbip_status path length %i >= %lu or < 0", size,
++		    (unsigned long)sizeof(status_attr_path));
++		return -1;
++	}
+ 
+ 	fd = open(status_attr_path, O_RDONLY);
+ 	if (fd < 0) {
+@@ -225,6 +231,7 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
+ {
+ 	char attr_name[] = "usbip_sockfd";
+ 	char sockfd_attr_path[SYSFS_PATH_MAX];
++	int size;
+ 	char sockfd_buff[30];
+ 	int ret;
+ 
+@@ -244,10 +251,20 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
+ 	}
+ 
+ 	/* only the first interface is true */
+-	snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
+-		 edev->udev.path, attr_name);
++	size = snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
++			edev->udev.path, attr_name);
++	if (size < 0 || (unsigned int)size >= sizeof(sockfd_attr_path)) {
++		err("exported device path length %i >= %lu or < 0", size,
++		    (unsigned long)sizeof(sockfd_attr_path));
++		return -1;
++	}
+ 
+-	snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
++	size = snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
++	if (size < 0 || (unsigned int)size >= sizeof(sockfd_buff)) {
++		err("socket length %i >= %lu or < 0", size,
++		    (unsigned long)sizeof(sockfd_buff));
++		return -1;
++	}
+ 
+ 	ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff,
+ 				    strlen(sockfd_buff));
+diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
+index ad9204773533..1274f326242c 100644
+--- a/tools/usb/usbip/libsrc/vhci_driver.c
++++ b/tools/usb/usbip/libsrc/vhci_driver.c
+@@ -55,12 +55,12 @@ static int parse_status(const char *value)
+ 
+ 	while (*c != '\0') {
+ 		int port, status, speed, devid;
+-		unsigned long socket;
++		int sockfd;
+ 		char lbusid[SYSFS_BUS_ID_SIZE];
+ 
+-		ret = sscanf(c, "%d %d %d %x %lx %31s\n",
++		ret = sscanf(c, "%d %d %d %x %u %31s\n",
+ 				&port, &status, &speed,
+-				&devid, &socket, lbusid);
++				&devid, &sockfd, lbusid);
+ 
+ 		if (ret < 5) {
+ 			dbg("sscanf failed: %d", ret);
+@@ -69,7 +69,7 @@ static int parse_status(const char *value)
+ 
+ 		dbg("port %d status %d speed %d devid %x",
+ 				port, status, speed, devid);
+-		dbg("socket %lx lbusid %s", socket, lbusid);
++		dbg("sockfd %u lbusid %s", sockfd, lbusid);
+ 
+ 
+ 		/* if a device is connected, look at it */
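
The vhci_driver.c hunk fixes a type mismatch rather than a truncation: the
kernel writes the socket descriptor into the status file as a plain unsigned
integer, but the tool was reading it with %lx into an unsigned long, and a
conversion specifier that does not match its argument makes sscanf()
undefined. A minimal illustration with a hypothetical status line:

  #include <stdio.h>

  int main(void)
  {
          unsigned int sockfd;
          /* hypothetical "port status speed devid sockfd lbusid" line */
          const char *line = "8 4 3 10002 6 1-1.4";

          /* "%u" now matches both what the kernel printed and the
           * type of the variable it is stored into */
          if (sscanf(line, "%*d %*d %*d %*x %u", &sockfd) == 1)
                  printf("sockfd %u\n", sockfd);
          return 0;
  }
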
+diff --git a/tools/usb/usbip/src/usbip.c b/tools/usb/usbip/src/usbip.c
+index d7599d943529..73d8eee8130b 100644
+--- a/tools/usb/usbip/src/usbip.c
++++ b/tools/usb/usbip/src/usbip.c
+@@ -176,6 +176,8 @@ int main(int argc, char *argv[])
+ 			break;
+ 		case '?':
+ 			printf("usbip: invalid option\n");
++			/* Terminate after printing error */
++			/* FALLTHRU */
+ 		default:
+ 			usbip_usage();
+ 			goto out;
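
The last hunk adds only a comment, but it records intent: an invalid option
prints its own diagnostic and then deliberately falls into the default case so
that usage is printed too. An explicit marker like this keeps reviewers and
fallthrough checkers from treating the missing break as an accident. A
reduced, hypothetical example (the usage text here is made up):

  #include <stdio.h>

  static void handle_opt(int opt)
  {
          switch (opt) {
          case '?':
                  printf("usbip: invalid option\n");
                  /* FALLTHRU: an unknown option also gets the usage text */
          default:
                  printf("usage: usbip <command> <args>\n");
          }
  }

  int main(void)
  {
          handle_opt('?');        /* prints both lines */
          return 0;
  }
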

