public inbox for gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
From: Mike Pagano @ 2018-04-29 11:48 UTC
  To: gentoo-commits

commit:     8df3794a0a9edcd8fce5a3d06d0fc8d1908dbd09
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 29 11:47:36 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Apr 29 11:47:36 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8df3794a

Linux patch 4.4.130

 0000_README              |    4 +
 1129_linux-4.4.130.patch | 2794 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2798 insertions(+)

diff --git a/0000_README b/0000_README
index c2625c8..33599fb 100644
--- a/0000_README
+++ b/0000_README
@@ -559,6 +559,10 @@ Patch:  1128_linux-4.4.129.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.129
 
+Patch:  1129_linux-4.4.130.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.130
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1129_linux-4.4.130.patch b/1129_linux-4.4.130.patch
new file mode 100644
index 0000000..e29e411
--- /dev/null
+++ b/1129_linux-4.4.130.patch
@@ -0,0 +1,2794 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index f53ef1ac3122..4df6bd7d01ed 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2402,6 +2402,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 
+ 	noalign		[KNL,ARM]
+ 
++	noaltinstr	[S390] Disables alternative instructions patching
++			(CPU alternatives feature).
++
+ 	noapic		[SMP,APIC] Tells the kernel to not make use of any
+ 			IOAPICs that may be present in the system.
+ 
+diff --git a/Makefile b/Makefile
+index 096d7e867b6c..151477d4d5e5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 129
++SUBLEVEL = 130
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 2ee95ece0498..9bdaeb38a768 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -111,6 +111,7 @@ config S390
+ 	select GENERIC_CLOCKEVENTS
+ 	select GENERIC_CPU_AUTOPROBE
+ 	select GENERIC_CPU_DEVICES if !SMP
++	select GENERIC_CPU_VULNERABILITIES
+ 	select GENERIC_FIND_FIRST_BIT
+ 	select GENERIC_SMP_IDLE_THREAD
+ 	select GENERIC_TIME_VSYSCALL
+@@ -705,6 +706,51 @@ config SECCOMP
+ 
+ 	  If unsure, say Y.
+ 
++config KERNEL_NOBP
++	def_bool n
++	prompt "Enable modified branch prediction for the kernel by default"
++	help
++	  If this option is selected the kernel will switch to a modified
++	  branch prediction mode if the firmware interface is available.
++	  The modified branch prediction mode improves the behaviour in
++	  regard to speculative execution.
++
++	  With the option enabled the kernel parameter "nobp=0" or "nospec"
++	  can be used to run the kernel in the normal branch prediction mode.
++
++	  With the option disabled the modified branch prediction mode is
++	  enabled with the "nobp=1" kernel parameter.
++
++	  If unsure, say N.
++
++config EXPOLINE
++	def_bool n
++	prompt "Avoid speculative indirect branches in the kernel"
++	help
++	  Compile the kernel with the expoline compiler options to guard
++	  against kernel-to-user data leaks by avoiding speculative indirect
++	  branches.
++	  Requires a compiler with -mindirect-branch=thunk support for full
++	  protection. The kernel may run slower.
++
++	  If unsure, say N.
++
++choice
++	prompt "Expoline default"
++	depends on EXPOLINE
++	default EXPOLINE_FULL
++
++config EXPOLINE_OFF
++	bool "spectre_v2=off"
++
++config EXPOLINE_AUTO
++	bool "spectre_v2=auto"
++
++config EXPOLINE_FULL
++	bool "spectre_v2=on"
++
++endchoice
++
+ endmenu
+ 
+ menu "Power Management"
+@@ -754,6 +800,7 @@ config PFAULT
+ config SHARED_KERNEL
+ 	bool "VM shared kernel support"
+ 	depends on !JUMP_LABEL
++	depends on !ALTERNATIVES
+ 	help
+ 	  Select this option, if you want to share the text segment of the
+ 	  Linux kernel between different VM guests. This reduces memory
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index e8d4423e4f85..d924f9b6dc73 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -77,6 +77,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
+ cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
+ endif
+ 
++ifdef CONFIG_EXPOLINE
++  ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
++    CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
++    CC_FLAGS_EXPOLINE += -mfunction-return=thunk
++    CC_FLAGS_EXPOLINE += -mindirect-branch-table
++    export CC_FLAGS_EXPOLINE
++    cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
++  endif
++endif
++
+ ifdef CONFIG_FUNCTION_TRACER
+ # make use of hotpatch feature if the compiler supports it
+ cc_hotpatch	:= -mhotpatch=0,3
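
A note on the Makefile block above (annotation, not part of the patch): the
cc-option-yn probe only switches the expoline flags on when the compiler
understands -mindirect-branch=thunk, and every object built that way is also
tagged with -DCC_USING_EXPOLINE (defined to 1). Runtime code can test that
tag, as the new nospec-branch.c below does; a minimal hypothetical consumer:

	/* sketch: report at boot whether this kernel was built with expolines */
	static int __init report_expoline_build(void)
	{
		if (IS_ENABLED(CC_USING_EXPOLINE))
			pr_info("kernel compiled with expoline thunks\n");
		return 0;
	}
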
+diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
+new file mode 100644
+index 000000000000..a72002056b54
+--- /dev/null
++++ b/arch/s390/include/asm/alternative.h
+@@ -0,0 +1,149 @@
++#ifndef _ASM_S390_ALTERNATIVE_H
++#define _ASM_S390_ALTERNATIVE_H
++
++#ifndef __ASSEMBLY__
++
++#include <linux/types.h>
++#include <linux/stddef.h>
++#include <linux/stringify.h>
++
++struct alt_instr {
++	s32 instr_offset;	/* original instruction */
++	s32 repl_offset;	/* offset to replacement instruction */
++	u16 facility;		/* facility bit set for replacement */
++	u8  instrlen;		/* length of original instruction */
++	u8  replacementlen;	/* length of new instruction */
++} __packed;
++
++void apply_alternative_instructions(void);
++void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
++
++/*
++ * |661:       |662:	  |6620      |663:
++ * +-----------+---------------------+
++ * | oldinstr  | oldinstr_padding    |
++ * |	       +----------+----------+
++ * |	       |	  |	     |
++ * |	       | >6 bytes |6/4/2 nops|
++ * |	       |6 bytes jg----------->
++ * +-----------+---------------------+
++ *		 ^^ static padding ^^
++ *
++ * .altinstr_replacement section
++ * +---------------------+-----------+
++ * |6641:			     |6651:
++ * | alternative instr 1	     |
++ * +-----------+---------+- - - - - -+
++ * |6642:		 |6652:      |
++ * | alternative instr 2 | padding
++ * +---------------------+- - - - - -+
++ *			  ^ runtime ^
++ *
++ * .altinstructions section
++ * +---------------------------------+
++ * | alt_instr entries for each      |
++ * | alternative instr		     |
++ * +---------------------------------+
++ */
++
++#define b_altinstr(num)	"664"#num
++#define e_altinstr(num)	"665"#num
++
++#define e_oldinstr_pad_end	"663"
++#define oldinstr_len		"662b-661b"
++#define oldinstr_total_len	e_oldinstr_pad_end"b-661b"
++#define altinstr_len(num)	e_altinstr(num)"b-"b_altinstr(num)"b"
++#define oldinstr_pad_len(num) \
++	"-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
++	"((" altinstr_len(num) ")-(" oldinstr_len "))"
++
++#define INSTR_LEN_SANITY_CHECK(len)					\
++	".if " len " > 254\n"						\
++	"\t.error \"cpu alternatives does not support instructions "	\
++		"blocks > 254 bytes\"\n"				\
++	".endif\n"							\
++	".if (" len ") %% 2\n"						\
++	"\t.error \"cpu alternatives instructions length is odd\"\n"	\
++	".endif\n"
++
++#define OLDINSTR_PADDING(oldinstr, num)					\
++	".if " oldinstr_pad_len(num) " > 6\n"				\
++	"\tjg " e_oldinstr_pad_end "f\n"				\
++	"6620:\n"							\
++	"\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
++	".else\n"							\
++	"\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n"	\
++	"\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n"	\
++	"\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n"	\
++	".endif\n"
++
++#define OLDINSTR(oldinstr, num)						\
++	"661:\n\t" oldinstr "\n662:\n"					\
++	OLDINSTR_PADDING(oldinstr, num)					\
++	e_oldinstr_pad_end ":\n"					\
++	INSTR_LEN_SANITY_CHECK(oldinstr_len)
++
++#define OLDINSTR_2(oldinstr, num1, num2)				\
++	"661:\n\t" oldinstr "\n662:\n"					\
++	".if " altinstr_len(num1) " < " altinstr_len(num2) "\n"		\
++	OLDINSTR_PADDING(oldinstr, num2)				\
++	".else\n"							\
++	OLDINSTR_PADDING(oldinstr, num1)				\
++	".endif\n"							\
++	e_oldinstr_pad_end ":\n"					\
++	INSTR_LEN_SANITY_CHECK(oldinstr_len)
++
++#define ALTINSTR_ENTRY(facility, num)					\
++	"\t.long 661b - .\n"			/* old instruction */	\
++	"\t.long " b_altinstr(num)"b - .\n"	/* alt instruction */	\
++	"\t.word " __stringify(facility) "\n"	/* facility bit    */	\
++	"\t.byte " oldinstr_total_len "\n"	/* source len	   */	\
++	"\t.byte " altinstr_len(num) "\n"	/* alt instruction len */
++
++#define ALTINSTR_REPLACEMENT(altinstr, num)	/* replacement */	\
++	b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n"	\
++	INSTR_LEN_SANITY_CHECK(altinstr_len(num))
++
++/* alternative assembly primitive: */
++#define ALTERNATIVE(oldinstr, altinstr, facility) \
++	".pushsection .altinstr_replacement, \"ax\"\n"			\
++	ALTINSTR_REPLACEMENT(altinstr, 1)				\
++	".popsection\n"							\
++	OLDINSTR(oldinstr, 1)						\
++	".pushsection .altinstructions,\"a\"\n"				\
++	ALTINSTR_ENTRY(facility, 1)					\
++	".popsection\n"
++
++#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
++	".pushsection .altinstr_replacement, \"ax\"\n"			\
++	ALTINSTR_REPLACEMENT(altinstr1, 1)				\
++	ALTINSTR_REPLACEMENT(altinstr2, 2)				\
++	".popsection\n"							\
++	OLDINSTR_2(oldinstr, 1, 2)					\
++	".pushsection .altinstructions,\"a\"\n"				\
++	ALTINSTR_ENTRY(facility1, 1)					\
++	ALTINSTR_ENTRY(facility2, 2)					\
++	".popsection\n"
++
++/*
++ * Alternative instructions for different CPU types or capabilities.
++ *
++ * This allows the use of optimized instructions even on generic
++ * binary kernels.
++ *
++ * oldinstr is padded with jump and nops at compile time if altinstr is
++ * longer. altinstr is padded with jump and nops at run-time during patching.
++ *
++ * For non-barrier-like inlines please define new variants
++ * without volatile and memory clobber.
++ */
++#define alternative(oldinstr, altinstr, facility)			\
++	asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
++
++#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
++	asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1,	    \
++				   altinstr2, facility2) ::: "memory")
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_S390_ALTERNATIVE_H */
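
To make the macro interface above concrete, a hypothetical call site (not part
of this patch) could execute an instruction only when facility bit 40
(load-program-parameter) is installed, leaving the compile-time nop padding in
place otherwise:

	/* hedged sketch: both variants are 4 bytes, so no padding is needed */
	static inline void lpp_example(void)
	{
		alternative("nop", ".insn s,0xb2800000,0", 40);
	}

At boot, apply_alternative_instructions() walks the .altinstructions entries
emitted by this macro and patches in the replacement wherever the facility
test succeeds.
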
+diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
+index d68e11e0df5e..e903b28e7358 100644
+--- a/arch/s390/include/asm/barrier.h
++++ b/arch/s390/include/asm/barrier.h
+@@ -53,4 +53,28 @@ do {									\
+ 	___p1;								\
+ })
+ 
++/**
++ * array_index_mask_nospec - generate a mask for array_idx() that is
++ * ~0UL when the bounds check succeeds and 0 otherwise
++ * @index: array element index
++ * @size: number of elements in array
++ */
++#define array_index_mask_nospec array_index_mask_nospec
++static inline unsigned long array_index_mask_nospec(unsigned long index,
++						    unsigned long size)
++{
++	unsigned long mask;
++
++	if (__builtin_constant_p(size) && size > 0) {
++		asm("	clgr	%2,%1\n"
++		    "	slbgr	%0,%0\n"
++		    :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
++		return mask;
++	}
++	asm("	clgr	%1,%2\n"
++	    "	slbgr	%0,%0\n"
++	    :"=d" (mask) : "d" (size), "d" (index) :"cc");
++	return ~mask;
++}
++
+ #endif /* __ASM_BARRIER_H */
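
The helper above follows the generic array_index_nospec() pattern: the mask is
~0UL only when index < size, so ANDing it into the index clamps any value that
is out of bounds under speculation to zero. A hypothetical caller
(illustration, not part of the patch):

	/* clamp a user-controlled index before the dependent load */
	if (idx < size) {
		idx &= array_index_mask_nospec(idx, size);
		val = table[idx];
	}
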
+diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
+index 0aa6a7ed95a3..155fcc7bcba6 100644
+--- a/arch/s390/include/asm/facility.h
++++ b/arch/s390/include/asm/facility.h
+@@ -13,6 +13,24 @@
+ 
+ #define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */
+ 
++static inline void __set_facility(unsigned long nr, void *facilities)
++{
++	unsigned char *ptr = (unsigned char *) facilities;
++
++	if (nr >= MAX_FACILITY_BIT)
++		return;
++	ptr[nr >> 3] |= 0x80 >> (nr & 7);
++}
++
++static inline void __clear_facility(unsigned long nr, void *facilities)
++{
++	unsigned char *ptr = (unsigned char *) facilities;
++
++	if (nr >= MAX_FACILITY_BIT)
++		return;
++	ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
++}
++
+ static inline int __test_facility(unsigned long nr, void *facilities)
+ {
+ 	unsigned char *ptr;
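
The facility list is a big-endian bit array: facility n lives in byte n >> 3
under mask 0x80 >> (n & 7), so facility 82 (the BP-behavior bit used
throughout this patch) is byte 10, mask 0x20. Usage mirrors what
nobp_setup_early() does later in this patch:

	/* mark facility 82 usable in the alternatives facility list ... */
	__set_facility(82, S390_lowcore.alt_stfle_fac_list);
	/* ... or withdraw it again */
	__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
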
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index e9a983f40a24..7d9c5917da2b 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -136,7 +136,8 @@ struct kvm_s390_sie_block {
+ 	__u16	ipa;			/* 0x0056 */
+ 	__u32	ipb;			/* 0x0058 */
+ 	__u32	scaoh;			/* 0x005c */
+-	__u8	reserved60;		/* 0x0060 */
++#define FPF_BPBC 	0x20
++	__u8	fpf;			/* 0x0060 */
+ 	__u8	ecb;			/* 0x0061 */
+ 	__u8    ecb2;                   /* 0x0062 */
+ #define ECB3_AES 0x04
+diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
+index afe1cfebf1a4..8520c23e419b 100644
+--- a/arch/s390/include/asm/lowcore.h
++++ b/arch/s390/include/asm/lowcore.h
+@@ -155,7 +155,9 @@ struct _lowcore {
+ 	/* Per cpu primary space access list */
+ 	__u32	paste[16];			/* 0x0400 */
+ 
+-	__u8	pad_0x04c0[0x0e00-0x0440];	/* 0x0440 */
++	/* br %r1 trampoline */
++	__u16	br_r1_trampoline;		/* 0x0440 */
++	__u8	pad_0x0442[0x0e00-0x0442];	/* 0x0442 */
+ 
+ 	/*
+ 	 * 0xe00 contains the address of the IPL Parameter Information
+@@ -170,7 +172,8 @@ struct _lowcore {
+ 	__u8	pad_0x0e20[0x0f00-0x0e20];	/* 0x0e20 */
+ 
+ 	/* Extended facility list */
+-	__u64	stfle_fac_list[32];		/* 0x0f00 */
++	__u64	stfle_fac_list[16];		/* 0x0f00 */
++	__u64	alt_stfle_fac_list[16];		/* 0x0f80 */
+ 	__u8	pad_0x1000[0x11b0-0x1000];	/* 0x1000 */
+ 
+ 	/* Pointer to vector register save area */
+diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
+new file mode 100644
+index 000000000000..b4bd8c41e9d3
+--- /dev/null
++++ b/arch/s390/include/asm/nospec-branch.h
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_S390_EXPOLINE_H
++#define _ASM_S390_EXPOLINE_H
++
++#ifndef __ASSEMBLY__
++
++#include <linux/types.h>
++
++extern int nospec_disable;
++
++void nospec_init_branches(void);
++void nospec_auto_detect(void);
++void nospec_revert(s32 *start, s32 *end);
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_S390_EXPOLINE_H */
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index c61ed7890cef..f915a0f1b0fc 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -69,6 +69,7 @@ extern void s390_adjust_jiffies(void);
+ extern const struct seq_operations cpuinfo_op;
+ extern int sysctl_ieee_emulation_warnings;
+ extern void execve_tail(void);
++extern void __bpon(void);
+ 
+ /*
+  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
+@@ -315,6 +316,9 @@ extern void memcpy_absolute(void *, void *, size_t);
+ 	memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));	\
+ }
+ 
++extern int s390_isolate_bp(void);
++extern int s390_isolate_bp_guest(void);
++
+ #endif /* __ASSEMBLY__ */
+ 
+ #endif /* __ASM_S390_PROCESSOR_H */
+diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
+index 692b9247c019..b2504163c8fa 100644
+--- a/arch/s390/include/asm/thread_info.h
++++ b/arch/s390/include/asm/thread_info.h
+@@ -78,6 +78,8 @@ void arch_release_task_struct(struct task_struct *tsk);
+ #define TIF_SECCOMP		5	/* secure computing */
+ #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
+ #define TIF_UPROBE		7	/* breakpointed or single-stepping */
++#define TIF_ISOLATE_BP		8	/* Run process with isolated BP */
++#define TIF_ISOLATE_BP_GUEST	9	/* Run KVM guests with isolated BP */
+ #define TIF_31BIT		16	/* 32bit process */
+ #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
+ #define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal() */
+@@ -93,6 +95,8 @@ void arch_release_task_struct(struct task_struct *tsk);
+ #define _TIF_SECCOMP		_BITUL(TIF_SECCOMP)
+ #define _TIF_SYSCALL_TRACEPOINT	_BITUL(TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_UPROBE		_BITUL(TIF_UPROBE)
++#define _TIF_ISOLATE_BP		_BITUL(TIF_ISOLATE_BP)
++#define _TIF_ISOLATE_BP_GUEST	_BITUL(TIF_ISOLATE_BP_GUEST)
+ #define _TIF_31BIT		_BITUL(TIF_31BIT)
+ #define _TIF_SINGLE_STEP	_BITUL(TIF_SINGLE_STEP)
+ 
+diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
+index ef1a5fcc6c66..beb508a9e72c 100644
+--- a/arch/s390/include/uapi/asm/kvm.h
++++ b/arch/s390/include/uapi/asm/kvm.h
+@@ -151,6 +151,7 @@ struct kvm_guest_debug_arch {
+ #define KVM_SYNC_ARCH0  (1UL << 4)
+ #define KVM_SYNC_PFAULT (1UL << 5)
+ #define KVM_SYNC_VRS    (1UL << 6)
++#define KVM_SYNC_BPBC   (1UL << 10)
+ /* definition of registers in kvm_run */
+ struct kvm_sync_regs {
+ 	__u64 prefix;	/* prefix register */
+@@ -168,6 +169,8 @@ struct kvm_sync_regs {
+ 	__u64 vrs[32][2];	/* vector registers */
+ 	__u8  reserved[512];	/* for future vector expansion */
+ 	__u32 fpc;	/* only valid with vector registers */
++	__u8 bpbc : 1;		/* bp mode */
++	__u8 reserved2 : 7;
+ };
+ 
+ #define KVM_REG_S390_TODPR	(KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
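
With KVM_SYNC_BPBC advertised in kvm_valid_regs, a VMM can hand the guest's
branch-prediction-blocking control bit to KVM on each entry. A hypothetical
userspace fragment (illustration only):

	/* ask KVM to pick up the bpbc bit before the next KVM_RUN */
	if (run->kvm_valid_regs & KVM_SYNC_BPBC) {
		run->s.regs.bpbc = 1;
		run->kvm_dirty_regs |= KVM_SYNC_BPBC;
	}

sync_regs() in kvm-s390.c, further down in this patch, folds the bit into the
SIE block's fpf field when facility 82 is available.
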
+diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
+index dc167a23b920..8ccfbf22ecbb 100644
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -44,10 +44,13 @@ obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
+ obj-y	+= debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
+ obj-y	+= sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+ obj-y	+= runtime_instr.o cache.o dumpstack.o
+-obj-y	+= entry.o reipl.o relocate_kernel.o
++obj-y	+= entry.o reipl.o relocate_kernel.o alternative.o
++obj-y	+= nospec-branch.o
+ 
+ extra-y				+= head.o head64.o vmlinux.lds
+ 
++CFLAGS_REMOVE_nospec-branch.o	+= $(CC_FLAGS_EXPOLINE)
++
+ obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
+ obj-$(CONFIG_SMP)		+= smp.o
+ obj-$(CONFIG_SCHED_BOOK)	+= topology.o
+diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
+new file mode 100644
+index 000000000000..b57b293998dc
+--- /dev/null
++++ b/arch/s390/kernel/alternative.c
+@@ -0,0 +1,112 @@
++#include <linux/module.h>
++#include <asm/alternative.h>
++#include <asm/facility.h>
++#include <asm/nospec-branch.h>
++
++#define MAX_PATCH_LEN (255 - 1)
++
++static int __initdata_or_module alt_instr_disabled;
++
++static int __init disable_alternative_instructions(char *str)
++{
++	alt_instr_disabled = 1;
++	return 0;
++}
++
++early_param("noaltinstr", disable_alternative_instructions);
++
++struct brcl_insn {
++	u16 opc;
++	s32 disp;
++} __packed;
++
++static u16 __initdata_or_module nop16 = 0x0700;
++static u32 __initdata_or_module nop32 = 0x47000000;
++static struct brcl_insn __initdata_or_module nop48 = {
++	0xc004, 0
++};
++
++static const void *nops[] __initdata_or_module = {
++	&nop16,
++	&nop32,
++	&nop48
++};
++
++static void __init_or_module add_jump_padding(void *insns, unsigned int len)
++{
++	struct brcl_insn brcl = {
++		0xc0f4,
++		len / 2
++	};
++
++	memcpy(insns, &brcl, sizeof(brcl));
++	insns += sizeof(brcl);
++	len -= sizeof(brcl);
++
++	while (len > 0) {
++		memcpy(insns, &nop16, 2);
++		insns += 2;
++		len -= 2;
++	}
++}
++
++static void __init_or_module add_padding(void *insns, unsigned int len)
++{
++	if (len > 6)
++		add_jump_padding(insns, len);
++	else if (len >= 2)
++		memcpy(insns, nops[len / 2 - 1], len);
++}
++
++static void __init_or_module __apply_alternatives(struct alt_instr *start,
++						  struct alt_instr *end)
++{
++	struct alt_instr *a;
++	u8 *instr, *replacement;
++	u8 insnbuf[MAX_PATCH_LEN];
++
++	/*
++	 * The scan order should be from start to end. A later scanned
++	 * alternative code can overwrite previously scanned alternative code.
++	 */
++	for (a = start; a < end; a++) {
++		int insnbuf_sz = 0;
++
++		instr = (u8 *)&a->instr_offset + a->instr_offset;
++		replacement = (u8 *)&a->repl_offset + a->repl_offset;
++
++		if (!__test_facility(a->facility,
++				     S390_lowcore.alt_stfle_fac_list))
++			continue;
++
++		if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
++			WARN_ONCE(1, "cpu alternatives instructions length is "
++				     "odd, skipping patching\n");
++			continue;
++		}
++
++		memcpy(insnbuf, replacement, a->replacementlen);
++		insnbuf_sz = a->replacementlen;
++
++		if (a->instrlen > a->replacementlen) {
++			add_padding(insnbuf + a->replacementlen,
++				    a->instrlen - a->replacementlen);
++			insnbuf_sz += a->instrlen - a->replacementlen;
++		}
++
++		s390_kernel_write(instr, insnbuf, insnbuf_sz);
++	}
++}
++
++void __init_or_module apply_alternatives(struct alt_instr *start,
++					 struct alt_instr *end)
++{
++	if (!alt_instr_disabled)
++		__apply_alternatives(start, end);
++}
++
++extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
++void __init apply_alternative_instructions(void)
++{
++	apply_alternatives(__alt_instructions, __alt_instructions_end);
++}
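
Two details of the patcher above are easy to miss. The alt_instr fields are
self-relative, so each address is recovered by adding the stored offset to the
offset field's own location. And when the replacement is shorter than the
original, add_padding() tops up the buffer with architecturally defined nops.
A standalone restatement of the nop-selection rule (sketch, compilable on its
own):

	/* gap > 6 bytes: brcl jump over trailing 2-byte nops;
	 * gaps of 2/4/6 bytes: a single nop of exactly that size */
	static const char *pad_kind(unsigned int len)
	{
		if (len > 6)
			return "brcl + 2-byte nops";
		if (len == 6)
			return "nop48 (brcl 0,0)";
		if (len == 4)
			return "nop32 (0x47000000)";
		if (len == 2)
			return "nop16 (0x0700)";
		return "no padding";
	}
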
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index ee7b8e7ca4f8..8eccead675d4 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -279,6 +279,11 @@ static noinline __init void setup_facility_list(void)
+ {
+ 	stfle(S390_lowcore.stfle_fac_list,
+ 	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
++	memcpy(S390_lowcore.alt_stfle_fac_list,
++	       S390_lowcore.stfle_fac_list,
++	       sizeof(S390_lowcore.alt_stfle_fac_list));
++	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
++		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ 
+ static __init void detect_diag9c(void)
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 4612ed7ec2e5..c63730326215 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -104,6 +104,7 @@ _PIF_WORK	= (_PIF_PER_TRAP)
+ 	j	3f
+ 1:	LAST_BREAK %r14
+ 	UPDATE_VTIME %r14,%r15,\timer
++	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ 2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
+ 3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+ 	.endm
+@@ -162,8 +163,137 @@ _PIF_WORK	= (_PIF_PER_TRAP)
+ 		tm	off+\addr, \mask
+ 	.endm
+ 
++	.macro BPOFF
++	.pushsection .altinstr_replacement, "ax"
++660:	.long	0xb2e8c000
++	.popsection
++661:	.long	0x47000000
++	.pushsection .altinstructions, "a"
++	.long 661b - .
++	.long 660b - .
++	.word 82
++	.byte 4
++	.byte 4
++	.popsection
++	.endm
++
++	.macro BPON
++	.pushsection .altinstr_replacement, "ax"
++662:	.long	0xb2e8d000
++	.popsection
++663:	.long	0x47000000
++	.pushsection .altinstructions, "a"
++	.long 663b - .
++	.long 662b - .
++	.word 82
++	.byte 4
++	.byte 4
++	.popsection
++	.endm
++
++	.macro BPENTER tif_ptr,tif_mask
++	.pushsection .altinstr_replacement, "ax"
++662:	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
++	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
++	.popsection
++664:	TSTMSK	\tif_ptr,\tif_mask
++	jz	. + 8
++	.long	0xb2e8d000
++	.pushsection .altinstructions, "a"
++	.long 664b - .
++	.long 662b - .
++	.word 82
++	.byte 12
++	.byte 12
++	.popsection
++	.endm
++
++	.macro BPEXIT tif_ptr,tif_mask
++	TSTMSK	\tif_ptr,\tif_mask
++	.pushsection .altinstr_replacement, "ax"
++662:	jnz	. + 8
++	.long	0xb2e8d000
++	.popsection
++664:	jz	. + 8
++	.long	0xb2e8c000
++	.pushsection .altinstructions, "a"
++	.long 664b - .
++	.long 662b - .
++	.word 82
++	.byte 8
++	.byte 8
++	.popsection
++	.endm
++
++#ifdef CONFIG_EXPOLINE
++
++	.macro GEN_BR_THUNK name,reg,tmp
++	.section .text.\name,"axG",@progbits,\name,comdat
++	.globl \name
++	.hidden \name
++	.type \name,@function
++\name:
++	.cfi_startproc
++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
++	exrl	0,0f
++#else
++	larl	\tmp,0f
++	ex	0,0(\tmp)
++#endif
++	j	.
++0:	br	\reg
++	.cfi_endproc
++	.endm
++
++	GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
++	GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
++	GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
++
++	.macro BASR_R14_R9
++0:	brasl	%r14,__s390x_indirect_jump_r1use_r9
++	.pushsection .s390_indirect_branches,"a",@progbits
++	.long	0b-.
++	.popsection
++	.endm
++
++	.macro BR_R1USE_R14
++0:	jg	__s390x_indirect_jump_r1use_r14
++	.pushsection .s390_indirect_branches,"a",@progbits
++	.long	0b-.
++	.popsection
++	.endm
++
++	.macro BR_R11USE_R14
++0:	jg	__s390x_indirect_jump_r11use_r14
++	.pushsection .s390_indirect_branches,"a",@progbits
++	.long	0b-.
++	.popsection
++	.endm
++
++#else	/* CONFIG_EXPOLINE */
++
++	.macro BASR_R14_R9
++	basr	%r14,%r9
++	.endm
++
++	.macro BR_R1USE_R14
++	br	%r14
++	.endm
++
++	.macro BR_R11USE_R14
++	br	%r14
++	.endm
++
++#endif /* CONFIG_EXPOLINE */
++
++
+ 	.section .kprobes.text, "ax"
+ 
++ENTRY(__bpon)
++	.globl __bpon
++	BPON
++	BR_R1USE_R14
++
+ /*
+  * Scheduler resume function, called by switch_to
+  *  gpr2 = (task_struct *) prev
+@@ -190,9 +320,9 @@ ENTRY(__switch_to)
+ 	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
+ 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
+ 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
+-	bzr	%r14
++	jz	0f
+ 	.insn	s,0xb2800000,__LC_LPP		# set program parameter
+-	br	%r14
++0:	BR_R1USE_R14
+ 
+ .L__critical_start:
+ 
+@@ -204,9 +334,11 @@ ENTRY(__switch_to)
+  */
+ ENTRY(sie64a)
+ 	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
++	lg	%r12,__LC_CURRENT
+ 	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
+ 	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
+ 	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
++	mvc	__SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
+ 	jno	.Lsie_load_guest_gprs
+ 	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
+@@ -223,7 +355,11 @@ ENTRY(sie64a)
+ 	jnz	.Lsie_skip
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
+ 	jo	.Lsie_skip			# exit if fp/vx regs changed
++	BPEXIT	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ 	sie	0(%r14)
++.Lsie_exit:
++	BPOFF
++	BPENTER	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ .Lsie_skip:
+ 	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
+ 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+@@ -244,9 +380,15 @@ ENTRY(sie64a)
+ sie_exit:
+ 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
+ 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
++	xgr	%r0,%r0				# clear guest registers to
++	xgr	%r1,%r1				# prevent speculative use
++	xgr	%r2,%r2
++	xgr	%r3,%r3
++	xgr	%r4,%r4
++	xgr	%r5,%r5
+ 	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
+ 	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
+-	br	%r14
++	BR_R1USE_R14
+ .Lsie_fault:
+ 	lghi	%r14,-EFAULT
+ 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
+@@ -267,6 +409,7 @@ ENTRY(system_call)
+ 	stpt	__LC_SYNC_ENTER_TIMER
+ .Lsysc_stmg:
+ 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
++	BPOFF
+ 	lg	%r10,__LC_LAST_BREAK
+ 	lg	%r12,__LC_THREAD_INFO
+ 	lghi	%r14,_PIF_SYSCALL
+@@ -276,12 +419,15 @@ ENTRY(system_call)
+ 	LAST_BREAK %r13
+ .Lsysc_vtime:
+ 	UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
++	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ 	stmg	%r0,%r7,__PT_R0(%r11)
+ 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
+ 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
+ 	stg	%r14,__PT_FLAGS(%r11)
+ .Lsysc_do_svc:
++	# clear user controlled register to prevent speculative use
++	xgr	%r0,%r0
+ 	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
+ 	llgh	%r8,__PT_INT_CODE+2(%r11)
+ 	slag	%r8,%r8,2			# shift and test for svc 0
+@@ -299,7 +445,7 @@ ENTRY(system_call)
+ 	lgf	%r9,0(%r8,%r10)			# get system call add.
+ 	TSTMSK	__TI_flags(%r12),_TIF_TRACE
+ 	jnz	.Lsysc_tracesys
+-	basr	%r14,%r9			# call sys_xxxx
++	BASR_R14_R9				# call sys_xxxx
+ 	stg	%r2,__PT_R2(%r11)		# store return value
+ 
+ .Lsysc_return:
+@@ -311,6 +457,7 @@ ENTRY(system_call)
+ 	jnz	.Lsysc_work			# check for work
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
+ 	jnz	.Lsysc_work
++	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
+ .Lsysc_restore:
+ 	lg	%r14,__LC_VDSO_PER_CPU
+ 	lmg	%r0,%r10,__PT_R0(%r11)
+@@ -438,7 +585,7 @@ ENTRY(system_call)
+ 	lmg	%r3,%r7,__PT_R3(%r11)
+ 	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
+ 	lg	%r2,__PT_ORIG_GPR2(%r11)
+-	basr	%r14,%r9		# call sys_xxx
++	BASR_R14_R9			# call sys_xxx
+ 	stg	%r2,__PT_R2(%r11)	# store return value
+ .Lsysc_tracenogo:
+ 	TSTMSK	__TI_flags(%r12),_TIF_TRACE
+@@ -462,7 +609,7 @@ ENTRY(ret_from_fork)
+ 	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
+ ENTRY(kernel_thread_starter)
+ 	la	%r2,0(%r10)
+-	basr	%r14,%r9
++	BASR_R14_R9
+ 	j	.Lsysc_tracenogo
+ 
+ /*
+@@ -471,6 +618,7 @@ ENTRY(kernel_thread_starter)
+ 
+ ENTRY(pgm_check_handler)
+ 	stpt	__LC_SYNC_ENTER_TIMER
++	BPOFF
+ 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
+ 	lg	%r10,__LC_LAST_BREAK
+ 	lg	%r12,__LC_THREAD_INFO
+@@ -495,6 +643,7 @@ ENTRY(pgm_check_handler)
+ 	j	3f
+ 2:	LAST_BREAK %r14
+ 	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
++	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ 	lg	%r15,__LC_KERNEL_STACK
+ 	lg	%r14,__TI_task(%r12)
+ 	aghi	%r14,__TASK_thread	# pointer to thread_struct
+@@ -504,6 +653,15 @@ ENTRY(pgm_check_handler)
+ 	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
+ 3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+ 	stmg	%r0,%r7,__PT_R0(%r11)
++	# clear user controlled registers to prevent speculative use
++	xgr	%r0,%r0
++	xgr	%r1,%r1
++	xgr	%r2,%r2
++	xgr	%r3,%r3
++	xgr	%r4,%r4
++	xgr	%r5,%r5
++	xgr	%r6,%r6
++	xgr	%r7,%r7
+ 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ 	stmg	%r8,%r9,__PT_PSW(%r11)
+ 	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
+@@ -525,9 +683,9 @@ ENTRY(pgm_check_handler)
+ 	nill	%r10,0x007f
+ 	sll	%r10,2
+ 	je	.Lpgm_return
+-	lgf	%r1,0(%r10,%r1)		# load address of handler routine
++	lgf	%r9,0(%r10,%r1)		# load address of handler routine
+ 	lgr	%r2,%r11		# pass pointer to pt_regs
+-	basr	%r14,%r1		# branch to interrupt-handler
++	BASR_R14_R9			# branch to interrupt-handler
+ .Lpgm_return:
+ 	LOCKDEP_SYS_EXIT
+ 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+@@ -560,6 +718,7 @@ ENTRY(pgm_check_handler)
+ ENTRY(io_int_handler)
+ 	STCK	__LC_INT_CLOCK
+ 	stpt	__LC_ASYNC_ENTER_TIMER
++	BPOFF
+ 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
+ 	lg	%r10,__LC_LAST_BREAK
+ 	lg	%r12,__LC_THREAD_INFO
+@@ -567,6 +726,16 @@ ENTRY(io_int_handler)
+ 	lmg	%r8,%r9,__LC_IO_OLD_PSW
+ 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
+ 	stmg	%r0,%r7,__PT_R0(%r11)
++	# clear user controlled registers to prevent speculative use
++	xgr	%r0,%r0
++	xgr	%r1,%r1
++	xgr	%r2,%r2
++	xgr	%r3,%r3
++	xgr	%r4,%r4
++	xgr	%r5,%r5
++	xgr	%r6,%r6
++	xgr	%r7,%r7
++	xgr	%r10,%r10
+ 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ 	stmg	%r8,%r9,__PT_PSW(%r11)
+ 	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+@@ -601,9 +770,13 @@ ENTRY(io_int_handler)
+ 	lg	%r14,__LC_VDSO_PER_CPU
+ 	lmg	%r0,%r10,__PT_R0(%r11)
+ 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
++	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
++	jno	.Lio_exit_kernel
++	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
+ .Lio_exit_timer:
+ 	stpt	__LC_EXIT_TIMER
+ 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
++.Lio_exit_kernel:
+ 	lmg	%r11,%r15,__PT_R11(%r11)
+ 	lpswe	__LC_RETURN_PSW
+ .Lio_done:
+@@ -735,6 +908,7 @@ ENTRY(io_int_handler)
+ ENTRY(ext_int_handler)
+ 	STCK	__LC_INT_CLOCK
+ 	stpt	__LC_ASYNC_ENTER_TIMER
++	BPOFF
+ 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
+ 	lg	%r10,__LC_LAST_BREAK
+ 	lg	%r12,__LC_THREAD_INFO
+@@ -742,6 +916,16 @@ ENTRY(ext_int_handler)
+ 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
+ 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
+ 	stmg	%r0,%r7,__PT_R0(%r11)
++	# clear user controlled registers to prevent speculative use
++	xgr	%r0,%r0
++	xgr	%r1,%r1
++	xgr	%r2,%r2
++	xgr	%r3,%r3
++	xgr	%r4,%r4
++	xgr	%r5,%r5
++	xgr	%r6,%r6
++	xgr	%r7,%r7
++	xgr	%r10,%r10
+ 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ 	stmg	%r8,%r9,__PT_PSW(%r11)
+ 	lghi	%r1,__LC_EXT_PARAMS2
+@@ -773,11 +957,12 @@ ENTRY(psw_idle)
+ 	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
+ .Lpsw_idle_stcctm:
+ #endif
++	BPON
+ 	STCK	__CLOCK_IDLE_ENTER(%r2)
+ 	stpt	__TIMER_IDLE_ENTER(%r2)
+ .Lpsw_idle_lpsw:
+ 	lpswe	__SF_EMPTY(%r15)
+-	br	%r14
++	BR_R1USE_R14
+ .Lpsw_idle_end:
+ 
+ /*
+@@ -791,7 +976,7 @@ ENTRY(save_fpu_regs)
+ 	lg	%r2,__LC_CURRENT
+ 	aghi	%r2,__TASK_thread
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
+-	bor	%r14
++	jo	.Lsave_fpu_regs_exit
+ 	stfpc	__THREAD_FPU_fpc(%r2)
+ .Lsave_fpu_regs_fpc_end:
+ 	lg	%r3,__THREAD_FPU_regs(%r2)
+@@ -821,7 +1006,8 @@ ENTRY(save_fpu_regs)
+ 	std	15,120(%r3)
+ .Lsave_fpu_regs_done:
+ 	oi	__LC_CPU_FLAGS+7,_CIF_FPU
+-	br	%r14
++.Lsave_fpu_regs_exit:
++	BR_R1USE_R14
+ .Lsave_fpu_regs_end:
+ 
+ /*
+@@ -838,7 +1024,7 @@ load_fpu_regs:
+ 	lg	%r4,__LC_CURRENT
+ 	aghi	%r4,__TASK_thread
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
+-	bnor	%r14
++	jno	.Lload_fpu_regs_exit
+ 	lfpc	__THREAD_FPU_fpc(%r4)
+ 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+ 	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
+@@ -867,7 +1053,8 @@ load_fpu_regs:
+ 	ld	15,120(%r4)
+ .Lload_fpu_regs_done:
+ 	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
+-	br	%r14
++.Lload_fpu_regs_exit:
++	BR_R1USE_R14
+ .Lload_fpu_regs_end:
+ 
+ .L__critical_end:
+@@ -877,6 +1064,7 @@ load_fpu_regs:
+  */
+ ENTRY(mcck_int_handler)
+ 	STCK	__LC_MCCK_CLOCK
++	BPOFF
+ 	la	%r1,4095		# revalidate r1
+ 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
+ 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+@@ -908,6 +1096,16 @@ ENTRY(mcck_int_handler)
+ .Lmcck_skip:
+ 	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
+ 	stmg	%r0,%r7,__PT_R0(%r11)
++	# clear user controlled registers to prevent speculative use
++	xgr	%r0,%r0
++	xgr	%r1,%r1
++	xgr	%r2,%r2
++	xgr	%r3,%r3
++	xgr	%r4,%r4
++	xgr	%r5,%r5
++	xgr	%r6,%r6
++	xgr	%r7,%r7
++	xgr	%r10,%r10
+ 	mvc	__PT_R8(64,%r11),0(%r14)
+ 	stmg	%r8,%r9,__PT_PSW(%r11)
+ 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+@@ -933,6 +1131,7 @@ ENTRY(mcck_int_handler)
+ 	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
+ 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+ 	jno	0f
++	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
+ 	stpt	__LC_EXIT_TIMER
+ 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ 0:	lmg	%r11,%r15,__PT_R11(%r11)
+@@ -1028,7 +1227,7 @@ cleanup_critical:
+ 	jl	0f
+ 	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
+ 	jl	.Lcleanup_load_fpu_regs
+-0:	br	%r14
++0:	BR_R11USE_R14
+ 
+ 	.align	8
+ .Lcleanup_table:
+@@ -1053,11 +1252,12 @@ cleanup_critical:
+ 	.quad	.Lsie_done
+ 
+ .Lcleanup_sie:
++	BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ 	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
+ 	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
+ 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+ 	larl	%r9,sie_exit			# skip forward to sie_exit
+-	br	%r14
++	BR_R11USE_R14
+ #endif
+ 
+ .Lcleanup_system_call:
+@@ -1099,7 +1299,8 @@ cleanup_critical:
+ 	srag	%r9,%r9,23
+ 	jz	0f
+ 	mvc	__TI_last_break(8,%r12),16(%r11)
+-0:	# set up saved register r11
++0:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
++	# set up saved register r11
+ 	lg	%r15,__LC_KERNEL_STACK
+ 	la	%r9,STACK_FRAME_OVERHEAD(%r15)
+ 	stg	%r9,24(%r11)		# r11 pt_regs pointer
+@@ -1114,7 +1315,7 @@ cleanup_critical:
+ 	stg	%r15,56(%r11)		# r15 stack pointer
+ 	# set new psw address and exit
+ 	larl	%r9,.Lsysc_do_svc
+-	br	%r14
++	BR_R11USE_R14
+ .Lcleanup_system_call_insn:
+ 	.quad	system_call
+ 	.quad	.Lsysc_stmg
+@@ -1124,7 +1325,7 @@ cleanup_critical:
+ 
+ .Lcleanup_sysc_tif:
+ 	larl	%r9,.Lsysc_tif
+-	br	%r14
++	BR_R11USE_R14
+ 
+ .Lcleanup_sysc_restore:
+ 	# check if stpt has been executed
+@@ -1141,14 +1342,14 @@ cleanup_critical:
+ 	mvc	0(64,%r11),__PT_R8(%r9)
+ 	lmg	%r0,%r7,__PT_R0(%r9)
+ 1:	lmg	%r8,%r9,__LC_RETURN_PSW
+-	br	%r14
++	BR_R11USE_R14
+ .Lcleanup_sysc_restore_insn:
+ 	.quad	.Lsysc_exit_timer
+ 	.quad	.Lsysc_done - 4
+ 
+ .Lcleanup_io_tif:
+ 	larl	%r9,.Lio_tif
+-	br	%r14
++	BR_R11USE_R14
+ 
+ .Lcleanup_io_restore:
+ 	# check if stpt has been executed
+@@ -1162,7 +1363,7 @@ cleanup_critical:
+ 	mvc	0(64,%r11),__PT_R8(%r9)
+ 	lmg	%r0,%r7,__PT_R0(%r9)
+ 1:	lmg	%r8,%r9,__LC_RETURN_PSW
+-	br	%r14
++	BR_R11USE_R14
+ .Lcleanup_io_restore_insn:
+ 	.quad	.Lio_exit_timer
+ 	.quad	.Lio_done - 4
+@@ -1214,17 +1415,17 @@ cleanup_critical:
+ 	# prepare return psw
+ 	nihh	%r8,0xfcfd		# clear irq & wait state bits
+ 	lg	%r9,48(%r11)		# return from psw_idle
+-	br	%r14
++	BR_R11USE_R14
+ .Lcleanup_idle_insn:
+ 	.quad	.Lpsw_idle_lpsw
+ 
+ .Lcleanup_save_fpu_regs:
+ 	larl	%r9,save_fpu_regs
+-	br	%r14
++	BR_R11USE_R14
+ 
+ .Lcleanup_load_fpu_regs:
+ 	larl	%r9,load_fpu_regs
+-	br	%r14
++	BR_R11USE_R14
+ 
+ /*
+  * Integer constants
+@@ -1240,7 +1441,6 @@ cleanup_critical:
+ .Lsie_critical_length:
+ 	.quad	.Lsie_done - .Lsie_gmap
+ #endif
+-
+ 	.section .rodata, "a"
+ #define SYSCALL(esame,emu)	.long esame
+ 	.globl	sys_call_table
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index e73979236659..837bb301023f 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -563,6 +563,7 @@ static struct kset *ipl_kset;
+ 
+ static void __ipl_run(void *unused)
+ {
++	__bpon();
+ 	diag308(DIAG308_IPL, NULL);
+ 	if (MACHINE_IS_VM)
+ 		__cpcmd("IPL", NULL, 0, NULL);
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index 0c1a679314dd..9bd1933848b8 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -31,6 +31,9 @@
+ #include <linux/kernel.h>
+ #include <linux/moduleloader.h>
+ #include <linux/bug.h>
++#include <asm/alternative.h>
++#include <asm/nospec-branch.h>
++#include <asm/facility.h>
+ 
+ #if 0
+ #define DEBUGP printk
+@@ -163,7 +166,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ 	me->arch.got_offset = me->core_size;
+ 	me->core_size += me->arch.got_size;
+ 	me->arch.plt_offset = me->core_size;
+-	me->core_size += me->arch.plt_size;
++	if (me->arch.plt_size) {
++		if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
++			me->arch.plt_size += PLT_ENTRY_SIZE;
++		me->core_size += me->arch.plt_size;
++	}
+ 	return 0;
+ }
+ 
+@@ -317,9 +324,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ 			unsigned int *ip;
+ 			ip = me->module_core + me->arch.plt_offset +
+ 				info->plt_offset;
+-			ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+-			ip[1] = 0x100a0004;
+-			ip[2] = 0x07f10000;
++			ip[0] = 0x0d10e310;	/* basr 1,0  */
++			ip[1] = 0x100a0004;	/* lg	1,10(1) */
++			if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
++				unsigned int *ij;
++				ij = me->module_core +
++					me->arch.plt_offset +
++					me->arch.plt_size - PLT_ENTRY_SIZE;
++				ip[2] = 0xa7f40000 +	/* j __jump_r1 */
++					(unsigned int)(u16)
++					(((unsigned long) ij - 8 -
++					  (unsigned long) ip) / 2);
++			} else {
++				ip[2] = 0x07f10000;	/* br %r1 */
++			}
+ 			ip[3] = (unsigned int) (val >> 32);
+ 			ip[4] = (unsigned int) val;
+ 			info->plt_initialized = 1;
+@@ -424,6 +442,45 @@ int module_finalize(const Elf_Ehdr *hdr,
+ 		    const Elf_Shdr *sechdrs,
+ 		    struct module *me)
+ {
++	const Elf_Shdr *s;
++	char *secstrings, *secname;
++	void *aseg;
++
++	if (IS_ENABLED(CONFIG_EXPOLINE) &&
++	    !nospec_disable && me->arch.plt_size) {
++		unsigned int *ij;
++
++		ij = me->module_core + me->arch.plt_offset +
++			me->arch.plt_size - PLT_ENTRY_SIZE;
++		if (test_facility(35)) {
++			ij[0] = 0xc6000000;	/* exrl	%r0,.+10	*/
++			ij[1] = 0x0005a7f4;	/* j	.		*/
++			ij[2] = 0x000007f1;	/* br	%r1		*/
++		} else {
++			ij[0] = 0x44000000 | (unsigned int)
++				offsetof(struct _lowcore, br_r1_trampoline);
++			ij[1] = 0xa7f40000;	/* j	.		*/
++		}
++	}
++
++	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
++	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
++		aseg = (void *) s->sh_addr;
++		secname = secstrings + s->sh_name;
++
++		if (!strcmp(".altinstructions", secname))
++			/* patch .altinstructions */
++			apply_alternatives(aseg, aseg + s->sh_size);
++
++		if (IS_ENABLED(CONFIG_EXPOLINE) &&
++		    (!strncmp(".s390_indirect", secname, 14)))
++			nospec_revert(aseg, aseg + s->sh_size);
++
++		if (IS_ENABLED(CONFIG_EXPOLINE) &&
++		    (!strncmp(".s390_return", secname, 12)))
++			nospec_revert(aseg, aseg + s->sh_size);
++	}
++
+ 	jump_label_apply_nops(me);
+ 	vfree(me->arch.syminfo);
+ 	me->arch.syminfo = NULL;
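
For reference, the PLT entry apply_rela() now emits with expolines active
decodes as follows (reconstructed from the words written above; the jump
displacement depends on the module layout):

	basr %r1,0		# 0d 10
	lg   %r1,10(%r1)	# e3 10 10 0a 00 04, load the target address
	j    __jump_r1		# a7 f4 xx xx, expoline tail instead of br %r1
	.quad <target>		# ip[3]/ip[4]

module_finalize() plants the shared __jump_r1 tail once at the end of the PLT:
an exrl of a local br %r1 when the execute-extensions facility (bit 35) is
present, otherwise an ex of the br_r1_trampoline that setup_lowcore() installs
in lowcore.
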
+diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
+new file mode 100644
+index 000000000000..9f3b5b382743
+--- /dev/null
++++ b/arch/s390/kernel/nospec-branch.c
+@@ -0,0 +1,169 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/module.h>
++#include <linux/device.h>
++#include <asm/facility.h>
++#include <asm/nospec-branch.h>
++
++static int __init nobp_setup_early(char *str)
++{
++	bool enabled;
++	int rc;
++
++	rc = kstrtobool(str, &enabled);
++	if (rc)
++		return rc;
++	if (enabled && test_facility(82)) {
++		/*
++		 * The user explicitly requested nobp=1, enable it and
++		 * disable the expoline support.
++		 */
++		__set_facility(82, S390_lowcore.alt_stfle_fac_list);
++		if (IS_ENABLED(CONFIG_EXPOLINE))
++			nospec_disable = 1;
++	} else {
++		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++	}
++	return 0;
++}
++early_param("nobp", nobp_setup_early);
++
++static int __init nospec_setup_early(char *str)
++{
++	__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++	return 0;
++}
++early_param("nospec", nospec_setup_early);
++
++static int __init nospec_report(void)
++{
++	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
++		pr_info("Spectre V2 mitigation: execute trampolines.\n");
++	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
++		pr_info("Spectre V2 mitigation: limited branch prediction.\n");
++	return 0;
++}
++arch_initcall(nospec_report);
++
++#ifdef CONFIG_SYSFS
++ssize_t cpu_show_spectre_v1(struct device *dev,
++			    struct device_attribute *attr, char *buf)
++{
++	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev,
++			    struct device_attribute *attr, char *buf)
++{
++	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
++		return sprintf(buf, "Mitigation: execute trampolines\n");
++	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
++		return sprintf(buf, "Mitigation: limited branch prediction.\n");
++	return sprintf(buf, "Vulnerable\n");
++}
++#endif
++
++#ifdef CONFIG_EXPOLINE
++
++int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
++
++static int __init nospectre_v2_setup_early(char *str)
++{
++	nospec_disable = 1;
++	return 0;
++}
++early_param("nospectre_v2", nospectre_v2_setup_early);
++
++void __init nospec_auto_detect(void)
++{
++	if (IS_ENABLED(CC_USING_EXPOLINE)) {
++		/*
++		 * The kernel has been compiled with expolines.
++		 * Keep expolines enabled and disable nobp.
++		 */
++		nospec_disable = 0;
++		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++	}
++	/*
++	 * If the kernel has not been compiled with expolines, the
++	 * nobp setting decides what is done; this depends on the
++	 * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
++	 */
++}
++
++static int __init spectre_v2_setup_early(char *str)
++{
++	if (str && !strncmp(str, "on", 2)) {
++		nospec_disable = 0;
++		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++	}
++	if (str && !strncmp(str, "off", 3))
++		nospec_disable = 1;
++	if (str && !strncmp(str, "auto", 4))
++		nospec_auto_detect();
++	return 0;
++}
++early_param("spectre_v2", spectre_v2_setup_early);
++
++static void __init_or_module __nospec_revert(s32 *start, s32 *end)
++{
++	enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
++	u8 *instr, *thunk, *br;
++	u8 insnbuf[6];
++	s32 *epo;
++
++	/* Second part of the instruction replace is always a nop */
++	memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
++	for (epo = start; epo < end; epo++) {
++		instr = (u8 *) epo + *epo;
++		if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
++			type = BRCL_EXPOLINE;	/* brcl instruction */
++		else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
++			type = BRASL_EXPOLINE;	/* brasl instruction */
++		else
++			continue;
++		thunk = instr + (*(int *)(instr + 2)) * 2;
++		if (thunk[0] == 0xc6 && thunk[1] == 0x00)
++			/* exrl %r0,<target-br> */
++			br = thunk + (*(int *)(thunk + 2)) * 2;
++		else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
++			 thunk[6] == 0x44 && thunk[7] == 0x00 &&
++			 (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
++			 (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
++			/* larl %rx,<target br> + ex %r0,0(%rx) */
++			br = thunk + (*(int *)(thunk + 2)) * 2;
++		else
++			continue;
++		if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
++			continue;
++		switch (type) {
++		case BRCL_EXPOLINE:
++			/* brcl to thunk, replace with br + nop */
++			insnbuf[0] = br[0];
++			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++			break;
++		case BRASL_EXPOLINE:
++			/* brasl to thunk, replace with basr + nop */
++			insnbuf[0] = 0x0d;
++			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++			break;
++		}
++
++		s390_kernel_write(instr, insnbuf, 6);
++	}
++}
++
++void __init_or_module nospec_revert(s32 *start, s32 *end)
++{
++	if (nospec_disable)
++		__nospec_revert(start, end);
++}
++
++extern s32 __nospec_call_start[], __nospec_call_end[];
++extern s32 __nospec_return_start[], __nospec_return_end[];
++void __init nospec_init_branches(void)
++{
++	nospec_revert(__nospec_call_start, __nospec_call_end);
++	nospec_revert(__nospec_return_start, __nospec_return_end);
++}
++
++#endif /* CONFIG_EXPOLINE */
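
Concretely: when the kernel was compiled with expolines but they are disabled
at runtime, __nospec_revert() rewrites each 6-byte branch through a thunk back
into the plain indirect branch it stands for, plus a 4-byte nop. For a thunk
ending in br %r9 the bytes change like this (illustration; xx is the original
displacement):

	before: c0 e5 xx xx xx xx	brasl %r14,<expoline thunk>
	after:  0d e9 47 00 00 00	basr  %r14,%r9 + 4-byte nop

The target register is read back from the br instruction at the end of the
thunk, so the rewrite works for both the exrl and the larl+ex thunk variants
matched above.
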
+diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
+index 7ce00e7a709a..ab236bd970bb 100644
+--- a/arch/s390/kernel/processor.c
++++ b/arch/s390/kernel/processor.c
+@@ -13,6 +13,7 @@
+ #include <linux/cpu.h>
+ #include <asm/diag.h>
+ #include <asm/elf.h>
++#include <asm/facility.h>
+ #include <asm/lowcore.h>
+ #include <asm/param.h>
+ #include <asm/smp.h>
+@@ -113,3 +114,20 @@ const struct seq_operations cpuinfo_op = {
+ 	.show	= show_cpuinfo,
+ };
+ 
++int s390_isolate_bp(void)
++{
++	if (!test_facility(82))
++		return -EOPNOTSUPP;
++	set_thread_flag(TIF_ISOLATE_BP);
++	return 0;
++}
++EXPORT_SYMBOL(s390_isolate_bp);
++
++int s390_isolate_bp_guest(void)
++{
++	if (!test_facility(82))
++		return -EOPNOTSUPP;
++	set_thread_flag(TIF_ISOLATE_BP_GUEST);
++	return 0;
++}
++EXPORT_SYMBOL(s390_isolate_bp_guest);
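
No caller of these two exports is added by this patch; they give other kernel
code a way to opt the current task (or the KVM guests it runs) into isolated
branch prediction via the TIF flags tested in the entry code. A hypothetical
user:

	/* request isolated BP for this task; fails without facility 82 */
	if (s390_isolate_bp())
		pr_warn("branch prediction isolation unavailable\n");
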
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index d097d71685df..e7a43a30e3ff 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -63,6 +63,8 @@
+ #include <asm/sclp.h>
+ #include <asm/sysinfo.h>
+ #include <asm/numa.h>
++#include <asm/alternative.h>
++#include <asm/nospec-branch.h>
+ #include "entry.h"
+ 
+ /*
+@@ -333,7 +335,9 @@ static void __init setup_lowcore(void)
+ 	lc->machine_flags = S390_lowcore.machine_flags;
+ 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
+ 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+-	       MAX_FACILITY_BIT/8);
++	       sizeof(lc->stfle_fac_list));
++	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
++	       sizeof(lc->alt_stfle_fac_list));
+ 	if (MACHINE_HAS_VX)
+ 		lc->vector_save_area_addr =
+ 			(unsigned long) &lc->vector_save_area;
+@@ -370,6 +374,7 @@ static void __init setup_lowcore(void)
+ #ifdef CONFIG_SMP
+ 	lc->spinlock_lockval = arch_spin_lockval(0);
+ #endif
++	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
+ 
+ 	set_prefix((u32)(unsigned long) lc);
+ 	lowcore_ptr[0] = lc;
+@@ -841,6 +846,9 @@ void __init setup_arch(char **cmdline_p)
+ 	init_mm.end_data = (unsigned long) &_edata;
+ 	init_mm.brk = (unsigned long) &_end;
+ 
++	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
++		nospec_auto_detect();
++
+ 	parse_early_param();
+ 	os_info_init();
+ 	setup_ipl();
+@@ -893,6 +901,10 @@ void __init setup_arch(char **cmdline_p)
+ 	conmode_default();
+ 	set_preferred_console();
+ 
++	apply_alternative_instructions();
++	if (IS_ENABLED(CONFIG_EXPOLINE))
++		nospec_init_branches();
++
+ 	/* Setup zfcpdump support */
+ 	setup_zfcpdump();
+ 
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 9062df575afe..77f4f334a465 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -200,6 +200,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
+ 	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
+ 	lc->cpu_nr = cpu;
+ 	lc->spinlock_lockval = arch_spin_lockval(cpu);
++	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
+ 	if (MACHINE_HAS_VX)
+ 		lc->vector_save_area_addr =
+ 			(unsigned long) &lc->vector_save_area;
+@@ -250,7 +251,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
+ 	__ctl_store(lc->cregs_save_area, 0, 15);
+ 	save_access_regs((unsigned int *) lc->access_regs_save_area);
+ 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+-	       MAX_FACILITY_BIT/8);
++	       sizeof(lc->stfle_fac_list));
++	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
++	       sizeof(lc->alt_stfle_fac_list));
+ }
+ 
+ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
+@@ -299,6 +302,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
+ 	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
+ 	mem_assign_absolute(lc->restart_data, (unsigned long) data);
+ 	mem_assign_absolute(lc->restart_source, source_cpu);
++	__bpon();
+ 	asm volatile(
+ 		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
+ 		"	brc	2,0b	# busy, try again\n"
+@@ -888,6 +892,7 @@ void __cpu_die(unsigned int cpu)
+ void __noreturn cpu_die(void)
+ {
+ 	idle_task_exit();
++	__bpon();
+ 	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
+ 	for (;;) ;
+ }
+diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
+index 66956c09d5bf..3d04dfdabc9f 100644
+--- a/arch/s390/kernel/uprobes.c
++++ b/arch/s390/kernel/uprobes.c
+@@ -147,6 +147,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
+ 	return orig;
+ }
+ 
++bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
++			     struct pt_regs *regs)
++{
++	if (ctx == RP_CHECK_CHAIN_CALL)
++		return user_stack_pointer(regs) <= ret->stack;
++	else
++		return user_stack_pointer(regs) < ret->stack;
++}
++
+ /* Instruction Emulation */
+ 
+ static void adjust_psw_addr(psw_t *psw, unsigned long len)
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 6c553f6e791a..a4ae08e416e6 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -78,6 +78,43 @@ SECTIONS
+ 		EXIT_DATA
+ 	}
+ 
++	/*
++	 * struct alt_inst entries. From the header (alternative.h):
++	 * "Alternative instructions for different CPU types or capabilities"
++	 * Think locking instructions on spinlocks.
++	 * Note that it is part of the __init region.
++	 */
++	. = ALIGN(8);
++	.altinstructions : {
++		__alt_instructions = .;
++		*(.altinstructions)
++		__alt_instructions_end = .;
++	}
++
++	/*
++	 * And here are the replacement instructions. The linker places
++	 * them as binary blobs. The .altinstructions entries carry enough
++	 * data (address and length) to patch the kernel safely.
++	 * Note that it is part of the __init region.
++	 */
++	.altinstr_replacement : {
++		*(.altinstr_replacement)
++	}
++
++	/*
++	 * Table with the patch locations to undo expolines
++	 */
++	.nospec_call_table : {
++		__nospec_call_start = . ;
++		*(.s390_indirect*)
++		__nospec_call_end = . ;
++	}
++	.nospec_return_table : {
++		__nospec_return_start = . ;
++		*(.s390_return*)
++		__nospec_return_end = . ;
++	}
++
+ 	/* early.c uses stsi, which requires page aligned data. */
+ 	. = ALIGN(PAGE_SIZE);
+ 	INIT_DATA_SECTION(0x100)
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 23e3f5d77a24..b011140e6b06 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -257,6 +257,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ 	case KVM_CAP_S390_VECTOR_REGISTERS:
+ 		r = MACHINE_HAS_VX;
+ 		break;
++	case KVM_CAP_S390_BPB:
++		r = test_facility(82);
++		break;
+ 	default:
+ 		r = 0;
+ 	}
+@@ -1264,6 +1267,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ 				    KVM_SYNC_PFAULT;
+ 	if (test_kvm_facility(vcpu->kvm, 129))
+ 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
++	if (test_kvm_facility(vcpu->kvm, 82))
++		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
+ 
+ 	if (kvm_is_ucontrol(vcpu->kvm))
+ 		return __kvm_ucontrol_vcpu_init(vcpu);
+@@ -1327,6 +1332,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
+ 	current->thread.fpu.fpc = 0;
+ 	vcpu->arch.sie_block->gbea = 1;
+ 	vcpu->arch.sie_block->pp = 0;
++	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+ 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+ 	kvm_clear_async_pf_completion_queue(vcpu);
+ 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+@@ -2145,6 +2151,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+ 			kvm_clear_async_pf_completion_queue(vcpu);
+ 	}
++	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
++	    test_kvm_facility(vcpu->kvm, 82)) {
++		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
++		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
++	}
+ 	kvm_run->kvm_dirty_regs = 0;
+ }
+ 
+@@ -2162,6 +2173,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
+ 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
+ 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
++	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
+ }
+ 
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index eb02087650d2..c42d4a3d9494 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -408,7 +408,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
+ 	hpet2 -= hpet1;
+ 	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
+ 	do_div(tmp, 1000000);
+-	do_div(deltatsc, tmp);
++	deltatsc = div64_u64(deltatsc, tmp);
+ 
+ 	return (unsigned long) deltatsc;
+ }
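
The one-line change above matters because the kernel's do_div(n, base) helper
takes a 32-bit divisor, while tmp is a u64 that can legitimately exceed 32
bits; in that case do_div() would divide by a truncated value. A minimal
before/after sketch (illustrative):

	u64 n = deltatsc;
	/* do_div(n, tmp);	   wrong once tmp > 0xffffffffULL */
	n = div64_u64(n, tmp);	/* full 64-by-64-bit division */
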
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index c206ccda899b..b5f245d2875c 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2358,7 +2358,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
+ 	if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
+ 		return media_changed(cdi, 1);
+ 
+-	if ((unsigned int)arg >= cdi->capacity)
++	if (arg >= cdi->capacity)
+ 		return -EINVAL;
+ 
+ 	info = kmalloc(sizeof(*info), GFP_KERNEL);
+diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
+index 930424e55439..251d64ca41ce 100644
+--- a/drivers/input/misc/drv260x.c
++++ b/drivers/input/misc/drv260x.c
+@@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
+ 	if (!haptics)
+ 		return -ENOMEM;
+ 
+-	haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
++	haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+ 	haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
+ 
+ 	if (pdata) {
+diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
+index 7ebccfa8072a..cb790b68920f 100644
+--- a/drivers/message/fusion/mptsas.c
++++ b/drivers/message/fusion/mptsas.c
+@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
+ 	.cmd_per_lun			= 7,
+ 	.use_clustering			= ENABLE_CLUSTERING,
+ 	.shost_attrs			= mptscsih_host_attrs,
++	.no_write_same			= 1,
+ };
+ 
+ static int mptsas_get_linkerrors(struct sas_phy *phy)
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 278d12888cab..339118f3c718 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1614,8 +1614,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 	} /* switch(bond_mode) */
+ 
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+-	slave_dev->npinfo = bond->dev->npinfo;
+-	if (slave_dev->npinfo) {
++	if (bond->dev->npinfo) {
+ 		if (slave_enable_netpoll(new_slave)) {
+ 			netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
+ 			res = -EBUSY;
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index b7b859c3a0c7..583d50f80b24 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -638,6 +638,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	lock_sock(sk);
+ 
+ 	error = -EINVAL;
++
++	if (sockaddr_len != sizeof(struct sockaddr_pppox))
++		goto end;
++
+ 	if (sp->sa_protocol != PX_PROTO_OE)
+ 		goto end;
+ 
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 9bca36e1fefd..e74709e4b5dd 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -247,6 +247,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
+ 	}
+ }
+ 
++static bool __team_option_inst_tmp_find(const struct list_head *opts,
++					const struct team_option_inst *needle)
++{
++	struct team_option_inst *opt_inst;
++
++	list_for_each_entry(opt_inst, opts, tmp_list)
++		if (opt_inst == needle)
++			return true;
++	return false;
++}
++
+ static int __team_options_register(struct team *team,
+ 				   const struct team_option *option,
+ 				   size_t option_count)
+@@ -1039,14 +1050,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
+ }
+ 
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
++static int __team_port_enable_netpoll(struct team_port *port)
+ {
+ 	struct netpoll *np;
+ 	int err;
+ 
+-	if (!team->dev->npinfo)
+-		return 0;
+-
+ 	np = kzalloc(sizeof(*np), GFP_KERNEL);
+ 	if (!np)
+ 		return -ENOMEM;
+@@ -1060,6 +1068,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+ 	return err;
+ }
+ 
++static int team_port_enable_netpoll(struct team_port *port)
++{
++	if (!port->team->dev->npinfo)
++		return 0;
++
++	return __team_port_enable_netpoll(port);
++}
++
+ static void team_port_disable_netpoll(struct team_port *port)
+ {
+ 	struct netpoll *np = port->np;
+@@ -1074,7 +1090,7 @@ static void team_port_disable_netpoll(struct team_port *port)
+ 	kfree(np);
+ }
+ #else
+-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
++static int team_port_enable_netpoll(struct team_port *port)
+ {
+ 	return 0;
+ }
+@@ -1181,7 +1197,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+ 		goto err_vids_add;
+ 	}
+ 
+-	err = team_port_enable_netpoll(team, port);
++	err = team_port_enable_netpoll(port);
+ 	if (err) {
+ 		netdev_err(dev, "Failed to enable netpoll on device %s\n",
+ 			   portname);
+@@ -1889,7 +1905,7 @@ static int team_netpoll_setup(struct net_device *dev,
+ 
+ 	mutex_lock(&team->lock);
+ 	list_for_each_entry(port, &team->port_list, list) {
+-		err = team_port_enable_netpoll(team, port);
++		err = __team_port_enable_netpoll(port);
+ 		if (err) {
+ 			__team_netpoll_cleanup(team);
+ 			break;
+@@ -2544,6 +2560,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+ 			if (err)
+ 				goto team_put;
+ 			opt_inst->changed = true;
++
++			/* dumb/evil user-space can send us duplicate opt,
++			 * keep only the last one
++			 */
++			if (__team_option_inst_tmp_find(&opt_inst_list,
++							opt_inst))
++				continue;
++
+ 			list_add(&opt_inst->tmp_list, &opt_inst_list);
+ 		}
+ 		if (!opt_found) {
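
The new __team_option_inst_tmp_find() exists because list_add() on a
node that is already linked corrupts the list, and a malicious or
buggy netlink request can carry the same option twice. A standalone
sketch of the same find-before-insert guard on a plain singly linked
list:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static bool list_contains(const struct node *head,
			  const struct node *needle)
{
	for (const struct node *n = head; n; n = n->next)
		if (n == needle)
			return true;
	return false;
}

int main(void)
{
	struct node a = { NULL, 1 };
	struct node *head = NULL;

	for (int i = 0; i < 2; i++) {   /* duplicate option from user */
		if (list_contains(head, &a))
			continue;       /* already queued: skip */
		a.next = head;
		head = &a;
	}
	printf("id=%d next=%p\n", head->id, (void *)head->next);
	return 0;
}
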
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 6578127db847..f71abe50ea6f 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -461,6 +461,7 @@ static const struct driver_info wwan_info = {
+ #define REALTEK_VENDOR_ID	0x0bda
+ #define SAMSUNG_VENDOR_ID	0x04e8
+ #define LENOVO_VENDOR_ID	0x17ef
++#define LINKSYS_VENDOR_ID	0x13b1
+ #define NVIDIA_VENDOR_ID	0x0955
+ #define HP_VENDOR_ID		0x03f0
+ 
+@@ -650,6 +651,15 @@ static const struct usb_device_id	products[] = {
+ 	.driver_info = 0,
+ },
+ 
++#if IS_ENABLED(CONFIG_USB_RTL8152)
++/* Linksys USB3GIGV1 Ethernet Adapter */
++{
++	USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
++			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++	.driver_info = 0,
++},
++#endif
++
+ /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+ {
+ 	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 89950f5cea71..b2c1a435357f 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -506,6 +506,7 @@ enum rtl8152_flags {
+ #define VENDOR_ID_REALTEK		0x0bda
+ #define VENDOR_ID_SAMSUNG		0x04e8
+ #define VENDOR_ID_LENOVO		0x17ef
++#define VENDOR_ID_LINKSYS		0x13b1
+ #define VENDOR_ID_NVIDIA		0x0955
+ 
+ #define MCU_TYPE_PLA			0x0100
+@@ -4376,6 +4377,7 @@ static struct usb_device_id rtl8152_table[] = {
+ 	{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
+ 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
+ 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f)},
++	{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
+ 	{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
+ 	{}
+ };
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 09e14ce85dd0..0c8efdff4843 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -5285,9 +5285,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+ 				    sta->addr, smps, err);
+ 	}
+ 
+-	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
+-	    changed & IEEE80211_RC_NSS_CHANGED) {
+-		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
++	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
++		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+ 			   sta->addr);
+ 
+ 		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 41382f89abe1..4435c7bbb625 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -1595,6 +1595,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
+ 	int count = 50;
+ 	u32 reg, last_val;
+ 
++	/* Check if chip failed to wake up */
++	if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
++		return false;
++
+ 	if (AR_SREV_9300(ah))
+ 		return !ath9k_hw_detect_mac_hang(ah);
+ 
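Reads from a powered-down or hung PCI device do not return real
register contents, so ath9k now treats the 0xdeadbeef poison value in
AR_CFG as "chip failed to wake" before running the heavier hang
checks. A standalone sketch of the sentinel test; the register values
here are made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POISON 0xdeadbeefu   /* returned by reads from a dead device */

static uint32_t reg_read(bool chip_awake)   /* stand-in for REG_READ */
{
	return chip_awake ? 0x00000100u : POISON;
}

static bool check_alive(bool chip_awake)
{
	if (reg_read(chip_awake) == POISON)  /* failed to wake up */
		return false;
	/* the real code continues with MAC hang detection here */
	return true;
}

int main(void)
{
	printf("awake: %d  asleep: %d\n",
	       check_alive(true), check_alive(false));
	return 0;
}
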
+diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
+index 6fa9364d1c07..835f1054976b 100644
+--- a/drivers/s390/char/Makefile
++++ b/drivers/s390/char/Makefile
+@@ -2,6 +2,8 @@
+ # S/390 character devices
+ #
+ 
++CFLAGS_REMOVE_sclp_early_core.o	+= $(CC_FLAGS_EXPOLINE)
++
+ obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
+ 	 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
+ 	 sclp_early.o
+diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
+index 1e16331891a9..f9d6a9f00640 100644
+--- a/drivers/s390/cio/chsc.c
++++ b/drivers/s390/cio/chsc.c
+@@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
+ 
+ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+ {
++	struct channel_path *chp;
+ 	struct chp_link link;
+ 	struct chp_id chpid;
+ 	int status;
+@@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+ 	chpid.id = sei_area->rsid;
+ 	/* allocate a new channel path structure, if needed */
+ 	status = chp_get_status(chpid);
+-	if (status < 0)
+-		chp_new(chpid);
+-	else if (!status)
++	if (!status)
+ 		return;
++
++	if (status < 0) {
++		chp_new(chpid);
++	} else {
++		chp = chpid_to_chp(chpid);
++		mutex_lock(&chp->lock);
++		chp_update_desc(chp);
++		mutex_unlock(&chp->lock);
++	}
+ 	memset(&link, 0, sizeof(struct chp_link));
+ 	link.chpid = chpid;
+ 	if ((sei_area->vf & 0xc0) != 0) {
+diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
+index d4c3e5512dd5..b69dfc706440 100644
+--- a/drivers/staging/android/ion/ion_system_heap.c
++++ b/drivers/staging/android/ion/ion_system_heap.c
+@@ -27,7 +27,7 @@
+ #include "ion_priv.h"
+ 
+ static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+-				     __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM;
++				     __GFP_NORETRY) & ~__GFP_RECLAIM;
+ static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
+ static const unsigned int orders[] = {8, 4, 0};
+ static const int num_orders = ARRAY_SIZE(orders);
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 49a0d6b027c1..76dacd5307b9 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -673,6 +673,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ 		goto mknod_out;
+ 	}
+ 
++	if (!S_ISCHR(mode) && !S_ISBLK(mode))
++		goto mknod_out;
++
+ 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+ 		goto mknod_out;
+ 
+@@ -681,10 +684,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ 
+ 	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+ 	if (buf == NULL) {
+-		kfree(full_path);
+ 		rc = -ENOMEM;
+-		free_xid(xid);
+-		return rc;
++		goto mknod_out;
+ 	}
+ 
+ 	if (backup_cred(cifs_sb))
+@@ -731,7 +732,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ 		pdev->minor = cpu_to_le64(MINOR(device_number));
+ 		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ 							&bytes_written, iov, 1);
+-	} /* else if (S_ISFIFO) */
++	}
+ 	tcon->ses->server->ops->close(xid, tcon, &fid);
+ 	d_drop(direntry);
+ 
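The kmalloc() failure path now funnels through the existing mknod_out
label instead of duplicating the kfree()/free_xid() cleanup inline,
and modes that are neither character nor block devices bail out
early. A standalone sketch of this centralized-exit pattern; names
and error values are illustrative:

#include <stdio.h>
#include <stdlib.h>

static int do_op(int unsupported)
{
	int rc = 0;
	char *full_path = NULL, *buf = NULL;

	full_path = malloc(64);
	if (!full_path) {
		rc = -12;          /* -ENOMEM */
		goto out;
	}
	if (unsupported) {
		rc = -95;          /* e.g. -EOPNOTSUPP */
		goto out;
	}
	buf = malloc(128);
	if (!buf) {
		rc = -12;
		goto out;
	}
	/* ... actual work ... */
out:
	free(buf);                 /* free(NULL) is a no-op */
	free(full_path);
	return rc;
}

int main(void)
{
	printf("%d %d\n", do_op(0), do_op(1));
	return 0;
}
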
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 4759df4eb8ce..9398d1b70545 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -275,11 +275,11 @@ loop:
+ 	goto loop;
+ 
+ end_loop:
+-	write_unlock(&journal->j_state_lock);
+ 	del_timer_sync(&journal->j_commit_timer);
+ 	journal->j_task = NULL;
+ 	wake_up(&journal->j_wait_done_commit);
+ 	jbd_debug(1, "Journal thread exiting.\n");
++	write_unlock(&journal->j_state_lock);
+ 	return 0;
+ }
+ 
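Moving the unlock below the wake-up closes a use-after-free:
journal_destroy() sleeps on j_wait_done_commit and may free the
journal the moment it is woken, so the exiting kthread must keep
j_state_lock held across its last accesses. A pthread sketch of the
same shutdown handshake (userspace analogue, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int thread_exited;

static void *journal_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	thread_exited = 1;
	pthread_cond_signal(&done);
	/* final touches of shared state happen before this unlock */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, journal_thread, NULL);
	pthread_mutex_lock(&lock);
	while (!thread_exited)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("now safe to tear down shared state");
	return 0;
}
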
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 19db03dbbd00..dd676ba758ee 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -585,7 +585,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
+  * Returns true if the skb is tagged with multiple vlan headers, regardless
+  * of whether it is hardware accelerated or not.
+  */
+-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
++static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
+ {
+ 	__be16 protocol = skb->protocol;
+ 
+@@ -596,6 +596,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+ 			   protocol != htons(ETH_P_8021AD)))
+ 			return false;
+ 
++		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
++			return false;
++
+ 		veh = (struct vlan_ethhdr *)skb->data;
+ 		protocol = veh->h_vlan_encapsulated_proto;
+ 	}
+@@ -613,7 +616,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+  *
+  * Returns features without unsafe ones if the skb has multiple tags.
+  */
+-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
++static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
+ 						    netdev_features_t features)
+ {
+ 	if (skb_vlan_tagged_multi(skb)) {
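
skb_vlan_tagged_multi() used to read a struct vlan_ethhdr straight
out of skb->data, which is only safe when those bytes sit in the
linear area; the fix pulls VLAN_ETH_HLEN bytes first, and the skb
parameter loses its const because pulling may modify the skb. A
userspace analogue of check-length-before-parse:

#include <stdint.h>
#include <stdio.h>

#define VLAN_ETH_HLEN 18  /* dst + src + outer TPID/TCI + inner type */

static int inner_proto(const uint8_t *data, size_t len, uint16_t *proto)
{
	if (len < VLAN_ETH_HLEN)  /* analogue of pskb_may_pull() */
		return -1;
	*proto = (uint16_t)(data[16] << 8 | data[17]);
	return 0;
}

int main(void)
{
	uint8_t frame[VLAN_ETH_HLEN] = { [12] = 0x81, [13] = 0x00,
					 [16] = 0x08, [17] = 0x00 };
	uint16_t p = 0;

	printf("full: %d  short: %d\n",
	       inner_proto(frame, sizeof(frame), &p),
	       inner_proto(frame, 4, &p));
	return 0;
}
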
+diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
+index fe994d2e5286..ea985aa7a6c5 100644
+--- a/include/net/llc_conn.h
++++ b/include/net/llc_conn.h
+@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
+ 
+ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
+ 			  struct proto *prot, int kern);
++void llc_sk_stop_all_timers(struct sock *sk, bool sync);
+ void llc_sk_free(struct sock *sk);
+ 
+ void llc_sk_reset(struct sock *sk);
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index 03f3618612aa..376d0ab5b9f2 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -831,6 +831,7 @@ struct kvm_ppc_smmu_info {
+ #define KVM_CAP_GUEST_DEBUG_HW_WPS 120
+ #define KVM_CAP_SPLIT_IRQCHIP 121
+ #define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
++#define KVM_CAP_S390_BPB 152
+ 
+ #ifdef KVM_CAP_IRQ_ROUTING
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 835ac4d9f349..6aeb0ef4fe70 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -8133,9 +8133,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
+ 		 * __u16 sample size limit.
+ 		 */
+ 		if (attr->sample_stack_user >= USHRT_MAX)
+-			ret = -EINVAL;
++			return -EINVAL;
+ 		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
+-			ret = -EINVAL;
++			return -EINVAL;
+ 	}
+ 
+ 	if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index dc63c37d5301..3bcbf931a910 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2706,7 +2706,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
+ }
+ EXPORT_SYMBOL(passthru_features_check);
+ 
+-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
++static netdev_features_t dflt_features_check(struct sk_buff *skb,
+ 					     struct net_device *dev,
+ 					     netdev_features_t features)
+ {
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 33432e64804c..f60b93627876 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -54,7 +54,8 @@ do {						\
+ static void neigh_timer_handler(unsigned long arg);
+ static void __neigh_notify(struct neighbour *n, int type, int flags);
+ static void neigh_update_notify(struct neighbour *neigh);
+-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
++				    struct net_device *dev);
+ 
+ #ifdef CONFIG_PROC_FS
+ static const struct file_operations neigh_stat_seq_fops;
+@@ -254,8 +255,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+ {
+ 	write_lock_bh(&tbl->lock);
+ 	neigh_flush_dev(tbl, dev);
+-	pneigh_ifdown(tbl, dev);
+-	write_unlock_bh(&tbl->lock);
++	pneigh_ifdown_and_unlock(tbl, dev);
+ 
+ 	del_timer_sync(&tbl->proxy_timer);
+ 	pneigh_queue_purge(&tbl->proxy_queue);
+@@ -645,9 +645,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
+ 	return -ENOENT;
+ }
+ 
+-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
++				    struct net_device *dev)
+ {
+-	struct pneigh_entry *n, **np;
++	struct pneigh_entry *n, **np, *freelist = NULL;
+ 	u32 h;
+ 
+ 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+@@ -655,16 +656,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+ 		while ((n = *np) != NULL) {
+ 			if (!dev || n->dev == dev) {
+ 				*np = n->next;
+-				if (tbl->pdestructor)
+-					tbl->pdestructor(n);
+-				if (n->dev)
+-					dev_put(n->dev);
+-				kfree(n);
++				n->next = freelist;
++				freelist = n;
+ 				continue;
+ 			}
+ 			np = &n->next;
+ 		}
+ 	}
++	write_unlock_bh(&tbl->lock);
++	while ((n = freelist)) {
++		freelist = n->next;
++		n->next = NULL;
++		if (tbl->pdestructor)
++			tbl->pdestructor(n);
++		if (n->dev)
++			dev_put(n->dev);
++		kfree(n);
++	}
+ 	return -ENOENT;
+ }
+ 
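The hunk above unlinks entries onto a private freelist while tbl->lock
is held and only runs pdestructor()/kfree() after dropping the lock,
since the destructor callbacks can take other locks and deadlock. A
standalone sketch of that unlink-then-free shape, with a pthread
mutex standing in for the kernel lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { struct entry *next; int key; };

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;

static void purge(int key)
{
	struct entry **np, *n, *freelist = NULL;

	pthread_mutex_lock(&tbl_lock);
	np = &table;
	while ((n = *np) != NULL) {
		if (n->key == key) {
			*np = n->next;     /* unlink under the lock */
			n->next = freelist;
			freelist = n;
			continue;
		}
		np = &n->next;
	}
	pthread_mutex_unlock(&tbl_lock);

	while ((n = freelist) != NULL) {   /* free with the lock dropped */
		freelist = n->next;
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct entry *e = malloc(sizeof(*e));
		e->key = i & 1;
		e->next = table;
		table = e;
	}
	purge(1);
	for (struct entry *n = table; n; n = n->next)
		printf("kept key=%d\n", n->key);
	return 0;
}
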
+@@ -2280,12 +2288,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+ 
+ 	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
+ 	if (!err) {
+-		if (tb[NDA_IFINDEX])
++		if (tb[NDA_IFINDEX]) {
++			if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
++				return -EINVAL;
+ 			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
+-
+-		if (tb[NDA_MASTER])
++		}
++		if (tb[NDA_MASTER]) {
++			if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
++				return -EINVAL;
+ 			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
+-
++		}
+ 		if (filter_idx || filter_master_idx)
+ 			flags |= NLM_F_DUMP_FILTERED;
+ 	}
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index 6abc5012200b..e26df2764e83 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -25,6 +25,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
++#include <linux/ratelimit.h>
+ #include <linux/kernel.h>
+ #include <linux/keyctl.h>
+ #include <linux/err.h>
+@@ -91,9 +92,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 
+ 			next_opt = memchr(opt, '#', end - opt) ?: end;
+ 			opt_len = next_opt - opt;
+-			if (!opt_len) {
+-				printk(KERN_WARNING
+-				       "Empty option to dns_resolver key\n");
++			if (opt_len <= 0 || opt_len > 128) {
++				pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
++						    opt_len);
+ 				return -EINVAL;
+ 			}
+ 
+@@ -127,10 +128,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 			}
+ 
+ 		bad_option_value:
+-			printk(KERN_WARNING
+-			       "Option '%*.*s' to dns_resolver key:"
+-			       " bad/missing value\n",
+-			       opt_nlen, opt_nlen, opt);
++			pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
++					    opt_nlen, opt_nlen, opt);
+ 			return -EINVAL;
+ 		} while (opt = next_opt + 1, opt < end);
+ 	}
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 23d77ff1da59..82d2b55c953a 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2589,8 +2589,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+ 	case TCP_MD5SIG:
+-		/* Read the IP->Key mappings from userspace */
+-		err = tp->af_specific->md5_parse(sk, optval, optlen);
++		if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
++			err = tp->af_specific->md5_parse(sk, optval, optlen);
++		else
++			err = -EINVAL;
+ 		break;
+ #endif
+ 	case TCP_USER_TIMEOUT:
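
The new gate, (1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN), is a
set-membership test on the socket state: MD5 keys may only be
installed before the connection exists, since adding them later races
with their use on the established path. A tiny sketch of the bitmask
idiom; the state numbering here is invented:

#include <stdio.h>

enum { ST_CLOSE = 1, ST_LISTEN = 2, ST_ESTABLISHED = 3 };

#define STF(s) (1u << (s))

static int md5_set_allowed(int state)
{
	return !!(STF(state) & (STF(ST_CLOSE) | STF(ST_LISTEN)));
}

int main(void)
{
	printf("close=%d listen=%d established=%d\n",
	       md5_set_allowed(ST_CLOSE),
	       md5_set_allowed(ST_LISTEN),
	       md5_set_allowed(ST_ESTABLISHED));
	return 0;
}
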
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 96115d1e0d90..ed018760502e 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3869,11 +3869,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
+ 	int length = (th->doff << 2) - sizeof(*th);
+ 	const u8 *ptr = (const u8 *)(th + 1);
+ 
+-	/* If the TCP option is too short, we can short cut */
+-	if (length < TCPOLEN_MD5SIG)
+-		return NULL;
+-
+-	while (length > 0) {
++	/* If not enough data remaining, we can short cut */
++	while (length >= TCPOLEN_MD5SIG) {
+ 		int opcode = *ptr++;
+ 		int opsize;
+ 
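Folding the minimum-length test into the loop condition means the
parser keeps walking only while a complete MD5 option could still
fit, so a truncated option list can no longer drive reads past the
header. A standalone sketch of the same loop shape over TCP-style
options:

#include <stdint.h>
#include <stdio.h>

#define OPT_EOL 0
#define OPT_NOP 1
#define OPT_MD5 19
#define MD5_OPT_LEN 18

static const uint8_t *find_md5(const uint8_t *ptr, int length)
{
	while (length >= MD5_OPT_LEN) {  /* room for a full option? */
		int opcode = *ptr++;
		int opsize;

		if (opcode == OPT_EOL)
			return NULL;
		if (opcode == OPT_NOP) {
			length--;
			continue;
		}
		opsize = *ptr++;
		if (opsize < 2 || opsize > length)
			return NULL;
		if (opcode == OPT_MD5 && opsize == MD5_OPT_LEN)
			return ptr;      /* points at the key digest */
		ptr += opsize - 2;
		length -= opsize;
	}
	return NULL;
}

int main(void)
{
	uint8_t opts[20] = { OPT_NOP, OPT_NOP, OPT_MD5, MD5_OPT_LEN };

	printf("found: %s\n", find_md5(opts, sizeof(opts)) ? "yes" : "no");
	return 0;
}
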
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 99920fcea97c..2f6d8f57fdd4 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2711,6 +2711,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
+ 
+ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
+ 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
++	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
+ 	[RTA_OIF]               = { .type = NLA_U32 },
+ 	[RTA_IIF]		= { .type = NLA_U32 },
+ 	[RTA_PRIORITY]          = { .type = NLA_U32 },
+@@ -2719,6 +2720,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
+ 	[RTA_PREF]              = { .type = NLA_U8 },
+ 	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
+ 	[RTA_ENCAP]		= { .type = NLA_NESTED },
++	[RTA_TABLE]		= { .type = NLA_U32 },
+ };
+ 
+ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
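
Declaring lengths for RTA_PREFSRC and RTA_TABLE makes nla_parse()
reject attributes whose payload is shorter than the type expects, so
the later reads of a u32 or an in6_addr cannot run off the end of the
message. A toy validator in the same spirit; it checks exact sizes,
while the kernel checks minimums for some types, and the attribute
numbering is invented:

#include <stdint.h>
#include <stdio.h>

struct attr { uint16_t type; uint16_t len; const void *data; };

static const uint16_t expected_len[] = {
	[1] = 16,  /* hypothetical IPv6-address attribute */
	[2] = 4,   /* hypothetical u32 attribute */
};

static int validate(const struct attr *a)
{
	if (a->type >= sizeof(expected_len) / sizeof(expected_len[0]))
		return -1;
	if (expected_len[a->type] && a->len != expected_len[a->type])
		return -1;  /* reject short or oversized payloads */
	return 0;
}

int main(void)
{
	uint32_t v = 7;
	struct attr ok = { 2, sizeof(v), &v };
	struct attr bad = { 2, 2, &v };

	printf("ok=%d bad=%d\n", validate(&ok), validate(&bad));
	return 0;
}
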
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 67f2e72723b2..2764c4bd072c 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -606,6 +606,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	lock_sock(sk);
+ 
+ 	error = -EINVAL;
++
++	if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
++	    sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
++	    sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
++	    sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
++		goto end;
++
+ 	if (sp->sa_protocol != PX_PROTO_OL2TP)
+ 		goto end;
+ 
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 1e698768aca8..09f2f3471ad6 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -197,9 +197,19 @@ static int llc_ui_release(struct socket *sock)
+ 		llc->laddr.lsap, llc->daddr.lsap);
+ 	if (!llc_send_disc(sk))
+ 		llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
+-	if (!sock_flag(sk, SOCK_ZAPPED))
++	if (!sock_flag(sk, SOCK_ZAPPED)) {
++		struct llc_sap *sap = llc->sap;
++
++		/* Hold this for release_sock(), so that llc_backlog_rcv()
++		 * could still use it.
++		 */
++		llc_sap_hold(sap);
+ 		llc_sap_remove_socket(llc->sap, sk);
+-	release_sock(sk);
++		release_sock(sk);
++		llc_sap_put(sap);
++	} else {
++		release_sock(sk);
++	}
+ 	if (llc->dev)
+ 		dev_put(llc->dev);
+ 	sock_put(sk);
+diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
+index ea225bd2672c..f8d4ab8ca1a5 100644
+--- a/net/llc/llc_c_ac.c
++++ b/net/llc/llc_c_ac.c
+@@ -1096,14 +1096,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
+ 
+ int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
+ {
+-	struct llc_sock *llc = llc_sk(sk);
+-
+-	del_timer(&llc->pf_cycle_timer.timer);
+-	del_timer(&llc->ack_timer.timer);
+-	del_timer(&llc->rej_sent_timer.timer);
+-	del_timer(&llc->busy_state_timer.timer);
+-	llc->ack_must_be_send = 0;
+-	llc->ack_pf = 0;
++	llc_sk_stop_all_timers(sk, false);
+ 	return 0;
+ }
+ 
+diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
+index 8bc5a1bd2d45..d861b74ad068 100644
+--- a/net/llc/llc_conn.c
++++ b/net/llc/llc_conn.c
+@@ -951,6 +951,26 @@ out:
+ 	return sk;
+ }
+ 
++void llc_sk_stop_all_timers(struct sock *sk, bool sync)
++{
++	struct llc_sock *llc = llc_sk(sk);
++
++	if (sync) {
++		del_timer_sync(&llc->pf_cycle_timer.timer);
++		del_timer_sync(&llc->ack_timer.timer);
++		del_timer_sync(&llc->rej_sent_timer.timer);
++		del_timer_sync(&llc->busy_state_timer.timer);
++	} else {
++		del_timer(&llc->pf_cycle_timer.timer);
++		del_timer(&llc->ack_timer.timer);
++		del_timer(&llc->rej_sent_timer.timer);
++		del_timer(&llc->busy_state_timer.timer);
++	}
++
++	llc->ack_must_be_send = 0;
++	llc->ack_pf = 0;
++}
++
+ /**
+  *	llc_sk_free - Frees a LLC socket
+  *	@sk - socket to free
+@@ -963,7 +983,7 @@ void llc_sk_free(struct sock *sk)
+ 
+ 	llc->state = LLC_CONN_OUT_OF_SVC;
+ 	/* Stop all (possibly) running timers */
+-	llc_conn_ac_stop_all_timers(sk, NULL);
++	llc_sk_stop_all_timers(sk, true);
+ #ifdef DEBUG_LLC_CONN_ALLOC
+ 	printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
+ 		skb_queue_len(&llc->pdu_unack_q),
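
llc_sk_free() now uses the sync variant because a timer handler may
still be running on another CPU while the socket is torn down,
whereas the state-machine path keeps the cheaper non-sync cancel. A
toy sketch of the two modes; single-threaded here, so the wait never
actually spins:

#include <stdbool.h>
#include <stdio.h>

struct timer { bool pending; bool handler_running; };

/* Non-sync: clear "pending" and return even if the handler is
 * mid-flight. Sync: additionally wait for a running handler, which
 * the free path needs so the handler cannot touch freed memory. */
static void cancel(struct timer *t, bool sync)
{
	t->pending = false;
	if (sync)
		while (t->handler_running)
			;  /* wait for the in-flight handler */
}

int main(void)
{
	struct timer t = { true, false };

	cancel(&t, true);  /* the free-path (del_timer_sync) flavour */
	printf("pending=%d\n", t.pending);
	return 0;
}
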
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 92ca3e106c2b..f165514a4db5 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -332,11 +332,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+ 	skb_set_queue_mapping(skb, queue_index);
+ }
+ 
+-/* register_prot_hook must be invoked with the po->bind_lock held,
++/* __register_prot_hook must be invoked through register_prot_hook
+  * or from a context in which asynchronous accesses to the packet
+  * socket is not possible (packet_create()).
+  */
+-static void register_prot_hook(struct sock *sk)
++static void __register_prot_hook(struct sock *sk)
+ {
+ 	struct packet_sock *po = pkt_sk(sk);
+ 
+@@ -351,8 +351,13 @@ static void register_prot_hook(struct sock *sk)
+ 	}
+ }
+ 
+-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
+- * held.   If the sync parameter is true, we will temporarily drop
++static void register_prot_hook(struct sock *sk)
++{
++	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
++	__register_prot_hook(sk);
++}
++
++/* If the sync parameter is true, we will temporarily drop
+  * the po->bind_lock and do a synchronize_net to make sure no
+  * asynchronous packet processing paths still refer to the elements
+  * of po->prot_hook.  If the sync parameter is false, it is the
+@@ -362,6 +367,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
+ {
+ 	struct packet_sock *po = pkt_sk(sk);
+ 
++	lockdep_assert_held_once(&po->bind_lock);
++
+ 	po->running = 0;
+ 
+ 	if (po->fanout)
+@@ -2892,6 +2899,7 @@ static int packet_release(struct socket *sock)
+ 
+ 	packet_flush_mclist(sk);
+ 
++	lock_sock(sk);
+ 	if (po->rx_ring.pg_vec) {
+ 		memset(&req_u, 0, sizeof(req_u));
+ 		packet_set_ring(sk, &req_u, 1, 0);
+@@ -2901,6 +2909,7 @@ static int packet_release(struct socket *sock)
+ 		memset(&req_u, 0, sizeof(req_u));
+ 		packet_set_ring(sk, &req_u, 1, 1);
+ 	}
++	release_sock(sk);
+ 
+ 	f = fanout_release(sk);
+ 
+@@ -3134,7 +3143,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ 
+ 	if (proto) {
+ 		po->prot_hook.type = proto;
+-		register_prot_hook(sk);
++		__register_prot_hook(sk);
+ 	}
+ 
+ 	mutex_lock(&net->packet.sklist_lock);
+@@ -3570,6 +3579,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 		union tpacket_req_u req_u;
+ 		int len;
+ 
++		lock_sock(sk);
+ 		switch (po->tp_version) {
+ 		case TPACKET_V1:
+ 		case TPACKET_V2:
+@@ -3580,14 +3590,21 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 			len = sizeof(req_u.req3);
+ 			break;
+ 		}
+-		if (optlen < len)
+-			return -EINVAL;
+-		if (pkt_sk(sk)->has_vnet_hdr)
+-			return -EINVAL;
+-		if (copy_from_user(&req_u.req, optval, len))
+-			return -EFAULT;
+-		return packet_set_ring(sk, &req_u, 0,
+-			optname == PACKET_TX_RING);
++		if (optlen < len) {
++			ret = -EINVAL;
++		} else {
++			if (pkt_sk(sk)->has_vnet_hdr) {
++				ret = -EINVAL;
++			} else {
++				if (copy_from_user(&req_u.req, optval, len))
++					ret = -EFAULT;
++				else
++					ret = packet_set_ring(sk, &req_u, 0,
++							      optname == PACKET_TX_RING);
++			}
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_COPY_THRESH:
+ 	{
+@@ -3653,12 +3670,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+ 		if (optlen != sizeof(val))
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+-		po->tp_loss = !!val;
+-		return 0;
++
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->tp_loss = !!val;
++			ret = 0;
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_AUXDATA:
+ 	{
+@@ -3669,7 +3692,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
++		lock_sock(sk);
+ 		po->auxdata = !!val;
++		release_sock(sk);
+ 		return 0;
+ 	}
+ 	case PACKET_ORIGDEV:
+@@ -3681,7 +3706,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
++		lock_sock(sk);
+ 		po->origdev = !!val;
++		release_sock(sk);
+ 		return 0;
+ 	}
+ 	case PACKET_VNET_HDR:
+@@ -3690,15 +3717,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+ 		if (sock->type != SOCK_RAW)
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (optlen < sizeof(val))
+ 			return -EINVAL;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
+-		po->has_vnet_hdr = !!val;
+-		return 0;
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->has_vnet_hdr = !!val;
++			ret = 0;
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_TIMESTAMP:
+ 	{
+@@ -3736,11 +3768,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+ 		if (optlen != sizeof(val))
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+-		po->tp_tx_has_off = !!val;
++
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->tp_tx_has_off = !!val;
++			ret = 0;
++		}
++		release_sock(sk);
+ 		return 0;
+ 	}
+ 	case PACKET_QDISC_BYPASS:
+@@ -4116,7 +4154,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	/* Added to avoid minimal code churn */
+ 	struct tpacket_req *req = &req_u->req;
+ 
+-	lock_sock(sk);
+ 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+ 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+ 		WARN(1, "Tx-ring is not supported.\n");
+@@ -4252,7 +4289,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
+-	release_sock(sk);
+ 	return err;
+ }
+ 
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index d55bfc34d6b3..1309e2a7baad 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -109,10 +109,12 @@ struct packet_sock {
+ 	int			copy_thresh;
+ 	spinlock_t		bind_lock;
+ 	struct mutex		pg_vec_lock;
+-	unsigned int		running:1,	/* prot_hook is attached*/
+-				auxdata:1,
++	unsigned int		running;	/* bind_lock must be held */
++	unsigned int		auxdata:1,	/* writer must hold sock lock */
+ 				origdev:1,
+-				has_vnet_hdr:1;
++				has_vnet_hdr:1,
++				tp_loss:1,
++				tp_tx_has_off:1;
+ 	int			pressure;
+ 	int			ifindex;	/* bound device		*/
+ 	__be16			num;
+@@ -122,8 +124,6 @@ struct packet_sock {
+ 	enum tpacket_versions	tp_version;
+ 	unsigned int		tp_hdrlen;
+ 	unsigned int		tp_reserve;
+-	unsigned int		tp_loss:1;
+-	unsigned int		tp_tx_has_off:1;
+ 	unsigned int		tp_tstamp;
+ 	struct net_device __rcu	*cached_dev;
+ 	int			(*xmit)(struct sk_buff *skb);
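
The internal.h change works because adjacent C bitfields share one
machine word: two unsynchronized writers updating different flags do
racing read-modify-writes on the same word and can erase each other's
bits. Giving "running" a whole word of its own and assigning one lock
to each remaining group removes the race. A small sketch of the
layout difference (exact sizes depend on the ABI):

#include <stdio.h>

struct po_before {                     /* one shared word, no rule */
	unsigned int running:1, auxdata:1, origdev:1;
};

struct po_after {
	unsigned int running;          /* written under bind_lock */
	unsigned int auxdata:1,        /* written under the sock lock */
		     origdev:1;
};

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct po_before), sizeof(struct po_after));
	return 0;
}
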
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index edb8514b4e00..1cd7b7e33fa3 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -519,46 +519,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+ 	addr->v6.sin6_scope_id = 0;
+ }
+ 
+-/* Compare addresses exactly.
+- * v4-mapped-v6 is also in consideration.
+- */
+-static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+-			    const union sctp_addr *addr2)
++static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
++			      const union sctp_addr *addr2)
+ {
+ 	if (addr1->sa.sa_family != addr2->sa.sa_family) {
+ 		if (addr1->sa.sa_family == AF_INET &&
+ 		    addr2->sa.sa_family == AF_INET6 &&
+-		    ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
+-			if (addr2->v6.sin6_port == addr1->v4.sin_port &&
+-			    addr2->v6.sin6_addr.s6_addr32[3] ==
+-			    addr1->v4.sin_addr.s_addr)
+-				return 1;
+-		}
++		    ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
++		    addr2->v6.sin6_addr.s6_addr32[3] ==
++		    addr1->v4.sin_addr.s_addr)
++			return 1;
++
+ 		if (addr2->sa.sa_family == AF_INET &&
+ 		    addr1->sa.sa_family == AF_INET6 &&
+-		    ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
+-			if (addr1->v6.sin6_port == addr2->v4.sin_port &&
+-			    addr1->v6.sin6_addr.s6_addr32[3] ==
+-			    addr2->v4.sin_addr.s_addr)
+-				return 1;
+-		}
++		    ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
++		    addr1->v6.sin6_addr.s6_addr32[3] ==
++		    addr2->v4.sin_addr.s_addr)
++			return 1;
++
+ 		return 0;
+ 	}
+-	if (addr1->v6.sin6_port != addr2->v6.sin6_port)
+-		return 0;
++
+ 	if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
+ 		return 0;
++
+ 	/* If this is a linklocal address, compare the scope_id. */
+-	if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
+-		if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+-		    (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
+-			return 0;
+-		}
+-	}
++	if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
++	    addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
++	    addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
++		return 0;
+ 
+ 	return 1;
+ }
+ 
++/* Compare addresses exactly.
++ * v4-mapped-v6 is also in consideration.
++ */
++static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
++			    const union sctp_addr *addr2)
++{
++	return __sctp_v6_cmp_addr(addr1, addr2) &&
++	       addr1->v6.sin6_port == addr2->v6.sin6_port;
++}
++
+ /* Initialize addr struct to INADDR_ANY. */
+ static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
+ {
+@@ -843,8 +846,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+ 			       const union sctp_addr *addr2,
+ 			       struct sctp_sock *opt)
+ {
+-	struct sctp_af *af1, *af2;
+ 	struct sock *sk = sctp_opt2sk(opt);
++	struct sctp_af *af1, *af2;
+ 
+ 	af1 = sctp_get_af_specific(addr1->sa.sa_family);
+ 	af2 = sctp_get_af_specific(addr2->sa.sa_family);
+@@ -860,10 +863,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+ 	if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
+ 		return 1;
+ 
+-	if (addr1->sa.sa_family != addr2->sa.sa_family)
+-		return 0;
+-
+-	return af1->cmp_addr(addr1, addr2);
++	return __sctp_v6_cmp_addr(addr1, addr2);
+ }
+ 
+ /* Verify that the provided sockaddr looks bindable.   Common verification,
+diff --git a/net/tipc/net.c b/net/tipc/net.c
+index 77bf9113c7a7..2763bd369b79 100644
+--- a/net/tipc/net.c
++++ b/net/tipc/net.c
+@@ -44,7 +44,8 @@
+ 
+ static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+ 	[TIPC_NLA_NET_UNSPEC]	= { .type = NLA_UNSPEC },
+-	[TIPC_NLA_NET_ID]	= { .type = NLA_U32 }
++	[TIPC_NLA_NET_ID]	= { .type = NLA_U32 },
++	[TIPC_NLA_NET_ADDR]	= { .type = NLA_U32 },
+ };
+ 
+ /*

