From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.16 commit in: /
Date: Fri, 25 May 2018 15:37:50 +0000 (UTC)
Message-ID: <1527262661.ae8ff671b8db0c142f5db55ac3e30eaf78171d29.mpagano@gentoo>
commit: ae8ff671b8db0c142f5db55ac3e30eaf78171d29
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri May 25 15:37:41 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri May 25 15:37:41 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ae8ff671
Linux patch 4.16.12
0000_README | 4 +
1011_linux-4.16.12.patch | 7076 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 7080 insertions(+)
diff --git a/0000_README b/0000_README
index a5237c6..603fb6f 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-4.16.11.patch
From: http://www.kernel.org
Desc: Linux 4.16.11
+Patch: 1011_linux-4.16.12.patch
+From: http://www.kernel.org
+Desc: Linux 4.16.12
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1011_linux-4.16.12.patch b/1011_linux-4.16.12.patch
new file mode 100644
index 0000000..2ef2e6c
--- /dev/null
+++ b/1011_linux-4.16.12.patch
@@ -0,0 +1,7076 @@
+diff --git a/Makefile b/Makefile
+index 79c191442771..ded9e8480d74 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index 471b2274fbeb..c40b4380951c 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -74,6 +74,27 @@
+ */
+ #define EX_R3 EX_DAR
+
++#define STF_ENTRY_BARRIER_SLOT \
++ STF_ENTRY_BARRIER_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop
++
++#define STF_EXIT_BARRIER_SLOT \
++ STF_EXIT_BARRIER_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop; \
++ nop; \
++ nop; \
++ nop
++
++/*
++ * r10 must be free to use, r13 must be paca
++ */
++#define INTERRUPT_TO_KERNEL \
++ STF_ENTRY_BARRIER_SLOT
++
+ /*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+@@ -90,16 +111,19 @@
+ rfid
+
+ #define RFI_TO_USER \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+ #define RFI_TO_USER_OR_KERNEL \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+ #define RFI_TO_GUEST \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+@@ -108,21 +132,25 @@
+ hrfid
+
+ #define HRFI_TO_USER \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_USER_OR_KERNEL \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_GUEST \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_UNKNOWN \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+@@ -254,6 +282,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+ #define __EXCEPTION_PROLOG_1_PRE(area) \
+ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
+ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
++ INTERRUPT_TO_KERNEL; \
+ SAVE_CTR(r10, area); \
+ mfcr r9;
+
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index 1e82eb3caabd..a9b64df34e2a 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -187,6 +187,22 @@ label##3: \
+ FTR_ENTRY_OFFSET label##1b-label##3b; \
+ .popsection;
+
++#define STF_ENTRY_BARRIER_FIXUP_SECTION \
++953: \
++ .pushsection __stf_entry_barrier_fixup,"a"; \
++ .align 2; \
++954: \
++ FTR_ENTRY_OFFSET 953b-954b; \
++ .popsection;
++
++#define STF_EXIT_BARRIER_FIXUP_SECTION \
++955: \
++ .pushsection __stf_exit_barrier_fixup,"a"; \
++ .align 2; \
++956: \
++ FTR_ENTRY_OFFSET 955b-956b; \
++ .popsection;
++
+ #define RFI_FLUSH_FIXUP_SECTION \
+ 951: \
+ .pushsection __rfi_flush_fixup,"a"; \
+@@ -199,6 +215,9 @@ label##3: \
+ #ifndef __ASSEMBLY__
+ #include <linux/types.h>
+
++extern long stf_barrier_fallback;
++extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
++extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+
+ void apply_feature_fixups(void);
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index eca3f9c68907..5a740feb7bd7 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -337,6 +337,9 @@
+ #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
+ #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
+ #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
++#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
++#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
++#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
+
+ #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
+ #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+new file mode 100644
+index 000000000000..44989b22383c
+--- /dev/null
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -0,0 +1,85 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Security related feature bit definitions.
++ *
++ * Copyright 2018, Michael Ellerman, IBM Corporation.
++ */
++
++#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
++#define _ASM_POWERPC_SECURITY_FEATURES_H
++
++
++extern unsigned long powerpc_security_features;
++extern bool rfi_flush;
++
++/* These are bit flags */
++enum stf_barrier_type {
++ STF_BARRIER_NONE = 0x1,
++ STF_BARRIER_FALLBACK = 0x2,
++ STF_BARRIER_EIEIO = 0x4,
++ STF_BARRIER_SYNC_ORI = 0x8,
++};
++
++void setup_stf_barrier(void);
++void do_stf_barrier_fixups(enum stf_barrier_type types);
++
++static inline void security_ftr_set(unsigned long feature)
++{
++ powerpc_security_features |= feature;
++}
++
++static inline void security_ftr_clear(unsigned long feature)
++{
++ powerpc_security_features &= ~feature;
++}
++
++static inline bool security_ftr_enabled(unsigned long feature)
++{
++ return !!(powerpc_security_features & feature);
++}
++
++
++// Features indicating support for Spectre/Meltdown mitigations
++
++// The L1-D cache can be flushed with ori r30,r30,0
++#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull
++
++// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
++#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull
++
++// ori r31,r31,0 acts as a speculation barrier
++#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull
++
++// Speculation past bctr is disabled
++#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull
++
++// Entries in L1-D are private to a SMT thread
++#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull
++
++// Indirect branch prediction cache disabled
++#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
++
++
++// Features indicating need for Spectre/Meltdown mitigations
++
++// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
++#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull
++
++// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
++#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull
++
++// A speculation barrier should be used for bounds checks (Spectre variant 1)
++#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull
++
++// Firmware configuration indicates user favours security over performance
++#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
++
++
++// Features enabled by default
++#define SEC_FTR_DEFAULT \
++ (SEC_FTR_L1D_FLUSH_HV | \
++ SEC_FTR_L1D_FLUSH_PR | \
++ SEC_FTR_BNDS_CHK_SPEC_BAR | \
++ SEC_FTR_FAVOUR_SECURITY)
++
++#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 1b6bc7fba996..d458c45e5004 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -42,7 +42,7 @@ obj-$(CONFIG_VDSO32) += vdso32/
+ obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
+ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
+-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
++obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
+ obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
+ obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
+ obj-$(CONFIG_PPC64) += vdso64/
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 1ecfd8ffb098..bf9b94e376fd 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -833,7 +833,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+
+
+-EXC_REAL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
++EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
+ EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
+ TRAMP_KVM(PACA_EXGEN, 0x900)
+ EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+@@ -909,6 +909,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
+ mtctr r13; \
+ GET_PACA(r13); \
+ std r10,PACA_EXGEN+EX_R10(r13); \
++ INTERRUPT_TO_KERNEL; \
+ KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
+ HMT_MEDIUM; \
+ mfctr r9;
+@@ -917,7 +918,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
+ #define SYSCALL_KVMTEST \
+ HMT_MEDIUM; \
+ mr r9,r13; \
+- GET_PACA(r13);
++ GET_PACA(r13); \
++ INTERRUPT_TO_KERNEL;
+ #endif
+
+ #define LOAD_SYSCALL_HANDLER(reg) \
+@@ -1455,6 +1457,19 @@ masked_##_H##interrupt: \
+ b .; \
+ MASKED_DEC_HANDLER(_H)
+
++TRAMP_REAL_BEGIN(stf_barrier_fallback)
++ std r9,PACA_EXRFI+EX_R9(r13)
++ std r10,PACA_EXRFI+EX_R10(r13)
++ sync
++ ld r9,PACA_EXRFI+EX_R9(r13)
++ ld r10,PACA_EXRFI+EX_R10(r13)
++ ori 31,31,0
++ .rept 14
++ b 1f
++1:
++ .endr
++ blr
++
+ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+new file mode 100644
+index 000000000000..b98a722da915
+--- /dev/null
++++ b/arch/powerpc/kernel/security.c
+@@ -0,0 +1,237 @@
++// SPDX-License-Identifier: GPL-2.0+
++//
++// Security related flags and so on.
++//
++// Copyright 2018, Michael Ellerman, IBM Corporation.
++
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/seq_buf.h>
++
++#include <asm/debugfs.h>
++#include <asm/security_features.h>
++
++
++unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
++
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ bool thread_priv;
++
++ thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (rfi_flush || thread_priv) {
++ struct seq_buf s;
++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++ seq_buf_printf(&s, "Mitigation: ");
++
++ if (rfi_flush)
++ seq_buf_printf(&s, "RFI Flush");
++
++ if (rfi_flush && thread_priv)
++ seq_buf_printf(&s, ", ");
++
++ if (thread_priv)
++ seq_buf_printf(&s, "L1D private per thread");
++
++ seq_buf_printf(&s, "\n");
++
++ return s.len;
++ }
++
++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
++ return sprintf(buf, "Not affected\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
++ return sprintf(buf, "Not affected\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ bool bcs, ccd, ori;
++ struct seq_buf s;
++
++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++ bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
++ ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
++ ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (bcs || ccd) {
++ seq_buf_printf(&s, "Mitigation: ");
++
++ if (bcs)
++ seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
++
++ if (bcs && ccd)
++ seq_buf_printf(&s, ", ");
++
++ if (ccd)
++ seq_buf_printf(&s, "Indirect branch cache disabled");
++ } else
++ seq_buf_printf(&s, "Vulnerable");
++
++ if (ori)
++ seq_buf_printf(&s, ", ori31 speculation barrier enabled");
++
++ seq_buf_printf(&s, "\n");
++
++ return s.len;
++}
++
++/*
++ * Store-forwarding barrier support.
++ */
++
++static enum stf_barrier_type stf_enabled_flush_types;
++static bool no_stf_barrier;
++bool stf_barrier;
++
++static int __init handle_no_stf_barrier(char *p)
++{
++ pr_info("stf-barrier: disabled on command line.");
++ no_stf_barrier = true;
++ return 0;
++}
++
++early_param("no_stf_barrier", handle_no_stf_barrier);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_ssbd(char *p)
++{
++ if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
++ /* Until firmware tells us, we have the barrier with auto */
++ return 0;
++ } else if (strncmp(p, "off", 3) == 0) {
++ handle_no_stf_barrier(NULL);
++ return 0;
++ } else
++ return 1;
++
++ return 0;
++}
++early_param("spec_store_bypass_disable", handle_ssbd);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_no_ssbd(char *p)
++{
++ handle_no_stf_barrier(NULL);
++ return 0;
++}
++early_param("nospec_store_bypass_disable", handle_no_ssbd);
++
++static void stf_barrier_enable(bool enable)
++{
++ if (enable)
++ do_stf_barrier_fixups(stf_enabled_flush_types);
++ else
++ do_stf_barrier_fixups(STF_BARRIER_NONE);
++
++ stf_barrier = enable;
++}
++
++void setup_stf_barrier(void)
++{
++ enum stf_barrier_type type;
++ bool enable, hv;
++
++ hv = cpu_has_feature(CPU_FTR_HVMODE);
++
++ /* Default to fallback in case fw-features are not available */
++ if (cpu_has_feature(CPU_FTR_ARCH_300))
++ type = STF_BARRIER_EIEIO;
++ else if (cpu_has_feature(CPU_FTR_ARCH_207S))
++ type = STF_BARRIER_SYNC_ORI;
++ else if (cpu_has_feature(CPU_FTR_ARCH_206))
++ type = STF_BARRIER_FALLBACK;
++ else
++ type = STF_BARRIER_NONE;
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
++
++ if (type == STF_BARRIER_FALLBACK) {
++ pr_info("stf-barrier: fallback barrier available\n");
++ } else if (type == STF_BARRIER_SYNC_ORI) {
++ pr_info("stf-barrier: hwsync barrier available\n");
++ } else if (type == STF_BARRIER_EIEIO) {
++ pr_info("stf-barrier: eieio barrier available\n");
++ }
++
++ stf_enabled_flush_types = type;
++
++ if (!no_stf_barrier)
++ stf_barrier_enable(enable);
++}
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
++ const char *type;
++ switch (stf_enabled_flush_types) {
++ case STF_BARRIER_EIEIO:
++ type = "eieio";
++ break;
++ case STF_BARRIER_SYNC_ORI:
++ type = "hwsync";
++ break;
++ case STF_BARRIER_FALLBACK:
++ type = "fallback";
++ break;
++ default:
++ type = "unknown";
++ }
++ return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
++ }
++
++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
++ return sprintf(buf, "Not affected\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
++
++#ifdef CONFIG_DEBUG_FS
++static int stf_barrier_set(void *data, u64 val)
++{
++ bool enable;
++
++ if (val == 1)
++ enable = true;
++ else if (val == 0)
++ enable = false;
++ else
++ return -EINVAL;
++
++ /* Only do anything if we're changing state */
++ if (enable != stf_barrier)
++ stf_barrier_enable(enable);
++
++ return 0;
++}
++
++static int stf_barrier_get(void *data, u64 *val)
++{
++ *val = stf_barrier ? 1 : 0;
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
++
++static __init int stf_barrier_debugfs_init(void)
++{
++ debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
++ return 0;
++}
++device_initcall(stf_barrier_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index c388cc3357fa..c27557aff394 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -927,12 +927,4 @@ static __init int rfi_flush_debugfs_init(void)
+ }
+ device_initcall(rfi_flush_debugfs_init);
+ #endif
+-
+-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- if (rfi_flush)
+- return sprintf(buf, "Mitigation: RFI Flush\n");
+-
+- return sprintf(buf, "Vulnerable\n");
+-}
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index c8af90ff49f0..b8d82678f8b4 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -133,6 +133,20 @@ SECTIONS
+ RO_DATA(PAGE_SIZE)
+
+ #ifdef CONFIG_PPC64
++ . = ALIGN(8);
++ __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
++ __start___stf_entry_barrier_fixup = .;
++ *(__stf_entry_barrier_fixup)
++ __stop___stf_entry_barrier_fixup = .;
++ }
++
++ . = ALIGN(8);
++ __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
++ __start___stf_exit_barrier_fixup = .;
++ *(__stf_exit_barrier_fixup)
++ __stop___stf_exit_barrier_fixup = .;
++ }
++
+ . = ALIGN(8);
+ __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
+ __start___rfi_flush_fixup = .;
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index f61ff5a6bddb..6b3c2d405a6d 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -23,6 +23,7 @@
+ #include <asm/page.h>
+ #include <asm/sections.h>
+ #include <asm/setup.h>
++#include <asm/security_features.h>
+ #include <asm/firmware.h>
+
+ struct fixup_entry {
+@@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ }
+
+ #ifdef CONFIG_PPC_BOOK3S_64
++void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
++{
++ unsigned int instrs[3], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___stf_entry_barrier_fixup),
++ end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types & STF_BARRIER_FALLBACK) {
++ instrs[i++] = 0x7d4802a6; /* mflr r10 */
++ instrs[i++] = 0x60000000; /* branch patched below */
++ instrs[i++] = 0x7d4803a6; /* mtlr r10 */
++ } else if (types & STF_BARRIER_EIEIO) {
++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++ } else if (types & STF_BARRIER_SYNC_ORI) {
++ instrs[i++] = 0x7c0004ac; /* hwsync */
++ instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++
++ if (types & STF_BARRIER_FALLBACK)
++ patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
++ BRANCH_SET_LINK);
++ else
++ patch_instruction(dest + 1, instrs[1]);
++
++ patch_instruction(dest + 2, instrs[2]);
++ }
++
++ printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
++ (types == STF_BARRIER_NONE) ? "no" :
++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
++ (types == STF_BARRIER_EIEIO) ? "eieio" :
++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
++ : "unknown");
++}
++
++void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
++{
++ unsigned int instrs[6], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___stf_exit_barrier_fixup),
++ end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++ instrs[3] = 0x60000000; /* nop */
++ instrs[4] = 0x60000000; /* nop */
++ instrs[5] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
++ instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
++ instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
++ } else {
++ instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
++ instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
++ }
++ instrs[i++] = 0x7c0004ac; /* hwsync */
++ instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
++ instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
++ } else {
++ instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
++ }
++ } else if (types & STF_BARRIER_EIEIO) {
++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++ patch_instruction(dest + 1, instrs[1]);
++ patch_instruction(dest + 2, instrs[2]);
++ patch_instruction(dest + 3, instrs[3]);
++ patch_instruction(dest + 4, instrs[4]);
++ patch_instruction(dest + 5, instrs[5]);
++ }
++ printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
++ (types == STF_BARRIER_NONE) ? "no" :
++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
++ (types == STF_BARRIER_EIEIO) ? "eieio" :
++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
++ : "unknown");
++}
++
++
++void do_stf_barrier_fixups(enum stf_barrier_type types)
++{
++ do_stf_entry_barrier_fixups(types);
++ do_stf_exit_barrier_fixups(types);
++}
++
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ {
+ unsigned int instrs[3], *dest;
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 092715b9674b..fc0412d59149 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -38,57 +38,92 @@
+ #include <asm/smp.h>
+ #include <asm/tm.h>
+ #include <asm/setup.h>
++#include <asm/security_features.h>
+
+ #include "powernv.h"
+
++
++static bool fw_feature_is(const char *state, const char *name,
++ struct device_node *fw_features)
++{
++ struct device_node *np;
++ bool rc = false;
++
++ np = of_get_child_by_name(fw_features, name);
++ if (np) {
++ rc = of_property_read_bool(np, state);
++ of_node_put(np);
++ }
++
++ return rc;
++}
++
++static void init_fw_feat_flags(struct device_node *np)
++{
++ if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
++
++ if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
++
++ if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
++
++ if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
++
++ /*
++ * The features below are enabled by default, so we instead look to see
++ * if firmware has *disabled* them, and clear them if so.
++ */
++ if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
++
++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
++
++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
++
++ if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
++}
++
+ static void pnv_setup_rfi_flush(void)
+ {
+ struct device_node *np, *fw_features;
+ enum l1d_flush_type type;
+- int enable;
++ bool enable;
+
+ /* Default to fallback in case fw-features are not available */
+ type = L1D_FLUSH_FALLBACK;
+- enable = 1;
+
+ np = of_find_node_by_name(NULL, "ibm,opal");
+ fw_features = of_get_child_by_name(np, "fw-features");
+ of_node_put(np);
+
+ if (fw_features) {
+- np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+- if (np && of_property_read_bool(np, "enabled"))
+- type = L1D_FLUSH_MTTRIG;
++ init_fw_feat_flags(fw_features);
++ of_node_put(fw_features);
+
+- of_node_put(np);
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
++ type = L1D_FLUSH_MTTRIG;
+
+- np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
+- if (np && of_property_read_bool(np, "enabled"))
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
+ type = L1D_FLUSH_ORI;
+-
+- of_node_put(np);
+-
+- /* Enable unless firmware says NOT to */
+- enable = 2;
+- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable--;
+-
+- of_node_put(np);
+-
+- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable--;
+-
+- np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable = 0;
+-
+- of_node_put(np);
+- of_node_put(fw_features);
+ }
+
+- setup_rfi_flush(type, enable > 0);
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
++
++ setup_rfi_flush(type, enable);
+ }
+
+ static void __init pnv_setup_arch(void)
+@@ -96,6 +131,7 @@ static void __init pnv_setup_arch(void)
+ set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+
+ pnv_setup_rfi_flush();
++ setup_stf_barrier();
+
+ /* Initialize SMP */
+ pnv_smp_init();
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 1a527625acf7..21fed38bbbd5 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -68,6 +68,7 @@
+ #include <asm/plpar_wrappers.h>
+ #include <asm/kexec.h>
+ #include <asm/isa-bridge.h>
++#include <asm/security_features.h>
+
+ #include "pseries.h"
+
+@@ -459,6 +460,40 @@ static void __init find_and_init_phbs(void)
+ of_pci_check_probe_only();
+ }
+
++static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
++{
++ if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
++
++ if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
++
++ if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
++
++ if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
++
++ /*
++ * The features below are enabled by default, so we instead look to see
++ * if firmware has *disabled* them, and clear them if so.
++ */
++ if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
++
++ if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
++
++ if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
++}
++
+ static void pseries_setup_rfi_flush(void)
+ {
+ struct h_cpu_char_result result;
+@@ -466,29 +501,26 @@ static void pseries_setup_rfi_flush(void)
+ bool enable;
+ long rc;
+
+- /* Enable by default */
+- enable = true;
+-
+ rc = plpar_get_cpu_characteristics(&result);
+- if (rc == H_SUCCESS) {
+- types = L1D_FLUSH_NONE;
++ if (rc == H_SUCCESS)
++ init_cpu_char_feature_flags(&result);
++
++ /*
++ * We're the guest so this doesn't apply to us, clear it to simplify
++ * handling of it elsewhere.
++ */
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
+
+- if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+- types |= L1D_FLUSH_MTTRIG;
+- if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+- types |= L1D_FLUSH_ORI;
++ types = L1D_FLUSH_FALLBACK;
+
+- /* Use fallback if nothing set in hcall */
+- if (types == L1D_FLUSH_NONE)
+- types = L1D_FLUSH_FALLBACK;
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
++ types |= L1D_FLUSH_MTTRIG;
+
+- if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+- (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
+- enable = false;
+- } else {
+- /* Default to fallback if case hcall is not available */
+- types = L1D_FLUSH_FALLBACK;
+- }
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
++ types |= L1D_FLUSH_ORI;
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
+
+ setup_rfi_flush(types, enable);
+ }
+@@ -667,6 +699,7 @@ static void __init pSeries_setup_arch(void)
+ fwnmi_init();
+
+ pseries_setup_rfi_flush();
++ setup_stf_barrier();
+
+ /* By default, only probe PCI (can be overridden by rtas_pci) */
+ pci_add_flags(PCI_PROBE_ONLY);
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 6e91e0d422ea..c94dd09a82d1 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -120,6 +120,7 @@ config S390
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CPU_AUTOPROBE
+ select GENERIC_CPU_DEVICES if !SMP
++ select GENERIC_CPU_VULNERABILITIES
+ select GENERIC_FIND_FIRST_BIT
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_TIME_VSYSCALL
+@@ -576,7 +577,7 @@ choice
+ config EXPOLINE_OFF
+ bool "spectre_v2=off"
+
+-config EXPOLINE_MEDIUM
++config EXPOLINE_AUTO
+ bool "spectre_v2=auto"
+
+ config EXPOLINE_FULL
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index 2ced3239cb84..e1bc722fba41 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -84,7 +84,7 @@ ifdef CONFIG_EXPOLINE
+ CC_FLAGS_EXPOLINE += -mfunction-return=thunk
+ CC_FLAGS_EXPOLINE += -mindirect-branch-table
+ export CC_FLAGS_EXPOLINE
+- cflags-y += $(CC_FLAGS_EXPOLINE)
++ cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+ endif
+ endif
+
+diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
+index e8077f0971f8..2bf01ba44107 100644
+--- a/arch/s390/crypto/crc32be-vx.S
++++ b/arch/s390/crypto/crc32be-vx.S
+@@ -13,6 +13,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/nospec-insn.h>
+ #include <asm/vx-insn.h>
+
+ /* Vector register range containing CRC-32 constants */
+@@ -67,6 +68,8 @@
+
+ .previous
+
++ GEN_BR_THUNK %r14
++
+ .text
+ /*
+ * The CRC-32 function(s) use these calling conventions:
+@@ -203,6 +206,6 @@ ENTRY(crc32_be_vgfm_16)
+
+ .Ldone:
+ VLGVF %r2,%v2,3
+- br %r14
++ BR_EX %r14
+
+ .previous
+diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
+index d8c67a58c0c5..7d6f568bd3ad 100644
+--- a/arch/s390/crypto/crc32le-vx.S
++++ b/arch/s390/crypto/crc32le-vx.S
+@@ -14,6 +14,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/nospec-insn.h>
+ #include <asm/vx-insn.h>
+
+ /* Vector register range containing CRC-32 constants */
+@@ -76,6 +77,7 @@
+
+ .previous
+
++ GEN_BR_THUNK %r14
+
+ .text
+
+@@ -264,6 +266,6 @@ crc32_le_vgfm_generic:
+
+ .Ldone:
+ VLGVF %r2,%v2,2
+- br %r14
++ BR_EX %r14
+
+ .previous
+diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
+new file mode 100644
+index 000000000000..955d620db23e
+--- /dev/null
++++ b/arch/s390/include/asm/alternative-asm.h
+@@ -0,0 +1,108 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_S390_ALTERNATIVE_ASM_H
++#define _ASM_S390_ALTERNATIVE_ASM_H
++
++#ifdef __ASSEMBLY__
++
++/*
++ * Check the length of an instruction sequence. The length may not be larger
++ * than 254 bytes and it has to be divisible by 2.
++ */
++.macro alt_len_check start,end
++ .if ( \end - \start ) > 254
++ .error "cpu alternatives does not support instructions blocks > 254 bytes\n"
++ .endif
++ .if ( \end - \start ) % 2
++ .error "cpu alternatives instructions length is odd\n"
++ .endif
++.endm
++
++/*
++ * Issue one struct alt_instr descriptor entry (need to put it into
++ * the section .altinstructions, see below). This entry contains
++ * enough information for the alternatives patching code to patch an
++ * instruction. See apply_alternatives().
++ */
++.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
++ .long \orig_start - .
++ .long \alt_start - .
++ .word \feature
++ .byte \orig_end - \orig_start
++ .byte \alt_end - \alt_start
++.endm
++
++/*
++ * Fill up @bytes with nops. The macro emits 6-byte nop instructions
++ * for the bulk of the area, possibly followed by a 4-byte and/or
++ * a 2-byte nop if the size of the area is not divisible by 6.
++ */
++.macro alt_pad_fill bytes
++ .fill ( \bytes ) / 6, 6, 0xc0040000
++ .fill ( \bytes ) % 6 / 4, 4, 0x47000000
++ .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700
++.endm
++
++/*
++ * Fill up @bytes with nops. If the number of bytes is larger
++ * than 6, emit a jg instruction to branch over all nops, then
++ * fill an area of size (@bytes - 6) with nop instructions.
++ */
++.macro alt_pad bytes
++ .if ( \bytes > 0 )
++ .if ( \bytes > 6 )
++ jg . + \bytes
++ alt_pad_fill \bytes - 6
++ .else
++ alt_pad_fill \bytes
++ .endif
++ .endif
++.endm
++
++/*
++ * Define an alternative between two instructions. If @feature is
++ * present, early code in apply_alternatives() replaces @oldinstr with
++ * @newinstr. ".skip" directive takes care of proper instruction padding
++ * in case @newinstr is longer than @oldinstr.
++ */
++.macro ALTERNATIVE oldinstr, newinstr, feature
++ .pushsection .altinstr_replacement,"ax"
++770: \newinstr
++771: .popsection
++772: \oldinstr
++773: alt_len_check 770b, 771b
++ alt_len_check 772b, 773b
++ alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
++774: .pushsection .altinstructions,"a"
++ alt_entry 772b, 774b, 770b, 771b, \feature
++ .popsection
++.endm
++
++/*
++ * Define an alternative between two instructions. If @feature is
++ * present, early code in apply_alternatives() replaces @oldinstr with
++ * @newinstr. ".skip" directive takes care of proper instruction padding
++ * in case @newinstr is longer than @oldinstr.
++ */
++.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
++ .pushsection .altinstr_replacement,"ax"
++770: \newinstr1
++771: \newinstr2
++772: .popsection
++773: \oldinstr
++774: alt_len_check 770b, 771b
++ alt_len_check 771b, 772b
++ alt_len_check 773b, 774b
++ .if ( 771b - 770b > 772b - 771b )
++ alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
++ .else
++ alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
++ .endif
++775: .pushsection .altinstructions,"a"
++ alt_entry 773b, 775b, 770b, 771b,\feature1
++ alt_entry 773b, 775b, 771b, 772b,\feature2
++ .popsection
++.endm
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
+diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
+index 7df48e5cf36f..b4bd8c41e9d3 100644
+--- a/arch/s390/include/asm/nospec-branch.h
++++ b/arch/s390/include/asm/nospec-branch.h
+@@ -6,12 +6,11 @@
+
+ #include <linux/types.h>
+
+-extern int nospec_call_disable;
+-extern int nospec_return_disable;
++extern int nospec_disable;
+
+ void nospec_init_branches(void);
+-void nospec_call_revert(s32 *start, s32 *end);
+-void nospec_return_revert(s32 *start, s32 *end);
++void nospec_auto_detect(void);
++void nospec_revert(s32 *start, s32 *end);
+
+ #endif /* __ASSEMBLY__ */
+
+diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
+new file mode 100644
+index 000000000000..a01f81186e86
+--- /dev/null
++++ b/arch/s390/include/asm/nospec-insn.h
+@@ -0,0 +1,196 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_S390_NOSPEC_ASM_H
++#define _ASM_S390_NOSPEC_ASM_H
++
++#include <asm/alternative-asm.h>
++#include <asm/asm-offsets.h>
++#include <asm/dwarf.h>
++
++#ifdef __ASSEMBLY__
++
++#ifdef CONFIG_EXPOLINE
++
++_LC_BR_R1 = __LC_BR_R1
++
++/*
++ * The expoline macros are used to create thunks in the same format
++ * as gcc generates them. The 'comdat' section flag makes sure that
++ * the various thunks are merged into a single copy.
++ */
++ .macro __THUNK_PROLOG_NAME name
++ .pushsection .text.\name,"axG",@progbits,\name,comdat
++ .globl \name
++ .hidden \name
++ .type \name,@function
++\name:
++ CFI_STARTPROC
++ .endm
++
++ .macro __THUNK_EPILOG
++ CFI_ENDPROC
++ .popsection
++ .endm
++
++ .macro __THUNK_PROLOG_BR r1,r2
++ __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
++ .endm
++
++ .macro __THUNK_PROLOG_BC d0,r1,r2
++ __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
++ .endm
++
++ .macro __THUNK_BR r1,r2
++ jg __s390x_indirect_jump_r\r2\()use_r\r1
++ .endm
++
++ .macro __THUNK_BC d0,r1,r2
++ jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
++ .endm
++
++ .macro __THUNK_BRASL r1,r2,r3
++ brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
++ .endm
++
++ .macro __DECODE_RR expand,reg,ruse
++ .set __decode_fail,1
++ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \reg,%r\r1
++ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \ruse,%r\r2
++ \expand \r1,\r2
++ .set __decode_fail,0
++ .endif
++ .endr
++ .endif
++ .endr
++ .if __decode_fail == 1
++ .error "__DECODE_RR failed"
++ .endif
++ .endm
++
++ .macro __DECODE_RRR expand,rsave,rtarget,ruse
++ .set __decode_fail,1
++ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \rsave,%r\r1
++ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \rtarget,%r\r2
++ .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \ruse,%r\r3
++ \expand \r1,\r2,\r3
++ .set __decode_fail,0
++ .endif
++ .endr
++ .endif
++ .endr
++ .endif
++ .endr
++ .if __decode_fail == 1
++ .error "__DECODE_RRR failed"
++ .endif
++ .endm
++
++ .macro __DECODE_DRR expand,disp,reg,ruse
++ .set __decode_fail,1
++ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \reg,%r\r1
++ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++ .ifc \ruse,%r\r2
++ \expand \disp,\r1,\r2
++ .set __decode_fail,0
++ .endif
++ .endr
++ .endif
++ .endr
++ .if __decode_fail == 1
++ .error "__DECODE_DRR failed"
++ .endif
++ .endm
++
++ .macro __THUNK_EX_BR reg,ruse
++ # Be very careful when adding instructions to this macro!
++ # The ALTERNATIVE replacement code has a .+10 which targets
++ # the "br \reg" after the code has been patched.
++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
++ exrl 0,555f
++ j .
++#else
++ .ifc \reg,%r1
++ ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
++ j .
++ .else
++ larl \ruse,555f
++ ex 0,0(\ruse)
++ j .
++ .endif
++#endif
++555: br \reg
++ .endm
++
++ .macro __THUNK_EX_BC disp,reg,ruse
++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
++ exrl 0,556f
++ j .
++#else
++ larl \ruse,556f
++ ex 0,0(\ruse)
++ j .
++#endif
++556: b \disp(\reg)
++ .endm
++
++ .macro GEN_BR_THUNK reg,ruse=%r1
++ __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
++ __THUNK_EX_BR \reg,\ruse
++ __THUNK_EPILOG
++ .endm
++
++ .macro GEN_B_THUNK disp,reg,ruse=%r1
++ __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
++ __THUNK_EX_BC \disp,\reg,\ruse
++ __THUNK_EPILOG
++ .endm
++
++ .macro BR_EX reg,ruse=%r1
++557: __DECODE_RR __THUNK_BR,\reg,\ruse
++ .pushsection .s390_indirect_branches,"a",@progbits
++ .long 557b-.
++ .popsection
++ .endm
++
++ .macro B_EX disp,reg,ruse=%r1
++558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
++ .pushsection .s390_indirect_branches,"a",@progbits
++ .long 558b-.
++ .popsection
++ .endm
++
++ .macro BASR_EX rsave,rtarget,ruse=%r1
++559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
++ .pushsection .s390_indirect_branches,"a",@progbits
++ .long 559b-.
++ .popsection
++ .endm
++
++#else
++ .macro GEN_BR_THUNK reg,ruse=%r1
++ .endm
++
++ .macro GEN_B_THUNK disp,reg,ruse=%r1
++ .endm
++
++ .macro BR_EX reg,ruse=%r1
++ br \reg
++ .endm
++
++ .macro B_EX disp,reg,ruse=%r1
++ b \disp(\reg)
++ .endm
++
++ .macro BASR_EX rsave,rtarget,ruse=%r1
++ basr \rsave,\rtarget
++ .endm
++#endif
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_S390_NOSPEC_ASM_H */
+diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
+index 7f27e3da9709..a02bc90fe5f3 100644
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -61,11 +61,12 @@ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
+ obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+ obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
+ obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
++obj-y += nospec-branch.o
+
+ extra-y += head.o head64.o vmlinux.lds
+
+-obj-$(CONFIG_EXPOLINE) += nospec-branch.o
+-CFLAGS_REMOVE_expoline.o += $(CC_FLAGS_EXPOLINE)
++obj-$(CONFIG_SYSFS) += nospec-sysfs.o
++CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
+
+ obj-$(CONFIG_MODULES) += module.o
+ obj-$(CONFIG_SMP) += smp.o
+diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
+index 22476135f738..8e1f2aee85ef 100644
+--- a/arch/s390/kernel/alternative.c
++++ b/arch/s390/kernel/alternative.c
+@@ -2,6 +2,7 @@
+ #include <linux/module.h>
+ #include <asm/alternative.h>
+ #include <asm/facility.h>
++#include <asm/nospec-branch.h>
+
+ #define MAX_PATCH_LEN (255 - 1)
+
+@@ -15,29 +16,6 @@ static int __init disable_alternative_instructions(char *str)
+
+ early_param("noaltinstr", disable_alternative_instructions);
+
+-static int __init nobp_setup_early(char *str)
+-{
+- bool enabled;
+- int rc;
+-
+- rc = kstrtobool(str, &enabled);
+- if (rc)
+- return rc;
+- if (enabled && test_facility(82))
+- __set_facility(82, S390_lowcore.alt_stfle_fac_list);
+- else
+- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+- return 0;
+-}
+-early_param("nobp", nobp_setup_early);
+-
+-static int __init nospec_setup_early(char *str)
+-{
+- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+- return 0;
+-}
+-early_param("nospec", nospec_setup_early);
+-
+ struct brcl_insn {
+ u16 opc;
+ s32 disp;
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index 587b195b588d..3fd0b4535a71 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -179,6 +179,7 @@ int main(void)
+ OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+ OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
+ OFFSET(__LC_GMAP, lowcore, gmap);
++ OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
+ /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
+ OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
+ /* hardware defined lowcore locations 0x1000 - 0x18ff */
+diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
+index f6c56009e822..b65874b0b412 100644
+--- a/arch/s390/kernel/base.S
++++ b/arch/s390/kernel/base.S
+@@ -9,18 +9,22 @@
+
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/nospec-insn.h>
+ #include <asm/ptrace.h>
+ #include <asm/sigp.h>
+
++ GEN_BR_THUNK %r9
++ GEN_BR_THUNK %r14
++
+ ENTRY(s390_base_mcck_handler)
+ basr %r13,0
+ 0: lg %r15,__LC_PANIC_STACK # load panic stack
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ larl %r1,s390_base_mcck_handler_fn
+- lg %r1,0(%r1)
+- ltgr %r1,%r1
++ lg %r9,0(%r1)
++ ltgr %r9,%r9
+ jz 1f
+- basr %r14,%r1
++ BASR_EX %r14,%r9
+ 1: la %r1,4095
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
+ lpswe __LC_MCK_OLD_PSW
+@@ -37,10 +41,10 @@ ENTRY(s390_base_ext_handler)
+ basr %r13,0
+ 0: aghi %r15,-STACK_FRAME_OVERHEAD
+ larl %r1,s390_base_ext_handler_fn
+- lg %r1,0(%r1)
+- ltgr %r1,%r1
++ lg %r9,0(%r1)
++ ltgr %r9,%r9
+ jz 1f
+- basr %r14,%r1
++ BASR_EX %r14,%r9
+ 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
+ ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
+ lpswe __LC_EXT_OLD_PSW
+@@ -57,10 +61,10 @@ ENTRY(s390_base_pgm_handler)
+ basr %r13,0
+ 0: aghi %r15,-STACK_FRAME_OVERHEAD
+ larl %r1,s390_base_pgm_handler_fn
+- lg %r1,0(%r1)
+- ltgr %r1,%r1
++ lg %r9,0(%r1)
++ ltgr %r9,%r9
+ jz 1f
+- basr %r14,%r1
++ BASR_EX %r14,%r9
+ lmg %r0,%r15,__LC_SAVE_AREA_SYNC
+ lpswe __LC_PGM_OLD_PSW
+ 1: lpswe disabled_wait_psw-0b(%r13)
+@@ -117,7 +121,7 @@ ENTRY(diag308_reset)
+ larl %r4,.Lcontinue_psw # Restore PSW flags
+ lpswe 0(%r4)
+ .Lcontinue:
+- br %r14
++ BR_EX %r14
+ .align 16
+ .Lrestart_psw:
+ .long 0x00080000,0x80000000 + .Lrestart_part2
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index a5621ea6d123..d3e1a510c9c1 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -27,6 +27,7 @@
+ #include <asm/setup.h>
+ #include <asm/nmi.h>
+ #include <asm/export.h>
++#include <asm/nospec-insn.h>
+
+ __PT_R0 = __PT_GPRS
+ __PT_R1 = __PT_GPRS + 8
+@@ -223,67 +224,9 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
+ .popsection
+ .endm
+
+-#ifdef CONFIG_EXPOLINE
+-
+- .macro GEN_BR_THUNK name,reg,tmp
+- .section .text.\name,"axG",@progbits,\name,comdat
+- .globl \name
+- .hidden \name
+- .type \name,@function
+-\name:
+- CFI_STARTPROC
+-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+- exrl 0,0f
+-#else
+- larl \tmp,0f
+- ex 0,0(\tmp)
+-#endif
+- j .
+-0: br \reg
+- CFI_ENDPROC
+- .endm
+-
+- GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
+- GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
+- GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
+-
+- .macro BASR_R14_R9
+-0: brasl %r14,__s390x_indirect_jump_r1use_r9
+- .pushsection .s390_indirect_branches,"a",@progbits
+- .long 0b-.
+- .popsection
+- .endm
+-
+- .macro BR_R1USE_R14
+-0: jg __s390x_indirect_jump_r1use_r14
+- .pushsection .s390_indirect_branches,"a",@progbits
+- .long 0b-.
+- .popsection
+- .endm
+-
+- .macro BR_R11USE_R14
+-0: jg __s390x_indirect_jump_r11use_r14
+- .pushsection .s390_indirect_branches,"a",@progbits
+- .long 0b-.
+- .popsection
+- .endm
+-
+-#else /* CONFIG_EXPOLINE */
+-
+- .macro BASR_R14_R9
+- basr %r14,%r9
+- .endm
+-
+- .macro BR_R1USE_R14
+- br %r14
+- .endm
+-
+- .macro BR_R11USE_R14
+- br %r14
+- .endm
+-
+-#endif /* CONFIG_EXPOLINE */
+-
++ GEN_BR_THUNK %r9
++ GEN_BR_THUNK %r14
++ GEN_BR_THUNK %r14,%r11
+
+ .section .kprobes.text, "ax"
+ .Ldummy:
+@@ -300,7 +243,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
+ ENTRY(__bpon)
+ .globl __bpon
+ BPON
+- BR_R1USE_R14
++ BR_EX %r14
+
+ /*
+ * Scheduler resume function, called by switch_to
+@@ -326,7 +269,7 @@ ENTRY(__switch_to)
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
+ jz 0f
+ .insn s,0xb2800000,__LC_LPP # set program parameter
+-0: BR_R1USE_R14
++0: BR_EX %r14
+
+ .L__critical_start:
+
+@@ -393,7 +336,7 @@ sie_exit:
+ xgr %r5,%r5
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
+- BR_R1USE_R14
++ BR_EX %r14
+ .Lsie_fault:
+ lghi %r14,-EFAULT
+ stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
+@@ -452,7 +395,7 @@ ENTRY(system_call)
+ lgf %r9,0(%r8,%r10) # get system call add.
+ TSTMSK __TI_flags(%r12),_TIF_TRACE
+ jnz .Lsysc_tracesys
+- BASR_R14_R9 # call sys_xxxx
++ BASR_EX %r14,%r9 # call sys_xxxx
+ stg %r2,__PT_R2(%r11) # store return value
+
+ .Lsysc_return:
+@@ -637,7 +580,7 @@ ENTRY(system_call)
+ lmg %r3,%r7,__PT_R3(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+ lg %r2,__PT_ORIG_GPR2(%r11)
+- BASR_R14_R9 # call sys_xxx
++ BASR_EX %r14,%r9 # call sys_xxx
+ stg %r2,__PT_R2(%r11) # store return value
+ .Lsysc_tracenogo:
+ TSTMSK __TI_flags(%r12),_TIF_TRACE
+@@ -661,7 +604,7 @@ ENTRY(ret_from_fork)
+ lmg %r9,%r10,__PT_R9(%r11) # load gprs
+ ENTRY(kernel_thread_starter)
+ la %r2,0(%r10)
+- BASR_R14_R9
++ BASR_EX %r14,%r9
+ j .Lsysc_tracenogo
+
+ /*
+@@ -743,7 +686,7 @@ ENTRY(pgm_check_handler)
+ je .Lpgm_return
+ lgf %r9,0(%r10,%r1) # load address of handler routine
+ lgr %r2,%r11 # pass pointer to pt_regs
+- BASR_R14_R9 # branch to interrupt-handler
++ BASR_EX %r14,%r9 # branch to interrupt-handler
+ .Lpgm_return:
+ LOCKDEP_SYS_EXIT
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+@@ -1061,7 +1004,7 @@ ENTRY(psw_idle)
+ stpt __TIMER_IDLE_ENTER(%r2)
+ .Lpsw_idle_lpsw:
+ lpswe __SF_EMPTY(%r15)
+- BR_R1USE_R14
++ BR_EX %r14
+ .Lpsw_idle_end:
+
+ /*
+@@ -1103,7 +1046,7 @@ ENTRY(save_fpu_regs)
+ .Lsave_fpu_regs_done:
+ oi __LC_CPU_FLAGS+7,_CIF_FPU
+ .Lsave_fpu_regs_exit:
+- BR_R1USE_R14
++ BR_EX %r14
+ .Lsave_fpu_regs_end:
+ EXPORT_SYMBOL(save_fpu_regs)
+
+@@ -1149,7 +1092,7 @@ load_fpu_regs:
+ .Lload_fpu_regs_done:
+ ni __LC_CPU_FLAGS+7,255-_CIF_FPU
+ .Lload_fpu_regs_exit:
+- BR_R1USE_R14
++ BR_EX %r14
+ .Lload_fpu_regs_end:
+
+ .L__critical_end:
+@@ -1366,7 +1309,7 @@ cleanup_critical:
+ jl 0f
+ clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
+ jl .Lcleanup_load_fpu_regs
+-0: BR_R11USE_R14
++0: BR_EX %r14
+
+ .align 8
+ .Lcleanup_table:
+@@ -1402,7 +1345,7 @@ cleanup_critical:
+ ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+- BR_R11USE_R14
++ BR_EX %r14
+ #endif
+
+ .Lcleanup_system_call:
+@@ -1456,7 +1399,7 @@ cleanup_critical:
+ stg %r15,56(%r11) # r15 stack pointer
+ # set new psw address and exit
+ larl %r9,.Lsysc_do_svc
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+ .Lcleanup_system_call_insn:
+ .quad system_call
+ .quad .Lsysc_stmg
+@@ -1468,7 +1411,7 @@ cleanup_critical:
+
+ .Lcleanup_sysc_tif:
+ larl %r9,.Lsysc_tif
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+
+ .Lcleanup_sysc_restore:
+ # check if stpt has been executed
+@@ -1485,14 +1428,14 @@ cleanup_critical:
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+ 1: lmg %r8,%r9,__LC_RETURN_PSW
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+ .Lcleanup_sysc_restore_insn:
+ .quad .Lsysc_exit_timer
+ .quad .Lsysc_done - 4
+
+ .Lcleanup_io_tif:
+ larl %r9,.Lio_tif
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+
+ .Lcleanup_io_restore:
+ # check if stpt has been executed
+@@ -1506,7 +1449,7 @@ cleanup_critical:
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+ 1: lmg %r8,%r9,__LC_RETURN_PSW
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+ .Lcleanup_io_restore_insn:
+ .quad .Lio_exit_timer
+ .quad .Lio_done - 4
+@@ -1559,17 +1502,17 @@ cleanup_critical:
+ # prepare return psw
+ nihh %r8,0xfcfd # clear irq & wait state bits
+ lg %r9,48(%r11) # return from psw_idle
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+ .Lcleanup_idle_insn:
+ .quad .Lpsw_idle_lpsw
+
+ .Lcleanup_save_fpu_regs:
+ larl %r9,save_fpu_regs
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+
+ .Lcleanup_load_fpu_regs:
+ larl %r9,load_fpu_regs
+- BR_R11USE_R14
++ BR_EX %r14,%r11
+
+ /*
+ * Integer constants
+diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
+index 82df7d80fab2..27110f3294ed 100644
+--- a/arch/s390/kernel/mcount.S
++++ b/arch/s390/kernel/mcount.S
+@@ -9,13 +9,17 @@
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/ftrace.h>
++#include <asm/nospec-insn.h>
+ #include <asm/ptrace.h>
+ #include <asm/export.h>
+
++ GEN_BR_THUNK %r1
++ GEN_BR_THUNK %r14
++
+ .section .kprobes.text, "ax"
+
+ ENTRY(ftrace_stub)
+- br %r14
++ BR_EX %r14
+
+ #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
+ #define STACK_PTREGS (STACK_FRAME_OVERHEAD)
+@@ -23,7 +27,7 @@ ENTRY(ftrace_stub)
+ #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
+
+ ENTRY(_mcount)
+- br %r14
++ BR_EX %r14
+
+ EXPORT_SYMBOL(_mcount)
+
+@@ -53,7 +57,7 @@ ENTRY(ftrace_caller)
+ #endif
+ lgr %r3,%r14
+ la %r5,STACK_PTREGS(%r15)
+- basr %r14,%r1
++ BASR_EX %r14,%r1
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ # The j instruction gets runtime patched to a nop instruction.
+ # See ftrace_enable_ftrace_graph_caller.
+@@ -68,7 +72,7 @@ ftrace_graph_caller_end:
+ #endif
+ lg %r1,(STACK_PTREGS_PSW+8)(%r15)
+ lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
+- br %r1
++ BR_EX %r1
+
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+@@ -81,6 +85,6 @@ ENTRY(return_to_handler)
+ aghi %r15,STACK_FRAME_OVERHEAD
+ lgr %r14,%r2
+ lmg %r2,%r5,32(%r15)
+- br %r14
++ BR_EX %r14
+
+ #endif
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index 1fc6d1ff92d3..0dc8ac8548ee 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -159,7 +159,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ me->core_layout.size += me->arch.got_size;
+ me->arch.plt_offset = me->core_layout.size;
+ if (me->arch.plt_size) {
+- if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_call_disable)
++ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
+ me->arch.plt_size += PLT_ENTRY_SIZE;
+ me->core_layout.size += me->arch.plt_size;
+ }
+@@ -318,8 +318,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ info->plt_offset;
+ ip[0] = 0x0d10e310; /* basr 1,0 */
+ ip[1] = 0x100a0004; /* lg 1,10(1) */
+- if (IS_ENABLED(CONFIG_EXPOLINE) &&
+- !nospec_call_disable) {
++ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
+ unsigned int *ij;
+ ij = me->core_layout.base +
+ me->arch.plt_offset +
+@@ -440,7 +439,7 @@ int module_finalize(const Elf_Ehdr *hdr,
+ void *aseg;
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+- !nospec_call_disable && me->arch.plt_size) {
++ !nospec_disable && me->arch.plt_size) {
+ unsigned int *ij;
+
+ ij = me->core_layout.base + me->arch.plt_offset +
+@@ -466,12 +465,12 @@ int module_finalize(const Elf_Ehdr *hdr,
+ apply_alternatives(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+- (!strcmp(".nospec_call_table", secname)))
+- nospec_call_revert(aseg, aseg + s->sh_size);
++ (!strncmp(".s390_indirect", secname, 14)))
++ nospec_revert(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+- (!strcmp(".nospec_return_table", secname)))
+- nospec_return_revert(aseg, aseg + s->sh_size);
++ (!strncmp(".s390_return", secname, 12)))
++ nospec_revert(aseg, aseg + s->sh_size);
+ }
+
+ jump_label_apply_nops(me);
+diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
+index 9aff72d3abda..8ad6a7128b3a 100644
+--- a/arch/s390/kernel/nospec-branch.c
++++ b/arch/s390/kernel/nospec-branch.c
+@@ -1,32 +1,86 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/module.h>
++#include <linux/device.h>
+ #include <asm/nospec-branch.h>
+
+-int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+-int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL);
++static int __init nobp_setup_early(char *str)
++{
++ bool enabled;
++ int rc;
++
++ rc = kstrtobool(str, &enabled);
++ if (rc)
++ return rc;
++ if (enabled && test_facility(82)) {
++ /*
++ * The user explicitely requested nobp=1, enable it and
++ * disable the expoline support.
++ */
++ __set_facility(82, S390_lowcore.alt_stfle_fac_list);
++ if (IS_ENABLED(CONFIG_EXPOLINE))
++ nospec_disable = 1;
++ } else {
++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++ }
++ return 0;
++}
++early_param("nobp", nobp_setup_early);
++
++static int __init nospec_setup_early(char *str)
++{
++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++ return 0;
++}
++early_param("nospec", nospec_setup_early);
++
++static int __init nospec_report(void)
++{
++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
++ pr_info("Spectre V2 mitigation: execute trampolines.\n");
++ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
++ pr_info("Spectre V2 mitigation: limited branch prediction.\n");
++ return 0;
++}
++arch_initcall(nospec_report);
++
++#ifdef CONFIG_EXPOLINE
++
++int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+
+ static int __init nospectre_v2_setup_early(char *str)
+ {
+- nospec_call_disable = 1;
+- nospec_return_disable = 1;
++ nospec_disable = 1;
+ return 0;
+ }
+ early_param("nospectre_v2", nospectre_v2_setup_early);
+
++void __init nospec_auto_detect(void)
++{
++ if (IS_ENABLED(CC_USING_EXPOLINE)) {
++ /*
++ * The kernel has been compiled with expolines.
++ * Keep expolines enabled and disable nobp.
++ */
++ nospec_disable = 0;
++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++ }
++ /*
++ * If the kernel has not been compiled with expolines the
++ * nobp setting decides what is done, this depends on the
++ * CONFIG_KERNEL_NP option and the nobp/nospec parameters.
++ */
++}
++
+ static int __init spectre_v2_setup_early(char *str)
+ {
+ if (str && !strncmp(str, "on", 2)) {
+- nospec_call_disable = 0;
+- nospec_return_disable = 0;
+- }
+- if (str && !strncmp(str, "off", 3)) {
+- nospec_call_disable = 1;
+- nospec_return_disable = 1;
+- }
+- if (str && !strncmp(str, "auto", 4)) {
+- nospec_call_disable = 0;
+- nospec_return_disable = 1;
++ nospec_disable = 0;
++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
++ if (str && !strncmp(str, "off", 3))
++ nospec_disable = 1;
++ if (str && !strncmp(str, "auto", 4))
++ nospec_auto_detect();
+ return 0;
+ }
+ early_param("spectre_v2", spectre_v2_setup_early);
+@@ -39,7 +93,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+ s32 *epo;
+
+ /* Second part of the instruction replace is always a nop */
+- memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
+ for (epo = start; epo < end; epo++) {
+ instr = (u8 *) epo + *epo;
+ if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+@@ -60,18 +113,34 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else
+ continue;
+- if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
++ /* Check for unconditional branch 0x07f? or 0x47f???? */
++ if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
+ continue;
++
++ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
+ switch (type) {
+ case BRCL_EXPOLINE:
+- /* brcl to thunk, replace with br + nop */
+ insnbuf[0] = br[0];
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++ if (br[0] == 0x47) {
++ /* brcl to b, replace with bc + nopr */
++ insnbuf[2] = br[2];
++ insnbuf[3] = br[3];
++ } else {
++ /* brcl to br, replace with bcr + nop */
++ }
+ break;
+ case BRASL_EXPOLINE:
+- /* brasl to thunk, replace with basr + nop */
+- insnbuf[0] = 0x0d;
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++ if (br[0] == 0x47) {
++ /* brasl to b, replace with bas + nopr */
++ insnbuf[0] = 0x4d;
++ insnbuf[2] = br[2];
++ insnbuf[3] = br[3];
++ } else {
++ /* brasl to br, replace with basr + nop */
++ insnbuf[0] = 0x0d;
++ }
+ break;
+ }
+
+@@ -79,15 +148,9 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+ }
+ }
+
+-void __init_or_module nospec_call_revert(s32 *start, s32 *end)
+-{
+- if (nospec_call_disable)
+- __nospec_revert(start, end);
+-}
+-
+-void __init_or_module nospec_return_revert(s32 *start, s32 *end)
++void __init_or_module nospec_revert(s32 *start, s32 *end)
+ {
+- if (nospec_return_disable)
++ if (nospec_disable)
+ __nospec_revert(start, end);
+ }
+
+@@ -95,6 +158,8 @@ extern s32 __nospec_call_start[], __nospec_call_end[];
+ extern s32 __nospec_return_start[], __nospec_return_end[];
+ void __init nospec_init_branches(void)
+ {
+- nospec_call_revert(__nospec_call_start, __nospec_call_end);
+- nospec_return_revert(__nospec_return_start, __nospec_return_end);
++ nospec_revert(__nospec_call_start, __nospec_call_end);
++ nospec_revert(__nospec_return_start, __nospec_return_end);
+ }
++
++#endif /* CONFIG_EXPOLINE */
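
The spectre_v2= handling above boils down to a small policy decision between
executing trampolines (expolines) and limited branch prediction (nobp). The
following is a rough, stand-alone C sketch of that decision only; the names,
the user-space framing, and the assumption that nobp defaults to enabled are
all illustrative and not the kernel's facility API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct policy {
	bool expolines_disabled;   /* stands in for nospec_disable */
	bool branch_pred_limited;  /* stands in for facility 82 / nobp */
};

static struct policy decide(const char *arg, bool built_with_expolines)
{
	/* assume nobp defaults to enabled, as with CONFIG_KERNEL_NOBP=y */
	struct policy p = { .expolines_disabled = false,
			    .branch_pred_limited = true };

	if (!strncmp(arg, "on", 2)) {
		p.expolines_disabled = false;
		p.branch_pred_limited = false;  /* like __clear_facility(82) */
	} else if (!strncmp(arg, "off", 3)) {
		p.expolines_disabled = true;
	} else if (!strncmp(arg, "auto", 4)) {
		/* nospec_auto_detect(): prefer expolines when compiled in */
		if (built_with_expolines) {
			p.expolines_disabled = false;
			p.branch_pred_limited = false;
		}
	}
	return p;
}

int main(void)
{
	struct policy p = decide("auto", true);

	printf("expolines %s, nobp %s\n",
	       p.expolines_disabled ? "off" : "on",
	       p.branch_pred_limited ? "on" : "off");
	return 0;
}
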
+diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
+new file mode 100644
+index 000000000000..8affad5f18cb
+--- /dev/null
++++ b/arch/s390/kernel/nospec-sysfs.c
+@@ -0,0 +1,21 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/device.h>
++#include <linux/cpu.h>
++#include <asm/facility.h>
++#include <asm/nospec-branch.h>
++
++ssize_t cpu_show_spectre_v1(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
++ return sprintf(buf, "Mitigation: execute trampolines\n");
++ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
++ return sprintf(buf, "Mitigation: limited branch prediction\n");
++ return sprintf(buf, "Vulnerable\n");
++}
+diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
+index a40ebd1d29d0..8e954c102639 100644
+--- a/arch/s390/kernel/reipl.S
++++ b/arch/s390/kernel/reipl.S
+@@ -7,8 +7,11 @@
+
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/nospec-insn.h>
+ #include <asm/sigp.h>
+
++ GEN_BR_THUNK %r9
++
+ #
+ # Issue "store status" for the current CPU to its prefix page
+ # and call passed function afterwards
+@@ -67,9 +70,9 @@ ENTRY(store_status)
+ st %r4,0(%r1)
+ st %r5,4(%r1)
+ stg %r2,8(%r1)
+- lgr %r1,%r2
++ lgr %r9,%r2
+ lgr %r2,%r3
+- br %r1
++ BR_EX %r9
+
+ .section .bss
+ .align 8
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index ce5ff4c4d435..0786a6b53f98 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -893,6 +893,9 @@ void __init setup_arch(char **cmdline_p)
+ init_mm.end_data = (unsigned long) &_edata;
+ init_mm.brk = (unsigned long) &_end;
+
++ if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
++ nospec_auto_detect();
++
+ parse_early_param();
+ #ifdef CONFIG_CRASH_DUMP
+ /* Deactivate elfcorehdr= kernel parameter */
+diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
+index e99187149f17..a049a7b9d6e8 100644
+--- a/arch/s390/kernel/swsusp.S
++++ b/arch/s390/kernel/swsusp.S
+@@ -13,6 +13,7 @@
+ #include <asm/ptrace.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm-offsets.h>
++#include <asm/nospec-insn.h>
+ #include <asm/sigp.h>
+
+ /*
+@@ -24,6 +25,8 @@
+ * (see below) in the resume process.
+ * This function runs with disabled interrupts.
+ */
++ GEN_BR_THUNK %r14
++
+ .section .text
+ ENTRY(swsusp_arch_suspend)
+ stmg %r6,%r15,__SF_GPRS(%r15)
+@@ -103,7 +106,7 @@ ENTRY(swsusp_arch_suspend)
+ spx 0x318(%r1)
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+- br %r14
++ BR_EX %r14
+
+ /*
+ * Restore saved memory image to correct place and restore register context.
+@@ -197,11 +200,10 @@ pgm_check_entry:
+ larl %r15,init_thread_union
+ ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+ larl %r2,.Lpanic_string
+- larl %r3,sclp_early_printk
+ lghi %r1,0
+ sam31
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE
+- basr %r14,%r3
++ brasl %r14,sclp_early_printk
+ larl %r3,.Ldisabled_wait_31
+ lpsw 0(%r3)
+ 4:
+@@ -267,7 +269,7 @@ restore_registers:
+ /* Return 0 */
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+- br %r14
++ BR_EX %r14
+
+ .section .data..nosave,"aw",@progbits
+ .align 8
+diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
+index 495c9c4bacc7..2311f15be9cf 100644
+--- a/arch/s390/lib/mem.S
++++ b/arch/s390/lib/mem.S
+@@ -7,6 +7,9 @@
+
+ #include <linux/linkage.h>
+ #include <asm/export.h>
++#include <asm/nospec-insn.h>
++
++ GEN_BR_THUNK %r14
+
+ /*
+ * void *memmove(void *dest, const void *src, size_t n)
+@@ -33,14 +36,14 @@ ENTRY(memmove)
+ .Lmemmove_forward_remainder:
+ larl %r5,.Lmemmove_mvc
+ ex %r4,0(%r5)
+- br %r14
++ BR_EX %r14
+ .Lmemmove_reverse:
+ ic %r0,0(%r4,%r3)
+ stc %r0,0(%r4,%r1)
+ brctg %r4,.Lmemmove_reverse
+ ic %r0,0(%r4,%r3)
+ stc %r0,0(%r4,%r1)
+- br %r14
++ BR_EX %r14
+ .Lmemmove_mvc:
+ mvc 0(1,%r1),0(%r3)
+ EXPORT_SYMBOL(memmove)
+@@ -77,7 +80,7 @@ ENTRY(memset)
+ .Lmemset_clear_remainder:
+ larl %r3,.Lmemset_xc
+ ex %r4,0(%r3)
+- br %r14
++ BR_EX %r14
+ .Lmemset_fill:
+ cghi %r4,1
+ lgr %r1,%r2
+@@ -95,10 +98,10 @@ ENTRY(memset)
+ stc %r3,0(%r1)
+ larl %r5,.Lmemset_mvc
+ ex %r4,0(%r5)
+- br %r14
++ BR_EX %r14
+ .Lmemset_fill_exit:
+ stc %r3,0(%r1)
+- br %r14
++ BR_EX %r14
+ .Lmemset_xc:
+ xc 0(1,%r1),0(%r1)
+ .Lmemset_mvc:
+@@ -121,7 +124,7 @@ ENTRY(memcpy)
+ .Lmemcpy_remainder:
+ larl %r5,.Lmemcpy_mvc
+ ex %r4,0(%r5)
+- br %r14
++ BR_EX %r14
+ .Lmemcpy_loop:
+ mvc 0(256,%r1),0(%r3)
+ la %r1,256(%r1)
+@@ -159,10 +162,10 @@ ENTRY(__memset\bits)
+ \insn %r3,0(%r1)
+ larl %r5,.L__memset_mvc\bits
+ ex %r4,0(%r5)
+- br %r14
++ BR_EX %r14
+ .L__memset_exit\bits:
+ \insn %r3,0(%r2)
+- br %r14
++ BR_EX %r14
+ .L__memset_mvc\bits:
+ mvc \bytes(1,%r1),0(%r1)
+ .endm
+diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
+index 25bb4643c4f4..9f794869c1b0 100644
+--- a/arch/s390/net/bpf_jit.S
++++ b/arch/s390/net/bpf_jit.S
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/nospec-insn.h>
+ #include "bpf_jit.h"
+
+ /*
+@@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos); \
+ clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
+ jh sk_load_##NAME##_slow; \
+ LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
+- b OFF_OK(%r6); /* Return */ \
++ B_EX OFF_OK,%r6; /* Return */ \
+ \
+ sk_load_##NAME##_slow:; \
+ lgr %r2,%r7; /* Arg1 = skb pointer */ \
+@@ -64,11 +65,14 @@ sk_load_##NAME##_slow:; \
+ brasl %r14,skb_copy_bits; /* Get data from skb */ \
+ 	LOAD	%r14,STK_OFF_TMP(%r15);	/* Load from temp buffer */	\
+ ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
+- br %r6; /* Return */
++ BR_EX %r6; /* Return */
+
+ sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
+ sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
+
++ GEN_BR_THUNK %r6
++ GEN_B_THUNK OFF_OK,%r6
++
+ /*
+ * Load 1 byte from SKB (optimized version)
+ */
+@@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos)
+ clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
+ jnl sk_load_byte_slow
+ llgc %r14,0(%r3,%r12) # Get byte from skb
+- b OFF_OK(%r6) # Return OK
++ B_EX OFF_OK,%r6 # Return OK
+
+ sk_load_byte_slow:
+ lgr %r2,%r7 # Arg1 = skb pointer
+@@ -90,7 +94,7 @@ sk_load_byte_slow:
+ brasl %r14,skb_copy_bits # Get data from skb
+ llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
+ ltgr %r2,%r2 # Set cc to (%r2 != 0)
+- br %r6 # Return cc
++ BR_EX %r6 # Return cc
+
+ #define sk_negative_common(NAME, SIZE, LOAD) \
+ sk_load_##NAME##_slow_neg:; \
+@@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:; \
+ jz bpf_error; \
+ LOAD %r14,0(%r2); /* Get data from pointer */ \
+ xr %r3,%r3; /* Set cc to zero */ \
+- br %r6; /* Return cc */
++ BR_EX %r6; /* Return cc */
+
+ sk_negative_common(word, 4, llgf)
+ sk_negative_common(half, 2, llgh)
+@@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc)
+ bpf_error:
+ # force a return 0 from jit handler
+ ltgr %r15,%r15 # Set condition code
+- br %r6
++ BR_EX %r6
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 78a19c93b380..dd2bcf0e7d00 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -25,6 +25,8 @@
+ #include <linux/bpf.h>
+ #include <asm/cacheflush.h>
+ #include <asm/dis.h>
++#include <asm/facility.h>
++#include <asm/nospec-branch.h>
+ #include <asm/set_memory.h>
+ #include "bpf_jit.h"
+
+@@ -41,6 +43,8 @@ struct bpf_jit {
+ int base_ip; /* Base address for literal pool */
+ int ret0_ip; /* Address of return 0 */
+ int exit_ip; /* Address of exit */
++ int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
++ int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
+ int tail_call_start; /* Tail call start offset */
+ int labels[1]; /* Labels for local jumps */
+ };
+@@ -250,6 +254,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+ REG_SET_SEEN(b2); \
+ })
+
++#define EMIT6_PCREL_RILB(op, b, target) \
++({ \
++ int rel = (target - jit->prg) / 2; \
++ _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
++ REG_SET_SEEN(b); \
++})
++
++#define EMIT6_PCREL_RIL(op, target) \
++({ \
++ int rel = (target - jit->prg) / 2; \
++ _EMIT6(op | rel >> 16, rel & 0xffff); \
++})
++
+ #define _EMIT6_IMM(op, imm) \
+ ({ \
+ unsigned int __imm = (imm); \
+@@ -469,8 +486,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
+ EMIT4(0xb9040000, REG_2, BPF_REG_0);
+ /* Restore registers */
+ save_restore_regs(jit, REGS_RESTORE, stack_depth);
++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
++ jit->r14_thunk_ip = jit->prg;
++ /* Generate __s390_indirect_jump_r14 thunk */
++ if (test_facility(35)) {
++ /* exrl %r0,.+10 */
++ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
++ } else {
++ /* larl %r1,.+14 */
++ EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
++ /* ex 0,0(%r1) */
++ EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
++ }
++ /* j . */
++ EMIT4_PCREL(0xa7f40000, 0);
++ }
+ /* br %r14 */
+ _EMIT2(0x07fe);
++
++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
++ (jit->seen & SEEN_FUNC)) {
++ jit->r1_thunk_ip = jit->prg;
++ /* Generate __s390_indirect_jump_r1 thunk */
++ if (test_facility(35)) {
++ /* exrl %r0,.+10 */
++ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
++ /* j . */
++ EMIT4_PCREL(0xa7f40000, 0);
++ /* br %r1 */
++ _EMIT2(0x07f1);
++ } else {
++ /* larl %r1,.+14 */
++ EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
++			/* ex 0,S390_lowcore.br_r1_trampoline */
++ EMIT4_DISP(0x44000000, REG_0, REG_0,
++ offsetof(struct lowcore, br_r1_trampoline));
++ /* j . */
++ EMIT4_PCREL(0xa7f40000, 0);
++ }
++ }
+ }
+
+ /*
+@@ -966,8 +1020,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
+ /* lg %w1,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
+ EMIT_CONST_U64(func));
+- /* basr %r14,%w1 */
+- EMIT2(0x0d00, REG_14, REG_W1);
++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
++ /* brasl %r14,__s390_indirect_jump_r1 */
++ EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
++ } else {
++ /* basr %r14,%w1 */
++ EMIT2(0x0d00, REG_14, REG_W1);
++ }
+ /* lgr %b0,%r2: load return value into %b0 */
+ EMIT4(0xb9040000, BPF_REG_0, REG_2);
+ if ((jit->seen & SEEN_SKB) &&
+diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
+index 1a0fa10cb6b7..32bae68e34c1 100644
+--- a/arch/sparc/kernel/vio.c
++++ b/arch/sparc/kernel/vio.c
+@@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
+ if (err) {
+ printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
+ dev_name(&vdev->dev), err);
+- kfree(vdev);
++ put_device(&vdev->dev);
+ return NULL;
+ }
+ if (vdev->dp)
+diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
+index edfede768688..5167f3f74136 100644
+--- a/arch/x86/kernel/machine_kexec_32.c
++++ b/arch/x86/kernel/machine_kexec_32.c
+@@ -57,12 +57,17 @@ static void load_segments(void)
+ static void machine_kexec_free_page_tables(struct kimage *image)
+ {
+ free_page((unsigned long)image->arch.pgd);
++ image->arch.pgd = NULL;
+ #ifdef CONFIG_X86_PAE
+ free_page((unsigned long)image->arch.pmd0);
++ image->arch.pmd0 = NULL;
+ free_page((unsigned long)image->arch.pmd1);
++ image->arch.pmd1 = NULL;
+ #endif
+ free_page((unsigned long)image->arch.pte0);
++ image->arch.pte0 = NULL;
+ free_page((unsigned long)image->arch.pte1);
++ image->arch.pte1 = NULL;
+ }
+
+ static int machine_kexec_alloc_page_tables(struct kimage *image)
+@@ -79,7 +84,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image)
+ !image->arch.pmd0 || !image->arch.pmd1 ||
+ #endif
+ !image->arch.pte0 || !image->arch.pte1) {
+- machine_kexec_free_page_tables(image);
+ return -ENOMEM;
+ }
+ return 0;
+diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
+index 3b7427aa7d85..5bce2a88e8a3 100644
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -38,9 +38,13 @@ static struct kexec_file_ops *kexec_file_loaders[] = {
+ static void free_transition_pgtable(struct kimage *image)
+ {
+ free_page((unsigned long)image->arch.p4d);
++ image->arch.p4d = NULL;
+ free_page((unsigned long)image->arch.pud);
++ image->arch.pud = NULL;
+ free_page((unsigned long)image->arch.pmd);
++ image->arch.pmd = NULL;
+ free_page((unsigned long)image->arch.pte);
++ image->arch.pte = NULL;
+ }
+
+ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+@@ -90,7 +94,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+ set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC));
+ return 0;
+ err:
+- free_transition_pgtable(image);
+ return result;
+ }
+
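
The machine_kexec changes above follow a common cleanup idiom: NULL each
pointer as it is freed so the cleanup routine becomes idempotent, which lets
the allocation error path simply return and leave the unwinding to the caller.
A minimal user-space sketch of that idiom, with invented names:

#include <stdlib.h>

struct tables {
	void *pgd;
	void *pte;
};

static void tables_free(struct tables *t)
{
	free(t->pgd);
	t->pgd = NULL;      /* safe to call tables_free() again */
	free(t->pte);
	t->pte = NULL;
}

static int tables_alloc(struct tables *t)
{
	t->pgd = calloc(1, 4096);
	t->pte = calloc(1, 4096);
	if (!t->pgd || !t->pte)
		return -1;  /* no unwind here; caller runs tables_free() */
	return 0;
}

int main(void)
{
	struct tables t = { 0 };
	int ret = tables_alloc(&t);

	tables_free(&t);    /* idempotent: fine after success or failure */
	return ret ? 1 : 0;
}
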
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index fe92cb972dd1..1629a2099adf 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1171,21 +1171,17 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+ static int
+ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ {
+- struct file *file = lo->lo_backing_file;
++ struct file *file;
+ struct kstat stat;
+- int error;
++ int ret;
+
+- if (lo->lo_state != Lo_bound)
++ if (lo->lo_state != Lo_bound) {
++ mutex_unlock(&lo->lo_ctl_mutex);
+ return -ENXIO;
+- error = vfs_getattr(&file->f_path, &stat,
+- STATX_INO, AT_STATX_SYNC_AS_STAT);
+- if (error)
+- return error;
++ }
++
+ memset(info, 0, sizeof(*info));
+ info->lo_number = lo->lo_number;
+- info->lo_device = huge_encode_dev(stat.dev);
+- info->lo_inode = stat.ino;
+- info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
+ info->lo_offset = lo->lo_offset;
+ info->lo_sizelimit = lo->lo_sizelimit;
+ info->lo_flags = lo->lo_flags;
+@@ -1198,7 +1194,19 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
+ lo->lo_encrypt_key_size);
+ }
+- return 0;
++
++ /* Drop lo_ctl_mutex while we call into the filesystem. */
++ file = get_file(lo->lo_backing_file);
++ mutex_unlock(&lo->lo_ctl_mutex);
++ ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
++ AT_STATX_SYNC_AS_STAT);
++ if (!ret) {
++ info->lo_device = huge_encode_dev(stat.dev);
++ info->lo_inode = stat.ino;
++ info->lo_rdevice = huge_encode_dev(stat.rdev);
++ }
++ fput(file);
++ return ret;
+ }
+
+ static void
+@@ -1279,12 +1287,13 @@ static int
+ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
+ struct loop_info info;
+ struct loop_info64 info64;
+- int err = 0;
++ int err;
+
+- if (!arg)
+- err = -EINVAL;
+- if (!err)
+- err = loop_get_status(lo, &info64);
++ if (!arg) {
++ mutex_unlock(&lo->lo_ctl_mutex);
++ return -EINVAL;
++ }
++ err = loop_get_status(lo, &info64);
+ if (!err)
+ err = loop_info64_to_old(&info64, &info);
+ if (!err && copy_to_user(arg, &info, sizeof(info)))
+@@ -1296,12 +1305,13 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
+ static int
+ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
+ struct loop_info64 info64;
+- int err = 0;
++ int err;
+
+- if (!arg)
+- err = -EINVAL;
+- if (!err)
+- err = loop_get_status(lo, &info64);
++ if (!arg) {
++ mutex_unlock(&lo->lo_ctl_mutex);
++ return -EINVAL;
++ }
++ err = loop_get_status(lo, &info64);
+ if (!err && copy_to_user(arg, &info64, sizeof(info64)))
+ err = -EFAULT;
+
+@@ -1378,7 +1388,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ break;
+ case LOOP_GET_STATUS:
+ err = loop_get_status_old(lo, (struct loop_info __user *) arg);
+- break;
++ /* loop_get_status() unlocks lo_ctl_mutex */
++ goto out_unlocked;
+ case LOOP_SET_STATUS64:
+ err = -EPERM;
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+@@ -1387,7 +1398,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ break;
+ case LOOP_GET_STATUS64:
+ err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
+- break;
++ /* loop_get_status() unlocks lo_ctl_mutex */
++ goto out_unlocked;
+ case LOOP_SET_CAPACITY:
+ err = -EPERM;
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+@@ -1520,12 +1532,13 @@ loop_get_status_compat(struct loop_device *lo,
+ struct compat_loop_info __user *arg)
+ {
+ struct loop_info64 info64;
+- int err = 0;
++ int err;
+
+- if (!arg)
+- err = -EINVAL;
+- if (!err)
+- err = loop_get_status(lo, &info64);
++ if (!arg) {
++ mutex_unlock(&lo->lo_ctl_mutex);
++ return -EINVAL;
++ }
++ err = loop_get_status(lo, &info64);
+ if (!err)
+ err = loop_info64_to_compat(&info64, arg);
+ return err;
+@@ -1548,7 +1561,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ mutex_lock(&lo->lo_ctl_mutex);
+ err = loop_get_status_compat(
+ lo, (struct compat_loop_info __user *) arg);
+- mutex_unlock(&lo->lo_ctl_mutex);
++ /* loop_get_status() unlocks lo_ctl_mutex */
+ break;
+ case LOOP_SET_CAPACITY:
+ case LOOP_CLR_FD:
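
The loop_get_status() rework above illustrates a standard locking pattern: pin
the object with a reference (get_file), drop the controlling mutex before a
call that may block (vfs_getattr), then drop the reference. Below is a
pthread-based sketch of the same shape; the names and the hand-rolled refcount
standing in for the file reference are assumptions for illustration only.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct backing {
	atomic_int refcnt;
	int id;
};

static struct backing *backing_get(struct backing *b)
{
	atomic_fetch_add(&b->refcnt, 1);
	return b;
}

static void backing_put(struct backing *b)
{
	if (atomic_fetch_sub(&b->refcnt, 1) == 1)
		free(b);
}

static pthread_mutex_t ctl_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct backing *current_backing;

static int get_status(int *out)
{
	struct backing *b;

	pthread_mutex_lock(&ctl_mutex);
	if (!current_backing) {
		pthread_mutex_unlock(&ctl_mutex);
		return -1;
	}
	b = backing_get(current_backing);
	pthread_mutex_unlock(&ctl_mutex);  /* drop lock before blocking work */

	*out = b->id;                      /* stands in for vfs_getattr() */
	backing_put(b);
	return 0;
}

int main(void)
{
	int id;

	current_backing = calloc(1, sizeof(*current_backing));
	atomic_init(&current_backing->refcnt, 1);
	current_backing->id = 7;

	if (!get_status(&id))
		printf("status id %d\n", id);
	backing_put(current_backing);
	return 0;
}
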
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index dcb982e3a41f..6bfb8088e5f5 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -340,6 +340,7 @@ static const struct usb_device_id blacklist_table[] = {
+
+ /* Intel Bluetooth devices */
+ { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
++ { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
+ { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
+ { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
+ { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
+@@ -367,6 +368,9 @@ static const struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
+
++ /* Additional Realtek 8723BU Bluetooth devices */
++ { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
++
+ /* Additional Realtek 8821AE Bluetooth devices */
+ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
+@@ -374,6 +378,9 @@ static const struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
+
++ /* Additional Realtek 8822BE Bluetooth devices */
++ { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
++
+ /* Silicon Wave based devices */
+ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+
+@@ -2080,6 +2087,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
+ case 0x0c: /* WsP */
+ case 0x11: /* JfP */
+ case 0x12: /* ThP */
++ case 0x13: /* HrP */
++ case 0x14: /* QnJ, IcP */
+ break;
+ default:
+ BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+@@ -2172,6 +2181,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
+ break;
+ case 0x11: /* JfP */
+ case 0x12: /* ThP */
++ case 0x13: /* HrP */
++ case 0x14: /* QnJ, IcP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(ver.hw_revision),
+@@ -2203,6 +2214,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
+ break;
+ case 0x11: /* JfP */
+ case 0x12: /* ThP */
++ case 0x13: /* HrP */
++ case 0x14: /* QnJ, IcP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(ver.hw_revision),
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 076d4244d672..5698d2fac1af 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -2375,6 +2375,9 @@ static int clk_core_get_phase(struct clk_core *core)
+ int ret;
+
+ clk_prepare_lock();
++ /* Always try to update cached phase if possible */
++ if (core->ops->get_phase)
++ core->phase = core->ops->get_phase(core->hw);
+ ret = core->phase;
+ clk_prepare_unlock();
+
+diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
+index 2007123832bb..53450b651e4c 100644
+--- a/drivers/clk/hisilicon/crg-hi3516cv300.c
++++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
+@@ -204,7 +204,7 @@ static const struct hisi_crg_funcs hi3516cv300_crg_funcs = {
+ /* hi3516CV300 sysctrl CRG */
+ #define HI3516CV300_SYSCTRL_NR_CLKS 16
+
+-static const char *wdt_mux_p[] __initconst = { "3m", "apb" };
++static const char *const wdt_mux_p[] __initconst = { "3m", "apb" };
+ static u32 wdt_mux_table[] = {0, 1};
+
+ static const struct hisi_mux_clock hi3516cv300_sysctrl_mux_clks[] = {
+diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
+index 1294f3ad7cd5..3b8b53b279dc 100644
+--- a/drivers/clk/meson/axg.c
++++ b/drivers/clk/meson/axg.c
+@@ -129,6 +129,11 @@ static struct meson_clk_pll axg_fixed_pll = {
+ .shift = 16,
+ .width = 2,
+ },
++ .frac = {
++ .reg_off = HHI_MPLL_CNTL2,
++ .shift = 0,
++ .width = 12,
++ },
+ .lock = &meson_clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "fixed_pll",
+@@ -151,7 +156,7 @@ static struct meson_clk_pll axg_sys_pll = {
+ },
+ .od = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+- .shift = 10,
++ .shift = 16,
+ .width = 2,
+ },
+ .rate_table = sys_pll_rate_table,
+diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
+index 077fcdc7908b..fe7d9ed1d436 100644
+--- a/drivers/clk/rockchip/clk-mmc-phase.c
++++ b/drivers/clk/rockchip/clk-mmc-phase.c
+@@ -58,6 +58,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
+ u16 degrees;
+ u32 delay_num = 0;
+
++ /* See the comment for rockchip_mmc_set_phase below */
++ if (!rate) {
++ pr_err("%s: invalid clk rate\n", __func__);
++ return -EINVAL;
++ }
++
+ raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+
+ degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
+@@ -84,6 +90,23 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
+ u32 raw_value;
+ u32 delay;
+
++ /*
++ * The below calculation is based on the output clock from
++	 * MMC host to the card, which expects the phase clock to inherit
++ * the clock rate from its parent, namely the output clock
++ * provider of MMC host. However, things may go wrong if
++ * (1) It is orphan.
++ * (2) It is assigned to the wrong parent.
++ *
++	 * This check helps debug case (1), which seems to be the
++ * most likely problem we often face and which makes it difficult
++ * for people to debug unstable mmc tuning results.
++ */
++ if (!rate) {
++ pr_err("%s: invalid clk rate\n", __func__);
++ return -EINVAL;
++ }
++
+ nineties = degrees / 90;
+ remainder = (degrees % 90);
+
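
The new !rate checks above guard the delay calculation against a zero parent
rate (an orphan or mis-parented clock), which would otherwise divide by zero.
Here is a toy version of the degrees-to-delay computation with that guard; the
constants and names are illustrative, not the Rockchip register layout.

#include <stdint.h>
#include <stdio.h>

#define PSECS_PER_SEC 1000000000000ULL

static int degrees_to_delay_ps(uint64_t rate_hz, unsigned int degrees,
			       uint64_t *delay_ps)
{
	if (!rate_hz)
		return -1;              /* mirrors the -EINVAL path above */

	/* one full clock period corresponds to 360 degrees of phase */
	*delay_ps = PSECS_PER_SEC / rate_hz * degrees / 360;
	return 0;
}

int main(void)
{
	uint64_t ps;

	if (!degrees_to_delay_ps(50000000, 90, &ps))  /* 50 MHz, 90 degrees */
		printf("delay: %llu ps\n", (unsigned long long)ps);
	return 0;
}
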
+diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
+index 11e7f2d1c054..7af48184b022 100644
+--- a/drivers/clk/rockchip/clk-rk3228.c
++++ b/drivers/clk/rockchip/clk-rk3228.c
+@@ -387,7 +387,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
+ RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS,
+ RK2928_CLKGATE_CON(2), 15, GFLAGS),
+
+- COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
++ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
+ RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK2928_CLKGATE_CON(2), 11, GFLAGS),
+
+diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
+index 1b81e283f605..ed36728424a2 100644
+--- a/drivers/clk/samsung/clk-exynos3250.c
++++ b/drivers/clk/samsung/clk-exynos3250.c
+@@ -698,7 +698,7 @@ static const struct samsung_pll_rate_table exynos3250_epll_rates[] __initconst =
+ PLL_36XX_RATE(144000000, 96, 2, 3, 0),
+ PLL_36XX_RATE( 96000000, 128, 2, 4, 0),
+ PLL_36XX_RATE( 84000000, 112, 2, 4, 0),
+- PLL_36XX_RATE( 80000004, 106, 2, 4, 43691),
++ PLL_36XX_RATE( 80000003, 106, 2, 4, 43691),
+ PLL_36XX_RATE( 73728000, 98, 2, 4, 19923),
+ PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
+ PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
+@@ -734,7 +734,7 @@ static const struct samsung_pll_rate_table exynos3250_vpll_rates[] __initconst =
+ PLL_36XX_RATE(148352005, 98, 2, 3, 59070),
+ PLL_36XX_RATE(108000000, 144, 2, 4, 0),
+ PLL_36XX_RATE( 74250000, 99, 2, 4, 0),
+- PLL_36XX_RATE( 74176002, 98, 3, 4, 59070),
++ PLL_36XX_RATE( 74176002, 98, 2, 4, 59070),
+ PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
+ PLL_36XX_RATE( 54000000, 144, 2, 5, 0),
+ { /* sentinel */ }
+diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
+index 9b073c98a891..923c608b1b95 100644
+--- a/drivers/clk/samsung/clk-exynos5250.c
++++ b/drivers/clk/samsung/clk-exynos5250.c
+@@ -711,13 +711,13 @@ static const struct samsung_pll_rate_table epll_24mhz_tbl[] __initconst = {
+ /* sorted in descending order */
+ /* PLL_36XX_RATE(rate, m, p, s, k) */
+ PLL_36XX_RATE(192000000, 64, 2, 2, 0),
+- PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
++ PLL_36XX_RATE(180633605, 90, 3, 2, 20762),
+ PLL_36XX_RATE(180000000, 90, 3, 2, 0),
+ PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
+- PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
++ PLL_36XX_RATE(67737602, 90, 2, 4, 20762),
+ PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
+- PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
+- PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
++ PLL_36XX_RATE(45158401, 90, 3, 4, 20762),
++ PLL_36XX_RATE(32768001, 131, 3, 5, 4719),
+ { },
+ };
+
+diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
+index fd1d9bfc151b..8eae1752d700 100644
+--- a/drivers/clk/samsung/clk-exynos5260.c
++++ b/drivers/clk/samsung/clk-exynos5260.c
+@@ -65,7 +65,7 @@ static const struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initconst = {
+ PLL_36XX_RATE(480000000, 160, 2, 2, 0),
+ PLL_36XX_RATE(432000000, 144, 2, 2, 0),
+ PLL_36XX_RATE(400000000, 200, 3, 2, 0),
+- PLL_36XX_RATE(394073130, 459, 7, 2, 49282),
++ PLL_36XX_RATE(394073128, 459, 7, 2, 49282),
+ PLL_36XX_RATE(333000000, 111, 2, 2, 0),
+ PLL_36XX_RATE(300000000, 100, 2, 2, 0),
+ PLL_36XX_RATE(266000000, 266, 3, 3, 0),
+diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
+index db270908037a..335bebfa21c0 100644
+--- a/drivers/clk/samsung/clk-exynos5433.c
++++ b/drivers/clk/samsung/clk-exynos5433.c
+@@ -729,7 +729,7 @@ static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst =
+ PLL_35XX_RATE(800000000U, 400, 6, 1),
+ PLL_35XX_RATE(733000000U, 733, 12, 1),
+ PLL_35XX_RATE(700000000U, 175, 3, 1),
+- PLL_35XX_RATE(667000000U, 222, 4, 1),
++ PLL_35XX_RATE(666000000U, 222, 4, 1),
+ PLL_35XX_RATE(633000000U, 211, 4, 1),
+ PLL_35XX_RATE(600000000U, 500, 5, 2),
+ PLL_35XX_RATE(552000000U, 460, 5, 2),
+@@ -757,12 +757,12 @@ static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst =
+ /* AUD_PLL */
+ static const struct samsung_pll_rate_table exynos5433_aud_pll_rates[] __initconst = {
+ PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
+- PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
++ PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
+ PLL_36XX_RATE(384000000U, 128, 2, 2, 0),
+- PLL_36XX_RATE(368640000U, 246, 4, 2, -15729),
+- PLL_36XX_RATE(361507200U, 181, 3, 2, -16148),
+- PLL_36XX_RATE(338688000U, 113, 2, 2, -6816),
+- PLL_36XX_RATE(294912000U, 98, 1, 3, 19923),
++ PLL_36XX_RATE(368639991U, 246, 4, 2, -15729),
++ PLL_36XX_RATE(361507202U, 181, 3, 2, -16148),
++ PLL_36XX_RATE(338687988U, 113, 2, 2, -6816),
++ PLL_36XX_RATE(294912002U, 98, 1, 3, 19923),
+ PLL_36XX_RATE(288000000U, 96, 1, 3, 0),
+ PLL_36XX_RATE(252000000U, 84, 1, 3, 0),
+ { /* sentinel */ }
+diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
+index 5931a4140c3d..bbfa57b4e017 100644
+--- a/drivers/clk/samsung/clk-exynos7.c
++++ b/drivers/clk/samsung/clk-exynos7.c
+@@ -140,7 +140,7 @@ static const struct samsung_div_clock topc_div_clks[] __initconst = {
+ };
+
+ static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = {
+- PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
++ PLL_36XX_RATE(491519897, 20, 1, 0, 31457),
+ {},
+ };
+
+diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
+index e0650c33863b..d8e58a659467 100644
+--- a/drivers/clk/samsung/clk-s3c2410.c
++++ b/drivers/clk/samsung/clk-s3c2410.c
+@@ -168,7 +168,7 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
+ PLL_35XX_RATE(226000000, 105, 1, 1),
+ PLL_35XX_RATE(210000000, 132, 2, 1),
+ /* 2410 common */
+- PLL_35XX_RATE(203000000, 161, 3, 1),
++ PLL_35XX_RATE(202800000, 161, 3, 1),
+ PLL_35XX_RATE(192000000, 88, 1, 1),
+ PLL_35XX_RATE(186000000, 85, 1, 1),
+ PLL_35XX_RATE(180000000, 82, 1, 1),
+@@ -178,18 +178,18 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
+ PLL_35XX_RATE(147000000, 90, 2, 1),
+ PLL_35XX_RATE(135000000, 82, 2, 1),
+ PLL_35XX_RATE(124000000, 116, 1, 2),
+- PLL_35XX_RATE(118000000, 150, 2, 2),
++ PLL_35XX_RATE(118500000, 150, 2, 2),
+ PLL_35XX_RATE(113000000, 105, 1, 2),
+- PLL_35XX_RATE(101000000, 127, 2, 2),
++ PLL_35XX_RATE(101250000, 127, 2, 2),
+ PLL_35XX_RATE(90000000, 112, 2, 2),
+- PLL_35XX_RATE(85000000, 105, 2, 2),
++ PLL_35XX_RATE(84750000, 105, 2, 2),
+ PLL_35XX_RATE(79000000, 71, 1, 2),
+- PLL_35XX_RATE(68000000, 82, 2, 2),
+- PLL_35XX_RATE(56000000, 142, 2, 3),
++ PLL_35XX_RATE(67500000, 82, 2, 2),
++ PLL_35XX_RATE(56250000, 142, 2, 3),
+ PLL_35XX_RATE(48000000, 120, 2, 3),
+- PLL_35XX_RATE(51000000, 161, 3, 3),
++ PLL_35XX_RATE(50700000, 161, 3, 3),
+ PLL_35XX_RATE(45000000, 82, 1, 3),
+- PLL_35XX_RATE(34000000, 82, 2, 3),
++ PLL_35XX_RATE(33750000, 82, 2, 3),
+ { /* sentinel */ },
+ };
+
+diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
+index 7c369e21c91c..830d1c87fa7c 100644
+--- a/drivers/clk/tegra/clk-pll.c
++++ b/drivers/clk/tegra/clk-pll.c
+@@ -1151,6 +1151,8 @@ static const struct clk_ops tegra_clk_pllu_ops = {
+ .enable = clk_pllu_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
++ .round_rate = clk_pll_round_rate,
++ .set_rate = clk_pll_set_rate,
+ };
+
+ static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
+diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
+index 691c6465b71e..8561cce67741 100644
+--- a/drivers/crypto/atmel-aes.c
++++ b/drivers/crypto/atmel-aes.c
+@@ -2155,7 +2155,7 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
+
+ badkey:
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+- memzero_explicit(&key, sizeof(keys));
++ memzero_explicit(&keys, sizeof(keys));
+ return -EINVAL;
+ }
+
+diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
+index 59d4ca4e72d8..1a734bd2070a 100644
+--- a/drivers/crypto/ccp/ccp-debugfs.c
++++ b/drivers/crypto/ccp/ccp-debugfs.c
+@@ -278,7 +278,7 @@ static const struct file_operations ccp_debugfs_stats_ops = {
+ };
+
+ static struct dentry *ccp_debugfs_dir;
+-static DEFINE_RWLOCK(ccp_debugfs_lock);
++static DEFINE_MUTEX(ccp_debugfs_lock);
+
+ #define MAX_NAME_LEN 20
+
+@@ -290,16 +290,15 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
+ struct dentry *debugfs_stats;
+ struct dentry *debugfs_q_instance;
+ struct dentry *debugfs_q_stats;
+- unsigned long flags;
+ int i;
+
+ if (!debugfs_initialized())
+ return;
+
+- write_lock_irqsave(&ccp_debugfs_lock, flags);
++ mutex_lock(&ccp_debugfs_lock);
+ if (!ccp_debugfs_dir)
+ ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+- write_unlock_irqrestore(&ccp_debugfs_lock, flags);
++ mutex_unlock(&ccp_debugfs_lock);
+ if (!ccp_debugfs_dir)
+ return;
+
+diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
+index 0dd3a7ac1dd1..f4a76971b4ac 100644
+--- a/drivers/crypto/inside-secure/safexcel.c
++++ b/drivers/crypto/inside-secure/safexcel.c
+@@ -490,6 +490,15 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
++ /* In case the send() helper did not issue any command to push
++ * to the engine because the input data was cached, continue to
++ * dequeue other requests as this is valid and not an error.
++ */
++ if (!commands && !results) {
++ kfree(request);
++ continue;
++ }
++
+ spin_lock_bh(&priv->ring[ring].egress_lock);
+ list_add_tail(&request->list, &priv->ring[ring].list);
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
+@@ -514,8 +523,7 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
+
+ if (!priv->ring[ring].busy) {
+ nreq -= safexcel_try_push_requests(priv, ring, nreq);
+- if (nreq)
+- priv->ring[ring].busy = true;
++ priv->ring[ring].busy = true;
+ }
+
+ priv->ring[ring].requests_left += nreq;
+diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
+index 63a8768ed2ae..17a7725a6f6d 100644
+--- a/drivers/crypto/inside-secure/safexcel_cipher.c
++++ b/drivers/crypto/inside-secure/safexcel_cipher.c
+@@ -456,7 +456,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
+
+- wait_for_completion_interruptible(&result.completion);
++ wait_for_completion(&result.completion);
+
+ if (result.error) {
+ dev_warn(priv->dev,
+diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
+index 122a2a58e98f..3e65bb5732da 100644
+--- a/drivers/crypto/inside-secure/safexcel_hash.c
++++ b/drivers/crypto/inside-secure/safexcel_hash.c
+@@ -21,7 +21,6 @@ struct safexcel_ahash_ctx {
+ struct safexcel_crypto_priv *priv;
+
+ u32 alg;
+- u32 digest;
+
+ u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
+@@ -35,6 +34,8 @@ struct safexcel_ahash_req {
+
+ int nents;
+
++ u32 digest;
++
+ 	u8 state_sz;	/* expected state size, only set once */
+ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+
+@@ -49,6 +50,8 @@ struct safexcel_ahash_export_state {
+ u64 len;
+ u64 processed;
+
++ u32 digest;
++
+ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u8 cache[SHA256_BLOCK_SIZE];
+ };
+@@ -82,9 +85,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
+
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
+ cdesc->control_data.control0 |= ctx->alg;
+- cdesc->control_data.control0 |= ctx->digest;
++ cdesc->control_data.control0 |= req->digest;
+
+- if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
++ if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
+ if (req->processed) {
+ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
+@@ -112,7 +115,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
+ if (req->finish)
+ ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
+ }
+- } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
++ } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);
+
+ memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
+@@ -184,7 +187,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+ int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+
+ queued = len = req->len - req->processed;
+- if (queued < crypto_ahash_blocksize(ahash))
++ if (queued <= crypto_ahash_blocksize(ahash))
+ cache_len = queued;
+ else
+ cache_len = queued - areq->nbytes;
+@@ -198,7 +201,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+ /* If this is not the last request and the queued data
+ * is a multiple of a block, cache the last one for now.
+ */
+- extra = queued - crypto_ahash_blocksize(ahash);
++ extra = crypto_ahash_blocksize(ahash);
+
+ if (extra) {
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+@@ -493,7 +496,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
+
+- wait_for_completion_interruptible(&result.completion);
++ wait_for_completion(&result.completion);
+
+ if (result.error) {
+ dev_warn(priv->dev, "hash: completion error (%d)\n",
+@@ -550,7 +553,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
+ if (ctx->base.ctxr) {
+ if (priv->version == EIP197 &&
+ !ctx->base.needs_inv && req->processed &&
+- ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
++ req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+ /* We're still setting needs_inv here, even though it is
+ * cleared right away, because the needs_inv flag can be
+ * set in other functions and we want to keep the same
+@@ -585,7 +588,6 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
+
+ static int safexcel_ahash_update(struct ahash_request *areq)
+ {
+- struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+@@ -601,7 +603,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
+ * We're not doing partial updates when performing an hmac request.
+ * Everything will be handled by the final() call.
+ */
+- if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
++ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ return 0;
+
+ if (req->hmac)
+@@ -660,6 +662,8 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
+ export->len = req->len;
+ export->processed = req->processed;
+
++ export->digest = req->digest;
++
+ memcpy(export->state, req->state, req->state_sz);
+ memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+
+@@ -680,6 +684,8 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
+ req->len = export->len;
+ req->processed = export->processed;
+
++ req->digest = export->digest;
++
+ memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+ memcpy(req->state, export->state, req->state_sz);
+
+@@ -716,7 +722,7 @@ static int safexcel_sha1_init(struct ahash_request *areq)
+ req->state[4] = SHA1_H4;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+- ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
++ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA1_DIGEST_SIZE;
+
+ return 0;
+@@ -783,10 +789,10 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
+
+ static int safexcel_hmac_sha1_init(struct ahash_request *areq)
+ {
+- struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha1_init(areq);
+- ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
++ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+ }
+
+@@ -839,7 +845,7 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
+ init_completion(&result.completion);
+
+ ret = crypto_ahash_digest(areq);
+- if (ret == -EINPROGRESS) {
++ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ wait_for_completion_interruptible(&result.completion);
+ ret = result.error;
+ }
+@@ -1024,7 +1030,7 @@ static int safexcel_sha256_init(struct ahash_request *areq)
+ req->state[7] = SHA256_H7;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+- ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
++ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA256_DIGEST_SIZE;
+
+ return 0;
+@@ -1086,7 +1092,7 @@ static int safexcel_sha224_init(struct ahash_request *areq)
+ req->state[7] = SHA224_H7;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+- ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
++ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA256_DIGEST_SIZE;
+
+ return 0;
+diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+index 1547cbe13dc2..a81d89b3b7d8 100644
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+@@ -451,6 +451,7 @@ static struct platform_driver sun4i_ss_driver = {
+
+ module_platform_driver(sun4i_ss_driver);
+
++MODULE_ALIAS("platform:sun4i-ss");
+ MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
+diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+index 3a7c80cd1a17..359fb9804d16 100644
+--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
++++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+@@ -106,7 +106,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
+ if (nums[i-1] + 1 != nums[i])
+ goto fail_map;
+ buf->vaddr = (__force void *)
+- ioremap_nocache(nums[0] << PAGE_SHIFT, size);
++ ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
+ } else {
+ buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
+ PAGE_KERNEL);
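
The videobuf2 fix above maps __pfn_to_phys(nums[0]) for size + offset bytes:
the page frame number must be shifted into a physical address, and a user
pointer that is not page-aligned still needs the leading offset bytes of its
first page mapped. A few lines of plain arithmetic showing the quantities
involved, with example values only and no MMU calls:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long vaddr  = 0x7f1234567abcUL;   /* example user address */
	unsigned long size   = 8192;               /* bytes requested */
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long pfn    = 0x89abcUL;          /* first page's frame */
	unsigned long phys   = pfn << PAGE_SHIFT;  /* what __pfn_to_phys() does */

	printf("map phys 0x%lx for %lu bytes (offset 0x%lx)\n",
	       phys, size + offset, offset);
	return 0;
}
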
+diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
+index 6356815cf3e1..3642e6e4761e 100644
+--- a/drivers/media/dvb-frontends/lgdt3306a.c
++++ b/drivers/media/dvb-frontends/lgdt3306a.c
+@@ -1768,7 +1768,13 @@ static void lgdt3306a_release(struct dvb_frontend *fe)
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+
+ dbg_info("\n");
+- kfree(state);
++
++ /*
++ * If state->muxc is not NULL, then we are an i2c device
++ * and lgdt3306a_remove will clean up state
++ */
++ if (!state->muxc)
++ kfree(state);
+ }
+
+ static const struct dvb_frontend_ops lgdt3306a_ops;
+@@ -2169,7 +2175,7 @@ static int lgdt3306a_probe(struct i2c_client *client,
+ sizeof(struct lgdt3306a_config));
+
+ config->i2c_addr = client->addr;
+- fe = lgdt3306a_attach(config, client->adapter);
++ fe = dvb_attach(lgdt3306a_attach, config, client->adapter);
+ if (fe == NULL) {
+ ret = -ENODEV;
+ goto err_fe;
+diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c
+index 4da4253553fc..10d229a4f088 100644
+--- a/drivers/media/i2c/adv748x/adv748x-hdmi.c
++++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c
+@@ -105,6 +105,9 @@ static void adv748x_hdmi_fill_format(struct adv748x_hdmi *hdmi,
+
+ fmt->width = hdmi->timings.bt.width;
+ fmt->height = hdmi->timings.bt.height;
++
++ if (fmt->field == V4L2_FIELD_ALTERNATE)
++ fmt->height /= 2;
+ }
+
+ static void adv748x_fill_optional_dv_timings(struct v4l2_dv_timings *timings)
+diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
+index d28845f7356f..a31fe18c71d6 100644
+--- a/drivers/media/i2c/ov5645.c
++++ b/drivers/media/i2c/ov5645.c
+@@ -1131,13 +1131,14 @@ static int ov5645_probe(struct i2c_client *client,
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ &ov5645->ep);
++
++ of_node_put(endpoint);
++
+ if (ret < 0) {
+ dev_err(dev, "parsing endpoint node failed\n");
+ return ret;
+ }
+
+- of_node_put(endpoint);
+-
+ if (ov5645->ep.bus_type != V4L2_MBUS_CSI2) {
+ dev_err(dev, "invalid bus type, must be CSI2\n");
+ return -EINVAL;
+diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
+index 3622521431f5..7ec8de7aee4f 100644
+--- a/drivers/media/pci/cx23885/cx23885-cards.c
++++ b/drivers/media/pci/cx23885/cx23885-cards.c
+@@ -2286,6 +2286,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
+ &dev->i2c_bus[2].i2c_adap,
+ "cx25840", 0x88 >> 1, NULL);
+ if (dev->sd_cx25840) {
++ /* set host data for clk_freq configuration */
++ v4l2_set_subdev_hostdata(dev->sd_cx25840,
++ &dev->clk_freq);
++
+ dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
+ v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
+ }
+diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
+index 8f63df1cb418..4612f26fcd6d 100644
+--- a/drivers/media/pci/cx23885/cx23885-core.c
++++ b/drivers/media/pci/cx23885/cx23885-core.c
+@@ -873,6 +873,16 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
+ if (cx23885_boards[dev->board].clk_freq > 0)
+ dev->clk_freq = cx23885_boards[dev->board].clk_freq;
+
++ if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
++ dev->pci->subsystem_device == 0x7137) {
++ /* Hauppauge ImpactVCBe device ID 0x7137 is populated
++		 * with an 888, and a 25MHz crystal, instead of the
++		 * usual third overtone 50MHz. The default clock rate must
++ * be overridden so the cx25840 is properly configured
++ */
++ dev->clk_freq = 25000000;
++ }
++
+ dev->pci_bus = dev->pci->bus->number;
+ dev->pci_slot = PCI_SLOT(dev->pci->devfn);
+ cx23885_irq_add(dev, 0x001f00);
+diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
+index 04aa4a68a0ae..040c6c251d3a 100644
+--- a/drivers/media/pci/cx25821/cx25821-core.c
++++ b/drivers/media/pci/cx25821/cx25821-core.c
+@@ -867,6 +867,10 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
+ dev->nr = ++cx25821_devcount;
+ sprintf(dev->name, "cx25821[%d]", dev->nr);
+
++ if (dev->nr >= ARRAY_SIZE(card)) {
++ CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card));
++ return -ENODEV;
++ }
+ if (dev->pci->device != 0x8210) {
+ pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
+ __func__, dev->pci->device);
+@@ -882,9 +886,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
+ dev->channels[i].sram_channels = &cx25821_sram_channels[i];
+ }
+
+- if (dev->nr > 1)
+- CX25821_INFO("dev->nr > 1!");
+-
+ /* board config */
+ dev->board = 1; /* card[dev->nr]; */
+ dev->_max_num_decoders = MAX_DECODERS;
+diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
+index 437395a61065..9ab8e7ee2e1e 100644
+--- a/drivers/media/platform/s3c-camif/camif-capture.c
++++ b/drivers/media/platform/s3c-camif/camif-capture.c
+@@ -1256,16 +1256,17 @@ static void __camif_subdev_try_format(struct camif_dev *camif,
+ {
+ const struct s3c_camif_variant *variant = camif->variant;
+ const struct vp_pix_limits *pix_lim;
+- int i = ARRAY_SIZE(camif_mbus_formats);
++ unsigned int i;
+
+ /* FIXME: constraints against codec or preview path ? */
+ pix_lim = &variant->vp_pix_limits[VP_CODEC];
+
+- while (i-- >= 0)
++ for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
+ if (camif_mbus_formats[i] == mf->code)
+ break;
+
+- mf->code = camif_mbus_formats[i];
++ if (i == ARRAY_SIZE(camif_mbus_formats))
++ mf->code = camif_mbus_formats[0];
+
+ if (pad == CAMIF_SD_PAD_SINK) {
+ v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
+diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
+index 3f9d354827af..c586c2ab9b31 100644
+--- a/drivers/media/platform/vivid/vivid-ctrls.c
++++ b/drivers/media/platform/vivid/vivid-ctrls.c
+@@ -1208,6 +1208,7 @@ static int vivid_radio_rx_s_ctrl(struct v4l2_ctrl *ctrl)
+ v4l2_ctrl_activate(dev->radio_rx_rds_ta, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_tp, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_ms, dev->radio_rx_rds_controls);
++ dev->radio_rx_dev.device_caps = dev->radio_rx_caps;
+ break;
+ case V4L2_CID_RDS_RECEPTION:
+ dev->radio_rx_rds_enabled = ctrl->val;
+@@ -1282,6 +1283,7 @@ static int vivid_radio_tx_s_ctrl(struct v4l2_ctrl *ctrl)
+ dev->radio_tx_caps &= ~V4L2_CAP_READWRITE;
+ if (!dev->radio_tx_rds_controls)
+ dev->radio_tx_caps |= V4L2_CAP_READWRITE;
++ dev->radio_tx_dev.device_caps = dev->radio_tx_caps;
+ break;
+ case V4L2_CID_RDS_TX_PTY:
+ if (dev->radio_rx_rds_controls)
+diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
+index 7ce69f23f50a..ac85942162c1 100644
+--- a/drivers/media/platform/vsp1/vsp1_drm.c
++++ b/drivers/media/platform/vsp1/vsp1_drm.c
+@@ -530,6 +530,15 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index)
+ struct vsp1_rwpf *rpf = vsp1->rpf[i];
+ unsigned int j;
+
++ /*
++ * Make sure we don't accept more inputs than the hardware can
++		 * handle. This is a temporary fix to avoid display stalls; we
++ * need to instead allocate the BRU or BRS to display pipelines
++ * dynamically based on the number of planes they each use.
++ */
++ if (pipe->num_inputs >= pipe->bru->source_pad)
++ pipe->inputs[i] = NULL;
++
+ if (!pipe->inputs[i])
+ continue;
+
+diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
+index 34e16f6ab4ac..545f9c1b6a58 100644
+--- a/drivers/media/usb/em28xx/em28xx-cards.c
++++ b/drivers/media/usb/em28xx/em28xx-cards.c
+@@ -507,8 +507,10 @@ static struct em28xx_reg_seq plex_px_bcud[] = {
+ };
+
+ /*
+- * 2040:0265 Hauppauge WinTV-dualHD DVB
+- * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM
++ * 2040:0265 Hauppauge WinTV-dualHD DVB Isoc
++ * 2040:8265 Hauppauge WinTV-dualHD DVB Bulk
++ * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM Isoc
++ * 2040:826d Hauppauge WinTV-dualHD ATSC/QAM Bulk
+ * reg 0x80/0x84:
+ * GPIO_0: Yellow LED tuner 1, 0=on, 1=off
+ * GPIO_1: Green LED tuner 1, 0=on, 1=off
+@@ -2391,7 +2393,8 @@ struct em28xx_board em28xx_boards[] = {
+ .has_dvb = 1,
+ },
+ /*
+- * 2040:0265 Hauppauge WinTV-dualHD (DVB version).
++ * 2040:0265 Hauppauge WinTV-dualHD (DVB version) Isoc.
++ * 2040:8265 Hauppauge WinTV-dualHD (DVB version) Bulk.
+ * Empia EM28274, 2x Silicon Labs Si2168, 2x Silicon Labs Si2157
+ */
+ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB] = {
+@@ -2406,7 +2409,8 @@ struct em28xx_board em28xx_boards[] = {
+ .leds = hauppauge_dualhd_leds,
+ },
+ /*
+- * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM).
++ * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Isoc.
++ * 2040:826d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Bulk.
+ * Empia EM28274, 2x LG LGDT3306A, 2x Silicon Labs Si2157
+ */
+ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595] = {
+@@ -2547,8 +2551,12 @@ struct usb_device_id em28xx_id_table[] = {
+ .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 },
+ { USB_DEVICE(0x2040, 0x0265),
+ .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB },
++ { USB_DEVICE(0x2040, 0x8265),
++ .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB },
+ { USB_DEVICE(0x2040, 0x026d),
+ .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 },
++ { USB_DEVICE(0x2040, 0x826d),
++ .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 },
+ { USB_DEVICE(0x0438, 0xb002),
+ .driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 },
+ { USB_DEVICE(0x2001, 0xf112),
+@@ -2609,7 +2617,11 @@ struct usb_device_id em28xx_id_table[] = {
+ .driver_info = EM28178_BOARD_PCTV_461E },
+ { USB_DEVICE(0x2013, 0x025f),
+ .driver_info = EM28178_BOARD_PCTV_292E },
+- { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD */
++ { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD Isoc */
++ .driver_info = EM28178_BOARD_PCTV_292E },
++ { USB_DEVICE(0x2040, 0x8264), /* Hauppauge OEM Generic WinTV-soloHD Bulk */
++ .driver_info = EM28178_BOARD_PCTV_292E },
++ { USB_DEVICE(0x2040, 0x8268), /* Hauppauge Retail WinTV-soloHD Bulk */
+ .driver_info = EM28178_BOARD_PCTV_292E },
+ { USB_DEVICE(0x0413, 0x6f07),
+ .driver_info = EM2861_BOARD_LEADTEK_VC100 },
+diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
+index 88084f24f033..094e83b6908d 100644
+--- a/drivers/media/usb/em28xx/em28xx.h
++++ b/drivers/media/usb/em28xx/em28xx.h
+@@ -191,7 +191,7 @@
+ USB 2.0 spec says bulk packet size is always 512 bytes
+ */
+ #define EM28XX_BULK_PACKET_MULTIPLIER 384
+-#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384
++#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 94
+
+ #define EM28XX_INTERLACED_DEFAULT 1
+
+diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
+index 23b45da784cb..b89acaee12d4 100644
+--- a/drivers/net/dsa/bcm_sf2_cfp.c
++++ b/drivers/net/dsa/bcm_sf2_cfp.c
+@@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
+ /* Locate the first rule available */
+ if (fs->location == RX_CLS_LOC_ANY)
+ rule_index = find_first_zero_bit(priv->cfp.used,
+- bcm_sf2_cfp_rule_size(priv));
++ priv->num_cfp_rules);
+ else
+ rule_index = fs->location;
+
++ if (rule_index > bcm_sf2_cfp_rule_size(priv))
++ return -ENOSPC;
++
+ layout = &udf_tcpip4_layout;
+ /* We only use one UDF slice for now */
+ slice_num = bcm_sf2_get_slice_number(layout, 0);
+@@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
+ * first half because the HW search is by incrementing addresses.
+ */
+ if (fs->location == RX_CLS_LOC_ANY)
+- rule_index[0] = find_first_zero_bit(priv->cfp.used,
+- bcm_sf2_cfp_rule_size(priv));
++ rule_index[1] = find_first_zero_bit(priv->cfp.used,
++ priv->num_cfp_rules);
+ else
+- rule_index[0] = fs->location;
++ rule_index[1] = fs->location;
++ if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
++ return -ENOSPC;
+
+ /* Flag it as used (cleared on error path) such that we can immediately
+ * obtain a second one to chain from.
+ */
+- set_bit(rule_index[0], priv->cfp.used);
++ set_bit(rule_index[1], priv->cfp.used);
+
+- rule_index[1] = find_first_zero_bit(priv->cfp.used,
+- bcm_sf2_cfp_rule_size(priv));
+- if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) {
++ rule_index[0] = find_first_zero_bit(priv->cfp.used,
++ priv->num_cfp_rules);
++ if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
+@@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
+ /* Flag the second half rule as being used now, return it as the
+ * location, and flag it as unique while dumping rules
+ */
+- set_bit(rule_index[1], priv->cfp.used);
++ set_bit(rule_index[0], priv->cfp.used);
+ set_bit(rule_index[1], priv->cfp.unique);
+ fs->location = rule_index[1];
+
+ return ret;
+
+ out_err:
+- clear_bit(rule_index[0], priv->cfp.used);
++ clear_bit(rule_index[1], priv->cfp.used);
+ return ret;
+ }
+
+@@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
+ int ret;
+ u32 reg;
+
+- /* Refuse deletion of unused rules, and the default reserved rule */
+- if (!test_bit(loc, priv->cfp.used) || loc == 0)
+- return -EINVAL;
+-
+ /* Indicate which rule we want to read */
+ bcm_sf2_cfp_rule_addr_set(priv, loc);
+
+@@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
+ u32 next_loc = 0;
+ int ret;
+
++ /* Refuse deleting unused rules, and those that are not unique since
++ * that could leave IPv6 rules with one of the chained rule in the
++	 * that could leave IPv6 rules with one of the chained rules in the
++ */
++ if (!test_bit(loc, priv->cfp.unique) || loc == 0)
++ return -EINVAL;
++
+ ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
+ if (ret)
+ return ret;
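
The CFP changes above correct a bitmap-allocation pattern: search the full
rule bitmap (num_cfp_rules bits), treat an out-of-range index as -ENOSPC, and
reserve the slot with set_bit before programming it so a chained allocation
cannot land on the same index. A stand-alone sketch of that pattern follows,
using hypothetical helpers rather than the bcm_sf2 or bitmap.h APIs.

#include <stdio.h>

#define NUM_RULES 128
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long used[NUM_RULES / BITS_PER_LONG];

static int find_first_zero(unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (!(used[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return (int)i;
	return (int)nbits;                 /* "not found" sentinel */
}

static int rule_alloc(void)
{
	int idx = find_first_zero(NUM_RULES);

	if (idx >= NUM_RULES)
		return -1;                 /* mirrors -ENOSPC */
	used[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
	return idx;
}

static void rule_free(int idx)
{
	used[idx / BITS_PER_LONG] &= ~(1UL << (idx % BITS_PER_LONG));
}

int main(void)
{
	int a = rule_alloc();
	int b = rule_alloc();           /* guaranteed to differ from a */

	printf("allocated rules %d and %d\n", a, b);
	rule_free(a);
	rule_free(b);
	return 0;
}
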
+diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
+index 36c8950dbd2d..176861bd2252 100644
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
+ vp->mii.reg_num_mask = 0x1f;
+
+ /* Makes sure rings are at least 16 byte aligned. */
+- vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
++ vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+- &vp->rx_ring_dma);
++ &vp->rx_ring_dma, GFP_KERNEL);
+ retval = -ENOMEM;
+ if (!vp->rx_ring)
+ goto free_device;
+@@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
+ return 0;
+
+ free_ring:
+- pci_free_consistent(pdev,
+- sizeof(struct boom_rx_desc) * RX_RING_SIZE
+- + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+- vp->rx_ring,
+- vp->rx_ring_dma);
++ dma_free_coherent(&pdev->dev,
++ sizeof(struct boom_rx_desc) * RX_RING_SIZE +
++ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
++ vp->rx_ring, vp->rx_ring_dma);
+ free_device:
+ free_netdev(dev);
+ pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
+@@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
+ break; /* Bad news! */
+
+ skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
+- dma = pci_map_single(VORTEX_PCI(vp), skb->data,
+- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
++ dma = dma_map_single(vp->gendev, skb->data,
++ PKT_BUF_SZ, DMA_FROM_DEVICE);
++ if (dma_mapping_error(vp->gendev, dma))
+ break;
+ vp->rx_ring[i].addr = cpu_to_le32(dma);
+ }
+@@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (vp->bus_master) {
+ /* Set the bus-master controller to transfer the packet. */
+ int len = (skb->len + 3) & ~3;
+- vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
+- PCI_DMA_TODEVICE);
+- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
++ vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+@@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
+
+ if (!skb_shinfo(skb)->nr_frags) {
+- dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+- PCI_DMA_TODEVICE);
+- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
++ dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(vp->gendev, dma_addr))
+ goto out_dma_err;
+
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
+@@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ } else {
+ int i;
+
+- dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+- skb_headlen(skb), PCI_DMA_TODEVICE);
+- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
++ dma_addr = dma_map_single(vp->gendev, skb->data,
++ skb_headlen(skb), DMA_TO_DEVICE);
++ if (dma_mapping_error(vp->gendev, dma_addr))
+ goto out_dma_err;
+
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
+@@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+- dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
++ dma_addr = skb_frag_dma_map(vp->gendev, frag,
+ 0,
+ frag->size,
+ DMA_TO_DEVICE);
+- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
++ if (dma_mapping_error(vp->gendev, dma_addr)) {
+ for(i = i-1; i >= 0; i--)
+- dma_unmap_page(&VORTEX_PCI(vp)->dev,
++ dma_unmap_page(vp->gendev,
+ le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+ DMA_TO_DEVICE);
+
+- pci_unmap_single(VORTEX_PCI(vp),
++ dma_unmap_single(vp->gendev,
+ le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+- PCI_DMA_TODEVICE);
++ DMA_TO_DEVICE);
+
+ goto out_dma_err;
+ }
+@@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+ }
+ #else
+- dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
+- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
++ dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
++ if (dma_mapping_error(vp->gendev, dma_addr))
+ goto out_dma_err;
+ vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
+ vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
+@@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ out:
+ return NETDEV_TX_OK;
+ out_dma_err:
+- dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
++ dev_err(vp->gendev, "Error mapping dma buffer\n");
+ goto out;
+ }
+
+@@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
+ if (status & DMADone) {
+ if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
+ iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+- pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
++ dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
+ pkts_compl++;
+ bytes_compl += vp->tx_skb->len;
+ dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
+@@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
+ struct sk_buff *skb = vp->tx_skbuff[entry];
+ #if DO_ZEROCOPY
+ int i;
+- pci_unmap_single(VORTEX_PCI(vp),
++ dma_unmap_single(vp->gendev,
+ le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
+- PCI_DMA_TODEVICE);
++ DMA_TO_DEVICE);
+
+ for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
+- pci_unmap_page(VORTEX_PCI(vp),
++ dma_unmap_page(vp->gendev,
+ le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
+- PCI_DMA_TODEVICE);
++ DMA_TO_DEVICE);
+ #else
+- pci_unmap_single(VORTEX_PCI(vp),
+- le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
++ dma_unmap_single(vp->gendev,
++ le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
+ #endif
+ pkts_compl++;
+ bytes_compl += skb->len;
+@@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ if (vp->bus_master &&
+ ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
+- dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
+- pkt_len, PCI_DMA_FROMDEVICE);
++ dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
++ pkt_len, DMA_FROM_DEVICE);
+ iowrite32(dma, ioaddr + Wn7_MasterAddr);
+ iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ iowrite16(StartDMAUp, ioaddr + EL3_CMD);
+ while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
+ ;
+- pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
++ dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
+ } else {
+ ioread32_rep(ioaddr + RX_FIFO,
+ skb_put(skb, pkt_len),
+@@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
+ if (pkt_len < rx_copybreak &&
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+- pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
++ dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ skb_put_data(skb, vp->rx_skbuff[entry]->data,
+ pkt_len);
+- pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
++ dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
+ vp->rx_copy++;
+ } else {
+ /* Pre-allocate the replacement skb. If it or its
+@@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
+ dev->stats.rx_dropped++;
+ goto clear_complete;
+ }
+- newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
+- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
++ newdma = dma_map_single(vp->gendev, newskb->data,
++ PKT_BUF_SZ, DMA_FROM_DEVICE);
++ if (dma_mapping_error(vp->gendev, newdma)) {
+ dev->stats.rx_dropped++;
+ consume_skb(newskb);
+ goto clear_complete;
+@@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
+ vp->rx_skbuff[entry] = newskb;
+ vp->rx_ring[entry].addr = cpu_to_le32(newdma);
+ skb_put(skb, pkt_len);
+- pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
++ dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
+ vp->rx_nocopy++;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+@@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
+ if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (vp->rx_skbuff[i]) {
+- pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
+- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
++ dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
++ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+@@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
+ int k;
+
+ for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
+- pci_unmap_single(VORTEX_PCI(vp),
++ dma_unmap_single(vp->gendev,
+ le32_to_cpu(vp->tx_ring[i].frag[k].addr),
+ le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
+- PCI_DMA_TODEVICE);
++ DMA_TO_DEVICE);
+ #else
+- pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
++ dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
+ #endif
+ dev_kfree_skb(skb);
+ vp->tx_skbuff[i] = NULL;
+@@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
+
+ pci_iounmap(pdev, vp->ioaddr);
+
+- pci_free_consistent(pdev,
+- sizeof(struct boom_rx_desc) * RX_RING_SIZE
+- + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+- vp->rx_ring,
+- vp->rx_ring_dma);
++ dma_free_coherent(&pdev->dev,
++ sizeof(struct boom_rx_desc) * RX_RING_SIZE +
++ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
++ vp->rx_ring, vp->rx_ring_dma);
+
+ pci_release_regions(pdev);
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+index b57acb8dc35b..dc25066c59a1 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+@@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+- {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+- {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+- {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+- {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+- {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+- {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
+- {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
+- {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+- {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
++ {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
++ {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
++ {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
++ {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
++ {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
++ {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
++ {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
++ {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
++ {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
+ };
+
+ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+@@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+- {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+- {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+- {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+- {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+- {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+- {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
+- {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
+- {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
+- {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+- {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+ };
+
+ static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 3177b0c9bd2d..829dc8c5ddff 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap,
+ {
+ struct tp_params *tp = &adap->params.tp;
+ u64 hash_filter_mask = tp->hash_filter_mask;
+- u32 mask;
++ u64 ntuple_mask = 0;
+
+ if (!is_hashfilter(adap))
+ return false;
+@@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap,
+ if (!fs->val.fport || fs->mask.fport != 0xffff)
+ return false;
+
+- if (tp->fcoe_shift >= 0) {
+- mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
+- if (mask && !fs->mask.fcoe)
+- return false;
+- }
++ /* calculate tuple mask and compare with mask configured in hw */
++ if (tp->fcoe_shift >= 0)
++ ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
+
+- if (tp->port_shift >= 0) {
+- mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W;
+- if (mask && !fs->mask.iport)
+- return false;
+- }
++ if (tp->port_shift >= 0)
++ ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+
+ if (tp->vnic_shift >= 0) {
+- mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W;
+-
+- if ((adap->params.tp.ingress_config & VNIC_F)) {
+- if (mask && !fs->mask.pfvf_vld)
+- return false;
+- } else {
+- if (mask && !fs->mask.ovlan_vld)
+- return false;
+- }
++ if ((adap->params.tp.ingress_config & VNIC_F))
++ ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
++ else
++ ntuple_mask |= (u64)fs->mask.ovlan_vld <<
++ tp->vnic_shift;
+ }
+
+- if (tp->vlan_shift >= 0) {
+- mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W;
+- if (mask && !fs->mask.ivlan)
+- return false;
+- }
++ if (tp->vlan_shift >= 0)
++ ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
+
+- if (tp->tos_shift >= 0) {
+- mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W;
+- if (mask && !fs->mask.tos)
+- return false;
+- }
++ if (tp->tos_shift >= 0)
++ ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
+
+- if (tp->protocol_shift >= 0) {
+- mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W;
+- if (mask && !fs->mask.proto)
+- return false;
+- }
++ if (tp->protocol_shift >= 0)
++ ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+
+- if (tp->ethertype_shift >= 0) {
+- mask = (hash_filter_mask >> tp->ethertype_shift) &
+- FT_ETHERTYPE_W;
+- if (mask && !fs->mask.ethtype)
+- return false;
+- }
++ if (tp->ethertype_shift >= 0)
++ ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+
+- if (tp->macmatch_shift >= 0) {
+- mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W;
+- if (mask && !fs->mask.macidx)
+- return false;
+- }
++ if (tp->macmatch_shift >= 0)
++ ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
++
++ if (tp->matchtype_shift >= 0)
++ ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
++
++ if (tp->frag_shift >= 0)
++ ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
++
++ if (ntuple_mask != hash_filter_mask)
++ return false;
+
+- if (tp->matchtype_shift >= 0) {
+- mask = (hash_filter_mask >> tp->matchtype_shift) &
+- FT_MPSHITTYPE_W;
+- if (mask && !fs->mask.matchtype)
+- return false;
+- }
+- if (tp->frag_shift >= 0) {
+- mask = (hash_filter_mask >> tp->frag_shift) &
+- FT_FRAGMENTATION_W;
+- if (mask && !fs->mask.frag)
+- return false;
+- }
+ return true;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 4d84cab77105..e8a3a45d0b53 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -3007,6 +3007,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+ mlx4_err(dev, "Failed to create file for port %d\n", port);
+ devlink_port_unregister(&info->devlink_port);
+ info->port = -1;
++ return err;
+ }
+
+ sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
+@@ -3028,9 +3029,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+ &info->port_attr);
+ devlink_port_unregister(&info->devlink_port);
+ info->port = -1;
++ return err;
+ }
+
+- return err;
++ return 0;
+ }
+
+ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index c4f14fdc4e77..0161e01778f2 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_tx_queue *p_tx;
++ unsigned long flags = 0;
+ dma_addr_t tx_frag;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+@@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+
+ p_tx = &p_ll2_conn->tx_queue;
+
++ spin_lock_irqsave(&p_tx->lock, flags);
+ while (!list_empty(&p_tx->active_descq)) {
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+@@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ list_del(&p_pkt->list_entry);
+ b_last_packet = list_empty(&p_tx->active_descq);
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
++ spin_unlock_irqrestore(&p_tx->lock, flags);
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
+ struct qed_ooo_buffer *p_buffer;
+
+@@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ b_last_frag,
+ b_last_packet);
+ }
++ spin_lock_irqsave(&p_tx->lock, flags);
+ }
++ spin_unlock_irqrestore(&p_tx->lock, flags);
+ }
+
+ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+@@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ struct qed_ll2_rx_queue *p_rx;
++ unsigned long flags = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+@@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+
+ p_rx = &p_ll2_conn->rx_queue;
+
++ spin_lock_irqsave(&p_rx->lock, flags);
+ while (!list_empty(&p_rx->active_descq)) {
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt)
+ break;
+-
+ list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
++ spin_unlock_irqrestore(&p_rx->lock, flags);
+
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
+ struct qed_ooo_buffer *p_buffer;
+@@ -588,7 +595,9 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ cookie,
+ rx_buf_addr, b_last);
+ }
++ spin_lock_irqsave(&p_rx->lock, flags);
+ }
++ spin_unlock_irqrestore(&p_rx->lock, flags);
+ }
+
+ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
+@@ -601,6 +610,27 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
+ return bd_flags;
+ }
+
++static bool
++qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
++ struct core_rx_slow_path_cqe *p_cqe)
++{
++ struct ooo_opaque *iscsi_ooo;
++ u32 cid;
++
++ if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
++ return false;
++
++ iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
++ if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
++ return false;
++
++ /* Need to make a flush */
++ cid = le32_to_cpu(iscsi_ooo->cid);
++ qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
++
++ return true;
++}
++
+ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+ {
+@@ -627,6 +657,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+ cqe_type = cqe->rx_cqe_sp.type;
+
++ if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
++ if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
++ &cqe->rx_cqe_sp))
++ continue;
++
+ if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+ DP_NOTICE(p_hwfn,
+ "Got a non-regular LB LL2 completion [type 0x%02x]\n",
+@@ -807,6 +842,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+ struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+ int rc;
+
++ if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
++ return 0;
++
+ rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+@@ -827,6 +865,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+ u16 new_idx = 0, num_bds = 0;
+ int rc;
+
++ if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
++ return 0;
++
+ new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+ num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+
+@@ -1880,17 +1921,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
+
+ /* Stop Tx & Rx of connection, if needed */
+ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
++ p_ll2_conn->tx_queue.b_cb_registred = false;
++ smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
+ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ goto out;
++
+ qed_ll2_txq_flush(p_hwfn, connection_handle);
++ qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
+ }
+
+ if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
++ p_ll2_conn->rx_queue.b_cb_registred = false;
++ smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
+ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ goto out;
++
+ qed_ll2_rxq_flush(p_hwfn, connection_handle);
++ qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
+ }
+
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
+@@ -1938,16 +1987,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
+ if (!p_ll2_conn)
+ return;
+
+- if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+- p_ll2_conn->rx_queue.b_cb_registred = false;
+- qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
+- }
+-
+- if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+- p_ll2_conn->tx_queue.b_cb_registred = false;
+- qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
+- }
+-
+ kfree(p_ll2_conn->tx_queue.descq_mem);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
+
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 6c7bdd0c361a..ffae19714ffd 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -680,15 +680,6 @@ static void tun_queue_purge(struct tun_file *tfile)
+ skb_queue_purge(&tfile->sk.sk_error_queue);
+ }
+
+-static void tun_cleanup_tx_ring(struct tun_file *tfile)
+-{
+- if (tfile->tx_ring.queue) {
+- ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
+- xdp_rxq_info_unreg(&tfile->xdp_rxq);
+- memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
+- }
+-}
+-
+ static void __tun_detach(struct tun_file *tfile, bool clean)
+ {
+ struct tun_file *ntfile;
+@@ -735,7 +726,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
+ tun->dev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(tun->dev);
+ }
+- tun_cleanup_tx_ring(tfile);
++ if (tun)
++ xdp_rxq_info_unreg(&tfile->xdp_rxq);
++ ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
+ sock_put(&tfile->sk);
+ }
+ }
+@@ -775,14 +768,14 @@ static void tun_detach_all(struct net_device *dev)
+ tun_napi_del(tun, tfile);
+ /* Drop read queue */
+ tun_queue_purge(tfile);
++ xdp_rxq_info_unreg(&tfile->xdp_rxq);
+ sock_put(&tfile->sk);
+- tun_cleanup_tx_ring(tfile);
+ }
+ list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+ tun_enable_queue(tfile);
+ tun_queue_purge(tfile);
++ xdp_rxq_info_unreg(&tfile->xdp_rxq);
+ sock_put(&tfile->sk);
+- tun_cleanup_tx_ring(tfile);
+ }
+ BUG_ON(tun->numdisabled != 0);
+
+@@ -826,7 +819,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
+ }
+
+ if (!tfile->detached &&
+- ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
++ ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
++ GFP_KERNEL, tun_ptr_free)) {
+ err = -ENOMEM;
+ goto out;
+ }
+@@ -3131,6 +3125,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
+ &tun_proto, 0);
+ if (!tfile)
+ return -ENOMEM;
++ if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
++ sk_free(&tfile->sk);
++ return -ENOMEM;
++ }
++
+ RCU_INIT_POINTER(tfile->tun, NULL);
+ tfile->flags = 0;
+ tfile->ifindex = 0;
+@@ -3151,8 +3150,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
+
+ sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
+
+- memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 9ebe2a689966..27a9bb8c9611 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
+
+ gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
+ while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
++ /* Prevent any &gdesc->tcd field from being (speculatively)
++ * read before (&gdesc->tcd)->gen is read.
++ */
++ dma_rmb();
++
+ completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
+ &gdesc->tcd), tq, adapter->pdev,
+ adapter);
+@@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ gdesc->txd.tci = skb_vlan_tag_get(skb);
+ }
+
++ /* Ensure that the write to (&gdesc->txd)->gen will be observed after
++ * all other writes to &gdesc->txd.
++ */
++ dma_wmb();
++
+ /* finally flips the GEN bit of the SOP desc. */
+ gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+ VMXNET3_TXD_GEN);
+@@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ */
+ break;
+ }
++
++ /* Prevent any rcd field from being (speculatively) read before
++ * rcd->gen is read.
++ */
++ dma_rmb();
++
+ BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
+ rcd->rqID != rq->dataRingQid);
+ idx = rcd->rxdIdx;
+@@ -1528,6 +1544,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ ring->next2comp = idx;
+ num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
+ ring = rq->rx_ring + ring_idx;
++
++ /* Ensure that the writes to rxd->gen bits will be observed
++ * after all other writes to rxd objects.
++ */
++ dma_wmb();
++
+ while (num_to_alloc) {
+ vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+ &rxCmdDesc);
+@@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
+ /* ==================== initialization and cleanup routines ============ */
+
+ static int
+-vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
++vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
+ {
+ int err;
+ unsigned long mmio_start, mmio_len;
+@@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+ return err;
+ }
+
+- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+- dev_err(&pdev->dev,
+- "pci_set_consistent_dma_mask failed\n");
+- err = -EIO;
+- goto err_set_mask;
+- }
+- *dma64 = true;
+- } else {
+- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+- dev_err(&pdev->dev,
+- "pci_set_dma_mask failed\n");
+- err = -EIO;
+- goto err_set_mask;
+- }
+- *dma64 = false;
+- }
+-
+ err = pci_request_selected_regions(pdev, (1 << 2) - 1,
+ vmxnet3_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to request region for adapter: error %d\n", err);
+- goto err_set_mask;
++ goto err_enable_device;
+ }
+
+ pci_set_master(pdev);
+@@ -2751,7 +2755,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+ iounmap(adapter->hw_addr0);
+ err_ioremap:
+ pci_release_selected_regions(pdev, (1 << 2) - 1);
+-err_set_mask:
++err_enable_device:
+ pci_disable_device(pdev);
+ return err;
+ }
+@@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
+ #endif
+ };
+ int err;
+- bool dma64 = false; /* stupid gcc */
++ bool dma64;
+ u32 ver;
+ struct net_device *netdev;
+ struct vmxnet3_adapter *adapter;
+@@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev,
+ adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+ adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
+
++ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
++ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
++ dev_err(&pdev->dev,
++ "pci_set_consistent_dma_mask failed\n");
++ err = -EIO;
++ goto err_set_mask;
++ }
++ dma64 = true;
++ } else {
++ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
++ dev_err(&pdev->dev,
++ "pci_set_dma_mask failed\n");
++ err = -EIO;
++ goto err_set_mask;
++ }
++ dma64 = false;
++ }
++
+ spin_lock_init(&adapter->cmd_lock);
+ adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
+ sizeof(struct vmxnet3_adapter),
+@@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
+ if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
+ dev_err(&pdev->dev, "Failed to map dma\n");
+ err = -EFAULT;
+- goto err_dma_map;
++ goto err_set_mask;
+ }
+ adapter->shared = dma_alloc_coherent(
+ &adapter->pdev->dev,
+@@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
+ }
+ #endif /* VMXNET3_RSS */
+
+- err = vmxnet3_alloc_pci_resources(adapter, &dma64);
++ err = vmxnet3_alloc_pci_resources(adapter);
+ if (err < 0)
+ goto err_alloc_pci;
+
+@@ -3504,7 +3526,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
+ err_alloc_shared:
+ dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+ sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+-err_dma_map:
++err_set_mask:
+ free_netdev(netdev);
+ return err;
+ }
+diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
+index a3326463b71f..a2c554f8a61b 100644
+--- a/drivers/net/vmxnet3/vmxnet3_int.h
++++ b/drivers/net/vmxnet3/vmxnet3_int.h
+@@ -69,10 +69,12 @@
+ /*
+ * Version numbers
+ */
+-#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k"
++#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k"
+
+-/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
+-#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00
++/* Each byte of this 32-bit integer encodes a version number in
++ * VMXNET3_DRIVER_VERSION_STRING.
++ */
++#define VMXNET3_DRIVER_VERSION_NUM 0x01041000
+
+ #if defined(CONFIG_PCI_MSI)
+ /* RSS only makes sense if MSI-X is supported. */
+diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
+index e1cfa06810ef..e79f2a181ad2 100644
+--- a/drivers/rtc/hctosys.c
++++ b/drivers/rtc/hctosys.c
+@@ -49,6 +49,11 @@ static int __init rtc_hctosys(void)
+
+ tv64.tv_sec = rtc_tm_to_time64(&tm);
+
++#if BITS_PER_LONG == 32
++ if (tv64.tv_sec > INT_MAX)
++ goto err_read;
++#endif
++
+ err = do_settimeofday64(&tv64);
+
+ dev_info(rtc->dev.parent,
+diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
+index d67769265185..a1c44d0c8557 100644
+--- a/drivers/rtc/rtc-goldfish.c
++++ b/drivers/rtc/rtc-goldfish.c
+@@ -235,3 +235,5 @@ static struct platform_driver goldfish_rtc = {
+ };
+
+ module_platform_driver(goldfish_rtc);
++
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
+index c90fba3ed861..6620016869cf 100644
+--- a/drivers/rtc/rtc-m41t80.c
++++ b/drivers/rtc/rtc-m41t80.c
+@@ -885,7 +885,6 @@ static int m41t80_probe(struct i2c_client *client,
+ {
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ int rc = 0;
+- struct rtc_device *rtc = NULL;
+ struct rtc_time tm;
+ struct m41t80_data *m41t80_data = NULL;
+ bool wakeup_source = false;
+@@ -909,6 +908,10 @@ static int m41t80_probe(struct i2c_client *client,
+ m41t80_data->features = id->driver_data;
+ i2c_set_clientdata(client, m41t80_data);
+
++ m41t80_data->rtc = devm_rtc_allocate_device(&client->dev);
++ if (IS_ERR(m41t80_data->rtc))
++ return PTR_ERR(m41t80_data->rtc);
++
+ #ifdef CONFIG_OF
+ wakeup_source = of_property_read_bool(client->dev.of_node,
+ "wakeup-source");
+@@ -932,15 +935,11 @@ static int m41t80_probe(struct i2c_client *client,
+ device_init_wakeup(&client->dev, true);
+ }
+
+- rtc = devm_rtc_device_register(&client->dev, client->name,
+- &m41t80_rtc_ops, THIS_MODULE);
+- if (IS_ERR(rtc))
+- return PTR_ERR(rtc);
++ m41t80_data->rtc->ops = &m41t80_rtc_ops;
+
+- m41t80_data->rtc = rtc;
+ if (client->irq <= 0) {
+ /* We cannot support UIE mode if we do not have an IRQ line */
+- rtc->uie_unsupported = 1;
++ m41t80_data->rtc->uie_unsupported = 1;
+ }
+
+ /* Make sure HT (Halt Update) bit is cleared */
+@@ -993,6 +992,11 @@ static int m41t80_probe(struct i2c_client *client,
+ if (m41t80_data->features & M41T80_FEATURE_SQ)
+ m41t80_sqw_register_clk(m41t80_data);
+ #endif
++
++ rc = rtc_register_device(m41t80_data->rtc);
++ if (rc)
++ return rc;
++
+ return 0;
+ }
+
+diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
+index 35c9aada07c8..79c8da54e922 100644
+--- a/drivers/rtc/rtc-rk808.c
++++ b/drivers/rtc/rtc-rk808.c
+@@ -416,12 +416,11 @@ static int rk808_rtc_probe(struct platform_device *pdev)
+
+ device_init_wakeup(&pdev->dev, 1);
+
+- rk808_rtc->rtc = devm_rtc_device_register(&pdev->dev, "rk808-rtc",
+- &rk808_rtc_ops, THIS_MODULE);
+- if (IS_ERR(rk808_rtc->rtc)) {
+- ret = PTR_ERR(rk808_rtc->rtc);
+- return ret;
+- }
++ rk808_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
++ if (IS_ERR(rk808_rtc->rtc))
++ return PTR_ERR(rk808_rtc->rtc);
++
++ rk808_rtc->rtc->ops = &rk808_rtc_ops;
+
+ rk808_rtc->irq = platform_get_irq(pdev, 0);
+ if (rk808_rtc->irq < 0) {
+@@ -438,9 +437,10 @@ static int rk808_rtc_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
+ rk808_rtc->irq, ret);
++ return ret;
+ }
+
+- return ret;
++ return rtc_register_device(rk808_rtc->rtc);
+ }
+
+ static struct platform_driver rk808_rtc_driver = {
+diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
+index 026035373ae6..38a12435b5a0 100644
+--- a/drivers/rtc/rtc-rp5c01.c
++++ b/drivers/rtc/rtc-rp5c01.c
+@@ -249,16 +249,24 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
+
+ platform_set_drvdata(dev, priv);
+
+- rtc = devm_rtc_device_register(&dev->dev, "rtc-rp5c01", &rp5c01_rtc_ops,
+- THIS_MODULE);
++ rtc = devm_rtc_allocate_device(&dev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
++
++ rtc->ops = &rp5c01_rtc_ops;
++
+ priv->rtc = rtc;
+
+ error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr);
+ if (error)
+ return error;
+
++ error = rtc_register_device(rtc);
++ if (error) {
++ sysfs_remove_bin_file(&dev->dev.kobj, &priv->nvram_attr);
++ return error;
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index d8ef9e052c4f..9af591d5223c 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -132,20 +132,23 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ {
+ struct snvs_rtc_data *data = dev_get_drvdata(dev);
+ unsigned long time;
++ int ret;
+
+ rtc_tm_to_time(tm, &time);
+
+ /* Disable RTC first */
+- snvs_rtc_enable(data, false);
++ ret = snvs_rtc_enable(data, false);
++ if (ret)
++ return ret;
+
+ /* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */
+ regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH);
+ regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH));
+
+ /* Enable RTC again */
+- snvs_rtc_enable(data, true);
++ ret = snvs_rtc_enable(data, true);
+
+- return 0;
++ return ret;
+ }
+
+ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+@@ -288,7 +291,11 @@ static int snvs_rtc_probe(struct platform_device *pdev)
+ regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff);
+
+ /* Enable RTC */
+- snvs_rtc_enable(data, true);
++ ret = snvs_rtc_enable(data, true);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to enable rtc %d\n", ret);
++ goto error_rtc_device_register;
++ }
+
+ device_init_wakeup(&pdev->dev, true);
+
+diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
+index 560d9a5e0225..a9528083061d 100644
+--- a/drivers/rtc/rtc-tx4939.c
++++ b/drivers/rtc/rtc-tx4939.c
+@@ -86,7 +86,8 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ for (i = 2; i < 6; i++)
+ buf[i] = __raw_readl(&rtcreg->dat);
+ spin_unlock_irq(&pdata->lock);
+- sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
++ sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
++ (buf[3] << 8) | buf[2];
+ rtc_time_to_tm(sec, tm);
+ return rtc_valid_tm(tm);
+ }
+@@ -147,7 +148,8 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
+ alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
+ spin_unlock_irq(&pdata->lock);
+- sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
++ sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
++ (buf[3] << 8) | buf[2];
+ rtc_time_to_tm(sec, &alrm->time);
+ return rtc_valid_tm(&alrm->time);
+ }
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index a8b831000b2d..18c4f933e8b9 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -4,7 +4,7 @@
+ *
+ * Debug traces for zfcp.
+ *
+- * Copyright IBM Corp. 2002, 2017
++ * Copyright IBM Corp. 2002, 2018
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -308,6 +308,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
+ spin_unlock_irqrestore(&dbf->rec_lock, flags);
+ }
+
++/**
++ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
++ * @tag: identifier for event
++ * @adapter: adapter on which the erp_action should run
++ * @port: remote port involved in the erp_action
++ * @sdev: scsi device involved in the erp_action
++ * @want: wanted erp_action
++ * @need: required erp_action
++ *
++ * The adapter->erp_lock must not be held.
++ */
++void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
++ struct zfcp_port *port, struct scsi_device *sdev,
++ u8 want, u8 need)
++{
++ unsigned long flags;
++
++ read_lock_irqsave(&adapter->erp_lock, flags);
++ zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
++ read_unlock_irqrestore(&adapter->erp_lock, flags);
++}
+
+ /**
+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
+index bf8ea4df2bb8..e5eed8aac0ce 100644
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -4,7 +4,7 @@
+ *
+ * External function declarations.
+ *
+- * Copyright IBM Corp. 2002, 2016
++ * Copyright IBM Corp. 2002, 2018
+ */
+
+ #ifndef ZFCP_EXT_H
+@@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
+ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+ extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+ struct zfcp_port *, struct scsi_device *, u8, u8);
++extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
++ struct zfcp_port *port,
++ struct scsi_device *sdev, u8 want, u8 need);
+ extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+ extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+ struct zfcp_erp_action *erp);
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 4d2ba5682493..22f9562f415c 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -4,7 +4,7 @@
+ *
+ * Interface to Linux SCSI midlayer.
+ *
+- * Copyright IBM Corp. 2002, 2017
++ * Copyright IBM Corp. 2002, 2018
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -618,9 +618,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
+ ids.port_id = port->d_id;
+ ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+
+- zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
+- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
++ zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+ rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
+ if (!rport) {
+ dev_err(&port->adapter->ccw_device->dev,
+@@ -642,9 +642,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
+ struct fc_rport *rport = port->rport;
+
+ if (rport) {
+- zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
+- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
++ zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+ fc_remote_port_delete(rport);
+ port->rport = NULL;
+ }
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index d9f2229664ad..d62ddd63f4fe 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -1502,9 +1502,10 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
+ host = aac->scsi_host_ptr;
+ scsi_block_requests(host);
+ aac_adapter_disable_int(aac);
+- if (aac->thread->pid != current->pid) {
++ if (aac->thread && aac->thread->pid != current->pid) {
+ spin_unlock_irq(host->host_lock);
+ kthread_stop(aac->thread);
++ aac->thread = NULL;
+ jafo = 1;
+ }
+
+@@ -1591,6 +1592,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
+ aac->name);
+ if (IS_ERR(aac->thread)) {
+ retval = PTR_ERR(aac->thread);
++ aac->thread = NULL;
+ goto out;
+ }
+ }
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 2664ea0df35f..f24fb942065d 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -1562,6 +1562,7 @@ static void __aac_shutdown(struct aac_dev * aac)
+ up(&fib->event_wait);
+ }
+ kthread_stop(aac->thread);
++ aac->thread = NULL;
+ }
+
+ aac_send_shutdown(aac);
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index ac77081e6e9e..b07612562c39 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -905,7 +905,12 @@ lpfc_issue_lip(struct Scsi_Host *shost)
+ LPFC_MBOXQ_t *pmboxq;
+ int mbxstatus = MBXERR_ERROR;
+
++ /*
++ * If the link is offline, disabled or BLOCK_MGMT_IO
++ * it doesn't make any sense to allow issue_lip
++ */
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
++ (phba->hba_flag & LINK_DISABLED) ||
+ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
+ return -EPERM;
+
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index b159a5c4e388..9265906d956e 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -696,8 +696,9 @@ lpfc_work_done(struct lpfc_hba *phba)
+ phba->hba_flag & HBA_SP_QUEUE_EVT)) {
+ if (pring->flag & LPFC_STOP_IOCB_EVENT) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+- /* Set the lpfc data pending flag */
+- set_bit(LPFC_DATA_READY, &phba->data_flags);
++ /* Preserve legacy behavior. */
++ if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
++ set_bit(LPFC_DATA_READY, &phba->data_flags);
+ } else {
+ if (phba->link_state >= LPFC_LINK_UP ||
+ phba->link_flag & LS_MDS_LOOPBACK) {
+diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
+index d841aa42f607..730393a65e25 100644
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -1998,8 +1998,14 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ ndlp->nlp_type |= NLP_NVME_TARGET;
+ if (bf_get_be32(prli_disc, nvpr))
+ ndlp->nlp_type |= NLP_NVME_DISCOVERY;
++
++ /*
++ * If prli_fba is set, the Target supports FirstBurst.
++ * If prli_fb_sz is 0, the FirstBurst size is unlimited,
++ * otherwise it defines the actual size supported by
++ * the NVME Target.
++ */
+ if ((bf_get_be32(prli_fba, nvpr) == 1) &&
+- (bf_get_be32(prli_fb_sz, nvpr) > 0) &&
+ (phba->cfg_nvme_enable_fb) &&
+ (!phba->nvmet_support)) {
+ /* Both sides support FB. The target's first
+@@ -2008,6 +2014,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
+ nvpr);
++
++ /* Expressed in units of 512 bytes */
++ if (ndlp->nvme_fb_size)
++ ndlp->nvme_fb_size <<=
++ LPFC_NVME_FB_SHIFT;
++ else
++ ndlp->nvme_fb_size = LPFC_NVME_MAX_FB;
+ }
+ }
+
+diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
+index 81e3a4f10c3c..6327f858c4c8 100644
+--- a/drivers/scsi/lpfc/lpfc_nvme.c
++++ b/drivers/scsi/lpfc/lpfc_nvme.c
+@@ -241,10 +241,11 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6047 nvme cmpl Enter "
+- "Data %p DID %x Xri: %x status %x cmd:%p lsreg:%p "
+- "bmp:%p ndlp:%p\n",
++ "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
++ "lsreg:%p bmp:%p ndlp:%p\n",
+ pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
+ cmdwqe->sli4_xritag, status,
++ (wcqe->parameter & 0xffff),
+ cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
+
+ lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
+@@ -419,6 +420,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ {
+ int ret = 0;
+ struct lpfc_nvme_lport *lport;
++ struct lpfc_nvme_rport *rport;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ struct ulp_bde64 *bpl;
+@@ -437,19 +439,18 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ */
+
+ lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
++ rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ vport = lport->vport;
+
+ if (vport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
+- if (vport->load_flag & FC_UNLOADING)
+- return -ENODEV;
+-
+- ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
++ /* Need the ndlp. It is stored in the driver's rport. */
++ ndlp = rport->ndlp;
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+- "6051 DID x%06x not an active rport.\n",
+- pnvme_rport->port_id);
++ "6051 Remoteport %p, rport has invalid ndlp. "
++ "Failing LS Req\n", pnvme_rport);
+ return -ENODEV;
+ }
+
+@@ -500,8 +501,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+
+ /* Expand print to include key fields. */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+- "6149 ENTER. lport %p, rport %p lsreq%p rqstlen:%d "
+- "rsplen:%d %pad %pad\n",
++ "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
++ "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
++ ndlp->nlp_DID,
+ pnvme_lport, pnvme_rport,
+ pnvme_lsreq, pnvme_lsreq->rqstlen,
+ pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+@@ -517,7 +519,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ ndlp, 2, 30, 0);
+ if (ret != WQE_SUCCESS) {
+ atomic_inc(&lport->xmt_ls_err);
+- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ "6052 EXIT. issue ls wqe failed lport %p, "
+ "rport %p lsreq%p Status %x DID %x\n",
+ pnvme_lport, pnvme_rport, pnvme_lsreq,
+@@ -980,14 +982,14 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
+ }
+ #endif
+- freqpriv = nCmd->private;
+- freqpriv->nvme_buf = NULL;
+
+ /* NVME targets need completion held off until the abort exchange
+ * completes unless the NVME Rport is getting unregistered.
+ */
+
+ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
++ freqpriv = nCmd->private;
++ freqpriv->nvme_buf = NULL;
+ nCmd->done(nCmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
+diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
+index e79f8f75758c..48b0229ebc99 100644
+--- a/drivers/scsi/lpfc/lpfc_nvme.h
++++ b/drivers/scsi/lpfc/lpfc_nvme.h
+@@ -27,6 +27,8 @@
+
+ #define LPFC_NVME_WAIT_TMO 10
+ #define LPFC_NVME_EXPEDITE_XRICNT 8
++#define LPFC_NVME_FB_SHIFT 9
++#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */
+
+ struct lpfc_nvme_qhandle {
+ uint32_t index; /* WQ index to use */
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 5f5528a12308..149f21f53b13 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -129,6 +129,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+ /* set consumption flag every once in a while */
+ if (!((q->host_index + 1) % q->entry_repost))
+ bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
++ else
++ bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
+ if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+ bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
+ lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
+index 7de5d8d75480..eb5471bc7263 100644
+--- a/drivers/scsi/mvsas/mv_94xx.c
++++ b/drivers/scsi/mvsas/mv_94xx.c
+@@ -1080,16 +1080,16 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv,
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+
+ int drive = (i/3) & (4-1); /* drive number on host */
+- u32 block = mr32(MVS_SGPIO_DCTRL +
++ int driveshift = drive * 8; /* bit offset of drive */
++ u32 block = ioread32be(regs + MVS_SGPIO_DCTRL +
+ MVS_SGPIO_HOST_OFFSET * mvi->id);
+
+-
+ /*
+ * if bit is set then create a mask with the first
+ * bit of the drive set in the mask ...
+ */
+- u32 bit = (write_data[i/8] & (1 << (i&(8-1)))) ?
+- 1<<(24-drive*8) : 0;
++ u32 bit = get_unaligned_be32(write_data) & (1 << i) ?
++ 1 << driveshift : 0;
+
+ /*
+ * ... and then shift it to the right position based
+@@ -1098,26 +1098,27 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv,
+ switch (i%3) {
+ case 0: /* activity */
+ block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT)
+- << (24-drive*8));
++ << driveshift);
+ /* hardwire activity bit to SOF */
+ block |= LED_BLINKA_SOF << (
+ MVS_SGPIO_DCTRL_ACT_SHIFT +
+- (24-drive*8));
++ driveshift);
+ break;
+ case 1: /* id */
+ block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT)
+- << (24-drive*8));
++ << driveshift);
+ block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT;
+ break;
+ case 2: /* fail */
+ block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT)
+- << (24-drive*8));
++ << driveshift);
+ block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT;
+ break;
+ }
+
+- mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
+- block);
++ iowrite32be(block,
++ regs + MVS_SGPIO_DCTRL +
++ MVS_SGPIO_HOST_OFFSET * mvi->id);
+
+ }
+
+@@ -1132,7 +1133,7 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv,
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+
+ mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
+- be32_to_cpu(((u32 *) write_data)[i]));
++ ((u32 *) write_data)[i]);
+ }
+ return reg_count;
+ }
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index f3b117246d47..4a2d276c42eb 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -189,6 +189,7 @@ static struct {
+ {"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
+ {"HP", "DF400", "*", BLIST_REPORTLUN2},
+ {"HP", "DF500", "*", BLIST_REPORTLUN2},
++ {"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+ {"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 912eacdc2d83..e93e9178978c 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -856,6 +856,17 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ /* for passthrough error may be set */
+ error = BLK_STS_OK;
+ }
++ /*
++ * Another corner case: the SCSI status byte is non-zero but 'good'.
++ * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
++ * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
++ * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
++ * intermediate statuses (both obsolete in SAM-4) as good.
++ */
++ if (status_byte(result) && scsi_status_is_good(result)) {
++ result = 0;
++ error = BLK_STS_OK;
++ }
+
+ /*
+ * special case: failed zero length commands always need to
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index c198b96368dd..5c40d809830f 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1894,7 +1894,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
+ num = (rem_sz > scatter_elem_sz_prev) ?
+ scatter_elem_sz_prev : rem_sz;
+
+- schp->pages[k] = alloc_pages(gfp_mask, order);
++ schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
+ if (!schp->pages[k])
+ goto out;
+
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+index 2817e67df3d5..98a51521d853 100644
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -324,7 +324,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch)
+ }
+
+ fd = dpaa2_dq_fd(dq);
+- fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
++ fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
+ fq->stats.frames++;
+
+ fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
+@@ -374,12 +374,14 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset +
+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
+- sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
++ sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+ if (unlikely(!sgt_buf)) {
+ err = -ENOMEM;
+ goto sgt_buf_alloc_failed;
+ }
+ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
++ memset(sgt_buf, 0, sgt_buf_size);
++
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Fill in the HW SGT structure.
+@@ -421,7 +423,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ return 0;
+
+ dma_map_single_failed:
+- kfree(sgt_buf);
++ skb_free_frag(sgt_buf);
+ sgt_buf_alloc_failed:
+ dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+ dma_map_sg_failed:
+@@ -525,9 +527,9 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+ return;
+ }
+
+- /* Free SGT buffer kmalloc'ed on tx */
++ /* Free SGT buffer allocated on tx */
+ if (fd_format != dpaa2_fd_single)
+- kfree(skbh);
++ skb_free_frag(skbh);
+
+ /* Move on with skb release */
+ dev_kfree_skb(skb);
+@@ -1906,7 +1908,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
+ queue.destination.id = fq->channel->dpcon_id;
+ queue.destination.type = DPNI_DEST_DPCON;
+ queue.destination.priority = 1;
+- queue.user_context = (u64)fq;
++ queue.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, 0, fq->flowid,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+@@ -1958,7 +1960,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
+ queue.destination.id = fq->channel->dpcon_id;
+ queue.destination.type = DPNI_DEST_DPCON;
+ queue.destination.priority = 0;
+- queue.user_context = (u64)fq;
++ queue.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
+index 975dbbb3abd0..7da3eb4ca4be 100644
+--- a/drivers/staging/ks7010/ks_hostif.c
++++ b/drivers/staging/ks7010/ks_hostif.c
+@@ -242,9 +242,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ offset = 0;
+
+ while (bsize > offset) {
+- /* DPRINTK(4, "Element ID=%d\n",*bp); */
+- switch (*bp) {
+- case 0: /* ssid */
++ switch (*bp) { /* Information Element ID */
++ case WLAN_EID_SSID:
+ if (*(bp + 1) <= SSID_MAX_SIZE) {
+ ap->ssid.size = *(bp + 1);
+ } else {
+@@ -254,8 +253,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ }
+ memcpy(ap->ssid.body, bp + 2, ap->ssid.size);
+ break;
+- case 1: /* rate */
+- case 50: /* ext rate */
++ case WLAN_EID_SUPP_RATES:
++ case WLAN_EID_EXT_SUPP_RATES:
+ if ((*(bp + 1) + ap->rate_set.size) <=
+ RATE_SET_MAX_SIZE) {
+ memcpy(&ap->rate_set.body[ap->rate_set.size],
+@@ -271,9 +270,9 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ (RATE_SET_MAX_SIZE - ap->rate_set.size);
+ }
+ break;
+- case 3: /* DS parameter */
++ case WLAN_EID_DS_PARAMS:
+ break;
+- case 48: /* RSN(WPA2) */
++ case WLAN_EID_RSN:
+ ap->rsn_ie.id = *bp;
+ if (*(bp + 1) <= RSN_IE_BODY_MAX) {
+ ap->rsn_ie.size = *(bp + 1);
+@@ -284,8 +283,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ }
+ memcpy(ap->rsn_ie.body, bp + 2, ap->rsn_ie.size);
+ break;
+- case 221: /* WPA */
+- if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) { /* WPA OUI check */
++ case WLAN_EID_VENDOR_SPECIFIC: /* WPA */
++ if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) { /* WPA OUI check */
+ ap->wpa_ie.id = *bp;
+ if (*(bp + 1) <= RSN_IE_BODY_MAX) {
+ ap->wpa_ie.size = *(bp + 1);
+@@ -300,18 +299,18 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ }
+ break;
+
+- case 2: /* FH parameter */
+- case 4: /* CF parameter */
+- case 5: /* TIM */
+- case 6: /* IBSS parameter */
+- case 7: /* Country */
+- case 42: /* ERP information */
+- case 47: /* Reserve ID 47 Broadcom AP */
++ case WLAN_EID_FH_PARAMS:
++ case WLAN_EID_CF_PARAMS:
++ case WLAN_EID_TIM:
++ case WLAN_EID_IBSS_PARAMS:
++ case WLAN_EID_COUNTRY:
++ case WLAN_EID_ERP_INFO:
+ break;
+ default:
+ DPRINTK(4, "unknown Element ID=%d\n", *bp);
+ break;
+ }
++
+ offset += 2; /* id & size field */
+ offset += *(bp + 1); /* +size offset */
+ bp += (*(bp + 1) + 2); /* pointer update */
+diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
+index 5bae8d468e23..9ac317e4b507 100644
+--- a/drivers/staging/ks7010/ks_hostif.h
++++ b/drivers/staging/ks7010/ks_hostif.h
+@@ -13,6 +13,7 @@
+ #define _KS_HOSTIF_H_
+
+ #include <linux/compiler.h>
++#include <linux/ieee80211.h>
+
+ /*
+ * HOST-MAC I/F events
+diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
+index 4368f4e9f208..f1233ca7d337 100644
+--- a/drivers/staging/lustre/lustre/include/obd.h
++++ b/drivers/staging/lustre/lustre/include/obd.h
+@@ -191,7 +191,7 @@ struct client_obd {
+ struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
+
+ /* the grant values are protected by loi_list_lock below */
+- unsigned long cl_dirty_pages; /* all _dirty_ in pahges */
++ unsigned long cl_dirty_pages; /* all _dirty_ in pages */
+ unsigned long cl_dirty_max_pages; /* allowed w/o rpc */
+ unsigned long cl_dirty_transit; /* dirty synchronous */
+ unsigned long cl_avail_grant; /* bytes of credit for ost */
+diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+index c2c57f65431e..ff9c2f96bada 100644
+--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
++++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+@@ -2695,7 +2695,7 @@ static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp,
+ if (lsm && !lmm) {
+ int i;
+
+- for (i = 1; i < lsm->lsm_md_stripe_count; i++) {
++ for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
+ /*
+ * For migrating inode, the master stripe and master
+ * object will be the same, so do not need iput, see
+diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
+index 5767ac2a7d16..a907d956443f 100644
+--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
++++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
+@@ -1530,7 +1530,7 @@ static int osc_enter_cache_try(struct client_obd *cli,
+ if (rc < 0)
+ return 0;
+
+- if (cli->cl_dirty_pages <= cli->cl_dirty_max_pages &&
++ if (cli->cl_dirty_pages < cli->cl_dirty_max_pages &&
+ atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
+ osc_consume_write_grant(cli, &oap->oap_brw_page);
+ if (transient) {
+diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
+index 3c300f7b6a62..d607c59761cf 100644
+--- a/drivers/staging/rtl8192u/r8192U_core.c
++++ b/drivers/staging/rtl8192u/r8192U_core.c
+@@ -1706,6 +1706,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
+
+ priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
+ priv->oldaddr = kmalloc(16, GFP_KERNEL);
++ if (!priv->oldaddr)
++ return -ENOMEM;
+ oldaddr = priv->oldaddr;
+ align = ((long)oldaddr) & 3;
+ if (align) {
+diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+index 045d577fe4f8..0ed21dd08170 100644
+--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
++++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+@@ -25,6 +25,10 @@ MODULE_PARM_DESC(enable_compat_alsa,
+ static void snd_devm_unregister_child(struct device *dev, void *res)
+ {
+ struct device *childdev = *(struct device **)res;
++ struct bcm2835_chip *chip = dev_get_drvdata(childdev);
++ struct snd_card *card = chip->card;
++
++ snd_card_free(card);
+
+ device_unregister(childdev);
+ }
+@@ -50,6 +54,13 @@ static int snd_devm_add_child(struct device *dev, struct device *child)
+ return 0;
+ }
+
++static void snd_bcm2835_release(struct device *dev)
++{
++ struct bcm2835_chip *chip = dev_get_drvdata(dev);
++
++ kfree(chip);
++}
++
+ static struct device *
+ snd_create_device(struct device *parent,
+ struct device_driver *driver,
+@@ -65,6 +76,7 @@ snd_create_device(struct device *parent,
+ device_initialize(device);
+ device->parent = parent;
+ device->driver = driver;
++ device->release = snd_bcm2835_release;
+
+ dev_set_name(device, "%s", name);
+
+@@ -75,18 +87,19 @@ snd_create_device(struct device *parent,
+ return device;
+ }
+
+-static int snd_bcm2835_free(struct bcm2835_chip *chip)
+-{
+- kfree(chip);
+- return 0;
+-}
+-
+ /* component-destructor
+ * (see "Management of Cards and Components")
+ */
+ static int snd_bcm2835_dev_free(struct snd_device *device)
+ {
+- return snd_bcm2835_free(device->device_data);
++ struct bcm2835_chip *chip = device->device_data;
++ struct snd_card *card = chip->card;
++
++ /* TODO: free pcm, ctl */
++
++ snd_device_free(card, chip);
++
++ return 0;
+ }
+
+ /* chip-specific constructor
+@@ -111,7 +124,7 @@ static int snd_bcm2835_create(struct snd_card *card,
+
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err) {
+- snd_bcm2835_free(chip);
++ kfree(chip);
+ return err;
+ }
+
+@@ -119,31 +132,14 @@ static int snd_bcm2835_create(struct snd_card *card,
+ return 0;
+ }
+
+-static void snd_devm_card_free(struct device *dev, void *res)
++static struct snd_card *snd_bcm2835_card_new(struct device *dev)
+ {
+- struct snd_card *snd_card = *(struct snd_card **)res;
+-
+- snd_card_free(snd_card);
+-}
+-
+-static struct snd_card *snd_devm_card_new(struct device *dev)
+-{
+- struct snd_card **dr;
+ struct snd_card *card;
+ int ret;
+
+- dr = devres_alloc(snd_devm_card_free, sizeof(*dr), GFP_KERNEL);
+- if (!dr)
+- return ERR_PTR(-ENOMEM);
+-
+ ret = snd_card_new(dev, -1, NULL, THIS_MODULE, 0, &card);
+- if (ret) {
+- devres_free(dr);
++ if (ret)
+ return ERR_PTR(ret);
+- }
+-
+- *dr = card;
+- devres_add(dev, dr);
+
+ return card;
+ }
+@@ -260,7 +256,7 @@ static int snd_add_child_device(struct device *device,
+ return PTR_ERR(child);
+ }
+
+- card = snd_devm_card_new(child);
++ card = snd_bcm2835_card_new(child);
+ if (IS_ERR(card)) {
+ dev_err(child, "Failed to create card");
+ return PTR_ERR(card);
+@@ -302,7 +298,7 @@ static int snd_add_child_device(struct device *device,
+ return err;
+ }
+
+- dev_set_drvdata(child, card);
++ dev_set_drvdata(child, chip);
+ dev_info(child, "card created with %d channels\n", numchans);
+
+ return 0;
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 804c1af6fd33..95833cbc4338 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1867,7 +1867,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+
+ status = serial_port_in(port, UART_LSR);
+
+- if (status & (UART_LSR_DR | UART_LSR_BI)) {
++ if (status & (UART_LSR_DR | UART_LSR_BI) &&
++ iir & UART_IIR_RDI) {
+ if (!up->dma || handle_rx_dma(up, iir))
+ status = serial8250_rx_chars(up, status);
+ }
+diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
+index b88b05f8e81e..ae30398fcf56 100644
+--- a/drivers/tty/serial/altera_uart.c
++++ b/drivers/tty/serial/altera_uart.c
+@@ -327,7 +327,7 @@ static int altera_uart_startup(struct uart_port *port)
+
+ /* Enable RX interrupts now */
+ pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
+- writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
++ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+@@ -343,7 +343,7 @@ static void altera_uart_shutdown(struct uart_port *port)
+
+ /* Disable all interrupts now */
+ pp->imr = 0;
+- writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
++ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+@@ -432,7 +432,7 @@ static void altera_uart_console_putc(struct uart_port *port, int c)
+ ALTERA_UART_STATUS_TRDY_MSK))
+ cpu_relax();
+
+- writel(c, port->membase + ALTERA_UART_TXDATA_REG);
++ altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG);
+ }
+
+ static void altera_uart_console_write(struct console *co, const char *s,
+@@ -502,13 +502,13 @@ static int __init altera_uart_earlycon_setup(struct earlycon_device *dev,
+ return -ENODEV;
+
+ /* Enable RX interrupts now */
+- writel(ALTERA_UART_CONTROL_RRDY_MSK,
+- port->membase + ALTERA_UART_CONTROL_REG);
++ altera_uart_writel(port, ALTERA_UART_CONTROL_RRDY_MSK,
++ ALTERA_UART_CONTROL_REG);
+
+ if (dev->baud) {
+ unsigned int baudclk = port->uartclk / dev->baud;
+
+- writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG);
++ altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
+ }
+
+ dev->con->write = altera_uart_earlycon_write;
+diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
+index 2599f9ecccfe..d904a3a345e7 100644
+--- a/drivers/tty/serial/arc_uart.c
++++ b/drivers/tty/serial/arc_uart.c
+@@ -593,6 +593,11 @@ static int arc_serial_probe(struct platform_device *pdev)
+ if (dev_id < 0)
+ dev_id = 0;
+
++ if (dev_id >= ARRAY_SIZE(arc_uart_ports)) {
++ dev_err(&pdev->dev, "serial%d out of range\n", dev_id);
++ return -EINVAL;
++ }
++
+ uart = &arc_uart_ports[dev_id];
+ port = &uart->port;
+
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 8cf112f2efc3..51e47a63d61a 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2145,6 +2145,10 @@ static int lpuart_probe(struct platform_device *pdev)
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ return ret;
+ }
++ if (ret >= ARRAY_SIZE(lpuart_ports)) {
++ dev_err(&pdev->dev, "serial%d out of range\n", ret);
++ return -EINVAL;
++ }
+ sport->port.line = ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index a33c685af990..961ab7d2add5 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -2042,6 +2042,12 @@ static int serial_imx_probe(struct platform_device *pdev)
+ else if (ret < 0)
+ return ret;
+
++ if (sport->port.line >= ARRAY_SIZE(imx_ports)) {
++ dev_err(&pdev->dev, "serial%d out of range\n",
++ sport->port.line);
++ return -EINVAL;
++ }
++
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
+index 03d26aabb0c4..2581461f92bf 100644
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -617,7 +617,7 @@ static void wait_for_xmitr(struct uart_port *port)
+ u32 val;
+
+ readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+- (val & STAT_TX_EMP), 1, 10000);
++ (val & STAT_TX_RDY(port)), 1, 10000);
+ }
+
+ static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index 079dc47aa142..caa8a41b6e71 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -1663,6 +1663,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
+ s->port.line = pdev->id < 0 ? 0 : pdev->id;
+ else if (ret < 0)
+ return ret;
++ if (s->port.line >= ARRAY_SIZE(auart_port)) {
++ dev_err(&pdev->dev, "serial%d out of range\n", s->port.line);
++ return -EINVAL;
++ }
+
+ if (of_id) {
+ pdev->id_entry = of_id->data;
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index f9fecc5ed0ce..3f2f8c118ce0 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1818,6 +1818,10 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
+
+ dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
+
++ if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
++ dev_err(&pdev->dev, "serial%d out of range\n", index);
++ return -EINVAL;
++ }
+ ourport = &s3c24xx_serial_ports[index];
+
+ ourport->drv_data = s3c24xx_get_driver_data(pdev);
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 44adf9db38f8..ab757546c6db 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -3098,6 +3098,10 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+ dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
+ return NULL;
+ }
++ if (id >= ARRAY_SIZE(sci_ports)) {
++ dev_err(&pdev->dev, "serial%d out of range\n", id);
++ return NULL;
++ }
+
+ sp = &sci_ports[id];
+ *dev_id = id;
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index b9b2bc76bcac..abcb4d09a2d8 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -1110,7 +1110,7 @@ static struct uart_port *cdns_uart_get_port(int id)
+ struct uart_port *port;
+
+ /* Try the given port id if failed use default method */
+- if (cdns_uart_port[id].mapbase != 0) {
++ if (id < CDNS_UART_NR_PORTS && cdns_uart_port[id].mapbase != 0) {
+ /* Find the next unused port */
+ for (id = 0; id < CDNS_UART_NR_PORTS; id++)
+ if (cdns_uart_port[id].mapbase == 0)
+diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
+index cd77af3b1565..d939b24ae92a 100644
+--- a/drivers/usb/dwc2/core.h
++++ b/drivers/usb/dwc2/core.h
+@@ -217,7 +217,7 @@ struct dwc2_hsotg_ep {
+ unsigned char dir_in;
+ unsigned char index;
+ unsigned char mc;
+- unsigned char interval;
++ u16 interval;
+
+ unsigned int halted:1;
+ unsigned int periodic:1;
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index a5d72fcd1603..7ee7320d3c24 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -989,6 +989,24 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
++
++ /*
++ * In buffer DMA or external DMA mode channel can't be halted
++ * for non-split periodic channels. At the end of the next
++ * uframe/frame (in the worst case), the core generates a channel
++ * halted and disables the channel automatically.
++ */
++ if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) ||
++ hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) {
++ if (!chan->do_split &&
++ (chan->ep_type == USB_ENDPOINT_XFER_ISOC ||
++ chan->ep_type == USB_ENDPOINT_XFER_INT)) {
++ dev_err(hsotg->dev, "%s() Channel can't be halted\n",
++ __func__);
++ return;
++ }
++ }
++
+ if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
+ dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
+
+@@ -2322,10 +2340,22 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
+ */
+ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
+ {
+- u32 hcfg, hfir, otgctl;
++ u32 hcfg, hfir, otgctl, usbcfg;
+
+ dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
+
++ /* Set HS/FS Timeout Calibration to 7 (max available value).
++ * The number of PHY clocks that the application programs in
++ * this field is added to the high/full speed interpacket timeout
++ * duration in the core to account for any additional delays
++ * introduced by the PHY. This can be required, because the delay
++ * introduced by the PHY in generating the linestate condition
++ * can vary from one PHY to another.
++ */
++ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
++ usbcfg |= GUSBCFG_TOUTCAL(7);
++ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
++
+ /* Restart the Phy Clock */
+ dwc2_writel(0, hsotg->regs + PCGCTL);
+
+diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
+index 7ac725038f8d..025bc68094fc 100644
+--- a/drivers/usb/dwc3/Makefile
++++ b/drivers/usb/dwc3/Makefile
+@@ -6,7 +6,7 @@ obj-$(CONFIG_USB_DWC3) += dwc3.o
+
+ dwc3-y := core.o
+
+-ifneq ($(CONFIG_FTRACE),)
++ifneq ($(CONFIG_TRACING),)
+ dwc3-y += trace.o
+ endif
+
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index df4569df7eaf..ddef1ae0c708 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -232,7 +232,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (!(reg & DWC3_DCTL_CSFTRST))
+- return 0;
++ goto done;
+
+ udelay(1);
+ } while (--retries);
+@@ -241,6 +241,17 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
+ phy_exit(dwc->usb2_generic_phy);
+
+ return -ETIMEDOUT;
++
++done:
++ /*
++ * For DWC_usb31 controller, once DWC3_DCTL_CSFTRST bit is cleared,
++ * we must wait at least 50ms before accessing the PHY domain
++ * (synchronization delay). DWC_usb31 programming guide section 1.3.2.
++ */
++ if (dwc3_is_usb31(dwc))
++ msleep(50);
++
++ return 0;
+ }
+
+ /*
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 860d2bc184d1..cdd609930443 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -241,6 +241,8 @@
+ #define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1)
+
+ /* Global TX Fifo Size Register */
++#define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */
++#define DWC31_GTXFIFOSIZ_TXFDEF(n) ((n) & 0x7fff) /* DWC_usb31 only */
+ #define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff)
+ #define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
+
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 77c7ecca816a..b8b629c615d3 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1422,7 +1422,7 @@ static int count_ext_compat(struct usb_configuration *c)
+ return res;
+ }
+
+-static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
++static int fill_ext_compat(struct usb_configuration *c, u8 *buf)
+ {
+ int i, count;
+
+@@ -1449,10 +1449,12 @@ static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
+ buf += 23;
+ }
+ count += 24;
+- if (count >= 4096)
+- return;
++ if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ)
++ return count;
+ }
+ }
++
++ return count;
+ }
+
+ static int count_ext_prop(struct usb_configuration *c, int interface)
+@@ -1497,25 +1499,20 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
+ struct usb_os_desc *d;
+ struct usb_os_desc_ext_prop *ext_prop;
+ int j, count, n, ret;
+- u8 *start = buf;
+
+ f = c->interface[interface];
++ count = 10; /* header length */
+ for (j = 0; j < f->os_desc_n; ++j) {
+ if (interface != f->os_desc_table[j].if_id)
+ continue;
+ d = f->os_desc_table[j].os_desc;
+ if (d)
+ list_for_each_entry(ext_prop, &d->ext_prop, entry) {
+- /* 4kB minus header length */
+- n = buf - start;
+- if (n >= 4086)
+- return 0;
+-
+- count = ext_prop->data_len +
++ n = ext_prop->data_len +
+ ext_prop->name_len + 14;
+- if (count > 4086 - n)
+- return -EINVAL;
+- usb_ext_prop_put_size(buf, count);
++ if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ)
++ return count;
++ usb_ext_prop_put_size(buf, n);
+ usb_ext_prop_put_type(buf, ext_prop->type);
+ ret = usb_ext_prop_put_name(buf, ext_prop->name,
+ ext_prop->name_len);
+@@ -1541,11 +1538,12 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
+ default:
+ return -EINVAL;
+ }
+- buf += count;
++ buf += n;
++ count += n;
+ }
+ }
+
+- return 0;
++ return count;
+ }
+
+ /*
+@@ -1827,6 +1825,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ req->complete = composite_setup_complete;
+ buf = req->buf;
+ os_desc_cfg = cdev->os_desc_config;
++ w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ);
+ memset(buf, 0, w_length);
+ buf[5] = 0x01;
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+@@ -1850,8 +1849,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ count += 16; /* header */
+ put_unaligned_le32(count, buf);
+ buf += 16;
+- fill_ext_compat(os_desc_cfg, buf);
+- value = w_length;
++ value = fill_ext_compat(os_desc_cfg, buf);
++ value = min_t(u16, w_length, value);
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+@@ -1880,8 +1879,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ interface, buf);
+ if (value < 0)
+ return value;
+-
+- value = w_length;
++ value = min_t(u16, w_length, value);
+ }
+ break;
+ }
+@@ -2156,8 +2154,8 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
+ goto end;
+ }
+
+- /* OS feature descriptor length <= 4kB */
+- cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
++ cdev->os_desc_req->buf = kmalloc(USB_COMP_EP0_OS_DESC_BUFSIZ,
++ GFP_KERNEL);
+ if (!cdev->os_desc_req->buf) {
+ ret = -ENOMEM;
+ usb_ep_free_request(ep0, cdev->os_desc_req);
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index d2428a9e8900..0294e4f18873 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -758,9 +758,13 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+
+ if (io_data->read && ret > 0) {
++ mm_segment_t oldfs = get_fs();
++
++ set_fs(USER_DS);
+ use_mm(io_data->mm);
+ ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
+ unuse_mm(io_data->mm);
++ set_fs(oldfs);
+ }
+
+ io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
+@@ -3238,7 +3242,7 @@ static int ffs_func_setup(struct usb_function *f,
+ __ffs_event_add(ffs, FUNCTIONFS_SETUP);
+ spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+
+- return 0;
++ return USB_GADGET_DELAYED_STATUS;
+ }
+
+ static bool ffs_func_req_match(struct usb_function *f,
+diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h
+index 26601bf4e7a9..70023d401079 100644
+--- a/drivers/usb/gadget/udc/goku_udc.h
++++ b/drivers/usb/gadget/udc/goku_udc.h
+@@ -25,7 +25,7 @@ struct goku_udc_regs {
+ # define INT_EP1DATASET 0x00040
+ # define INT_EP2DATASET 0x00080
+ # define INT_EP3DATASET 0x00100
+-#define INT_EPnNAK(n) (0x00100 < (n)) /* 0 < n < 4 */
++#define INT_EPnNAK(n) (0x00100 << (n)) /* 0 < n < 4 */
+ # define INT_EP1NAK 0x00200
+ # define INT_EP2NAK 0x00400
+ # define INT_EP3NAK 0x00800
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 332420d10be9..e5ace8995b3b 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -913,6 +913,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+ if (dev->out_ctx)
+ xhci_free_container_ctx(xhci, dev->out_ctx);
+
++ if (dev->udev && dev->udev->slot_id)
++ dev->udev->slot_id = 0;
+ kfree(xhci->devs[slot_id]);
+ xhci->devs[slot_id] = NULL;
+ }
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index b60a02c50b89..bd281a96485c 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4769,6 +4769,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ * quirks
+ */
+ struct device *dev = hcd->self.sysdev;
++ unsigned int minor_rev;
+ int retval;
+
+ /* Accept arbitrarily long scatter-gather lists */
+@@ -4796,12 +4797,19 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ */
+ hcd->has_tt = 1;
+ } else {
+- /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
+- if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
+- xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
++ /*
++ * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
++ * minor revision instead of sbrn
++ */
++ minor_rev = xhci->usb3_rhub.min_rev;
++ if (minor_rev) {
+ hcd->speed = HCD_USB31;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+ }
++ xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n",
++ minor_rev,
++ minor_rev ? "Enhanced" : "");
++
+ /* xHCI private pointer was set in xhci_pci_probe for the second
+ * registered roothub.
+ */
+diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
+index eeefa29f8aa2..a20b65cb6678 100644
+--- a/drivers/usb/usbip/Kconfig
++++ b/drivers/usb/usbip/Kconfig
+@@ -27,7 +27,7 @@ config USBIP_VHCI_HCD
+
+ config USBIP_VHCI_HC_PORTS
+ int "Number of ports per USB/IP virtual host controller"
+- range 1 31
++ range 1 15
+ default 8
+ depends on USBIP_VHCI_HCD
+ ---help---
+diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
+index 9b2ac55ac34f..8cf2aa973b50 100644
+--- a/fs/ext2/inode.c
++++ b/fs/ext2/inode.c
+@@ -1261,21 +1261,11 @@ static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
+
+ static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
+ {
+- /*
+- * XXX: it seems like a bug here that we don't allow
+- * IS_APPEND inode to have blocks-past-i_size trimmed off.
+- * review and fix this.
+- *
+- * Also would be nice to be able to handle IO errors and such,
+- * but that's probably too much to ask.
+- */
+ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
+ return;
+ if (ext2_inode_is_fast_symlink(inode))
+ return;
+- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+- return;
+
+ dax_sem_down_write(EXT2_I(inode));
+ __ext2_truncate_blocks(inode, offset);
+diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
+index 513c357c734b..a6c0f54c48c3 100644
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -588,6 +588,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
+ return 0;
+
+ out_put_hidden_dir:
++ cancel_delayed_work_sync(&sbi->sync_work);
+ iput(sbi->hidden_dir);
+ out_put_root:
+ dput(sb->s_root);
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 1352b1b990a7..3ebb2f6ace79 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1271,17 +1271,7 @@ enum {
+ static inline const struct cpumask *
+ mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
+ {
+- struct irq_desc *desc;
+- unsigned int irq;
+- int eqn;
+- int err;
+-
+- err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
+- if (err)
+- return NULL;
+-
+- desc = irq_to_desc(irq);
+- return desc->affinity_hint;
++ return dev->priv.irq_info[vector].mask;
+ }
+
+ #endif /* MLX5_DRIVER_H */
+diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
+index cef0e44601f8..4b6b9283fa7b 100644
+--- a/include/linux/usb/composite.h
++++ b/include/linux/usb/composite.h
+@@ -54,6 +54,9 @@
+ /* big enough to hold our biggest descriptor */
+ #define USB_COMP_EP0_BUFSIZ 1024
+
++/* OS feature descriptor length <= 4kB */
++#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096
++
+ #define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1)
+ struct usb_configuration;
+
+diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
+index cb85eddb47ea..eb7853c1a23b 100644
+--- a/include/scsi/scsi.h
++++ b/include/scsi/scsi.h
+@@ -47,6 +47,8 @@ static inline int scsi_status_is_good(int status)
+ */
+ status &= 0xfe;
+ return ((status == SAM_STAT_GOOD) ||
++ (status == SAM_STAT_CONDITION_MET) ||
++ /* Next two "intermediate" statuses are obsolete in SAM-4 */
+ (status == SAM_STAT_INTERMEDIATE) ||
+ (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
+ /* FIXME: this is obsolete in SAM-3 */
+diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
+index c587a61c32bf..2e08c6f3ac3e 100644
+--- a/include/uapi/linux/nl80211.h
++++ b/include/uapi/linux/nl80211.h
+@@ -2618,6 +2618,8 @@ enum nl80211_attrs {
+ #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
+ #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
+
++#define NL80211_WIPHY_NAME_MAXLEN 128
++
+ #define NL80211_MAX_SUPP_RATES 32
+ #define NL80211_MAX_SUPP_HT_RATES 77
+ #define NL80211_MAX_SUPP_REG_RULES 64
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3e550507e9f0..ace13bea3e50 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2097,7 +2097,7 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
+ int i, j;
+
+ for (i = count, j = offset; i--; j++) {
+- if (!remove_xps_queue(dev_maps, cpu, j))
++ if (!remove_xps_queue(dev_maps, tci, j))
+ break;
+ }
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 85b0b64e7f9d..81c2df84f953 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1603,7 +1603,7 @@ static void __sk_free(struct sock *sk)
+ if (likely(sk->sk_net_refcnt))
+ sock_inuse_add(sock_net(sk), -1);
+
+- if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
++ if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
+ sock_diag_broadcast_destroy(sk);
+ else
+ sk_destruct(sk);
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index adf50fbc4c13..47725250b4ca 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -258,11 +258,13 @@ static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
+ static int dsa_port_setup(struct dsa_port *dp)
+ {
+ struct dsa_switch *ds = dp->ds;
+- int err;
++ int err = 0;
+
+ memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));
+
+- err = devlink_port_register(ds->devlink, &dp->devlink_port, dp->index);
++ if (dp->type != DSA_PORT_TYPE_UNUSED)
++ err = devlink_port_register(ds->devlink, &dp->devlink_port,
++ dp->index);
+ if (err)
+ return err;
+
+@@ -293,7 +295,8 @@ static int dsa_port_setup(struct dsa_port *dp)
+
+ static void dsa_port_teardown(struct dsa_port *dp)
+ {
+- devlink_port_unregister(&dp->devlink_port);
++ if (dp->type != DSA_PORT_TYPE_UNUSED)
++ devlink_port_unregister(&dp->devlink_port);
+
+ switch (dp->type) {
+ case DSA_PORT_TYPE_UNUSED:
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 66340ab750e6..e7daec7c7421 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1040,7 +1040,8 @@ static int __ip_append_data(struct sock *sk,
+ if (copy > length)
+ copy = length;
+
+- if (!(rt->dst.dev->features&NETIF_F_SG)) {
++ if (!(rt->dst.dev->features&NETIF_F_SG) &&
++ skb_tailroom(skb) >= copy) {
+ unsigned int off;
+
+ off = skb->len;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 6818042cd8a9..3a0211692c28 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2860,8 +2860,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
+ return -EBUSY;
+
+ if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
+- if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+- BUG();
++ if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
++ WARN_ON_ONCE(1);
++ return -EINVAL;
++ }
+ if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
+ return -ENOMEM;
+ }
+@@ -3369,6 +3371,7 @@ static void tcp_connect_init(struct sock *sk)
+ sock_reset_flag(sk, SOCK_DONE);
+ tp->snd_wnd = 0;
+ tcp_init_wl(tp, 0);
++ tcp_write_queue_purge(sk);
+ tp->snd_una = tp->write_seq;
+ tp->snd_sml = tp->write_seq;
+ tp->snd_up = tp->write_seq;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 197fcae855ca..9539bdb15edb 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -71,6 +71,7 @@ struct ip6gre_net {
+ struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
+
+ struct ip6_tnl __rcu *collect_md_tun;
++ struct ip6_tnl __rcu *collect_md_tun_erspan;
+ struct net_device *fb_tunnel_dev;
+ };
+
+@@ -81,6 +82,7 @@ static int ip6gre_tunnel_init(struct net_device *dev);
+ static void ip6gre_tunnel_setup(struct net_device *dev);
+ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
+ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
++static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
+
+ /* Tunnel hash table */
+
+@@ -232,7 +234,12 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
+ if (cand)
+ return cand;
+
+- t = rcu_dereference(ign->collect_md_tun);
++ if (gre_proto == htons(ETH_P_ERSPAN) ||
++ gre_proto == htons(ETH_P_ERSPAN2))
++ t = rcu_dereference(ign->collect_md_tun_erspan);
++ else
++ t = rcu_dereference(ign->collect_md_tun);
++
+ if (t && t->dev->flags & IFF_UP)
+ return t;
+
+@@ -261,6 +268,31 @@ static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
+ return &ign->tunnels[prio][h];
+ }
+
++static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
++{
++ if (t->parms.collect_md)
++ rcu_assign_pointer(ign->collect_md_tun, t);
++}
++
++static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
++{
++ if (t->parms.collect_md)
++ rcu_assign_pointer(ign->collect_md_tun_erspan, t);
++}
++
++static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
++{
++ if (t->parms.collect_md)
++ rcu_assign_pointer(ign->collect_md_tun, NULL);
++}
++
++static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
++ struct ip6_tnl *t)
++{
++ if (t->parms.collect_md)
++ rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
++}
++
+ static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
+ const struct ip6_tnl *t)
+ {
+@@ -271,9 +303,6 @@ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
+ {
+ struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
+
+- if (t->parms.collect_md)
+- rcu_assign_pointer(ign->collect_md_tun, t);
+-
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+ rcu_assign_pointer(*tp, t);
+ }
+@@ -283,9 +312,6 @@ static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
+ struct ip6_tnl __rcu **tp;
+ struct ip6_tnl *iter;
+
+- if (t->parms.collect_md)
+- rcu_assign_pointer(ign->collect_md_tun, NULL);
+-
+ for (tp = ip6gre_bucket(ign, t);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+@@ -374,11 +400,23 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+ return NULL;
+ }
+
++static void ip6erspan_tunnel_uninit(struct net_device *dev)
++{
++ struct ip6_tnl *t = netdev_priv(dev);
++ struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
++
++ ip6erspan_tunnel_unlink_md(ign, t);
++ ip6gre_tunnel_unlink(ign, t);
++ dst_cache_reset(&t->dst_cache);
++ dev_put(dev);
++}
++
+ static void ip6gre_tunnel_uninit(struct net_device *dev)
+ {
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
+
++ ip6gre_tunnel_unlink_md(ign, t);
+ ip6gre_tunnel_unlink(ign, t);
+ dst_cache_reset(&t->dst_cache);
+ dev_put(dev);
+@@ -701,6 +739,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ if (tunnel->parms.o_flags & TUNNEL_SEQ)
+ tunnel->o_seqno++;
+
++ if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
++ return -ENOMEM;
++
+ /* Push GRE header. */
+ protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
+
+@@ -905,7 +946,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ truncate = true;
+ }
+
+- if (skb_cow_head(skb, dev->needed_headroom))
++ if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
+ goto tx_err;
+
+ t->parms.o_flags &= ~TUNNEL_KEY;
+@@ -1016,12 +1057,11 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ return NETDEV_TX_OK;
+ }
+
+-static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
++static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
+ {
+ struct net_device *dev = t->dev;
+ struct __ip6_tnl_parm *p = &t->parms;
+ struct flowi6 *fl6 = &t->fl.u.ip6;
+- int t_hlen;
+
+ if (dev->type != ARPHRD_ETHER) {
+ memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+@@ -1048,12 +1088,13 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+ dev->flags |= IFF_POINTOPOINT;
+ else
+ dev->flags &= ~IFF_POINTOPOINT;
++}
+
+- t->tun_hlen = gre_calc_hlen(t->parms.o_flags);
+-
+- t->hlen = t->encap_hlen + t->tun_hlen;
+-
+- t_hlen = t->hlen + sizeof(struct ipv6hdr);
++static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
++ int t_hlen)
++{
++ const struct __ip6_tnl_parm *p = &t->parms;
++ struct net_device *dev = t->dev;
+
+ if (p->flags & IP6_TNL_F_CAP_XMIT) {
+ int strict = (ipv6_addr_type(&p->raddr) &
+@@ -1085,8 +1126,26 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+ }
+ }
+
+-static int ip6gre_tnl_change(struct ip6_tnl *t,
+- const struct __ip6_tnl_parm *p, int set_mtu)
++static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
++{
++ int t_hlen;
++
++ tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
++ tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
++
++ t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
++ tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
++ return t_hlen;
++}
++
++static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
++{
++ ip6gre_tnl_link_config_common(t);
++ ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
++}
++
++static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
++ const struct __ip6_tnl_parm *p)
+ {
+ t->parms.laddr = p->laddr;
+ t->parms.raddr = p->raddr;
+@@ -1102,6 +1161,12 @@ static int ip6gre_tnl_change(struct ip6_tnl *t,
+ t->parms.o_flags = p->o_flags;
+ t->parms.fwmark = p->fwmark;
+ dst_cache_reset(&t->dst_cache);
++}
++
++static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
++ int set_mtu)
++{
++ ip6gre_tnl_copy_tnl_parm(t, p);
+ ip6gre_tnl_link_config(t, set_mtu);
+ return 0;
+ }
+@@ -1378,11 +1443,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
+ return ret;
+ }
+
+- tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
+- tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
+- t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+-
+- dev->hard_header_len = LL_MAX_HEADER + t_hlen;
++ t_hlen = ip6gre_calc_hlen(tunnel);
+ dev->mtu = ETH_DATA_LEN - t_hlen;
+ if (dev->type == ARPHRD_ETHER)
+ dev->mtu -= ETH_HLEN;
+@@ -1723,6 +1784,19 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
+ .ndo_get_iflink = ip6_tnl_get_iflink,
+ };
+
++static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
++{
++ int t_hlen;
++
++ tunnel->tun_hlen = 8;
++ tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
++ erspan_hdr_len(tunnel->parms.erspan_ver);
++
++ t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
++ tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
++ return t_hlen;
++}
++
+ static int ip6erspan_tap_init(struct net_device *dev)
+ {
+ struct ip6_tnl *tunnel;
+@@ -1746,12 +1820,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
+ return ret;
+ }
+
+- tunnel->tun_hlen = 8;
+- tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
+- erspan_hdr_len(tunnel->parms.erspan_ver);
+- t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+-
+- dev->hard_header_len = LL_MAX_HEADER + t_hlen;
++ t_hlen = ip6erspan_calc_hlen(tunnel);
+ dev->mtu = ETH_DATA_LEN - t_hlen;
+ if (dev->type == ARPHRD_ETHER)
+ dev->mtu -= ETH_HLEN;
+@@ -1760,14 +1829,14 @@ static int ip6erspan_tap_init(struct net_device *dev)
+
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ tunnel = netdev_priv(dev);
+- ip6gre_tnl_link_config(tunnel, 1);
++ ip6erspan_tnl_link_config(tunnel, 1);
+
+ return 0;
+ }
+
+ static const struct net_device_ops ip6erspan_netdev_ops = {
+ .ndo_init = ip6erspan_tap_init,
+- .ndo_uninit = ip6gre_tunnel_uninit,
++ .ndo_uninit = ip6erspan_tunnel_uninit,
+ .ndo_start_xmit = ip6erspan_tunnel_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+@@ -1825,13 +1894,11 @@ static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
+ return ret;
+ }
+
+-static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+- struct nlattr *tb[], struct nlattr *data[],
+- struct netlink_ext_ack *extack)
++static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
++ struct nlattr *tb[], struct nlattr *data[],
++ struct netlink_ext_ack *extack)
+ {
+ struct ip6_tnl *nt;
+- struct net *net = dev_net(dev);
+- struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+ struct ip_tunnel_encap ipencap;
+ int err;
+
+@@ -1844,16 +1911,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+ return err;
+ }
+
+- ip6gre_netlink_parms(data, &nt->parms);
+-
+- if (nt->parms.collect_md) {
+- if (rtnl_dereference(ign->collect_md_tun))
+- return -EEXIST;
+- } else {
+- if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+- return -EEXIST;
+- }
+-
+ if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
+ eth_hw_addr_random(dev);
+
+@@ -1864,51 +1921,94 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+ if (err)
+ goto out;
+
+- ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+-
+ if (tb[IFLA_MTU])
+ ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+
+ dev_hold(dev);
+- ip6gre_tunnel_link(ign, nt);
+
+ out:
+ return err;
+ }
+
+-static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+- struct nlattr *data[],
+- struct netlink_ext_ack *extack)
++static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
++ struct nlattr *tb[], struct nlattr *data[],
++ struct netlink_ext_ack *extack)
++{
++ struct ip6_tnl *nt = netdev_priv(dev);
++ struct net *net = dev_net(dev);
++ struct ip6gre_net *ign;
++ int err;
++
++ ip6gre_netlink_parms(data, &nt->parms);
++ ign = net_generic(net, ip6gre_net_id);
++
++ if (nt->parms.collect_md) {
++ if (rtnl_dereference(ign->collect_md_tun))
++ return -EEXIST;
++ } else {
++ if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
++ return -EEXIST;
++ }
++
++ err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
++ if (!err) {
++ ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
++ ip6gre_tunnel_link_md(ign, nt);
++ ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
++ }
++ return err;
++}
++
++static struct ip6_tnl *
++ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
++ struct nlattr *data[], struct __ip6_tnl_parm *p_p,
++ struct netlink_ext_ack *extack)
+ {
+ struct ip6_tnl *t, *nt = netdev_priv(dev);
+ struct net *net = nt->net;
+ struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+- struct __ip6_tnl_parm p;
+ struct ip_tunnel_encap ipencap;
+
+ if (dev == ign->fb_tunnel_dev)
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+
+ if (ip6gre_netlink_encap_parms(data, &ipencap)) {
+ int err = ip6_tnl_encap_setup(nt, &ipencap);
+
+ if (err < 0)
+- return err;
++ return ERR_PTR(err);
+ }
+
+- ip6gre_netlink_parms(data, &p);
++ ip6gre_netlink_parms(data, p_p);
+
+- t = ip6gre_tunnel_locate(net, &p, 0);
++ t = ip6gre_tunnel_locate(net, p_p, 0);
+
+ if (t) {
+ if (t->dev != dev)
+- return -EEXIST;
++ return ERR_PTR(-EEXIST);
+ } else {
+ t = nt;
+ }
+
++ return t;
++}
++
++static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
++ struct nlattr *data[],
++ struct netlink_ext_ack *extack)
++{
++ struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
++ struct __ip6_tnl_parm p;
++ struct ip6_tnl *t;
++
++ t = ip6gre_changelink_common(dev, tb, data, &p, extack);
++ if (IS_ERR(t))
++ return PTR_ERR(t);
++
++ ip6gre_tunnel_unlink_md(ign, t);
+ ip6gre_tunnel_unlink(ign, t);
+ ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
++ ip6gre_tunnel_link_md(ign, t);
+ ip6gre_tunnel_link(ign, t);
+ return 0;
+ }
+@@ -2058,6 +2158,69 @@ static void ip6erspan_tap_setup(struct net_device *dev)
+ netif_keep_dst(dev);
+ }
+
++static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
++ struct nlattr *tb[], struct nlattr *data[],
++ struct netlink_ext_ack *extack)
++{
++ struct ip6_tnl *nt = netdev_priv(dev);
++ struct net *net = dev_net(dev);
++ struct ip6gre_net *ign;
++ int err;
++
++ ip6gre_netlink_parms(data, &nt->parms);
++ ign = net_generic(net, ip6gre_net_id);
++
++ if (nt->parms.collect_md) {
++ if (rtnl_dereference(ign->collect_md_tun_erspan))
++ return -EEXIST;
++ } else {
++ if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
++ return -EEXIST;
++ }
++
++ err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
++ if (!err) {
++ ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
++ ip6erspan_tunnel_link_md(ign, nt);
++ ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
++ }
++ return err;
++}
++
++static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
++{
++ ip6gre_tnl_link_config_common(t);
++ ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
++}
++
++static int ip6erspan_tnl_change(struct ip6_tnl *t,
++ const struct __ip6_tnl_parm *p, int set_mtu)
++{
++ ip6gre_tnl_copy_tnl_parm(t, p);
++ ip6erspan_tnl_link_config(t, set_mtu);
++ return 0;
++}
++
++static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
++ struct nlattr *data[],
++ struct netlink_ext_ack *extack)
++{
++ struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
++ struct __ip6_tnl_parm p;
++ struct ip6_tnl *t;
++
++ t = ip6gre_changelink_common(dev, tb, data, &p, extack);
++ if (IS_ERR(t))
++ return PTR_ERR(t);
++
++ ip6gre_tunnel_unlink_md(ign, t);
++ ip6gre_tunnel_unlink(ign, t);
++ ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
++ ip6erspan_tunnel_link_md(ign, t);
++ ip6gre_tunnel_link(ign, t);
++ return 0;
++}
++
+ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+ .kind = "ip6gre",
+ .maxtype = IFLA_GRE_MAX,
+@@ -2094,8 +2257,8 @@ static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
+ .priv_size = sizeof(struct ip6_tnl),
+ .setup = ip6erspan_tap_setup,
+ .validate = ip6erspan_tap_validate,
+- .newlink = ip6gre_newlink,
+- .changelink = ip6gre_changelink,
++ .newlink = ip6erspan_newlink,
++ .changelink = ip6erspan_changelink,
+ .get_size = ip6gre_get_size,
+ .fill_info = ip6gre_fill_info,
+ .get_link_net = ip6_tnl_get_link_net,
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 4065ae0c32a0..072333760a52 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1489,7 +1489,8 @@ static int __ip6_append_data(struct sock *sk,
+ if (copy > length)
+ copy = length;
+
+- if (!(rt->dst.dev->features&NETIF_F_SG)) {
++ if (!(rt->dst.dev->features&NETIF_F_SG) &&
++ skb_tailroom(skb) >= copy) {
+ unsigned int off;
+
+ off = skb->len;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 3b43b1fcd618..c6a2dd890de3 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2903,13 +2903,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ if (skb == NULL)
+ goto out_unlock;
+
+- skb_set_network_header(skb, reserve);
++ skb_reset_network_header(skb);
+
+ err = -EINVAL;
+ if (sock->type == SOCK_DGRAM) {
+ offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
+ if (unlikely(offset < 0))
+ goto out_free;
++ } else if (reserve) {
++ skb_push(skb, reserve);
+ }
+
+ /* Returns -EFAULT on error */
+diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
+index c49cb61adedf..64ca017f2e00 100644
+--- a/net/sched/act_vlan.c
++++ b/net/sched/act_vlan.c
+@@ -161,6 +161,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ case htons(ETH_P_8021AD):
+ break;
+ default:
++ if (exists)
++ tcf_idr_release(*a, bind);
+ return -EPROTONOSUPPORT;
+ }
+ } else {
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index 16644b3d2362..56c181c3feeb 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -222,10 +222,11 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
+ extack);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+- }
+
+- if (child != &noop_qdisc)
++ /* child is fifo, no need to check for noop_qdisc */
+ qdisc_hash_add(child, true);
++ }
++
+ sch_tree_lock(sch);
+ q->flags = ctl->flags;
+ q->limit = ctl->limit;
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index 03225a8df973..6f74a426f159 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -383,6 +383,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+ err = PTR_ERR(child);
+ goto done;
+ }
++
++ /* child is fifo, no need to check for noop_qdisc */
++ qdisc_hash_add(child, true);
+ }
+
+ sch_tree_lock(sch);
+@@ -391,8 +394,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+ q->qdisc->qstats.backlog);
+ qdisc_destroy(q->qdisc);
+ q->qdisc = child;
+- if (child != &noop_qdisc)
+- qdisc_hash_add(child, true);
+ }
+ q->limit = qopt->limit;
+ if (tb[TCA_TBF_PBURST])
+diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
+index 74568cdbca70..d7b88b2d1b22 100644
+--- a/net/smc/smc_pnet.c
++++ b/net/smc/smc_pnet.c
+@@ -245,40 +245,45 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
+ static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem,
+ struct nlattr *tb[])
+ {
+- char *string, *ibname = NULL;
+- int rc = 0;
++ char *string, *ibname;
++ int rc;
+
+ memset(pnetelem, 0, sizeof(*pnetelem));
+ INIT_LIST_HEAD(&pnetelem->list);
+- if (tb[SMC_PNETID_NAME]) {
+- string = (char *)nla_data(tb[SMC_PNETID_NAME]);
+- if (!smc_pnetid_valid(string, pnetelem->pnet_name)) {
+- rc = -EINVAL;
+- goto error;
+- }
+- }
+- if (tb[SMC_PNETID_ETHNAME]) {
+- string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
+- pnetelem->ndev = dev_get_by_name(net, string);
+- if (!pnetelem->ndev)
+- return -ENOENT;
+- }
+- if (tb[SMC_PNETID_IBNAME]) {
+- ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
+- ibname = strim(ibname);
+- pnetelem->smcibdev = smc_pnet_find_ib(ibname);
+- if (!pnetelem->smcibdev) {
+- rc = -ENOENT;
+- goto error;
+- }
+- }
+- if (tb[SMC_PNETID_IBPORT]) {
+- pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
+- if (pnetelem->ib_port > SMC_MAX_PORTS) {
+- rc = -EINVAL;
+- goto error;
+- }
+- }
++
++ rc = -EINVAL;
++ if (!tb[SMC_PNETID_NAME])
++ goto error;
++ string = (char *)nla_data(tb[SMC_PNETID_NAME]);
++ if (!smc_pnetid_valid(string, pnetelem->pnet_name))
++ goto error;
++
++ rc = -EINVAL;
++ if (!tb[SMC_PNETID_ETHNAME])
++ goto error;
++ rc = -ENOENT;
++ string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
++ pnetelem->ndev = dev_get_by_name(net, string);
++ if (!pnetelem->ndev)
++ goto error;
++
++ rc = -EINVAL;
++ if (!tb[SMC_PNETID_IBNAME])
++ goto error;
++ rc = -ENOENT;
++ ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
++ ibname = strim(ibname);
++ pnetelem->smcibdev = smc_pnet_find_ib(ibname);
++ if (!pnetelem->smcibdev)
++ goto error;
++
++ rc = -EINVAL;
++ if (!tb[SMC_PNETID_IBPORT])
++ goto error;
++ pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
++ if (pnetelem->ib_port < 1 || pnetelem->ib_port > SMC_MAX_PORTS)
++ goto error;
++
+ return 0;
+
+ error:
+@@ -307,6 +312,8 @@ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info)
+ void *hdr;
+ int rc;
+
++ if (!info->attrs[SMC_PNETID_NAME])
++ return -EINVAL;
+ pnetelem = smc_pnet_find_pnetid(
+ (char *)nla_data(info->attrs[SMC_PNETID_NAME]));
+ if (!pnetelem)
+@@ -359,6 +366,8 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
+
+ static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info)
+ {
++ if (!info->attrs[SMC_PNETID_NAME])
++ return -EINVAL;
+ return smc_pnet_remove_by_pnetid(
+ (char *)nla_data(info->attrs[SMC_PNETID_NAME]));
+ }
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index a6f3cac8c640..c0fd8a85e7f7 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -95,6 +95,9 @@ static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev,
+
+ ASSERT_RTNL();
+
++ if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN)
++ return -EINVAL;
++
+ /* prohibit calling the thing phy%d when %d is not its number */
+ sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
+ if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) {
+diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
+index b0825370d262..957046ac6c8c 100644
+--- a/sound/soc/rockchip/Kconfig
++++ b/sound/soc/rockchip/Kconfig
+@@ -56,6 +56,9 @@ config SND_SOC_RK3288_HDMI_ANALOG
+ depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB && CLKDEV_LOOKUP
+ select SND_SOC_ROCKCHIP_I2S
+ select SND_SOC_HDMI_CODEC
++ select SND_SOC_ES8328_I2C
++ select SND_SOC_ES8328_SPI if SPI_MASTER
++ select DRM_DW_HDMI_I2S_AUDIO if DRM_DW_HDMI
+ help
+ Say Y or M here if you want to add support for SoC audio on Rockchip
+ RK3288 boards using an analog output and the built-in HDMI audio.
+diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
+index 233f1c9a4b6c..aeba0ae890ea 100644
+--- a/sound/soc/samsung/i2s.c
++++ b/sound/soc/samsung/i2s.c
+@@ -656,8 +656,12 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
+ tmp |= mod_slave;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+- /* Set default source clock in Master mode */
+- if (i2s->rclk_srcrate == 0)
++ /*
++ * Set default source clock in Master mode, only when the
++ * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
++ * clock configuration assigned in DT is not overwritten.
++ */
++ if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL)
+ i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
+ 0, SND_SOC_CLOCK_IN);
+ break;
+@@ -881,6 +885,11 @@ static int config_setup(struct i2s_dai *i2s)
+ return 0;
+
+ if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
++ struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
++
++ if (i2s->rclk_srcrate == 0 && rclksrc && !IS_ERR(rclksrc))
++ i2s->rclk_srcrate = clk_get_rate(rclksrc);
++
+ psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
+ writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
+ dev_dbg(&i2s->pdev->dev,
+diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
+index 44b6de5a331a..06a31a9585a0 100644
+--- a/sound/soc/samsung/odroid.c
++++ b/sound/soc/samsung/odroid.c
+@@ -36,23 +36,26 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream,
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+- unsigned int pll_freq, rclk_freq;
++ unsigned int pll_freq, rclk_freq, rfs;
+ int ret;
+
+ switch (params_rate(params)) {
+- case 32000:
+ case 64000:
+- pll_freq = 131072006U;
++ pll_freq = 196608001U;
++ rfs = 384;
+ break;
+ case 44100:
+ case 88200:
+ case 176400:
+ pll_freq = 180633609U;
++ rfs = 512;
+ break;
++ case 32000:
+ case 48000:
+ case 96000:
+ case 192000:
+ pll_freq = 196608001U;
++ rfs = 512;
+ break;
+ default:
+ return -EINVAL;
+@@ -67,7 +70,7 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream,
+ * frequency values due to the EPLL output frequency not being exact
+ * multiple of the audio sampling rate.
+ */
+- rclk_freq = params_rate(params) * 256 + 1;
++ rclk_freq = params_rate(params) * rfs + 1;
+
+ ret = clk_set_rate(priv->sclk_i2s, rclk_freq);
+ if (ret < 0)
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 782c580b7aa3..e5049fbfc4f1 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -1276,6 +1276,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
+ kfree(sm);
+ continue;
+ }
++
++ /* create any TLV data */
++ soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr);
+ }
+ return kc;
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 794224e1d6df..006da37ad0d9 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1149,24 +1149,27 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ return false;
+ }
+
+-/* Marantz/Denon USB DACs need a vendor cmd to switch
++/* ITF-USB DSD based DACs need a vendor cmd to switch
+ * between PCM and native DSD mode
++ * (2 altsets version)
+ */
+-static bool is_marantz_denon_dac(unsigned int id)
++static bool is_itf_usb_dsd_2alts_dac(unsigned int id)
+ {
+ switch (id) {
+ case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
+ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
+ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
++ case USB_ID(0x1852, 0x5065): /* Luxman DA-06 */
+ return true;
+ }
+ return false;
+ }
+
+-/* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
+- * between PCM/DOP and native DSD mode
++/* ITF-USB DSD based DACs need a vendor cmd to switch
++ * between PCM and native DSD mode
++ * (3 altsets version)
+ */
+-static bool is_teac_dsd_dac(unsigned int id)
++static bool is_itf_usb_dsd_3alts_dac(unsigned int id)
+ {
+ switch (id) {
+ case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+@@ -1183,7 +1186,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
+ struct usb_device *dev = subs->dev;
+ int err;
+
+- if (is_marantz_denon_dac(subs->stream->chip->usb_id)) {
++ if (is_itf_usb_dsd_2alts_dac(subs->stream->chip->usb_id)) {
+ /* First switch to alt set 0, otherwise the mode switch cmd
+ * will not be accepted by the DAC
+ */
+@@ -1204,7 +1207,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
+ break;
+ }
+ mdelay(20);
+- } else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
++ } else if (is_itf_usb_dsd_3alts_dac(subs->stream->chip->usb_id)) {
+ /* Vendor mode switch cmd is required. */
+ switch (fmt->altsetting) {
+ case 3: /* DSD mode (DSD_U32) requested */
+@@ -1300,10 +1303,10 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(20);
+
+- /* Marantz/Denon devices with USB DAC functionality need a delay
++ /* ITF-USB DSD based DACs need a delay
+ * after each class compliant request
+ */
+- if (is_marantz_denon_dac(chip->usb_id)
++ if (is_itf_usb_dsd_2alts_dac(chip->usb_id)
+ && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(20);
+
+@@ -1390,14 +1393,14 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ break;
+ }
+
+- /* Denon/Marantz devices with USB DAC functionality */
+- if (is_marantz_denon_dac(chip->usb_id)) {
++ /* ITF-USB DSD based DACs (2 altsets version) */
++ if (is_itf_usb_dsd_2alts_dac(chip->usb_id)) {
+ if (fp->altsetting == 2)
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ }
+
+- /* TEAC devices with USB DAC functionality */
+- if (is_teac_dsd_dac(chip->usb_id)) {
++ /* ITF-USB DSD based DACs (3 altsets version) */
++ if (is_itf_usb_dsd_3alts_dac(chip->usb_id)) {
+ if (fp->altsetting == 3)
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ }
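The quirk rename in this last file is more than cosmetic: the vendor command and altsetting layout belong to the shared ITF-USB DSD interface design, so the helpers are now named after the 2-altsets and 3-altsets variants instead of specific brands, which is what lets the Luxman DA-06 join the former Denon/Marantz list. A stand-alone sketch of the same vendor:product matching pattern (the USB_ID() macro below is a local re-definition for illustration, the kernel's own version lives in the USB-audio headers, and the ID list is copied from the 2-altsets helper above):

#include <stdbool.h>
#include <stdio.h>

/* Local stand-in for the kernel's USB_ID(): pack vendor and product IDs
 * into a single 32-bit key so a switch statement can match devices. */
#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))

/* Same membership-test shape as is_itf_usb_dsd_2alts_dac() in the hunk. */
static bool is_itf_usb_dsd_2alts_dac(unsigned int id)
{
        switch (id) {
        case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
        case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
        case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
        case USB_ID(0x1852, 0x5065): /* Luxman DA-06 */
                return true;
        }
        return false;
}

int main(void)
{
        /* The newly added Luxman DA-06 matches; an arbitrary ID does not. */
        printf("1852:5065 -> %d\n", is_itf_usb_dsd_2alts_dac(USB_ID(0x1852, 0x5065)));
        printf("1234:5678 -> %d\n", is_itf_usb_dsd_2alts_dac(USB_ID(0x1234, 0x5678)));
        return 0;
}

Devices that match the 2-altsets list get both the PCM/DSD vendor mode switch and the extra 20 ms delay after class-compliant requests; the 3-altsets helper covers the TEAC UD-501/UD-503/NT-503 family, whose DSD mode sits on altsetting 3 instead of 2.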